parallel_tasks_reuse_handle.c

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <starpu.h>
#include <omp.h>

#ifdef STARPU_QUICK_CHECK
#define NTASKS 64
#else
#define NTASKS 100
#endif
#define SIZE 400
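
/* A scheduling context as used in this example: the CPU workers it owns
 * and the context id returned by starpu_sched_ctx_create(). */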
struct context
{
    int ncpus;
    int *cpus;
    unsigned id;
};

/* Helper for the task that will initiate everything */
void parallel_task_prologue_init_once_and_for_all(void *sched_ctx_)
{
    int sched_ctx = *(int *)sched_ctx_;
    int *cpuids = NULL;
    int ncpuids = 0;
    starpu_sched_ctx_get_available_cpuids(sched_ctx, &cpuids, &ncpuids);
    printf("Context %d with %d threads\n", sched_ctx, ncpuids);
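
    /* Spawn one OpenMP thread per CPU of this context and pin each thread
     * to its CPU id, so later omp parallel regions run on these CPUs. */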
#pragma omp parallel num_threads(ncpuids)
    {
        starpu_sched_ctx_bind_current_thread_to_cpuid(cpuids[omp_get_thread_num()]);
    }
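
    /* Have subsequent omp parallel regions on this worker use the whole team
     * by default. */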
    omp_set_num_threads(ncpuids);
    free(cpuids);
}

void noop(void *buffers[], void *cl_arg)
{
    (void)buffers;
    (void)cl_arg;
}

static struct starpu_codelet init_parallel_worker_cl =
{
    .where = STARPU_CPU,
    .cpu_funcs = {noop},
    .nbuffers = 0,
    .name = "init_parallel_worker"
};

/* function called to initialize the parallel "workers" */
void parallel_task_init_one_context(unsigned *context_id)
{
    struct starpu_task *t;
    t = starpu_task_build(&init_parallel_worker_cl,
                          STARPU_SCHED_CTX, *context_id,
                          0);
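
    /* Run the OpenMP setup in the pop prologue: it executes on the worker
     * that picks the task up inside the target context. */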
    t->prologue_callback_pop_func = parallel_task_prologue_init_once_and_for_all;
    t->prologue_callback_pop_arg = context_id;
    t->prologue_callback_pop_arg_free = 0;
    int ret = starpu_task_submit(t);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
}

struct context main_context;
struct context *contexts;

void parallel_task_init()
{
    /* Context creation */
    main_context.ncpus = starpu_cpu_worker_get_count();
    main_context.cpus = (int *)malloc(main_context.ncpus * sizeof(int));
    printf("ncpus: %d\n", main_context.ncpus);
    starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, main_context.cpus, main_context.ncpus);
    main_context.id = starpu_sched_ctx_create(main_context.cpus,
                                              main_context.ncpus, "main_ctx",
                                              STARPU_SCHED_CTX_POLICY_NAME, "eager",
                                              0);

    /* Initialize nested contexts */
    /* WARNING: the number of contexts must be a divisor of the number of available cpus */
    contexts = malloc(sizeof(struct context) * 2);
    int cpus_per_context = main_context.ncpus / 2;
    int i;
    for (i = 0; i < 2; i++)
    {
        contexts[i].ncpus = cpus_per_context;
        contexts[i].cpus = main_context.cpus + i * cpus_per_context;
    }

    for (i = 0; i < 2; i++)
        contexts[i].id = starpu_sched_ctx_create(contexts[i].cpus,
                                                 contexts[i].ncpus, "nested_ctx",
                                                 STARPU_SCHED_CTX_NESTED, main_context.id,
                                                 0);
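
    /* Submit one no-op task per nested context; its pop prologue creates
     * and binds the OpenMP team for that context. */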
    for (i = 0; i < 2; i++)
    {
        parallel_task_init_one_context(&contexts[i].id);
    }
    starpu_task_wait_for_all();
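
    /* Make main_context the default context for tasks submitted from now on. */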
    starpu_sched_ctx_set_context(&main_context.id);
}

void parallel_task_deinit()
{
    int i;
    for (i = 0; i < 2; i++)
        starpu_sched_ctx_delete(contexts[i].id);
    free(contexts);
    free(main_context.cpus);
}

/* Codelet SUM */
static void sum_cpu(void *descr[], void *cl_arg)
{
    double *v_dst  = (double *)STARPU_VECTOR_GET_PTR(descr[0]);
    double *v_src0 = (double *)STARPU_VECTOR_GET_PTR(descr[1]);
    double *v_src1 = (double *)STARPU_VECTOR_GET_PTR(descr[2]);
    int size;
    starpu_codelet_unpack_args(cl_arg, &size);
    int i, k;
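    /* Repeat the sum ten times; each pass is spread over the OpenMP team
     * that the worker's prologue created and bound to this context's CPUs. */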
    for (k = 0; k < 10; k++)
    {
#pragma omp parallel for
        for (i = 0; i < size; i++)
        {
            v_dst[i] += v_src0[i] + v_src1[i];
        }
    }
}

static struct starpu_codelet sum_cl =
{
    .cpu_funcs = {sum_cpu},
    .nbuffers = 3,
    .modes = {STARPU_RW, STARPU_R, STARPU_R}
};

int main(int argc, char **argv)
{
    int ntasks = NTASKS;
    int ret;
    ret = starpu_init(NULL);
    if (ret == -ENODEV)
        return 77;
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
    parallel_task_init();

    /* Data preparation */
    double array1[SIZE];
    double array2[SIZE];
    memset(array1, 0, sizeof(array1));
    int i;
    for (i = 0; i < SIZE; i++)
    {
        array2[i] = i * 2;
    }

    starpu_data_handle_t handle1;
    starpu_data_handle_t handle2;
    starpu_vector_data_register(&handle1, STARPU_MAIN_RAM, (uintptr_t)array1, SIZE, sizeof(double));
    starpu_vector_data_register(&handle2, STARPU_MAIN_RAM, (uintptr_t)array2, SIZE, sizeof(double));

    int size = SIZE;
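
    /* Note that handle1 is reused within a single task: it is accessed both
     * read-write as the destination and read-only as a source. */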
    for (i = 0; i < ntasks; i++)
    {
        struct starpu_task *t;
        t = starpu_task_build(&sum_cl,
                              STARPU_RW, handle1,
                              STARPU_R, handle2,
                              STARPU_R, handle1,
                              STARPU_VALUE, &size, sizeof(int),
                              STARPU_SCHED_CTX, main_context.id,
                              0);
        ret = starpu_task_submit(t);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
    }

    /* wait for all tasks at the end */
    starpu_task_wait_for_all();
    starpu_data_unregister(handle1);
    starpu_data_unregister(handle2);
    parallel_task_deinit();
    starpu_shutdown();
    return 0;
}