parallel_tasks_reuse_handle.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2015 INRIA
 * Copyright (C) 2015, 2016 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <starpu.h>
#include <omp.h>

#ifdef STARPU_QUICK_CHECK
#define NTASKS 64
#define SIZE   40
#define LOOPS  4
#else
#define NTASKS 100
#define SIZE   400
#define LOOPS  10
#endif
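
/*
 * This example creates a main scheduling context holding all CPU workers,
 * splits it into two nested contexts, binds an OpenMP thread team to the
 * CPUs of each nested context, and then repeatedly submits "possibly
 * parallel" sum tasks that all reuse the same data handles.
 */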
struct context
{
	int ncpus;
	int *cpus;
	unsigned id;
};

/* Helper for the task that will initiate everything */
void parallel_task_prologue_init_once_and_for_all(void *sched_ctx_)
{
	int sched_ctx = *(int *)sched_ctx_;
	int *cpuids = NULL;
	int ncpuids = 0;
	starpu_sched_ctx_get_available_cpuids(sched_ctx, &cpuids, &ncpuids);

	/* Pin each OpenMP thread to one of the CPUs of this context */
#pragma omp parallel num_threads(ncpuids)
	{
		starpu_sched_ctx_bind_current_thread_to_cpuid(cpuids[omp_get_thread_num()]);
	}

	omp_set_num_threads(ncpuids);
	free(cpuids);
}
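
/* The init task body does nothing: the useful work (thread binding) is
 * done by the pop prologue attached to the task below. */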
void noop(void *buffers[], void *cl_arg)
{
}

static struct starpu_codelet init_parallel_worker_cl =
{
	.cpu_funcs = {noop},
	.nbuffers = 0,
	.name = "init_parallel_worker"
};
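
/* A prologue_callback_pop_func runs on the worker that pops the task,
 * i.e. inside the target context, which is what allows it to bind the
 * OpenMP threads to that context's CPUs. */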
/* Function called to initialize the parallel "workers" */
void parallel_task_init_one_context(unsigned *context_id)
{
	struct starpu_task *t;
	t = starpu_task_build(&init_parallel_worker_cl,
			      STARPU_SCHED_CTX, *context_id,
			      0);
	t->destroy = 1;
	t->prologue_callback_pop_func = parallel_task_prologue_init_once_and_for_all;
	if (t->prologue_callback_pop_arg_free)
		free(t->prologue_callback_pop_arg);
	t->prologue_callback_pop_arg = context_id;
	t->prologue_callback_pop_arg_free = 0;

	int ret = starpu_task_submit(t);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
}

struct context main_context;
struct context *contexts;

void parallel_task_init()
{
	/* Context creation */
	main_context.ncpus = starpu_cpu_worker_get_count();
	main_context.cpus = (int *) malloc(main_context.ncpus * sizeof(int));
	fprintf(stderr, "ncpus : %d \n", main_context.ncpus);

	starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, main_context.cpus, main_context.ncpus);

	main_context.id = starpu_sched_ctx_create(main_context.cpus,
						  main_context.ncpus, "main_ctx",
						  STARPU_SCHED_CTX_POLICY_NAME, "prio",
						  0);

	/* Initialize nested contexts */
	/* WARNING: the number of contexts must be a divisor of the number of available CPUs */
	contexts = malloc(sizeof(struct context) * 2);
	int cpus_per_context = main_context.ncpus / 2;
	int i;
	for (i = 0; i < 2; i++)
	{
		fprintf(stderr, "ncpus %d for context %d \n", cpus_per_context, i);
		contexts[i].ncpus = cpus_per_context;
		contexts[i].cpus = main_context.cpus + i * cpus_per_context;
	}

	for (i = 0; i < 2; i++)
		contexts[i].id = starpu_sched_ctx_create(contexts[i].cpus,
							 contexts[i].ncpus, "nested_ctx",
							 STARPU_SCHED_CTX_NESTED, main_context.id,
							 0);

	for (i = 0; i < 2; i++)
	{
		parallel_task_init_one_context(&contexts[i].id);
	}

	starpu_task_wait_for_all();
	starpu_sched_ctx_set_context(&main_context.id);
}

void parallel_task_deinit()
{
	int i;
	for (i = 0; i < 2; i++)
		starpu_sched_ctx_delete(contexts[i].id);
	free(contexts);
	free(main_context.cpus);
}

/* Codelet SUM */
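/* Each sum task runs an OpenMP parallel for; its threads were bound to
 * the CPUs of the enclosing scheduling context by the init tasks above. */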
static void sum_cpu(void *descr[], void *cl_arg)
{
	double *v_dst  = (double *) STARPU_VECTOR_GET_PTR(descr[0]);
	double *v_src0 = (double *) STARPU_VECTOR_GET_PTR(descr[1]);
	double *v_src1 = (double *) STARPU_VECTOR_GET_PTR(descr[2]);
	int size = STARPU_VECTOR_GET_NX(descr[0]);

	int i, k;
	for (k = 0; k < LOOPS; k++)
	{
#pragma omp parallel for
		for (i = 0; i < size; i++)
		{
			v_dst[i] += v_src0[i] + v_src1[i];
		}
	}
}

static struct starpu_codelet sum_cl =
{
	.cpu_funcs = {sum_cpu},
	.nbuffers = 3,
	.modes = {STARPU_RW, STARPU_R, STARPU_R}
};
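
/* main: initialize StarPU and the nested contexts, register two vectors,
 * then submit NTASKS possibly-parallel sum tasks reusing the same handles. */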
int main(int argc, char **argv)
{
	int ntasks = NTASKS;
	int ret;

	ret = starpu_init(NULL);
	if (ret == -ENODEV)
		return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

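	/* The CPU workers are split between two nested contexts, so at
	 * least two CPU workers are needed; otherwise skip the test
	 * (exit code 77 conventionally marks a skipped test). */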
	if (starpu_cpu_worker_get_count() < 2)
	{
		starpu_shutdown();
		return 77;
	}

	parallel_task_init();

	/* Data preparation */
	double array1[SIZE];
	double array2[SIZE];

	/* Zero the whole array, not just its first element */
	memset(array1, 0, sizeof(array1));

	int i;
	for (i = 0; i < SIZE; i++)
	{
		array2[i] = i * 2;
	}

	starpu_data_handle_t handle1;
	starpu_data_handle_t handle2;

	starpu_vector_data_register(&handle1, 0, (uintptr_t)array1, SIZE, sizeof(double));
	starpu_vector_data_register(&handle2, 0, (uintptr_t)array2, SIZE, sizeof(double));
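
	/* Each task takes handle1 both as its STARPU_RW destination and as
	 * a STARPU_R source, illustrating how the same handle can be reused
	 * several times within a single task (and across all NTASKS tasks). */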
	for (i = 0; i < ntasks; i++)
	{
		struct starpu_task *t;
		t = starpu_task_build(&sum_cl,
				      STARPU_RW, handle1,
				      STARPU_R, handle2,
				      STARPU_R, handle1,
				      STARPU_SCHED_CTX, main_context.id,
				      0);
		t->destroy = 1;
		t->possibly_parallel = 1;

		ret = starpu_task_submit(t);
		if (ret == -ENODEV)
			goto out;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

out:
	/* Wait for all tasks at the end */
	starpu_task_wait_for_all();
	starpu_data_unregister(handle1);
	starpu_data_unregister(handle2);
	parallel_task_deinit();
	starpu_shutdown();
	return 0;
}