/* parallel_tasks_reuse_handle.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) Inria
 * Copyright (C) CNRS
 * Copyright (C) 2015-2016 Université de Bordeaux
 * Copyright (C) 2015,2017 Inria
 * Copyright (C) 2015-2017 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <omp.h>
#include <pthread.h>

#include <starpu.h>
  23. #ifdef STARPU_QUICK_CHECK
  24. #define NTASKS 64
  25. #define SIZE 40
  26. #define LOOPS 4
  27. #else
  28. #define NTASKS 100
  29. #define SIZE 400
  30. #define LOOPS 10
  31. #endif
  32. #define N_NESTED_CTXS 2
/* Describes one scheduling context: the CPU workers it owns and the
 * StarPU context identifier returned at creation time. */
struct context
{
	int ncpus;    /* number of valid entries in 'cpus' */
	int *cpus;    /* CPU worker ids assigned to this context */
	unsigned id;  /* StarPU scheduling context id */
};
  39. /* Helper for the task that will initiate everything */
  40. void parallel_task_prologue_init_once_and_for_all(void * sched_ctx_)
  41. {
  42. fprintf(stderr, "%p: %s -->\n", (void*)pthread_self(), __func__);
  43. int sched_ctx = *(int *)sched_ctx_;
  44. int *cpuids = NULL;
  45. int ncpuids = 0;
  46. starpu_sched_ctx_get_available_cpuids(sched_ctx, &cpuids, &ncpuids);
  47. #pragma omp parallel num_threads(ncpuids)
  48. {
  49. starpu_sched_ctx_bind_current_thread_to_cpuid(cpuids[omp_get_thread_num()]);
  50. }
  51. omp_set_num_threads(ncpuids);
  52. free(cpuids);
  53. fprintf(stderr, "%p: %s <--\n", (void*)pthread_self(), __func__);
  54. return;
  55. }
  56. void noop(void * buffers[], void * cl_arg)
  57. {
  58. (void)buffers;
  59. (void)cl_arg;
  60. }
/* Codelet whose only purpose is to trigger the prologue callback that
 * initializes the OpenMP threads of a scheduling context. */
static struct starpu_codelet init_parallel_worker_cl=
{
	.cpu_funcs = {noop},
	.nbuffers = 0,
	.name = "init_parallel_worker"
};
  67. /* function called to initialize the parallel "workers" */
  68. void parallel_task_init_one_context(unsigned * context_id)
  69. {
  70. struct starpu_task * t;
  71. int ret;
  72. t = starpu_task_build(&init_parallel_worker_cl,
  73. STARPU_SCHED_CTX, *context_id,
  74. 0);
  75. t->destroy = 1;
  76. t->prologue_callback_pop_func=parallel_task_prologue_init_once_and_for_all;
  77. if (t->prologue_callback_pop_arg_free)
  78. free(t->prologue_callback_pop_arg);
  79. t->prologue_callback_pop_arg=context_id;
  80. t->prologue_callback_pop_arg_free=0;
  81. ret = starpu_task_submit(t);
  82. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  83. }
/* Context covering every CPU worker. */
struct context main_context;
/* Array of N_NESTED_CTXS sub-contexts carved out of main_context. */
struct context *contexts;
  86. void parallel_task_init()
  87. {
  88. /* Context creation */
  89. main_context.ncpus = starpu_cpu_worker_get_count();
  90. main_context.cpus = (int *) malloc(main_context.ncpus*sizeof(int));
  91. fprintf(stderr, "ncpus : %d \n",main_context.ncpus);
  92. starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, main_context.cpus, main_context.ncpus);
  93. main_context.id = starpu_sched_ctx_create(main_context.cpus,
  94. main_context.ncpus,"main_ctx",
  95. STARPU_SCHED_CTX_POLICY_NAME,"prio",
  96. 0);
  97. /* Initialize nested contexts */
  98. contexts = malloc(sizeof(struct context)*N_NESTED_CTXS);
  99. int cpus_per_context = main_context.ncpus/N_NESTED_CTXS;
  100. int i;
  101. for(i = 0; i < N_NESTED_CTXS; i++)
  102. {
  103. contexts[i].ncpus = cpus_per_context;
  104. if (i == N_NESTED_CTXS-1)
  105. contexts[i].ncpus += main_context.ncpus%N_NESTED_CTXS;
  106. contexts[i].cpus = main_context.cpus+i*cpus_per_context;
  107. }
  108. for(i = 0; i < N_NESTED_CTXS; i++)
  109. contexts[i].id = starpu_sched_ctx_create(contexts[i].cpus,
  110. contexts[i].ncpus,"nested_ctx",
  111. STARPU_SCHED_CTX_NESTED,main_context.id,
  112. 0);
  113. for (i = 0; i < N_NESTED_CTXS; i++)
  114. {
  115. parallel_task_init_one_context(&contexts[i].id);
  116. }
  117. starpu_task_wait_for_all();
  118. starpu_sched_ctx_set_context(&main_context.id);
  119. }
  120. void parallel_task_deinit()
  121. {
  122. int i;
  123. for (i=0; i<N_NESTED_CTXS;i++)
  124. starpu_sched_ctx_delete(contexts[i].id);
  125. free(contexts);
  126. free(main_context.cpus);
  127. }
  128. /* Codelet SUM */
  129. static void sum_cpu(void * descr[], void *cl_arg)
  130. {
  131. (void)cl_arg;
  132. double *v_dst = (double *) STARPU_VECTOR_GET_PTR(descr[0]);
  133. double *v_src0 = (double *) STARPU_VECTOR_GET_PTR(descr[1]);
  134. double *v_src1 = (double *) STARPU_VECTOR_GET_PTR(descr[2]);
  135. int size = STARPU_VECTOR_GET_NX(descr[0]);
  136. int i, k;
  137. for (k=0;k<LOOPS;k++)
  138. {
  139. #pragma omp parallel for
  140. for (i=0; i<size; i++)
  141. {
  142. v_dst[i]+=v_src0[i]+v_src1[i];
  143. }
  144. }
  145. }
/* Sum codelet: buffer 0 (RW) accumulates buffers 1 and 2 (R). */
static struct starpu_codelet sum_cl =
{
	.cpu_funcs = {sum_cpu, NULL},
	.nbuffers = 3,
	.modes={STARPU_RW,STARPU_R, STARPU_R}
};
  152. int main(void)
  153. {
  154. int ntasks = NTASKS;
  155. int ret, j, k;
  156. unsigned ncpus = 0;
  157. ret = starpu_init(NULL);
  158. if (ret == -ENODEV)
  159. return 77;
  160. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  161. if (starpu_cpu_worker_get_count() < N_NESTED_CTXS)
  162. {
  163. starpu_shutdown();
  164. return 77;
  165. }
  166. parallel_task_init();
  167. /* Data preparation */
  168. double array1[SIZE];
  169. double array2[SIZE];
  170. memset(array1, 0, sizeof(double));
  171. int i;
  172. for (i=0;i<SIZE;i++)
  173. {
  174. array2[i]=i*2;
  175. }
  176. starpu_data_handle_t handle1;
  177. starpu_data_handle_t handle2;
  178. starpu_vector_data_register(&handle1, 0, (uintptr_t)array1, SIZE, sizeof(double));
  179. starpu_vector_data_register(&handle2, 0, (uintptr_t)array2, SIZE, sizeof(double));
  180. for (i = 0; i < ntasks; i++)
  181. {
  182. struct starpu_task * t;
  183. t=starpu_task_build(&sum_cl,
  184. STARPU_RW,handle1,
  185. STARPU_R,handle2,
  186. STARPU_R,handle1,
  187. STARPU_SCHED_CTX, main_context.id,
  188. 0);
  189. t->destroy = 1;
  190. t->possibly_parallel = 1;
  191. ret=starpu_task_submit(t);
  192. if (ret == -ENODEV)
  193. goto out;
  194. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  195. }
  196. out:
  197. /* wait for all tasks at the end*/
  198. starpu_task_wait_for_all();
  199. starpu_data_unregister(handle1);
  200. starpu_data_unregister(handle2);
  201. parallel_task_deinit();
  202. starpu_shutdown();
  203. return 0;
  204. }