/* parallel_tasks_reuse_handle.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2015 Inria
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <starpu.h>
  18. #ifdef STARPU_QUICK_CHECK
  19. #define NTASKS 64
  20. #else
  21. #define NTASKS 100
  22. #endif
  23. #define SIZE 400
/* Describes one scheduling context: the CPU workers it owns and the
 * StarPU context identifier returned by starpu_sched_ctx_create(). */
struct context
{
	int ncpus;     /* number of CPU worker ids in cpus[] */
	int *cpus;     /* CPU worker ids; may alias another context's array */
	unsigned id;   /* StarPU scheduling context id */
};
  30. /* Helper for the task that will initiate everything */
  31. void parallel_task_prologue_init_once_and_for_all(void * sched_ctx_)
  32. {
  33. int sched_ctx = *(int *)sched_ctx_;
  34. int i;
  35. int *cpuids = NULL;
  36. int ncpuids = 0;
  37. starpu_sched_ctx_get_available_cpuids(sched_ctx, &cpuids, &ncpuids);
  38. printf("Context %d with %d threads \n", sched_ctx, ncpuids);
  39. #pragma omp parallel num_threads(ncpuids)
  40. {
  41. starpu_sched_ctx_bind_current_thread_to_cpuid(cpuids[omp_get_thread_num()]);
  42. }
  43. omp_set_num_threads(ncpuids);
  44. free(cpuids);
  45. return;
  46. }
  47. void noop(void * buffers[], void * cl_arg)
  48. {
  49. }
/* Codelet used to initialize each nested context's OpenMP threads:
 * the kernel is empty, the work is done by the task's pop-prologue. */
static struct starpu_codelet init_parallel_worker_cl=
{
	.where = STARPU_CPU,        /* CPU-only */
	.cpu_funcs = {noop, NULL},  /* empty kernel */
	.nbuffers = 0,              /* no data handles */
	.name = "init_parallel_worker"
};
  57. /* function called to initialize the parallel "workers" */
  58. void parallel_task_init_one_context(unsigned * context_id)
  59. {
  60. struct starpu_task * t;
  61. t = starpu_task_build(&init_parallel_worker_cl,
  62. STARPU_SCHED_CTX, *context_id,
  63. 0);
  64. t->prologue_callback_pop_func=parallel_task_prologue_init_once_and_for_all;
  65. t->prologue_callback_pop_arg=context_id;
  66. t->prologue_callback_pop_arg_free=0;
  67. int ret=starpu_task_submit(t);
  68. }
/* Context spanning every CPU worker; the sum tasks run in it. */
struct context main_context;
/* Two nested contexts, each owning half of main_context's CPUs. */
struct context *contexts;
  71. void parallel_task_init()
  72. {
  73. /* Context creation */
  74. main_context.ncpus = starpu_cpu_worker_get_count();
  75. main_context.cpus = (int *) malloc(main_context.ncpus*sizeof(int));
  76. printf("ncpus : %d \n",main_context.ncpus);
  77. starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, main_context.cpus, main_context.ncpus);
  78. main_context.id = starpu_sched_ctx_create(main_context.cpus,
  79. main_context.ncpus,"main_ctx",
  80. STARPU_SCHED_CTX_POLICY_NAME,"prio",
  81. 0);
  82. /* Initialize nested contexts */
  83. /* WARNING : the number of contexts must be a divisor of the number of available cpus*/
  84. contexts = malloc(sizeof(struct context)*2);
  85. int cpus_per_context = main_context.ncpus/2;
  86. int i;
  87. for(i = 0; i < 2; i++)
  88. {
  89. contexts[i].ncpus = cpus_per_context;
  90. contexts[i].cpus = main_context.cpus+i*cpus_per_context;
  91. }
  92. for(i = 0; i < 2; i++)
  93. contexts[i].id = starpu_sched_ctx_create(contexts[i].cpus,
  94. contexts[i].ncpus,"nested_ctx",
  95. STARPU_SCHED_CTX_NESTED,main_context.id,
  96. 0);
  97. for (i = 0; i < 2; i++)
  98. {
  99. parallel_task_init_one_context(&contexts[i].id);
  100. }
  101. starpu_task_wait_for_all();
  102. starpu_sched_ctx_set_context(&main_context.id);
  103. }
  104. void parallel_task_deinit()
  105. {
  106. int i;
  107. for (i=0; i<2;i++)
  108. starpu_sched_ctx_delete(contexts[i].id);
  109. }
  110. /* Codelet SUM */
  111. static void sum_cpu(void * descr[], void *cl_arg)
  112. {
  113. double * v_dst = (double *) STARPU_VECTOR_GET_PTR(descr[0]);
  114. double * v_src0 = (double *) STARPU_VECTOR_GET_PTR(descr[1]);
  115. double * v_src1 = (double *) STARPU_VECTOR_GET_PTR(descr[1]);
  116. int size;
  117. starpu_codelet_unpack_args(cl_arg, &size);
  118. int i, k;
  119. for (k=0;k<10;k++)
  120. {
  121. #pragma omp parallel for
  122. for (i=0; i<size; i++)
  123. {
  124. v_dst[i]+=v_src0[i]+v_src1[i];
  125. }
  126. }
  127. }
/* Codelet for sum_cpu: one read-write destination and two read-only
 * sources (the test deliberately passes the same handle for buffers 0 and 2). */
static struct starpu_codelet sum_cl =
{
	.cpu_funcs = {sum_cpu, NULL},
	.nbuffers = 3,
	.modes={STARPU_RW,STARPU_R, STARPU_R}
};
  134. int main(int argc, char **argv)
  135. {
  136. int ntasks = NTASKS;
  137. int ret, j, k;
  138. unsigned ncpus = 0;
  139. ret = starpu_init(NULL);
  140. if (ret == -ENODEV)
  141. return 77;
  142. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  143. parallel_task_init();
  144. /* Data preparation */
  145. double array1[SIZE];
  146. double array2[SIZE];
  147. memset(array1, 0, sizeof(double));
  148. int i;
  149. for (i=0;i<SIZE;i++)
  150. {
  151. array2[i]=i*2;
  152. }
  153. starpu_data_handle_t handle1;
  154. starpu_data_handle_t handle2;
  155. starpu_vector_data_register(&handle1, 0, (uintptr_t)array1, SIZE, sizeof(double));
  156. starpu_vector_data_register(&handle2, 0, (uintptr_t)array2, SIZE, sizeof(double));
  157. int size;
  158. size=SIZE;
  159. for (i = 0; i < ntasks; i++)
  160. {
  161. struct starpu_task * t;
  162. t=starpu_task_build(&sum_cl,
  163. STARPU_RW,handle1,
  164. STARPU_R,handle2,
  165. STARPU_R,handle1,
  166. STARPU_VALUE,&size,sizeof(int),
  167. STARPU_SCHED_CTX, main_context.id,
  168. 0);
  169. ret=starpu_task_submit(t);
  170. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  171. }
  172. /* wait for all tasks at the end*/
  173. starpu_task_wait_for_all();
  174. starpu_data_unregister(handle1);
  175. starpu_data_unregister(handle2);
  176. parallel_task_deinit();
  177. starpu_shutdown();
  178. return 0;
  179. }