reduction.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010  Université de Bordeaux 1
 * Copyright (C) 2011  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <common/utils.h>
#include <core/task.h>
#include <datawizard/datawizard.h>

void starpu_data_set_reduction_methods(starpu_data_handle_t handle,
					struct starpu_codelet *redux_cl,
					struct starpu_codelet *init_cl)
{
	_starpu_spin_lock(&handle->header_lock);

	unsigned child;
	for (child = 0; child < handle->nchildren; child++)
	{
		/* make sure that the flags are applied to the children as well */
		struct _starpu_data_state *child_handle = &handle->children[child];
		if (child_handle->nchildren > 0)
			starpu_data_set_reduction_methods(child_handle, redux_cl, init_cl);
	}

	handle->redux_cl = redux_cl;
	handle->init_cl = init_cl;

	_starpu_spin_unlock(&handle->header_lock);
}
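
/* Usage sketch for the entry point above (illustrative only: the codelet and
 * function names are hypothetical, and "handle" stands for a previously
 * registered variable handle). An application provides an "init" codelet that
 * sets a replicate to the neutral element and a "redux" codelet that combines
 * two replicates (buffer 0 is the accumulator, buffer 1 the contribution),
 * then accesses the handle in STARPU_REDUX mode:
 *
 *	static void redux_cpu_func(void *descr[], void *cl_arg)
 *	{
 *		int *dst = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
 *		int *src = (int *)STARPU_VARIABLE_GET_PTR(descr[1]);
 *		*dst += *src;
 *	}
 *
 *	static void init_cpu_func(void *descr[], void *cl_arg)
 *	{
 *		int *value = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
 *		*value = 0;
 *	}
 *
 *	static struct starpu_codelet redux_cl = { .where = STARPU_CPU, .cpu_func = redux_cpu_func, .nbuffers = 2 };
 *	static struct starpu_codelet init_cl = { .where = STARPU_CPU, .cpu_func = init_cpu_func, .nbuffers = 1 };
 *
 *	starpu_data_set_reduction_methods(handle, &redux_cl, &init_cl);
 *
 * Tasks that then access the handle in STARPU_REDUX mode get a per-worker
 * replicate initialized with init_cl; the replicates are merged back with
 * redux_cl by starpu_data_end_reduction_mode() below. */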

void _starpu_redux_init_data_replicate(starpu_data_handle_t handle, struct starpu_data_replicate_s *replicate, int workerid)
{
	STARPU_ASSERT(replicate);
	STARPU_ASSERT(replicate->allocated);

	struct starpu_codelet *init_cl = handle->init_cl;
	STARPU_ASSERT(init_cl);

	cl_func init_func = NULL;

	/* TODO Check that worker may execute the codelet */
	switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
			init_func = init_cl->cpu_func;
			break;
		case STARPU_CUDA_WORKER:
			init_func = init_cl->cuda_func;
			break;
		case STARPU_OPENCL_WORKER:
			init_func = init_cl->opencl_func;
			break;
		default:
			STARPU_ABORT();
			break;
	}

	STARPU_ASSERT(init_func);

	init_func(&replicate->data_interface, NULL);

	replicate->initialized = 1;
}

/* Enable reduction mode. This function must be called with the header lock
 * taken. */
void starpu_data_start_reduction_mode(starpu_data_handle_t handle)
{
	STARPU_ASSERT(handle->reduction_refcnt == 0);

	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	for (worker = 0; worker < nworkers; worker++)
	{
		struct starpu_data_replicate_s *replicate;
		replicate = &handle->per_worker[worker];
		replicate->initialized = 0;
	}
}
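
/* Note: clearing the "initialized" flag above is what makes
 * _starpu_redux_init_data_replicate() (and thus init_cl) run again on a
 * worker's replicate before it is reused for a new reduction. */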

//#define NO_TREE_REDUCTION

/* Force reduction. The lock should already have been taken. */
void starpu_data_end_reduction_mode(starpu_data_handle_t handle)
{
	unsigned worker;

	/* Put every valid replicate in the same array */
	unsigned replicate_count = 0;
	starpu_data_handle_t replicate_array[STARPU_NMAXWORKERS];

	/* Register all valid per-worker replicates */
	unsigned nworkers = starpu_worker_get_count();
	for (worker = 0; worker < nworkers; worker++)
	{
		if (handle->per_worker[worker].initialized)
		{
			/* Make sure the replicate is not removed */
			handle->per_worker[worker].refcnt++;

			uint32_t home_node = starpu_worker_get_memory_node(worker);
			starpu_data_register(&handle->reduction_tmp_handles[worker],
				home_node, handle->per_worker[worker].data_interface, handle->ops);

			starpu_data_set_sequential_consistency_flag(handle->reduction_tmp_handles[worker], 0);

			replicate_array[replicate_count++] = handle->reduction_tmp_handles[worker];
		}
		else {
			handle->reduction_tmp_handles[worker] = NULL;
		}
	}

#ifndef NO_TREE_REDUCTION
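	/* With the tree reduction, only the final task below accesses the
	 * handle itself, so a single task completion is enough to release
	 * the reduction. */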
	handle->reduction_refcnt = 1;
#else
	/* We know that in this reduction algorithm there is exactly one task per valid replicate. */
	handle->reduction_refcnt = replicate_count;
#endif

//	fprintf(stderr, "REDUX REFCNT = %d\n", handle->reduction_refcnt);

	if (replicate_count > 0)
	{
		/* Temporarily unlock the handle */
		_starpu_spin_unlock(&handle->header_lock);

#ifndef NO_TREE_REDUCTION
		/* We will store a pointer to the last task which should modify the
		 * replicate */
		struct starpu_task *last_replicate_deps[replicate_count];
		memset(last_replicate_deps, 0, replicate_count*sizeof(struct starpu_task *));
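
		/* Pairwise tree reduction: on each pass, replicate i absorbs
		 * replicate i+step and the stride doubles, so after roughly
		 * log2(replicate_count) passes every partial result has been
		 * folded into replicate 0. */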
		unsigned step = 1;
		while (step <= replicate_count)
		{
			unsigned i;
			for (i = 0; i < replicate_count; i += 2*step)
			{
				if (i + step < replicate_count)
				{
					/* Perform the reduction between replicates i
					 * and i+step and put the result in replicate i */
					struct starpu_task *redux_task = starpu_task_create();

					redux_task->cl = handle->redux_cl;
					STARPU_ASSERT(redux_task->cl);

					redux_task->buffers[0].handle = replicate_array[i];
					redux_task->buffers[0].mode = STARPU_RW;

					redux_task->buffers[1].handle = replicate_array[i+step];
					redux_task->buffers[1].mode = STARPU_R;

					redux_task->detach = 0;

					int ndeps = 0;
					struct starpu_task *task_deps[2];

					if (last_replicate_deps[i])
						task_deps[ndeps++] = last_replicate_deps[i];

					if (last_replicate_deps[i+step])
						task_deps[ndeps++] = last_replicate_deps[i+step];

					/* i depends on this task */
					last_replicate_deps[i] = redux_task;

					/* we don't perform the reduction until both replicates are ready */
					starpu_task_declare_deps_array(redux_task, ndeps, task_deps);

					int ret = starpu_task_submit(redux_task);
					STARPU_ASSERT(!ret);
				}
			}

			step *= 2;
		}
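
		/* The tree above left the combined result in replicate 0; one
		 * last task folds it into the handle's own data. */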
		struct starpu_task *redux_task = starpu_task_create();

		/* Mark this task so that StarPU does not block it when it
		 * tries to access the handle (normal tasks and data requests
		 * to that handle are frozen until the data is coherent
		 * again). */
		starpu_job_t j = _starpu_get_job_associated_to_task(redux_task);
		j->reduction_task = 1;

		redux_task->cl = handle->redux_cl;
		STARPU_ASSERT(redux_task->cl);

		redux_task->buffers[0].handle = handle;
		redux_task->buffers[0].mode = STARPU_RW;

		redux_task->buffers[1].handle = replicate_array[0];
		redux_task->buffers[1].mode = STARPU_R;

		if (last_replicate_deps[0])
			starpu_task_declare_deps_array(redux_task, 1, &last_replicate_deps[0]);

		int ret = starpu_task_submit(redux_task);
		STARPU_ASSERT(!ret);
#else
		/* Create a set of tasks to perform the reduction */
		unsigned replicate;
		for (replicate = 0; replicate < replicate_count; replicate++)
		{
			struct starpu_task *redux_task = starpu_task_create();

			/* Mark these tasks so that StarPU does not block them
			 * when they try to access the handle (normal tasks and
			 * data requests to that handle are frozen until the
			 * data is coherent again). */
			starpu_job_t j = _starpu_get_job_associated_to_task(redux_task);
			j->reduction_task = 1;

			redux_task->cl = handle->redux_cl;
			STARPU_ASSERT(redux_task->cl);

			redux_task->buffers[0].handle = handle;
			redux_task->buffers[0].mode = STARPU_RW;

			redux_task->buffers[1].handle = replicate_array[replicate];
			redux_task->buffers[1].mode = STARPU_R;

			int ret = starpu_task_submit(redux_task);
			STARPU_ASSERT(!ret);
		}
#endif

		/* Get the header lock back */
		_starpu_spin_lock(&handle->header_lock);
	}
}

void starpu_data_end_reduction_mode_terminate(starpu_data_handle_t handle)
{
	unsigned nworkers = starpu_worker_get_count();

//	fprintf(stderr, "starpu_data_end_reduction_mode_terminate\n");

	unsigned worker;
	for (worker = 0; worker < nworkers; worker++)
	{
		struct starpu_data_replicate_s *replicate;
		replicate = &handle->per_worker[worker];
		replicate->initialized = 0;

		if (handle->reduction_tmp_handles[worker])
		{
//			fprintf(stderr, "unregister handle %p\n", handle);
			handle->reduction_tmp_handles[worker]->lazy_unregister = 1;
			starpu_data_unregister_no_coherency(handle->reduction_tmp_handles[worker]);
			handle->per_worker[worker].refcnt--;
			/* TODO put in cache */
		}
	}
}