/* reduction.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2013 Université de Bordeaux 1
 * Copyright (C) 2011, 2012, 2013 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
  17. #include <starpu.h>
  18. #include <common/utils.h>
  19. #include <util/starpu_data_cpy.h>
  20. #include <core/task.h>
  21. #include <datawizard/datawizard.h>
  22. void starpu_data_set_reduction_methods(starpu_data_handle_t handle,
  23. struct starpu_codelet *redux_cl,
  24. struct starpu_codelet *init_cl)
  25. {
  26. _starpu_spin_lock(&handle->header_lock);
  27. _starpu_codelet_check_deprecated_fields(redux_cl);
  28. _starpu_codelet_check_deprecated_fields(init_cl);
  29. unsigned child;
  30. for (child = 0; child < handle->nchildren; child++)
  31. {
  32. /* make sure that the flags are applied to the children as well */
  33. starpu_data_handle_t child_handle = starpu_data_get_child(handle, child);
  34. if (child_handle->nchildren > 0)
  35. starpu_data_set_reduction_methods(child_handle, redux_cl, init_cl);
  36. }
  37. handle->redux_cl = redux_cl;
  38. handle->init_cl = init_cl;
  39. _starpu_spin_unlock(&handle->header_lock);
  40. }
  41. void _starpu_redux_init_data_replicate(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, int workerid)
  42. {
  43. STARPU_ASSERT(replicate);
  44. STARPU_ASSERT(replicate->allocated);
  45. struct starpu_codelet *init_cl = handle->init_cl;
  46. STARPU_ASSERT(init_cl);
  47. _starpu_cl_func_t init_func = NULL;
  48. /* TODO Check that worker may execute the codelet */
  49. switch (starpu_worker_get_type(workerid))
  50. {
  51. case STARPU_CPU_WORKER:
  52. init_func = _starpu_task_get_cpu_nth_implementation(init_cl, 0);
  53. break;
  54. case STARPU_CUDA_WORKER:
  55. init_func = _starpu_task_get_cuda_nth_implementation(init_cl, 0);
  56. break;
  57. case STARPU_OPENCL_WORKER:
  58. init_func = _starpu_task_get_opencl_nth_implementation(init_cl, 0);
  59. break;
  60. default:
  61. STARPU_ABORT();
  62. break;
  63. }
  64. STARPU_ASSERT(init_func);
  65. init_func(&replicate->data_interface, NULL);
  66. replicate->initialized = 1;
  67. }
  68. /* Enable reduction mode. This function must be called with the header lock
  69. * taken. */
  70. void _starpu_data_start_reduction_mode(starpu_data_handle_t handle)
  71. {
  72. STARPU_ASSERT(handle->reduction_refcnt == 0);
  73. unsigned worker;
  74. unsigned nworkers = starpu_worker_get_count();
  75. for (worker = 0; worker < nworkers; worker++)
  76. {
  77. struct _starpu_data_replicate *replicate;
  78. replicate = &handle->per_worker[worker];
  79. replicate->initialized = 0;
  80. replicate->relaxed_coherency = 2;
  81. if (replicate->mc)
  82. replicate->mc->relaxed_coherency = 2;
  83. }
  84. }
//#define NO_TREE_REDUCTION

/* Force reduction. The lock should already have been taken.
 *
 * Collects every per-worker replicate that received a contribution, wraps
 * each one in a temporary data handle, and submits the reduction tasks that
 * fold them back into the main handle.  Two strategies are compiled in:
 * - default: a binary reduction tree (log2(count) parallel stages);
 * - NO_TREE_REDUCTION: one sequential redux task per contribution.
 * handle->reduction_refcnt is set to the number of tasks that will touch the
 * main handle, so completion can be detected elsewhere. */
void _starpu_data_end_reduction_mode(starpu_data_handle_t handle)
{
	unsigned worker;
	unsigned node;
	unsigned empty; /* Whether the handle is initially unallocated */

	/* Put every valid replicate in the same array */
	unsigned replicate_count = 0;
	/* At most one entry per worker, plus one for the handle's own
	 * initial value. */
	starpu_data_handle_t replicate_array[1 + STARPU_NMAXWORKERS];

	_starpu_spin_checklocked(&handle->header_lock);

	/* The handle is "empty" if no memory node holds a valid copy. */
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		if (handle->per_node[node].state != STARPU_INVALID)
			break;
	}
	empty = node == STARPU_MAXNODES;

#ifndef NO_TREE_REDUCTION
	if (!empty)
		/* Include the initial value into the reduction tree */
		replicate_array[replicate_count++] = handle;
#endif

	/* Register all valid per-worker replicates */
	unsigned nworkers = starpu_worker_get_count();
	for (worker = 0; worker < nworkers; worker++)
	{
		if (handle->per_worker[worker].initialized)
		{
			/* Make sure the replicate is not removed */
			handle->per_worker[worker].refcnt++;

			/* Wrap the worker's buffer in a temporary handle so
			 * it can be passed to the redux tasks; it aliases the
			 * per-worker interface, no copy is made. */
			unsigned home_node = starpu_worker_get_memory_node(worker);
			starpu_data_register(&handle->reduction_tmp_handles[worker],
				home_node, handle->per_worker[worker].data_interface, handle->ops);

			/* Dependencies are declared explicitly below. */
			starpu_data_set_sequential_consistency_flag(handle->reduction_tmp_handles[worker], 0);

			replicate_array[replicate_count++] = handle->reduction_tmp_handles[worker];
		}
		else
		{
			handle->reduction_tmp_handles[worker] = NULL;
		}
	}

#ifndef NO_TREE_REDUCTION
	if (empty)
	{
		/* Only the final copy will touch the actual handle */
		handle->reduction_refcnt = 1;
	}
	else
	{
		/* The handle itself is replicate 0 of the tree, so it is
		 * written once per tree level: ceil(log2(replicate_count))
		 * times. */
		unsigned step = 1;
		handle->reduction_refcnt = 0;
		while (step < replicate_count)
		{
			/* Each stage will touch the actual handle */
			handle->reduction_refcnt++;
			step *= 2;
		}
	}
#else
	/* We know that in this reduction algorithm there is exactly one task per valid replicate,
	 * plus one init task when the handle was empty. */
	handle->reduction_refcnt = replicate_count + empty;
#endif

//	fprintf(stderr, "REDUX REFCNT = %d\n", handle->reduction_refcnt);

	/* Only submit tasks if there is actually something to reduce: in tree
	 * mode replicate_array may contain just the handle's own value. */
	if (replicate_count >
#ifndef NO_TREE_REDUCTION
			!empty
#else
			0
#endif
			)
	{
		/* Temporarily unlock the handle: task creation/submission below
		 * may itself need the header lock. */
		_starpu_spin_unlock(&handle->header_lock);

#ifndef NO_TREE_REDUCTION
		/* We will store a pointer to the last task which should modify the
		 * replicate */
		struct starpu_task *last_replicate_deps[replicate_count];
		memset(last_replicate_deps, 0, replicate_count*sizeof(struct starpu_task *));

		struct starpu_task *redux_tasks[replicate_count];

		/* Redux step-by-step for step from 1 to replicate_count/2, i.e.
		 * 1-by-1, then 2-by-2, then 4-by-4, etc. */
		unsigned step;
		unsigned redux_task_idx = 0;
		for (step = 1; step < replicate_count; step *=2)
		{
			unsigned i;
			for (i = 0; i < replicate_count; i+=2*step)
			{
				if (i + step < replicate_count)
				{
					/* Perform the reduction between replicates i
					 * and i+step and put the result in replicate i */
					struct starpu_task *redux_task = starpu_task_create();

					/* Mark these tasks so that StarPU does not block them
					 * when they try to access the handle (normal tasks'
					 * data requests to that handle are frozen until the
					 * data is coherent again). */
					struct _starpu_job *j = _starpu_get_job_associated_to_task(redux_task);
					j->reduction_task = 1;

					redux_task->cl = handle->redux_cl;
					STARPU_ASSERT(redux_task->cl);
					/* Default the access modes if the user did not set
					 * them, then enforce the reduction contract. */
					if (!redux_task->cl->modes[0])
						redux_task->cl->modes[0] = STARPU_RW;
					if (!redux_task->cl->modes[1])
						redux_task->cl->modes[1] = STARPU_R;
					STARPU_ASSERT_MSG(redux_task->cl->modes[0] == STARPU_RW, "First parameter of reduction codelet has to be RW");
					STARPU_ASSERT_MSG(redux_task->cl->modes[1] == STARPU_R, "Second parameter of reduction codelet has to be R");

					redux_task->handles[0] = replicate_array[i];
					redux_task->handles[1] = replicate_array[i+step];

					int ndeps = 0;
					struct starpu_task *task_deps[2];

					if (last_replicate_deps[i])
						task_deps[ndeps++] = last_replicate_deps[i];

					if (last_replicate_deps[i+step])
						task_deps[ndeps++] = last_replicate_deps[i+step];

					/* i depends on this task */
					last_replicate_deps[i] = redux_task;

					/* we don't perform the reduction until both replicates are ready */
					starpu_task_declare_deps_array(redux_task, ndeps, task_deps);

					/* We cannot submit tasks here: we do
					 * not want to depend on tasks that have
					 * been completed, so we just store
					 * this task: it will be submitted
					 * later. */
					redux_tasks[redux_task_idx++] = redux_task;
				}
			}
		}

		if (empty)
			/* The handle was empty, we just need to copy the reduced value. */
			_starpu_data_cpy(handle, replicate_array[0], 1, NULL, 0, 1, last_replicate_deps[0]);

		/* Let's submit all the reduction tasks. */
		unsigned i;
		for (i = 0; i < redux_task_idx; i++)
		{
			int ret = _starpu_task_submit_internally(redux_tasks[i]);
			STARPU_ASSERT(ret == 0);
		}
#else
		if (empty)
		{
			/* The handle had no valid copy: initialize it with the
			 * neutral element before accumulating into it. */
			struct starpu_task *redux_task = starpu_task_create();

			/* Mark these tasks so that StarPU does not block them
			 * when they try to access the handle (normal tasks'
			 * data requests to that handle are frozen until the
			 * data is coherent again). */
			struct _starpu_job *j = _starpu_get_job_associated_to_task(redux_task);
			j->reduction_task = 1;

			redux_task->cl = handle->init_cl;
			STARPU_ASSERT(redux_task->cl);
#ifdef STARPU_DEVEL
#  warning the mode should already be set in the codelet. Only check they are valid?
#endif
			redux_task->cl->modes[0] = STARPU_W;

			redux_task->handles[0] = handle;

			int ret = _starpu_task_submit_internally(redux_task);
			STARPU_ASSERT(!ret);
		}

		/* Create a set of tasks to perform the reduction:
		 * sequentially fold each replicate into the handle. */
		unsigned replicate;
		for (replicate = 0; replicate < replicate_count; replicate++)
		{
			struct starpu_task *redux_task = starpu_task_create();

			/* Mark these tasks so that StarPU does not block them
			 * when they try to access the handle (normal tasks'
			 * data requests to that handle are frozen until the
			 * data is coherent again). */
			struct _starpu_job *j = _starpu_get_job_associated_to_task(redux_task);
			j->reduction_task = 1;

			redux_task->cl = handle->redux_cl;
			STARPU_ASSERT(redux_task->cl);
#ifdef STARPU_DEVEL
#  warning the modes should already be set in the codelet. Only check they are valid?
#endif
			redux_task->cl->modes[0] = STARPU_RW;
			redux_task->cl->modes[1] = STARPU_R;

			redux_task->handles[0] = handle;
			redux_task->handles[1] = replicate_array[replicate];

			int ret = _starpu_task_submit_internally(redux_task);
			STARPU_ASSERT(!ret);
		}
#endif

		/* Get the header lock back */
		_starpu_spin_lock(&handle->header_lock);
	}

	/* Drop the replicates back from reduction coherency (2) to relaxed
	 * coherency (1); _starpu_data_start_reduction_mode set them to 2. */
	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_data_replicate *replicate;
		replicate = &handle->per_worker[worker];
		replicate->relaxed_coherency = 1;
		if (replicate->mc)
			replicate->mc->relaxed_coherency = 1;
	}
}
  279. void _starpu_data_end_reduction_mode_terminate(starpu_data_handle_t handle)
  280. {
  281. unsigned nworkers = starpu_worker_get_count();
  282. // fprintf(stderr, "_starpu_data_end_reduction_mode_terminate\n");
  283. unsigned worker;
  284. _starpu_spin_checklocked(&handle->header_lock);
  285. for (worker = 0; worker < nworkers; worker++)
  286. {
  287. struct _starpu_data_replicate *replicate;
  288. replicate = &handle->per_worker[worker];
  289. replicate->initialized = 0;
  290. if (handle->reduction_tmp_handles[worker])
  291. {
  292. // fprintf(stderr, "unregister handle %p\n", handle);
  293. _starpu_spin_lock(&handle->reduction_tmp_handles[worker]->header_lock);
  294. handle->reduction_tmp_handles[worker]->lazy_unregister = 1;
  295. _starpu_spin_unlock(&handle->reduction_tmp_handles[worker]->header_lock);
  296. starpu_data_unregister_no_coherency(handle->reduction_tmp_handles[worker]);
  297. handle->per_worker[worker].refcnt--;
  298. /* TODO put in cache */
  299. }
  300. }
  301. }