starpu_mpi_task_insert.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2021  Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2021       Federal University of Rio Grande do Sul (UFRGS)
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <stdarg.h>
#include <mpi.h>
#include <starpu.h>
#include <starpu_data.h>
#include <common/utils.h>
#include <util/starpu_task_insert_utils.h>
#include <datawizard/coherency.h>
#include <core/task.h>

#include <starpu_mpi_private.h>
#include <starpu_mpi_cache.h>
#include <starpu_mpi_select_node.h>
#include "starpu_mpi_task_insert.h"

#define _SEND_DATA(data, mode, dest, data_tag, prio, comm, callback, arg) \
	do { \
		if (mode & STARPU_SSEND) \
			starpu_mpi_issend_detached_prio(data, dest, data_tag, prio, comm, callback, arg); \
		else \
			starpu_mpi_isend_detached_prio(data, dest, data_tag, prio, comm, callback, arg); \
	} while (0)

static void (*pre_submit_hook)(struct starpu_task *task) = NULL;

int starpu_mpi_pre_submit_hook_register(void (*f)(struct starpu_task *))
{
	if (pre_submit_hook)
		_STARPU_MSG("Warning: a pre_submit_hook has already been registered. Please check if you really want to erase the previously registered hook.\n");
	pre_submit_hook = f;
	return 0;
}

int starpu_mpi_pre_submit_hook_unregister()
{
	pre_submit_hook = NULL;
	return 0;
}
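
/* Illustrative sketch (not part of the original file): an application can
 * register a single hook that is invoked for each task created through
 * starpu_mpi_task_insert(). The hook name trace_task() below is a
 * hypothetical example; <stdio.h> would be needed for fprintf().
 *
 *	static void trace_task(struct starpu_task *task)
 *	{
 *		fprintf(stderr, "task %p (%s) built by starpu_mpi_task_insert\n",
 *			task, task->cl ? task->cl->name : "none");
 *	}
 *
 *	starpu_mpi_pre_submit_hook_register(trace_task);
 *	...
 *	starpu_mpi_pre_submit_hook_unregister();
 */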

int _starpu_mpi_find_executee_node(starpu_data_handle_t data, enum starpu_data_access_mode mode, int me, int *do_execute, int *inconsistent_execute, int *xrank)
{
	if (mode & STARPU_W || mode & STARPU_REDUX)
	{
		if (!data)
		{
			/* We don't have anything allocated for this.
			 * The application knows we won't do anything
			 * about this task */
			/* Yes, the app could actually not call
			 * task_insert at all itself, this is just a
			 * safeguard. */
			_STARPU_MPI_DEBUG(3, "oh oh\n");
			_STARPU_MPI_LOG_OUT();
			return -EINVAL;
		}

		int mpi_rank = starpu_mpi_data_get_rank(data);
		if (mpi_rank == -1)
		{
			_STARPU_ERROR("Data %p with mode STARPU_W needs to have a valid rank", data);
		}

		if (*xrank == -1)
		{
			// No node has been selected yet
			*xrank = mpi_rank;
			_STARPU_MPI_DEBUG(100, "Codelet is going to be executed by node %d\n", *xrank);
			*do_execute = mpi_rank == STARPU_MPI_PER_NODE || (mpi_rank == me);
		}
		else if (mpi_rank != *xrank)
		{
			_STARPU_MPI_DEBUG(100, "Another node %d had already been selected to execute the codelet, can't now set %d\n", *xrank, mpi_rank);
			*inconsistent_execute = 1;
		}
	}
	_STARPU_MPI_DEBUG(100, "Executing: inconsistent=%d, do_execute=%d, xrank=%d\n", *inconsistent_execute, *do_execute, *xrank);
	return 0;
}

void _starpu_mpi_exchange_data_before_execution(starpu_data_handle_t data, enum starpu_data_access_mode mode, int me, int xrank, int do_execute, int prio, MPI_Comm comm)
{
	if (data && xrank == STARPU_MPI_PER_NODE)
	{
		STARPU_ASSERT_MSG(starpu_mpi_data_get_rank(data) == STARPU_MPI_PER_NODE, "If task is replicated, it has to access only per-node data");
	}
	if (data && mode & STARPU_R && !(mode & STARPU_MPI_REDUX))
	{
		int mpi_rank = starpu_mpi_data_get_rank(data);
		starpu_mpi_tag_t data_tag = starpu_mpi_data_get_tag(data);
		if (mpi_rank == -1)
		{
			_STARPU_ERROR("StarPU needs to be told the MPI rank of this data, using starpu_mpi_data_register\n");
		}

		if (do_execute && mpi_rank != STARPU_MPI_PER_NODE && mpi_rank != me)
		{
			/* The node is going to execute the codelet, but it does not own the data, it needs to receive the data from the owner node */
			int already_received = starpu_mpi_cached_receive_set(data);
			if (already_received == 0)
			{
				if (data_tag == -1)
					_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
				_STARPU_MPI_DEBUG(1, "Receiving data %p from %d\n", data, mpi_rank);
				starpu_mpi_irecv_detached_prio(data, mpi_rank, data_tag, prio, comm, NULL, NULL);
			}
			// else the node has already received the data
		}

		if (!do_execute && mpi_rank == me)
		{
			/* The node owns the data, but another node is going to execute the codelet, the node needs to send the data to the executee node. */
			int already_sent = starpu_mpi_cached_send_set(data, xrank);
			if (already_sent == 0)
			{
				if (data_tag == -1)
					_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
				_STARPU_MPI_DEBUG(1, "Sending data %p to %d\n", data, xrank);
				_SEND_DATA(data, mode, xrank, data_tag, prio, comm, NULL, NULL);
			}
			// Else the data has already been sent
		}
	}
}
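
/* Illustrative example (not part of the original file): assume a handle A
 * registered with owner rank 0, and a codelet to be run on rank 1 that
 * accesses A in STARPU_R mode. Before execution:
 *   - on rank 1 (do_execute && mpi_rank != me), an irecv of A from rank 0 is
 *     posted, unless the communication cache says A was already received;
 *   - on rank 0 (!do_execute && mpi_rank == me), the matching send of A
 *     towards rank 1 is posted, unless already cached as sent.
 * The tag and priority used are those attached to A through
 * starpu_mpi_data_register()/starpu_mpi_data_set_tag().
 */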

static
void _starpu_mpi_exchange_data_after_execution(starpu_data_handle_t data, enum starpu_data_access_mode mode, int me, int xrank, int do_execute, int prio, MPI_Comm comm)
{
	if (mode & STARPU_W && !(mode & STARPU_MPI_REDUX))
	{
		int mpi_rank = starpu_mpi_data_get_rank(data);
		starpu_mpi_tag_t data_tag = starpu_mpi_data_get_tag(data);
		if (mpi_rank == -1)
		{
			_STARPU_ERROR("StarPU needs to be told the MPI rank of this data, using starpu_mpi_data_register\n");
		}
		if (mpi_rank == STARPU_MPI_PER_NODE)
		{
			mpi_rank = me;
		}
		if (mpi_rank == me)
		{
			if (xrank != -1 && (xrank != STARPU_MPI_PER_NODE && me != xrank))
			{
				_STARPU_MPI_DEBUG(1, "Receive data %p back from node %d which executed the codelet ...\n", data, xrank);
				if (data_tag == -1)
					_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
				starpu_mpi_irecv_detached(data, xrank, data_tag, comm, NULL, NULL);
			}
		}
		else if (do_execute)
		{
			if (data_tag == -1)
				_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
			_STARPU_MPI_DEBUG(1, "Send data %p back to its owner %d...\n", data, mpi_rank);
			_SEND_DATA(data, mode, mpi_rank, data_tag, prio, comm, NULL, NULL);
		}
	}
}

static
void _starpu_mpi_clear_data_after_execution(starpu_data_handle_t data, enum starpu_data_access_mode mode, int me, int do_execute)
{
	if (_starpu_cache_enabled)
	{
		if ((mode & STARPU_W && !(mode & STARPU_MPI_REDUX)) || mode & STARPU_REDUX)
		{
			/* The data has been modified, it MUST be removed from the cache */
			starpu_mpi_cached_send_clear(data);
			starpu_mpi_cached_receive_clear(data);
		}
	}
	else
	{
		/* We allocated a temporary buffer for the received data, now drop it */
		if ((mode & STARPU_R && !(mode & STARPU_MPI_REDUX)) && do_execute)
		{
			int mpi_rank = starpu_mpi_data_get_rank(data);
			if (mpi_rank == STARPU_MPI_PER_NODE)
			{
				mpi_rank = me;
			}
			if (mpi_rank != me && mpi_rank != -1)
			{
				starpu_data_invalidate_submit(data);
			}
		}
	}
}

static
int _starpu_mpi_task_decode_v(struct starpu_codelet *codelet, int me, int nb_nodes, int *xrank, int *do_execute, struct starpu_data_descr **descrs_p, int *nb_data_p, int *prio_p, va_list varg_list)
{
	/* XXX: _fstarpu_mpi_task_decode_v needs to be updated at the same time */
	va_list varg_list_copy;
	int inconsistent_execute = 0;
	int arg_type;
	int node_selected = 0;
	int nb_allocated_data = 16;
	struct starpu_data_descr *descrs;
	int nb_data;
	int prio = 0;
	int select_node_policy = STARPU_MPI_NODE_SELECTION_CURRENT_POLICY;

	_STARPU_TRACE_TASK_MPI_DECODE_START();

	_STARPU_MPI_MALLOC(descrs, nb_allocated_data * sizeof(struct starpu_data_descr));
	nb_data = 0;
	*do_execute = -1;
	*xrank = -1;

	va_copy(varg_list_copy, varg_list);
	while ((arg_type = va_arg(varg_list_copy, int)) != 0)
	{
		int arg_type_nocommute = arg_type & ~STARPU_COMMUTE;
		if (arg_type == STARPU_EXECUTE_ON_NODE)
		{
			*xrank = va_arg(varg_list_copy, int);
			if (node_selected == 0)
			{
				_STARPU_MPI_DEBUG(100, "Executing on node %d\n", *xrank);
				*do_execute = 1;
				node_selected = 1;
				inconsistent_execute = 0;
			}
		}
		else if (arg_type == STARPU_EXECUTE_ON_DATA)
		{
			starpu_data_handle_t data = va_arg(varg_list_copy, starpu_data_handle_t);
			if (node_selected == 0)
			{
				*xrank = starpu_mpi_data_get_rank(data);
				STARPU_ASSERT_MSG(*xrank != -1, "Rank of the data must be set using starpu_mpi_data_register() or starpu_data_set_rank()");
				_STARPU_MPI_DEBUG(100, "Executing on data node %d\n", *xrank);
				STARPU_ASSERT_MSG(*xrank <= nb_nodes, "Node %d to execute codelet is not a valid node (%d)", *xrank, nb_nodes);
				*do_execute = 1;
				node_selected = 1;
				inconsistent_execute = 0;
			}
		}
		else if (arg_type_nocommute & STARPU_R || arg_type_nocommute & STARPU_W || arg_type_nocommute & STARPU_RW || arg_type & STARPU_SCRATCH || arg_type & STARPU_REDUX || arg_type & STARPU_MPI_REDUX)
		{
			starpu_data_handle_t data = va_arg(varg_list_copy, starpu_data_handle_t);
			enum starpu_data_access_mode mode = (enum starpu_data_access_mode) arg_type;
			if (node_selected == 0)
			{
				int ret = _starpu_mpi_find_executee_node(data, mode, me, do_execute, &inconsistent_execute, xrank);
				if (ret == -EINVAL)
				{
					free(descrs);
					va_end(varg_list_copy);
					_STARPU_TRACE_TASK_MPI_DECODE_END();
					return ret;
				}
			}
			if (nb_data >= nb_allocated_data)
			{
				nb_allocated_data *= 2;
				_STARPU_MPI_REALLOC(descrs, nb_allocated_data * sizeof(struct starpu_data_descr));
			}
			descrs[nb_data].handle = data;
			descrs[nb_data].mode = mode;
			nb_data++;
		}
		else if (arg_type == STARPU_DATA_ARRAY)
		{
			starpu_data_handle_t *datas = va_arg(varg_list_copy, starpu_data_handle_t *);
			int nb_handles = va_arg(varg_list_copy, int);
			int i;
			for(i=0 ; i<nb_handles ; i++)
			{
				STARPU_ASSERT_MSG(codelet->nbuffers == STARPU_VARIABLE_NBUFFERS || nb_data < codelet->nbuffers, "Too many data passed to starpu_mpi_task_insert");
				enum starpu_data_access_mode mode = STARPU_CODELET_GET_MODE(codelet, nb_data);
				if (node_selected == 0)
				{
					int ret = _starpu_mpi_find_executee_node(datas[i], mode, me, do_execute, &inconsistent_execute, xrank);
					if (ret == -EINVAL)
					{
						free(descrs);
						va_end(varg_list_copy);
						_STARPU_TRACE_TASK_MPI_DECODE_END();
						return ret;
					}
				}
				if (nb_data >= nb_allocated_data)
				{
					nb_allocated_data *= 2;
					_STARPU_MPI_REALLOC(descrs, nb_allocated_data * sizeof(struct starpu_data_descr));
				}
				descrs[nb_data].handle = datas[i];
				descrs[nb_data].mode = mode;
				nb_data++;
			}
		}
		else if (arg_type == STARPU_DATA_MODE_ARRAY)
		{
			struct starpu_data_descr *_descrs = va_arg(varg_list_copy, struct starpu_data_descr *);
			int nb_handles = va_arg(varg_list_copy, int);
			int i;
			for(i=0 ; i<nb_handles ; i++)
			{
				enum starpu_data_access_mode mode = _descrs[i].mode;
				if (node_selected == 0)
				{
					int ret = _starpu_mpi_find_executee_node(_descrs[i].handle, mode, me, do_execute, &inconsistent_execute, xrank);
					if (ret == -EINVAL)
					{
						free(descrs);
						va_end(varg_list_copy);
						_STARPU_TRACE_TASK_MPI_DECODE_END();
						return ret;
					}
				}
				if (nb_data >= nb_allocated_data)
				{
					nb_allocated_data *= 2;
					_STARPU_MPI_REALLOC(descrs, nb_allocated_data * sizeof(struct starpu_data_descr));
				}
				descrs[nb_data].handle = _descrs[i].handle;
				descrs[nb_data].mode = mode;
				nb_data++;
			}
		}
		else if (arg_type == STARPU_VALUE)
		{
			(void)va_arg(varg_list_copy, void *);
			(void)va_arg(varg_list_copy, size_t);
		}
		else if (arg_type == STARPU_CL_ARGS)
		{
			(void)va_arg(varg_list_copy, void *);
			(void)va_arg(varg_list_copy, size_t);
		}
		else if (arg_type == STARPU_CL_ARGS_NFREE)
		{
			(void)va_arg(varg_list_copy, void *);
			(void)va_arg(varg_list_copy, size_t);
		}
		else if (arg_type == STARPU_TASK_DEPS_ARRAY)
		{
			(void)va_arg(varg_list_copy, unsigned);
			(void)va_arg(varg_list_copy, struct starpu_task **);
		}
		else if (arg_type == STARPU_TASK_END_DEPS_ARRAY)
		{
			(void)va_arg(varg_list_copy, unsigned);
			(void)va_arg(varg_list_copy, struct starpu_task **);
		}
		else if (arg_type == STARPU_CALLBACK)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
		}
		else if (arg_type == STARPU_CALLBACK_WITH_ARG)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_CALLBACK_WITH_ARG_NFREE)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_CALLBACK_ARG)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_CALLBACK_ARG_NFREE)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_EPILOGUE_CALLBACK)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
		}
		else if (arg_type == STARPU_EPILOGUE_CALLBACK_ARG)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_PRIORITY)
		{
			prio = va_arg(varg_list_copy, int);
		}
		/* STARPU_EXECUTE_ON_NODE handled above */
		/* STARPU_EXECUTE_ON_DATA handled above */
		/* STARPU_DATA_ARRAY handled above */
		/* STARPU_DATA_MODE_ARRAY handled above */
		else if (arg_type == STARPU_TAG)
		{
			(void)va_arg(varg_list_copy, starpu_tag_t);
		}
		else if (arg_type == STARPU_HYPERVISOR_TAG)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_FLOPS)
		{
			(void)va_arg(varg_list_copy, double);
		}
		else if (arg_type == STARPU_SCHED_CTX)
		{
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_ARG)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_ARG_NFREE)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_POP)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_POP_ARG)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_POP_ARG_NFREE)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_EXECUTE_WHERE)
		{
			// the flag is decoded and set later when
			// calling function _starpu_task_insert_create()
			(void)va_arg(varg_list_copy, unsigned long long);
		}
		else if (arg_type == STARPU_EXECUTE_ON_WORKER)
		{
			// the flag is decoded and set later when
			// calling function _starpu_task_insert_create()
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_TAG_ONLY)
		{
			(void)va_arg(varg_list_copy, starpu_tag_t);
		}
		else if (arg_type == STARPU_NAME)
		{
			(void)va_arg(varg_list_copy, const char *);
		}
		else if (arg_type == STARPU_POSSIBLY_PARALLEL)
		{
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_WORKER_ORDER)
		{
			// the flag is decoded and set later when
			// calling function _starpu_task_insert_create()
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_NODE_SELECTION_POLICY)
		{
			select_node_policy = va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_TASK_COLOR)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_TASK_SYNCHRONOUS)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_HANDLES_SEQUENTIAL_CONSISTENCY)
		{
			(void)va_arg(varg_list_copy, char *);
		}
		else if (arg_type == STARPU_TASK_END_DEP)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_TASK_WORKERIDS)
		{
			(void)va_arg(varg_list_copy, unsigned);
			(void)va_arg(varg_list_copy, uint32_t *);
		}
		else if (arg_type == STARPU_SEQUENTIAL_CONSISTENCY)
		{
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_TASK_PROFILING_INFO)
		{
			(void)va_arg(varg_list_copy, struct starpu_profiling_task_info *);
		}
		else if (arg_type == STARPU_TASK_NO_SUBMITORDER)
		{
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_TASK_SCHED_DATA)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_TASK_FILE)
		{
			(void)va_arg(varg_list_copy, const char *);
		}
		else if (arg_type == STARPU_TASK_LINE)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else
		{
			STARPU_ABORT_MSG("Unrecognized argument %d, did you perhaps forget to end arguments with 0?\n", arg_type);
		}
	}
	va_end(varg_list_copy);

	if (inconsistent_execute == 1 || *xrank == -1)
	{
		// We need to find out which node is going to execute the codelet.
		_STARPU_MPI_DEBUG(100, "Different nodes are owning W data. The node to execute the codelet is going to be selected with the current selection node policy. See starpu_mpi_node_selection_set_current_policy() to change the policy, or use STARPU_EXECUTE_ON_NODE or STARPU_EXECUTE_ON_DATA to specify the node\n");
		*xrank = _starpu_mpi_select_node(me, nb_nodes, descrs, nb_data, select_node_policy);
		*do_execute = *xrank == STARPU_MPI_PER_NODE || (me == *xrank);
	}
	else
	{
		_STARPU_MPI_DEBUG(100, "Inconsistent=%d - xrank=%d\n", inconsistent_execute, *xrank);
		*do_execute = *xrank == STARPU_MPI_PER_NODE || (me == *xrank);
	}
	_STARPU_MPI_DEBUG(100, "do_execute=%d\n", *do_execute);

	*descrs_p = descrs;
	*nb_data_p = nb_data;
	*prio_p = prio;
	_STARPU_TRACE_TASK_MPI_DECODE_END();
	return 0;
}

static
int _starpu_mpi_task_build_v(MPI_Comm comm, struct starpu_codelet *codelet, struct starpu_task **task, int *xrank_p, struct starpu_data_descr **descrs_p, int *nb_data_p, int *prio_p, va_list varg_list)
{
	int me, do_execute, xrank, nb_nodes;
	int ret;
	int i;
	struct starpu_data_descr *descrs;
	int nb_data;
	int prio;

	_STARPU_MPI_LOG_IN();

	starpu_mpi_comm_rank(comm, &me);
	starpu_mpi_comm_size(comm, &nb_nodes);

	/* Find out whether we are to execute the codelet because we own the data to be written to. */
	ret = _starpu_mpi_task_decode_v(codelet, me, nb_nodes, &xrank, &do_execute, &descrs, &nb_data, &prio, varg_list);
	if (ret < 0)
		return ret;

	_STARPU_TRACE_TASK_MPI_PRE_START();
	/* Send and receive data as requested */
	for(i=0 ; i<nb_data ; i++)
	{
		_starpu_mpi_exchange_data_before_execution(descrs[i].handle, descrs[i].mode, me, xrank, do_execute, prio, comm);
	}

	if (xrank_p)
		*xrank_p = xrank;
	if (nb_data_p)
		*nb_data_p = nb_data;
	if (prio_p)
		*prio_p = prio;

	if (descrs_p)
		*descrs_p = descrs;
	else
		free(descrs);

	_STARPU_TRACE_TASK_MPI_PRE_END();

	if (do_execute == 0)
	{
		return 1;
	}
	else
	{
		va_list varg_list_copy;

		_STARPU_MPI_DEBUG(100, "Execution of the codelet %p (%s)\n", codelet, codelet ? codelet->name : NULL);

		*task = starpu_task_create();
		(*task)->cl_arg_free = 1;
		(*task)->callback_arg_free = 1;
		(*task)->prologue_callback_arg_free = 1;
		(*task)->prologue_callback_pop_arg_free = 1;

		va_copy(varg_list_copy, varg_list);
		_starpu_task_insert_create(codelet, *task, varg_list_copy);
		va_end(varg_list_copy);
		return 0;
	}
}

int _starpu_mpi_task_postbuild_v(MPI_Comm comm, int xrank, int do_execute, struct starpu_data_descr *descrs, int nb_data, int prio)
{
	int me, i;

	_STARPU_TRACE_TASK_MPI_POST_START();
	starpu_mpi_comm_rank(comm, &me);

	for(i=0 ; i<nb_data ; i++)
	{
		if ((descrs[i].mode & STARPU_REDUX || descrs[i].mode & STARPU_MPI_REDUX) && descrs[i].handle)
		{
			struct _starpu_mpi_data *mpi_data = (struct _starpu_mpi_data *) descrs[i].handle->mpi_data;
			int rrank = starpu_mpi_data_get_rank(descrs[i].handle);
			int size;
			starpu_mpi_comm_size(comm, &size);
			if (mpi_data->redux_map == NULL)
				_STARPU_CALLOC(mpi_data->redux_map, size, sizeof(mpi_data->redux_map[0]));
			mpi_data->redux_map[xrank] = 1;
			mpi_data->redux_map[rrank] = 1;
		}
		_starpu_mpi_exchange_data_after_execution(descrs[i].handle, descrs[i].mode, me, xrank, do_execute, prio, comm);
		_starpu_mpi_clear_data_after_execution(descrs[i].handle, descrs[i].mode, me, do_execute);
	}

	free(descrs);

	_STARPU_TRACE_TASK_MPI_POST_END();
	_STARPU_MPI_LOG_OUT();
	return 0;
}

static
int _starpu_mpi_task_insert_v(MPI_Comm comm, struct starpu_codelet *codelet, va_list varg_list)
{
	struct starpu_task *task;
	int ret;
	int xrank;
	int do_execute = 0;
	struct starpu_data_descr *descrs;
	int nb_data;
	int prio;

	ret = _starpu_mpi_task_build_v(comm, codelet, &task, &xrank, &descrs, &nb_data, &prio, varg_list);
	if (ret < 0)
		return ret;

	if (ret == 0)
	{
		do_execute = 1;
		ret = starpu_task_submit(task);

		if (STARPU_UNLIKELY(ret == -ENODEV))
		{
			_STARPU_MSG("submission of task %p with codelet %p failed (symbol `%s') (err: ENODEV)\n",
				    task, task->cl,
				    (codelet == NULL) ? "none" :
				    task->cl->name ? task->cl->name :
				    (task->cl->model && task->cl->model->symbol) ? task->cl->model->symbol : "none");

			task->destroy = 0;
			starpu_task_destroy(task);
		}
	}

	int val = _starpu_mpi_task_postbuild_v(comm, xrank, do_execute, descrs, nb_data, prio);

	if (ret == 0 && pre_submit_hook)
		pre_submit_hook(task);

	return val;
}

#undef starpu_mpi_task_insert
int starpu_mpi_task_insert(MPI_Comm comm, struct starpu_codelet *codelet, ...)
{
	va_list varg_list;
	int ret;

	va_start(varg_list, codelet);
	ret = _starpu_mpi_task_insert_v(comm, codelet, varg_list);
	va_end(varg_list);
	return ret;
}
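
/* Illustrative sketch (not part of the original file): a typical call from an
 * application, assuming a codelet `my_cl' and handles A and B previously
 * registered on every node with starpu_mpi_data_register() (hypothetical
 * names):
 *
 *	starpu_mpi_task_insert(MPI_COMM_WORLD, &my_cl,
 *			       STARPU_RW, A,
 *			       STARPU_R, B,
 *			       STARPU_PRIORITY, 1,
 *			       0);
 *
 * All nodes of the communicator issue the same call: the node chosen to run
 * the codelet posts the receives it needs, the owners of the data post the
 * matching sends, and the task itself is only submitted on the executing
 * node (see _starpu_mpi_task_build_v() above).
 */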

#undef starpu_mpi_insert_task
int starpu_mpi_insert_task(MPI_Comm comm, struct starpu_codelet *codelet, ...)
{
	va_list varg_list;
	int ret;

	va_start(varg_list, codelet);
	ret = _starpu_mpi_task_insert_v(comm, codelet, varg_list);
	va_end(varg_list);
	return ret;
}

#undef starpu_mpi_task_build
struct starpu_task *starpu_mpi_task_build(MPI_Comm comm, struct starpu_codelet *codelet, ...)
{
	va_list varg_list;
	struct starpu_task *task;
	int ret;

	va_start(varg_list, codelet);
	ret = _starpu_mpi_task_build_v(comm, codelet, &task, NULL, NULL, NULL, NULL, varg_list);
	va_end(varg_list);
	STARPU_ASSERT(ret >= 0);
	return (ret > 0) ? NULL : task;
}

int starpu_mpi_task_post_build(MPI_Comm comm, struct starpu_codelet *codelet, ...)
{
	int xrank, do_execute;
	int ret, me, nb_nodes;
	va_list varg_list;
	struct starpu_data_descr *descrs;
	int nb_data;
	int prio;

	starpu_mpi_comm_rank(comm, &me);
	starpu_mpi_comm_size(comm, &nb_nodes);

	va_start(varg_list, codelet);
	/* Find out whether we are to execute the codelet because we own the data to be written to. */
	ret = _starpu_mpi_task_decode_v(codelet, me, nb_nodes, &xrank, &do_execute, &descrs, &nb_data, &prio, varg_list);
	va_end(varg_list);
	if (ret < 0)
		return ret;

	return _starpu_mpi_task_postbuild_v(comm, xrank, do_execute, descrs, nb_data, prio);
}
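
/* Illustrative sketch (not part of the original file): starpu_mpi_task_build()
 * and starpu_mpi_task_post_build() can be paired when the application wants to
 * adjust or submit the task itself; both calls must receive the same argument
 * list. A hypothetical example with a codelet `my_cl' and a handle A:
 *
 *	struct starpu_task *task;
 *	task = starpu_mpi_task_build(MPI_COMM_WORLD, &my_cl, STARPU_RW, A, 0);
 *	// a NULL return means this node does not execute the codelet
 *	if (task)
 *	{
 *		int ret = starpu_task_submit(task);
 *		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
 *	}
 *	starpu_mpi_task_post_build(MPI_COMM_WORLD, &my_cl, STARPU_RW, A, 0);
 */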

struct starpu_codelet _starpu_mpi_redux_data_synchro_cl =
{
	.where = STARPU_NOWHERE,
	.modes = {STARPU_R, STARPU_W},
	.nbuffers = 2
};

struct _starpu_mpi_redux_data_args
{
	starpu_data_handle_t data_handle;
	starpu_data_handle_t new_handle;
	starpu_mpi_tag_t data_tag;
	int node;
	MPI_Comm comm;
	struct starpu_task *taskB;
	long taskC_jobid;
};

void _starpu_mpi_redux_fill_post_sync_jobid(const void * const redux_data_args, long * const post_sync_jobid)
{
	*post_sync_jobid = ((const struct _starpu_mpi_redux_data_args *) redux_data_args)->taskC_jobid;
}

/* TODO: this should rather be implicitly called by starpu_mpi_task_insert when
 * a data previously accessed in (MPI_)REDUX mode gets accessed in R mode. */
void starpu_mpi_redux_data_prio_tree(MPI_Comm comm, starpu_data_handle_t data_handle, int prio, int arity)
{
	int me, rank, nb_nodes;
	starpu_mpi_tag_t data_tag;

	rank = starpu_mpi_data_get_rank(data_handle);
	data_tag = starpu_mpi_data_get_tag(data_handle);
	struct _starpu_mpi_data *mpi_data = data_handle->mpi_data;

	if (rank == -1)
	{
		_STARPU_ERROR("StarPU needs to be told the MPI rank of this data, using starpu_mpi_data_register\n");
	}
	if (data_tag == -1)
	{
		_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
	}

	starpu_mpi_comm_rank(comm, &me);
	starpu_mpi_comm_size(comm, &nb_nodes);

	int current_level, nb_contrib, next_nb_contrib;
	int i, j, step, node;
	char root_in_step, me_in_step;
	// https://stackoverflow.com/questions/109023/how-to-count-the-number-of-set-bits-in-a-32-bit-integer
	// https://stackoverflow.com/a/109025
	// see hamming weight
	//nb_contrib = std::popcount(mpi_data->redux_map); // most preferable
	nb_contrib = 0;
	for (i=0 ; i<nb_nodes ; i++)
	{
		_STARPU_MPI_DEBUG(5, "mpi_data->redux_map[%d] = %d\n", i, mpi_data->redux_map[i]);
		if (mpi_data->redux_map[i]) nb_contrib++;
	}
	if (nb_contrib == 0)
	{
		/* Nothing to do! */
		return;
	}
	if (arity < 2)
	{
		arity = nb_contrib;
	}

	_STARPU_MPI_DEBUG(5, "There are %d contributors\n", nb_contrib);
	int contributors[nb_contrib];
	int reducing_node;
	j = 0;
	for (i=0 ; i<nb_nodes ; i++)
	{
		_STARPU_MPI_DEBUG(5, "%d in reduction ? %d\n", i, mpi_data->redux_map[i]);
		if (mpi_data->redux_map[i])
		{
			contributors[j++] = i;
		}
	}
	for (i=0 ; i<nb_contrib ; i++)
	{
		_STARPU_MPI_DEBUG(5, "%dth contributor = %d\n", i, contributors[i]);
	}

	// Create a synchronization task and use its jobid for tracing
	struct starpu_task *synchro = starpu_task_create();
	//const long synchro_jobid = starpu_task_get_job_id(synchro);
	synchro->cl = &_starpu_mpi_redux_data_synchro_cl;
	//STARPU_TASK_SET_HANDLE(synchro, data_handle, 0);

	_STARPU_MPI_DEBUG(15, "mpi_redux _ STARTING with %d-ary tree \n", arity);
	current_level = 0;
	while (nb_contrib != 1)
	{
		_STARPU_MPI_DEBUG(5, "%dth level in the reduction \n", current_level);
		if (nb_contrib%arity == 0) next_nb_contrib = nb_contrib/arity;
		else next_nb_contrib = nb_contrib/arity + 1;
		for (step = 0; step < next_nb_contrib; step++)
		{
			root_in_step = 0;
			me_in_step = 0;
			for (node = step*arity ; node < nb_contrib && node < (step+1)*arity ; node++)
			{
				if (contributors[node] == rank) root_in_step = 1;
				if (contributors[node] == me) me_in_step = 1;
			}
			/* FIXME: if the root node is not in the step, then we agree that the node
			 * with the lowest id reduces the step: we could agree on another
			 * node to better load balance in the case of multiple reductions involving
			 * the same sets of nodes
			 * FIX: We chose to use the tag%arity-th contributor in the step
			 */
			if (root_in_step)
			{
				reducing_node = rank;
			}
			else if (step*arity + data_tag%arity < nb_contrib)
			{
				reducing_node = contributors[step*arity + data_tag%arity];
			}
			else
			{
				reducing_node = contributors[step*arity];
			}

			if (me == reducing_node)
			{
				_STARPU_MPI_DEBUG(5, "mpi_redux _ %dth level, %dth step ; chose %d node\n", current_level, step, reducing_node);
				for (node = step*arity ; node < nb_contrib && node < (step+1)*arity ; node++)
				{
					if (me != contributors[node])
					{
						_STARPU_MPI_DEBUG(5, "%d takes part in the reduction of %p towards %d (%dth level ; %dth step) \n",
								  contributors[node], data_handle, reducing_node, current_level, step);
						/* We need to make sure everything is
						 * executed after data_handle finished
						 * its last read access, we hence do
						 * the following:
						 * - submit an empty task A reading
						 *   data_handle
						 * - submit the reducing task B
						 *   reading and writing data_handle and
						 *   depending on task A through sequential
						 *   consistency
						 */
						starpu_data_handle_t new_handle;
						starpu_data_register_same(&new_handle, data_handle);
						/* Task A */
						starpu_task_insert(&_starpu_mpi_redux_data_synchro_cl,
								   STARPU_R, data_handle,
								   STARPU_W, new_handle, 0);
						starpu_mpi_irecv_detached_prio(new_handle, contributors[node], data_tag, prio, comm, NULL, NULL);
						/* Task B */
						starpu_task_insert(data_handle->redux_cl, STARPU_RW|STARPU_COMMUTE, data_handle, STARPU_R, new_handle, 0);
						starpu_data_unregister_submit(new_handle);
					}
				}
			}
			else if (me_in_step)
			{
				_STARPU_MPI_DEBUG(5, "Sending redux handle to %d ...\n", reducing_node);
				starpu_mpi_isend_detached_prio(data_handle, reducing_node, data_tag, prio, comm, NULL, NULL);
				starpu_data_invalidate_submit(data_handle);
			}
			contributors[step] = reducing_node;
		}
		nb_contrib = next_nb_contrib;
		current_level++;
	}
}

void starpu_mpi_redux_data(MPI_Comm comm, starpu_data_handle_t data_handle)
{
	starpu_mpi_redux_data_prio(comm, data_handle, 0);
}

void starpu_mpi_redux_data_tree(MPI_Comm comm, starpu_data_handle_t data_handle, int arity)
{
	starpu_mpi_redux_data_prio_tree(comm, data_handle, 0, arity);
}

void starpu_mpi_redux_data_prio(MPI_Comm comm, starpu_data_handle_t data_handle, int prio)
{
	int nb_nodes, nb_contrib, i;
	struct _starpu_mpi_data *mpi_data = data_handle->mpi_data;

	starpu_mpi_comm_size(comm, &nb_nodes);
	nb_contrib = 0;
	for (i=0 ; i<nb_nodes ; i++)
	{
		if (mpi_data->redux_map[i])
		{
			nb_contrib++;
		}
	}
	starpu_mpi_redux_data_prio_tree(comm, data_handle, prio, nb_contrib);
}
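
/* Illustrative sketch (not part of the original file): after contributing to a
 * handle in STARPU_REDUX or STARPU_MPI_REDUX mode through
 * starpu_mpi_task_insert(), the application triggers the inter-node reduction
 * explicitly. The names `sum', `part' and `accumulate_cl' are hypothetical;
 * `sum' is assumed to be registered on every node with
 * starpu_mpi_data_register() and equipped with reduction codelets through
 * starpu_data_set_reduction_methods().
 *
 *	starpu_mpi_task_insert(MPI_COMM_WORLD, &accumulate_cl,
 *			       STARPU_REDUX, sum,
 *			       STARPU_R, part, 0);
 *	...
 *	starpu_mpi_redux_data(MPI_COMM_WORLD, sum);                  // flat reduction
 *	starpu_mpi_redux_data_prio_tree(MPI_COMM_WORLD, sum, 0, 2);  // binary reduction tree
 */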