/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012,2014,2016,2017  Inria
 * Copyright (C) 2011-2019            CNRS
 * Copyright (C) 2011-2019            Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <stdarg.h>
#include <mpi.h>

#include <starpu.h>
#include <starpu_data.h>
#include <common/utils.h>
#include <util/starpu_task_insert_utils.h>
#include <datawizard/coherency.h>
#include <core/task.h>

#include <starpu_mpi_private.h>
#include <starpu_mpi_cache.h>
#include <starpu_mpi_select_node.h>

#include "starpu_mpi_task_insert.h"
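
/* Send DATA to DEST with the given tag and priority: use a detached
 * synchronous send when STARPU_SSEND is set in MODE, a standard detached
 * send otherwise. */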
#define _SEND_DATA(data, mode, dest, data_tag, prio, comm, callback, arg) \
	do { \
		if (mode & STARPU_SSEND) \
			starpu_mpi_issend_detached_prio(data, dest, data_tag, prio, comm, callback, arg); \
		else \
			starpu_mpi_isend_detached_prio(data, dest, data_tag, prio, comm, callback, arg); \
	} while (0)

static void (*pre_submit_hook)(struct starpu_task *task) = NULL;

int starpu_mpi_pre_submit_hook_register(void (*f)(struct starpu_task *))
{
	if (pre_submit_hook)
		_STARPU_MSG("Warning: a pre_submit_hook has already been registered. Please check if you really want to erase the previously registered hook.\n");
	pre_submit_hook = f;
	return 0;
}

int starpu_mpi_pre_submit_hook_unregister(void)
{
	pre_submit_hook = NULL;
	return 0;
}
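
/* Look at DATA (accessed with MODE) to decide which node should execute the
 * task: when the data is written to, its owner is the natural candidate. If
 * several written data are owned by different nodes, *inconsistent_execute is
 * raised so that the caller falls back to the node selection policy. */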
int _starpu_mpi_find_executee_node(starpu_data_handle_t data, enum starpu_data_access_mode mode, int me, int *do_execute, int *inconsistent_execute, int *xrank)
{
	if (mode & STARPU_W || mode & STARPU_REDUX)
	{
		if (!data)
		{
			/* We don't have anything allocated for this.
			 * The application knows we won't do anything
			 * about this task */
			/* Yes, the app could actually not call
			 * task_insert at all itself, this is just a
			 * safeguard. */
			_STARPU_MPI_DEBUG(3, "oh oh\n");
			_STARPU_MPI_LOG_OUT();
			return -EINVAL;
		}

		int mpi_rank = starpu_mpi_data_get_rank(data);
		if (mpi_rank == -1)
		{
			_STARPU_ERROR("Data %p with mode STARPU_W needs to have a valid rank", data);
		}

		if (*xrank == -1)
		{
			// No node has been selected yet
			*xrank = mpi_rank;
			_STARPU_MPI_DEBUG(100, "Codelet is going to be executed by node %d\n", *xrank);
			*do_execute = mpi_rank == STARPU_MPI_PER_NODE || (mpi_rank == me);
		}
		else if (mpi_rank != *xrank)
		{
			_STARPU_MPI_DEBUG(100, "Another node %d had already been selected to execute the codelet, can't now set %d\n", *xrank, mpi_rank);
			*inconsistent_execute = 1;
		}
	}
	_STARPU_MPI_DEBUG(100, "Executing: inconsistent=%d, do_execute=%d, xrank=%d\n", *inconsistent_execute, *do_execute, *xrank);
	return 0;
}
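
/* Before execution, move each piece of data where it is needed: the executing
 * node receives the data it reads but does not own, and the owner sends the
 * data it will not use locally. The communication cache avoids resending data
 * which is already present on the target node. */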
void _starpu_mpi_exchange_data_before_execution(starpu_data_handle_t data, enum starpu_data_access_mode mode, int me, int xrank, int do_execute, int prio, MPI_Comm comm)
{
	if (data && xrank == STARPU_MPI_PER_NODE)
	{
		STARPU_ASSERT_MSG(starpu_mpi_data_get_rank(data) == STARPU_MPI_PER_NODE, "If task is replicated, it has to access only per-node data");
	}
	if (data && mode & STARPU_R)
	{
		int mpi_rank = starpu_mpi_data_get_rank(data);
		starpu_mpi_tag_t data_tag = starpu_mpi_data_get_tag(data);
		if (mpi_rank == -1)
		{
			_STARPU_ERROR("StarPU needs to be told the MPI rank of this data, using starpu_mpi_data_register\n");
		}

		if (do_execute && mpi_rank != STARPU_MPI_PER_NODE && mpi_rank != me)
		{
			/* This node is going to execute the codelet, but it does not own the data, so it needs to receive the data from the owner node */
			int already_received = _starpu_mpi_cache_received_data_set(data);
			if (already_received == 0)
			{
				if (data_tag == -1)
					_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
				_STARPU_MPI_DEBUG(1, "Receiving data %p from %d\n", data, mpi_rank);
				starpu_mpi_irecv_detached(data, mpi_rank, data_tag, comm, NULL, NULL);
			}
			// else the node has already received the data
		}

		if (!do_execute && mpi_rank == me)
		{
			/* This node owns the data, but another node is going to execute the codelet, so it needs to send the data to the node that will execute it. */
			int already_sent = _starpu_mpi_cache_sent_data_set(data, xrank);
			if (already_sent == 0)
			{
				if (data_tag == -1)
					_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
				_STARPU_MPI_DEBUG(1, "Sending data %p to %d\n", data, xrank);
				_SEND_DATA(data, mode, xrank, data_tag, prio, comm, NULL, NULL);
			}
			// else the data has already been sent
		}
	}
}
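
/* After execution, written data is sent back to its owner when the codelet
 * was executed by another node (and, symmetrically, received by the owner). */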
static
void _starpu_mpi_exchange_data_after_execution(starpu_data_handle_t data, enum starpu_data_access_mode mode, int me, int xrank, int do_execute, int prio, MPI_Comm comm)
{
	if (mode & STARPU_W)
	{
		int mpi_rank = starpu_mpi_data_get_rank(data);
		starpu_mpi_tag_t data_tag = starpu_mpi_data_get_tag(data);
		if (mpi_rank == -1)
		{
			_STARPU_ERROR("StarPU needs to be told the MPI rank of this data, using starpu_mpi_data_register\n");
		}
		if (mpi_rank == STARPU_MPI_PER_NODE)
		{
			mpi_rank = me;
		}
		if (mpi_rank == me)
		{
			if (xrank != -1 && (xrank != STARPU_MPI_PER_NODE && me != xrank))
			{
				_STARPU_MPI_DEBUG(1, "Receive data %p back from node %d which executed the codelet ...\n", data, xrank);
				if (data_tag == -1)
					_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
				starpu_mpi_irecv_detached(data, xrank, data_tag, comm, NULL, NULL);
			}
		}
		else if (do_execute)
		{
			if (data_tag == -1)
				_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
			_STARPU_MPI_DEBUG(1, "Send data %p back to its owner %d...\n", data, mpi_rank);
			_SEND_DATA(data, mode, mpi_rank, data_tag, prio, comm, NULL, NULL);
		}
	}
}
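
/* Once the task is done with a piece of data, either flush the communication
 * cache entries of data that was modified, or, when the cache is disabled,
 * invalidate the temporary local copy received for a read-only access. */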
static
void _starpu_mpi_clear_data_after_execution(starpu_data_handle_t data, enum starpu_data_access_mode mode, int me, int do_execute)
{
	if (_starpu_cache_enabled)
	{
		if (mode & STARPU_W || mode & STARPU_REDUX)
		{
			/* The data has been modified, it MUST be removed from the cache */
			_starpu_mpi_cache_sent_data_clear(data);
			_starpu_mpi_cache_received_data_clear(data);
		}
	}
	else
	{
		/* We allocated a temporary buffer for the received data, now drop it */
		if ((mode & STARPU_R) && do_execute)
		{
			int mpi_rank = starpu_mpi_data_get_rank(data);
			if (mpi_rank == STARPU_MPI_PER_NODE)
			{
				mpi_rank = me;
			}
			if (mpi_rank != me && mpi_rank != -1)
			{
				starpu_data_invalidate_submit(data);
			}
		}
	}
}
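
/* Walk the variadic argument list of starpu_mpi_task_insert() to collect the
 * data handles with their access modes, the task priority, and the node that
 * should execute the codelet. Arguments which are irrelevant at this stage are
 * skipped here and decoded again later by _starpu_task_insert_create(). */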
static
int _starpu_mpi_task_decode_v(struct starpu_codelet *codelet, int me, int nb_nodes, int *xrank, int *do_execute, struct starpu_data_descr **descrs_p, int *nb_data_p, int *prio_p, va_list varg_list)
{
	/* XXX: _fstarpu_mpi_task_decode_v needs to be updated at the same time */
	va_list varg_list_copy;
	int inconsistent_execute = 0;
	int arg_type;
	int node_selected = 0;
	int nb_allocated_data = 16;
	struct starpu_data_descr *descrs;
	int nb_data;
	int prio = 0;
	int select_node_policy = STARPU_MPI_NODE_SELECTION_CURRENT_POLICY;

	_STARPU_TRACE_TASK_MPI_DECODE_START();

	_STARPU_MPI_MALLOC(descrs, nb_allocated_data * sizeof(struct starpu_data_descr));
	nb_data = 0;
	*do_execute = -1;
	*xrank = -1;

	va_copy(varg_list_copy, varg_list);
	while ((arg_type = va_arg(varg_list_copy, int)) != 0)
	{
		int arg_type_nocommute = arg_type & ~STARPU_COMMUTE;
		if (arg_type == STARPU_EXECUTE_ON_NODE)
		{
			*xrank = va_arg(varg_list_copy, int);
			if (node_selected == 0)
			{
				_STARPU_MPI_DEBUG(100, "Executing on node %d\n", *xrank);
				*do_execute = 1;
				node_selected = 1;
				inconsistent_execute = 0;
			}
		}
		else if (arg_type == STARPU_EXECUTE_ON_DATA)
		{
			starpu_data_handle_t data = va_arg(varg_list_copy, starpu_data_handle_t);
			if (node_selected == 0)
			{
				*xrank = starpu_mpi_data_get_rank(data);
				STARPU_ASSERT_MSG(*xrank != -1, "Rank of the data must be set using starpu_mpi_data_register() or starpu_data_set_rank()");
				_STARPU_MPI_DEBUG(100, "Executing on data node %d\n", *xrank);
				STARPU_ASSERT_MSG(*xrank <= nb_nodes, "Node %d to execute codelet is not a valid node (%d)", *xrank, nb_nodes);
				*do_execute = 1;
				node_selected = 1;
				inconsistent_execute = 0;
			}
		}
		else if (arg_type_nocommute & STARPU_R || arg_type_nocommute & STARPU_W || arg_type_nocommute & STARPU_RW || arg_type & STARPU_SCRATCH || arg_type & STARPU_REDUX)
		{
			starpu_data_handle_t data = va_arg(varg_list_copy, starpu_data_handle_t);
			enum starpu_data_access_mode mode = (enum starpu_data_access_mode) arg_type;
			if (node_selected == 0)
			{
				int ret = _starpu_mpi_find_executee_node(data, mode, me, do_execute, &inconsistent_execute, xrank);
				if (ret == -EINVAL)
				{
					free(descrs);
					va_end(varg_list_copy);
					_STARPU_TRACE_TASK_MPI_DECODE_END();
					return ret;
				}
			}
			if (nb_data >= nb_allocated_data)
			{
				nb_allocated_data *= 2;
				_STARPU_MPI_REALLOC(descrs, nb_allocated_data * sizeof(struct starpu_data_descr));
			}
			descrs[nb_data].handle = data;
			descrs[nb_data].mode = mode;
			nb_data++;
		}
		else if (arg_type == STARPU_DATA_ARRAY)
		{
			starpu_data_handle_t *datas = va_arg(varg_list_copy, starpu_data_handle_t *);
			int nb_handles = va_arg(varg_list_copy, int);
			int i;
			for (i = 0; i < nb_handles; i++)
			{
				STARPU_ASSERT_MSG(codelet->nbuffers == STARPU_VARIABLE_NBUFFERS || nb_data < codelet->nbuffers, "Too many data passed to starpu_mpi_task_insert");
				enum starpu_data_access_mode mode = STARPU_CODELET_GET_MODE(codelet, nb_data);
				if (node_selected == 0)
				{
					int ret = _starpu_mpi_find_executee_node(datas[i], mode, me, do_execute, &inconsistent_execute, xrank);
					if (ret == -EINVAL)
					{
						free(descrs);
						va_end(varg_list_copy);
						_STARPU_TRACE_TASK_MPI_DECODE_END();
						return ret;
					}
				}
				if (nb_data >= nb_allocated_data)
				{
					nb_allocated_data *= 2;
					_STARPU_MPI_REALLOC(descrs, nb_allocated_data * sizeof(struct starpu_data_descr));
				}
				descrs[nb_data].handle = datas[i];
				descrs[nb_data].mode = mode;
				nb_data++;
			}
		}
		else if (arg_type == STARPU_DATA_MODE_ARRAY)
		{
			struct starpu_data_descr *_descrs = va_arg(varg_list_copy, struct starpu_data_descr *);
			int nb_handles = va_arg(varg_list_copy, int);
			int i;
			for (i = 0; i < nb_handles; i++)
			{
				enum starpu_data_access_mode mode = _descrs[i].mode;
				if (node_selected == 0)
				{
					int ret = _starpu_mpi_find_executee_node(_descrs[i].handle, mode, me, do_execute, &inconsistent_execute, xrank);
					if (ret == -EINVAL)
					{
						free(descrs);
						va_end(varg_list_copy);
						_STARPU_TRACE_TASK_MPI_DECODE_END();
						return ret;
					}
				}
				if (nb_data >= nb_allocated_data)
				{
					nb_allocated_data *= 2;
					_STARPU_MPI_REALLOC(descrs, nb_allocated_data * sizeof(struct starpu_data_descr));
				}
				descrs[nb_data].handle = _descrs[i].handle;
				descrs[nb_data].mode = mode;
				nb_data++;
			}
		}
		else if (arg_type == STARPU_VALUE)
		{
			(void)va_arg(varg_list_copy, void *);
			(void)va_arg(varg_list_copy, size_t);
		}
		else if (arg_type == STARPU_CL_ARGS)
		{
			(void)va_arg(varg_list_copy, void *);
			(void)va_arg(varg_list_copy, size_t);
		}
		else if (arg_type == STARPU_CL_ARGS_NFREE)
		{
			(void)va_arg(varg_list_copy, void *);
			(void)va_arg(varg_list_copy, size_t);
		}
		else if (arg_type == STARPU_TASK_DEPS_ARRAY)
		{
			(void)va_arg(varg_list_copy, unsigned);
			(void)va_arg(varg_list_copy, struct starpu_task **);
		}
		else if (arg_type == STARPU_TASK_END_DEPS_ARRAY)
		{
			(void)va_arg(varg_list_copy, unsigned);
			(void)va_arg(varg_list_copy, struct starpu_task **);
		}
		else if (arg_type == STARPU_CALLBACK)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
		}
		else if (arg_type == STARPU_CALLBACK_WITH_ARG)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_CALLBACK_WITH_ARG_NFREE)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_CALLBACK_ARG)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_CALLBACK_ARG_NFREE)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_PRIORITY)
		{
			prio = va_arg(varg_list_copy, int);
		}
		/* STARPU_EXECUTE_ON_NODE handled above */
		/* STARPU_EXECUTE_ON_DATA handled above */
		/* STARPU_DATA_ARRAY handled above */
		/* STARPU_DATA_MODE_ARRAY handled above */
		else if (arg_type == STARPU_TAG)
		{
			(void)va_arg(varg_list_copy, starpu_tag_t);
		}
		else if (arg_type == STARPU_HYPERVISOR_TAG)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_FLOPS)
		{
			(void)va_arg(varg_list_copy, double);
		}
		else if (arg_type == STARPU_SCHED_CTX)
		{
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_ARG)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_ARG_NFREE)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_POP)
		{
			(void)va_arg(varg_list_copy, _starpu_callback_func_t);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_POP_ARG)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_PROLOGUE_CALLBACK_POP_ARG_NFREE)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else if (arg_type == STARPU_EXECUTE_WHERE)
		{
			// the flag is decoded and set later when
			// calling function _starpu_task_insert_create()
			(void)va_arg(varg_list_copy, unsigned long long);
		}
		else if (arg_type == STARPU_EXECUTE_ON_WORKER)
		{
			// the flag is decoded and set later when
			// calling function _starpu_task_insert_create()
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_TAG_ONLY)
		{
			(void)va_arg(varg_list_copy, starpu_tag_t);
		}
		else if (arg_type == STARPU_NAME)
		{
			(void)va_arg(varg_list_copy, const char *);
		}
		else if (arg_type == STARPU_POSSIBLY_PARALLEL)
		{
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_WORKER_ORDER)
		{
			// the flag is decoded and set later when
			// calling function _starpu_task_insert_create()
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_NODE_SELECTION_POLICY)
		{
			select_node_policy = va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_TASK_COLOR)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_TASK_SYNCHRONOUS)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_HANDLES_SEQUENTIAL_CONSISTENCY)
		{
			(void)va_arg(varg_list_copy, char *);
		}
		else if (arg_type == STARPU_TASK_END_DEP)
		{
			(void)va_arg(varg_list_copy, int);
		}
		else if (arg_type == STARPU_TASK_WORKERIDS)
		{
			(void)va_arg(varg_list_copy, unsigned);
			(void)va_arg(varg_list_copy, uint32_t *);
		}
		else if (arg_type == STARPU_SEQUENTIAL_CONSISTENCY)
		{
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_TASK_PROFILING_INFO)
		{
			(void)va_arg(varg_list_copy, struct starpu_profiling_task_info *);
		}
		else if (arg_type == STARPU_TASK_NO_SUBMITORDER)
		{
			(void)va_arg(varg_list_copy, unsigned);
		}
		else if (arg_type == STARPU_TASK_SCHED_DATA)
		{
			(void)va_arg(varg_list_copy, void *);
		}
		else
		{
			STARPU_ABORT_MSG("Unrecognized argument %d, did you perhaps forget to end arguments with 0?\n", arg_type);
		}
	}
	va_end(varg_list_copy);

	if (inconsistent_execute == 1 || *xrank == -1)
	{
		// We need to find out which node is going to execute the codelet.
		_STARPU_MPI_DEBUG(100, "Different nodes are owning W data. The node to execute the codelet is going to be selected with the current selection node policy. See starpu_mpi_node_selection_set_current_policy() to change the policy, or use STARPU_EXECUTE_ON_NODE or STARPU_EXECUTE_ON_DATA to specify the node\n");
		*xrank = _starpu_mpi_select_node(me, nb_nodes, descrs, nb_data, select_node_policy);
		*do_execute = *xrank == STARPU_MPI_PER_NODE || (me == *xrank);
	}
	else
	{
		_STARPU_MPI_DEBUG(100, "Inconsistent=%d - xrank=%d\n", inconsistent_execute, *xrank);
		*do_execute = *xrank == STARPU_MPI_PER_NODE || (me == *xrank);
	}
	_STARPU_MPI_DEBUG(100, "do_execute=%d\n", *do_execute);

	*descrs_p = descrs;
	*nb_data_p = nb_data;
	*prio_p = prio;

	_STARPU_TRACE_TASK_MPI_DECODE_END();
	return 0;
}
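
/* Decode the argument list, exchange the data needed before execution and,
 * when this node is the one executing the codelet, create the StarPU task.
 * Returns a negative value on error, 1 when the task is not to be executed
 * locally, and 0 when the task has been created. */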
static
int _starpu_mpi_task_build_v(MPI_Comm comm, struct starpu_codelet *codelet, struct starpu_task **task, int *xrank_p, struct starpu_data_descr **descrs_p, int *nb_data_p, int *prio_p, va_list varg_list)
{
	int me, do_execute, xrank, nb_nodes;
	int ret;
	int i;
	struct starpu_data_descr *descrs;
	int nb_data;
	int prio;

	_STARPU_MPI_LOG_IN();

	starpu_mpi_comm_rank(comm, &me);
	starpu_mpi_comm_size(comm, &nb_nodes);

	/* Find out whether we are to execute the task because we own the data to be written to. */
	ret = _starpu_mpi_task_decode_v(codelet, me, nb_nodes, &xrank, &do_execute, &descrs, &nb_data, &prio, varg_list);
	if (ret < 0)
		return ret;

	_STARPU_TRACE_TASK_MPI_PRE_START();
	/* Send and receive data as requested */
	for (i = 0; i < nb_data; i++)
	{
		_starpu_mpi_exchange_data_before_execution(descrs[i].handle, descrs[i].mode, me, xrank, do_execute, prio, comm);
	}

	if (xrank_p)
		*xrank_p = xrank;
	if (nb_data_p)
		*nb_data_p = nb_data;
	if (prio_p)
		*prio_p = prio;
	if (descrs_p)
		*descrs_p = descrs;
	else
		free(descrs);
	_STARPU_TRACE_TASK_MPI_PRE_END();

	if (do_execute == 0)
	{
		return 1;
	}
	else
	{
		va_list varg_list_copy;

		_STARPU_MPI_DEBUG(100, "Execution of the codelet %p (%s)\n", codelet, codelet ? codelet->name : NULL);

		*task = starpu_task_create();
		(*task)->cl_arg_free = 1;
		(*task)->callback_arg_free = 1;
		(*task)->prologue_callback_arg_free = 1;
		(*task)->prologue_callback_pop_arg_free = 1;

		va_copy(varg_list_copy, varg_list);
		_starpu_task_insert_create(codelet, *task, varg_list_copy);
		va_end(varg_list_copy);
		return 0;
	}
}
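
/* After the task has been submitted (or skipped locally), send the written
 * data back to their owners and clean up the cache state of every handle
 * accessed by the task. */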
int _starpu_mpi_task_postbuild_v(MPI_Comm comm, int xrank, int do_execute, struct starpu_data_descr *descrs, int nb_data, int prio)
{
	int me, i;

	_STARPU_TRACE_TASK_MPI_POST_START();
	starpu_mpi_comm_rank(comm, &me);

	for (i = 0; i < nb_data; i++)
	{
		_starpu_mpi_exchange_data_after_execution(descrs[i].handle, descrs[i].mode, me, xrank, do_execute, prio, comm);
		_starpu_mpi_clear_data_after_execution(descrs[i].handle, descrs[i].mode, me, do_execute);
	}

	free(descrs);

	_STARPU_TRACE_TASK_MPI_POST_END();
	_STARPU_MPI_LOG_OUT();
	return 0;
}
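
/* Build the task, submit it when this node is the executing one, and run the
 * post-build data exchanges. This is the common implementation behind
 * starpu_mpi_task_insert() and its older alias starpu_mpi_insert_task(). */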
static
int _starpu_mpi_task_insert_v(MPI_Comm comm, struct starpu_codelet *codelet, va_list varg_list)
{
	struct starpu_task *task;
	int ret;
	int xrank;
	int do_execute = 0;
	struct starpu_data_descr *descrs;
	int nb_data;
	int prio;

	ret = _starpu_mpi_task_build_v(comm, codelet, &task, &xrank, &descrs, &nb_data, &prio, varg_list);
	if (ret < 0)
		return ret;

	if (ret == 0)
	{
		do_execute = 1;
		ret = starpu_task_submit(task);
		if (STARPU_UNLIKELY(ret == -ENODEV))
		{
			_STARPU_MSG("submission of task %p with codelet %p failed (symbol `%s') (err: ENODEV)\n",
				    task, task->cl,
				    (codelet == NULL) ? "none" :
				    task->cl->name ? task->cl->name :
				    (task->cl->model && task->cl->model->symbol) ? task->cl->model->symbol : "none");
			task->destroy = 0;
			starpu_task_destroy(task);
		}
	}

	int val = _starpu_mpi_task_postbuild_v(comm, xrank, do_execute, descrs, nb_data, prio);

	if (ret == 0 && pre_submit_hook)
		pre_submit_hook(task);

	return val;
}
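
/* Public entry point. As an illustrative sketch only (the codelet `cl' and the
 * handles below are hypothetical, not defined in this file), a typical call
 * looks like:
 *
 *	starpu_mpi_task_insert(MPI_COMM_WORLD, &cl,
 *	                       STARPU_RW, data_handle,
 *	                       STARPU_R, input_handle,
 *	                       0);
 *
 * The argument list must be terminated by 0, as expected by
 * _starpu_mpi_task_decode_v() above. */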
int starpu_mpi_task_insert(MPI_Comm comm, struct starpu_codelet *codelet, ...)
{
	va_list varg_list;
	int ret;

	va_start(varg_list, codelet);
	ret = _starpu_mpi_task_insert_v(comm, codelet, varg_list);
	va_end(varg_list);
	return ret;
}

int starpu_mpi_insert_task(MPI_Comm comm, struct starpu_codelet *codelet, ...)
{
	va_list varg_list;
	int ret;

	va_start(varg_list, codelet);
	ret = _starpu_mpi_task_insert_v(comm, codelet, varg_list);
	va_end(varg_list);
	return ret;
}
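
/* Variants that let the caller submit the task itself: starpu_mpi_task_build()
 * returns NULL when this node is not the one executing the codelet, and
 * starpu_mpi_task_post_build() must be called afterwards to perform the
 * post-execution data exchanges. */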
struct starpu_task *starpu_mpi_task_build(MPI_Comm comm, struct starpu_codelet *codelet, ...)
{
	va_list varg_list;
	struct starpu_task *task;
	int ret;

	va_start(varg_list, codelet);
	ret = _starpu_mpi_task_build_v(comm, codelet, &task, NULL, NULL, NULL, NULL, varg_list);
	va_end(varg_list);
	STARPU_ASSERT(ret >= 0);
	return (ret > 0) ? NULL : task;
}

int starpu_mpi_task_post_build(MPI_Comm comm, struct starpu_codelet *codelet, ...)
{
	int xrank, do_execute;
	int ret, me, nb_nodes;
	va_list varg_list;
	struct starpu_data_descr *descrs;
	int nb_data;
	int prio;

	starpu_mpi_comm_rank(comm, &me);
	starpu_mpi_comm_size(comm, &nb_nodes);

	va_start(varg_list, codelet);
	/* Find out whether we are to execute the task because we own the data to be written to. */
	ret = _starpu_mpi_task_decode_v(codelet, me, nb_nodes, &xrank, &do_execute, &descrs, &nb_data, &prio, varg_list);
	va_end(varg_list);
	if (ret < 0)
		return ret;

	return _starpu_mpi_task_postbuild_v(comm, xrank, do_execute, descrs, nb_data, prio);
}
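
/* Reduction support. starpu_mpi_redux_data() collects the per-node
 * contributions of a handle accessed in STARPU_REDUX mode onto its owner,
 * using the redux_cl and init_cl codelets attached to the handle with
 * starpu_data_set_reduction_methods(). A hedged usage sketch (the codelets and
 * handles below are illustrative, not defined in this file):
 *
 *	starpu_data_set_reduction_methods(handle, &redux_cl, &init_cl);
 *	for (i = 0; i < n; i++)
 *		starpu_mpi_task_insert(MPI_COMM_WORLD, &accum_cl,
 *		                       STARPU_REDUX, handle,
 *		                       STARPU_R, input[i],
 *		                       0);
 *	starpu_mpi_redux_data(MPI_COMM_WORLD, handle);
 */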
struct _starpu_mpi_redux_data_args
{
	starpu_data_handle_t data_handle;
	starpu_data_handle_t new_handle;
	starpu_mpi_tag_t data_tag;
	int node;
	MPI_Comm comm;
	struct starpu_task *taskB;
};

void _starpu_mpi_redux_data_dummy_func(void *buffers[], void *cl_arg)
{
	(void)buffers;
	(void)cl_arg;
}

/* Dummy cost function for simgrid */
static double cost_function(struct starpu_task *task, unsigned nimpl)
{
	(void)task;
	(void)nimpl;
	return 0.000001;
}

static struct starpu_perfmodel dumb_model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};

static
struct starpu_codelet _starpu_mpi_redux_data_read_cl =
{
	.cpu_funcs = {_starpu_mpi_redux_data_dummy_func},
	.cuda_funcs = {_starpu_mpi_redux_data_dummy_func},
	.opencl_funcs = {_starpu_mpi_redux_data_dummy_func},
	.nbuffers = 1,
	.modes = {STARPU_R},
	.model = &dumb_model,
	.name = "_starpu_mpi_redux_data_read_cl"
};

struct starpu_codelet _starpu_mpi_redux_data_readwrite_cl =
{
	.cpu_funcs = {_starpu_mpi_redux_data_dummy_func},
	.cuda_funcs = {_starpu_mpi_redux_data_dummy_func},
	.opencl_funcs = {_starpu_mpi_redux_data_dummy_func},
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.model = &dumb_model,
	.name = "_starpu_mpi_redux_data_write_cl"
};

static
void _starpu_mpi_redux_data_detached_callback(void *arg)
{
	struct _starpu_mpi_redux_data_args *args = (struct _starpu_mpi_redux_data_args *) arg;

	STARPU_TASK_SET_HANDLE(args->taskB, args->new_handle, 1);
	int ret = starpu_task_submit(args->taskB);
	STARPU_ASSERT(ret == 0);

	starpu_data_unregister_submit(args->new_handle);
	free(args);
}

static
void _starpu_mpi_redux_data_recv_callback(void *callback_arg)
{
	struct _starpu_mpi_redux_data_args *args = (struct _starpu_mpi_redux_data_args *) callback_arg;
	starpu_data_register_same(&args->new_handle, args->data_handle);

	starpu_mpi_irecv_detached_sequential_consistency(args->new_handle, args->node, args->data_tag, args->comm, _starpu_mpi_redux_data_detached_callback, args, 0);
}

/* TODO: this should rather be implicitly called by starpu_mpi_task_insert when
 * a data previously accessed in REDUX mode gets accessed in R mode. */
void starpu_mpi_redux_data_prio(MPI_Comm comm, starpu_data_handle_t data_handle, int prio)
{
	int me, rank, nb_nodes;
	starpu_mpi_tag_t tag;

	rank = starpu_mpi_data_get_rank(data_handle);
	tag = starpu_mpi_data_get_tag(data_handle);
	if (rank == -1)
	{
		_STARPU_ERROR("StarPU needs to be told the MPI rank of this data, using starpu_mpi_data_register\n");
	}
	if (tag == -1)
	{
		_STARPU_ERROR("StarPU needs to be told the MPI tag of this data, using starpu_mpi_data_register\n");
	}

	starpu_mpi_comm_rank(comm, &me);
	starpu_mpi_comm_size(comm, &nb_nodes);

	_STARPU_MPI_DEBUG(1, "Doing reduction for data %p on node %d with %d nodes ...\n", data_handle, rank, nb_nodes);

	// TODO: we should count how many nodes actually hold the data in REDUX mode
	if (me == rank)
	{
		int i, j = 0;
		struct starpu_task *taskBs[nb_nodes];

		for (i = 0; i < nb_nodes; i++)
		{
			if (i != rank)
			{
				/* We need to make sure everything is
				 * executed after data_handle finished
				 * its last read access, we hence do
				 * the following:
				 * - submit an empty task A reading
				 * data_handle whose callback submits
				 * the mpi comm with sequential
				 * consistency set to 0, whose
				 * callback submits the redux_cl task
				 * B with sequential consistency set
				 * to 0,
				 * - submit an empty task C reading
				 * and writing data_handle and
				 * depending on task B, just to replug
				 * with implicit data dependencies
				 * with tasks inserted after this
				 * reduction.
				 */
				struct _starpu_mpi_redux_data_args *args;
				_STARPU_MPI_MALLOC(args, sizeof(struct _starpu_mpi_redux_data_args));
				args->data_handle = data_handle;
				args->data_tag = tag;
				args->node = i;
				args->comm = comm;

				// We need to create taskB early as
				// taskC declares a dependency on it
				args->taskB = starpu_task_create();
				args->taskB->cl = args->data_handle->redux_cl;
				args->taskB->sequential_consistency = 0;
				STARPU_TASK_SET_HANDLE(args->taskB, args->data_handle, 0);
				taskBs[j] = args->taskB;
				j++;

				// Submit taskA
				starpu_task_insert(&_starpu_mpi_redux_data_read_cl,
						   STARPU_R, data_handle,
						   STARPU_CALLBACK_WITH_ARG_NFREE, _starpu_mpi_redux_data_recv_callback, args,
						   0);
			}
		}

		// Submit taskC which depends on all the taskBs created above
		struct starpu_task *taskC = starpu_task_create();
		taskC->cl = &_starpu_mpi_redux_data_readwrite_cl;
		STARPU_TASK_SET_HANDLE(taskC, data_handle, 0);
		starpu_task_declare_deps_array(taskC, j, taskBs);
		int ret = starpu_task_submit(taskC);
		STARPU_ASSERT(ret == 0);
	}
	else
	{
		_STARPU_MPI_DEBUG(1, "Sending redux handle to %d ...\n", rank);
		starpu_mpi_isend_detached_prio(data_handle, rank, tag, prio, comm, NULL, NULL);
		starpu_task_insert(data_handle->init_cl, STARPU_W, data_handle, 0);
	}
	/* FIXME: in order to prevent simultaneous receive submissions
	 * on the same handle, we need to wait for all the starpu_mpi
	 * tasks to be done before submitting the next tasks. The current
	 * version of the implementation does not support multiple
	 * simultaneous receive requests on the same handle. */
	starpu_task_wait_for_all();
}

void starpu_mpi_redux_data(MPI_Comm comm, starpu_data_handle_t data_handle)
{
	starpu_mpi_redux_data_prio(comm, data_handle, 0);
}