@c -*-texinfo-*-
@c This file is part of the StarPU Handbook.
@c Copyright (C) 2009--2011 Universit@'e de Bordeaux 1
@c Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
@c Copyright (C) 2011 Institut National de Recherche en Informatique et Automatique
@c See the file starpu.texi for copying conditions.
The integration of MPI transfers within task parallelism is done in a
very natural way by means of asynchronous interactions between the
application and StarPU. This is implemented in a separate libstarpumpi library
which basically provides "StarPU" equivalents of @code{MPI_*} functions, where
@code{void *} buffers are replaced with @code{starpu_data_handle_t}s, and all
GPU-RAM-NIC transfers are handled efficiently by StarPU-MPI. The user has to
use the usual @code{mpirun} command of the MPI implementation to start StarPU on
the different MPI nodes.

An MPI Insert Task function provides an even more seamless transition to a
distributed application, by automatically issuing all required data transfers
according to the task graph and an application-provided data distribution.

@menu
* The API::
* Simple Example::
* Exchanging User Defined Data Interface::
* MPI Insert Task Utility::
* MPI Collective Operations::
@end menu
@node The API
@section The API

@subsection Compilation

The flags required to compile or link against the MPI layer are then
accessible with the following commands:

@example
% pkg-config --cflags starpumpi-1.0  # options for the compiler
% pkg-config --libs starpumpi-1.0    # options for the linker
@end example

Also pass the @code{--static} option if the application is to be linked statically.
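
For instance, a source file using StarPU-MPI could be compiled, linked and run
along the following lines; the file and binary names are placeholders, and the
compiler wrapper may differ between MPI implementations:

@example
% mpicc $(pkg-config --cflags starpumpi-1.0) -c ring.c
% mpicc ring.o $(pkg-config --libs starpumpi-1.0) -o ring
% mpirun -np 4 ./ring
@end example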
@subsection Initialisation

@deftypefun int starpu_mpi_initialize (void)
Initializes the starpumpi library. This must be called between calling
@code{starpu_init} and other @code{starpu_mpi} functions. This
function does not call @code{MPI_Init}; it should be called beforehand.
@end deftypefun

@deftypefun int starpu_mpi_initialize_extended (int *@var{rank}, int *@var{world_size})
Initializes the starpumpi library. This must be called between calling
@code{starpu_init} and other @code{starpu_mpi} functions.
This function calls @code{MPI_Init}, and should therefore be preferred
to the previous one for MPI implementations which are not thread-safe.
Returns the current MPI node rank and world size.
@end deftypefun

@deftypefun int starpu_mpi_shutdown (void)
Cleans up the starpumpi library. This must be called between calling
@code{starpu_mpi} functions and @code{starpu_shutdown}.
@code{MPI_Finalize} will be called if StarPU-MPI has been initialized
by calling @code{starpu_mpi_initialize_extended}.
@end deftypefun
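
A minimal initialization and shutdown sequence therefore looks as follows.
This is only a sketch: data registration and task submission are omitted, and
it assumes @code{starpu_mpi_initialize_extended} is used, so that
@code{MPI_Init} and @code{MPI_Finalize} are handled by StarPU-MPI.

@cartouche
@smallexample
int main(int argc, char **argv)
@{
    int rank, world_size;

    /* Initialize StarPU first, then the MPI layer */
    starpu_init(NULL);
    starpu_mpi_initialize_extended(&rank, &world_size);

    /* ... register data, submit tasks, communicate ... */

    /* Shut the MPI layer down before StarPU itself; MPI_Finalize is
       called here since the extended initializer was used */
    starpu_mpi_shutdown();
    starpu_shutdown();
    return 0;
@}
@end smallexample
@end cartouche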
@subsection Communication

The standard point to point communications of MPI have been
implemented. The semantics are similar to those of MPI, but adapted to
the DSM provided by StarPU. An MPI request will only be submitted when
the data is available in the main memory of the node submitting the
request.

@deftypefun int starpu_mpi_send (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Performs a standard-mode, blocking send of @var{data_handle} to the
node @var{dest} using the message tag @var{mpi_tag} within the
communicator @var{comm}.
@end deftypefun

@deftypefun int starpu_mpi_recv (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, MPI_Status *@var{status})
Performs a standard-mode, blocking receive in @var{data_handle} from the
node @var{source} using the message tag @var{mpi_tag} within the
communicator @var{comm}.
@end deftypefun
@deftypefun int starpu_mpi_isend (starpu_data_handle_t @var{data_handle}, starpu_mpi_req *@var{req}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @var{mpi_tag} within the
communicator @var{comm}. After the call, the pointer to the request
@var{req} can be used to test the completion of the communication.
@end deftypefun

@deftypefun int starpu_mpi_irecv (starpu_data_handle_t @var{data_handle}, starpu_mpi_req *@var{req}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @var{mpi_tag} within the
communicator @var{comm}. After the call, the pointer to the request
@var{req} can be used to test the completion of the communication.
@end deftypefun

@deftypefun int starpu_mpi_isend_detached (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @var{mpi_tag} within the
communicator @var{comm}. On completion, the @var{callback} function is
called with the argument @var{arg}.
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @var{mpi_tag} within the
communicator @var{comm}. On completion, the @var{callback} function is
called with the argument @var{arg}.
@end deftypefun

@deftypefun int starpu_mpi_wait (starpu_mpi_req *@var{req}, MPI_Status *@var{status})
Returns when the operation identified by request @var{req} is complete.
@end deftypefun
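
For instance, the following sketch exchanges a previously registered handle
between ranks 0 and 1 and then waits for completion. The handle name, the tag
value and the rank layout are assumptions of the example, not part of the API.

@cartouche
@smallexample
/* Sketch: vector_handle and the tag value 42 are assumptions */
starpu_mpi_req req;
MPI_Status status;

if (rank == 0)
    starpu_mpi_isend(vector_handle, &req, 1, 42, MPI_COMM_WORLD);
else if (rank == 1)
    starpu_mpi_irecv(vector_handle, &req, 0, 42, MPI_COMM_WORLD);

if (rank == 0 || rank == 1)
    /* Block until the communication posted above is complete */
    starpu_mpi_wait(&req, &status);
@end smallexample
@end cartouche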
@deftypefun int starpu_mpi_test (starpu_mpi_req *@var{req}, int *@var{flag}, MPI_Status *@var{status})
If the operation identified by @var{req} is complete, sets @var{flag}
to 1. The @var{status} object is set to contain information on the
completed operation.
@end deftypefun

@deftypefun int starpu_mpi_barrier (MPI_Comm @var{comm})
Blocks the caller until all group members of the communicator
@var{comm} have called it.
@end deftypefun

@deftypefun int starpu_mpi_isend_detached_unlock_tag (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @var{mpi_tag} within the
communicator @var{comm}. On completion, @var{tag} is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached_unlock_tag (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @var{mpi_tag} within the
communicator @var{comm}. On completion, @var{tag} is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_isend_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle_t *@var{data_handle}, int *@var{dest}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Posts @var{array_size} standard-mode, non-blocking sends. Each post
sends the n-th data of the array @var{data_handle} to the n-th node of
the array @var{dest} using the n-th message tag of the array
@var{mpi_tag} within the n-th communicator of the array @var{comm}.
On completion of all the requests, @var{tag} is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_irecv_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle_t *@var{data_handle}, int *@var{source}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Posts @var{array_size} non-blocking receives. Each post receives in the
n-th data of the array @var{data_handle} from the n-th
node of the array @var{source} using the n-th message tag of the array
@var{mpi_tag} within the n-th communicator of the array @var{comm}.
On completion of all the requests, @var{tag} is unlocked.
@end deftypefun
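
The unlocked tag can then be used like any other StarPU tag, for instance to
make a task depend on the completion of the communication, or simply to wait
for it. A minimal sketch, where the handle, the MPI tag and the StarPU tag
values are assumptions of the example:

@cartouche
@smallexample
/* Sketch: data_handle, MPI tag 7 and StarPU tag 0x42 are assumptions */
starpu_tag_t tag = 0x42;

starpu_mpi_irecv_detached_unlock_tag(data_handle, /* source */ 0,
                                     /* mpi_tag */ 7, MPI_COMM_WORLD, tag);

/* Tasks may declare a dependency on 'tag' with starpu_tag_declare_deps(),
   or the application can simply block until the receive completes: */
starpu_tag_wait(tag);
@end smallexample
@end cartouche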
@page
@node Simple Example
@section Simple Example

@cartouche
@smallexample
void increment_token(void)
@{
    struct starpu_task *task = starpu_task_create();

    task->cl = &increment_cl;
    task->handles[0] = token_handle;

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche

@cartouche
@smallexample
int main(int argc, char **argv)
@{
    int rank, size;

    starpu_init(NULL);
    starpu_mpi_initialize_extended(&rank, &size);

    starpu_vector_data_register(&token_handle, 0, (uintptr_t)&token, 1, sizeof(unsigned));

    unsigned nloops = NITER;
    unsigned loop;
    unsigned last_loop = nloops - 1;
    unsigned last_rank = size - 1;
@end smallexample
@end cartouche

@cartouche
@smallexample
    for (loop = 0; loop < nloops; loop++) @{
        int tag = loop*size + rank;

        if (loop == 0 && rank == 0)
        @{
            token = 0;
            fprintf(stdout, "Start with token value %d\n", token);
        @}
        else
        @{
            starpu_mpi_irecv_detached(token_handle, (rank+size-1)%size, tag,
                    MPI_COMM_WORLD, NULL, NULL);
        @}

        increment_token();

        if (loop == last_loop && rank == last_rank)
        @{
            starpu_data_acquire(token_handle, STARPU_R);
            fprintf(stdout, "Finished: token value %d\n", token);
            starpu_data_release(token_handle);
        @}
        else
        @{
            starpu_mpi_isend_detached(token_handle, (rank+1)%size, tag+1,
                    MPI_COMM_WORLD, NULL, NULL);
        @}
    @}

    starpu_task_wait_for_all();
@end smallexample
@end cartouche

@cartouche
@smallexample
    starpu_mpi_shutdown();
    starpu_shutdown();

    if (rank == last_rank)
    @{
        fprintf(stderr, "[%d] token = %d == %d * %d ?\n", rank, token, nloops, size);
        STARPU_ASSERT(token == nloops*size);
    @}
@end smallexample
@end cartouche
@page
@node Exchanging User Defined Data Interface
@section Exchanging User Defined Data Interface

New data interfaces defined as explained in @ref{An example
of data interface} can also be used within StarPU-MPI and exchanged
between nodes. Two functions need to be defined through
the type @code{struct starpu_data_interface_ops} (@pxref{Data
Interface API}). The pack function takes a handle and returns a
contiguous memory buffer into which the data to be conveyed to another node
should be copied. The reverse operation is implemented in the unpack
function, which takes a contiguous memory buffer and recreates the data
handle.
@cartouche
@smallexample
static int complex_pack_data(starpu_data_handle_t handle, uint32_t node, void **ptr)
@{
    STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

    struct starpu_complex_interface *complex_interface =
        (struct starpu_complex_interface *) starpu_data_get_interface_on_node(handle, node);

    *ptr = malloc(complex_get_size(handle));
    memcpy(*ptr, complex_interface->real, complex_interface->nx*sizeof(double));
    memcpy(*ptr+complex_interface->nx*sizeof(double), complex_interface->imaginary,
           complex_interface->nx*sizeof(double));

    return 0;
@}
@end smallexample
@end cartouche

@cartouche
@smallexample
static int complex_unpack_data(starpu_data_handle_t handle, uint32_t node, void *ptr)
@{
    STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

    struct starpu_complex_interface *complex_interface =
        (struct starpu_complex_interface *) starpu_data_get_interface_on_node(handle, node);

    memcpy(complex_interface->real, ptr, complex_interface->nx*sizeof(double));
    memcpy(complex_interface->imaginary, ptr+complex_interface->nx*sizeof(double),
           complex_interface->nx*sizeof(double));

    return 0;
@}
@end smallexample
@end cartouche

@cartouche
@smallexample
static struct starpu_data_interface_ops interface_complex_ops =
@{
    ...
    .pack_data = complex_pack_data,
    .unpack_data = complex_unpack_data
@};
@end smallexample
@end cartouche
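
Once the interface declares these operations, handles registered with it can be
exchanged with the communication functions described above, just like the
built-in interfaces. A minimal sketch, where the handle name, the ranks and the
tag value are assumptions of the example:

@cartouche
@smallexample
/* Sketch: complex_handle was registered with the interface above;
   the neighbour ranks and the tag value 10 are assumptions */
MPI_Status status;

if (rank == 0)
    starpu_mpi_send(complex_handle, 1, 10, MPI_COMM_WORLD);
else if (rank == 1)
    starpu_mpi_recv(complex_handle, 0, 10, MPI_COMM_WORLD, &status);
@end smallexample
@end cartouche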
@page
@node MPI Insert Task Utility
@section MPI Insert Task Utility

To save the programmer from having to make all communications explicit, StarPU
provides an "MPI Insert Task Utility". The principle is that the application
decides a distribution of the data over the MPI nodes by allocating it and
notifying StarPU of that decision, i.e. by telling StarPU which MPI node "owns"
which data. It also decides, for each handle, an MPI tag which will be used to
exchange the content of the handle. All MPI nodes then process the whole task
graph, and StarPU automatically determines which node actually executes which
task, and triggers the required MPI transfers.

@deftypefun int starpu_data_set_tag (starpu_data_handle_t @var{handle}, int @var{tag})
Tells StarPU-MPI which MPI tag to use when exchanging the data.
@end deftypefun

@deftypefun int starpu_data_get_tag (starpu_data_handle_t @var{handle})
Returns the MPI tag to be used when exchanging the data.
@end deftypefun

@deftypefun int starpu_data_set_rank (starpu_data_handle_t @var{handle}, int @var{rank})
Tells StarPU-MPI which MPI node "owns" a given data, that is, the node which will
always keep an up-to-date value, and will by default execute tasks which write
to it.
@end deftypefun

@deftypefun int starpu_data_get_rank (starpu_data_handle_t @var{handle})
Returns the last value set by @code{starpu_data_set_rank}.
@end deftypefun
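
A typical registration therefore associates each handle with both an owner rank
and an MPI tag right after registering it. A minimal sketch, where the
ownership rule and the tag numbering are assumptions of the example:

@cartouche
@smallexample
/* Sketch: register a variable, then declare its owner and its MPI tag.
   The owner computation and the tag numbering are assumptions. */
starpu_variable_data_register(&handle, 0, (uintptr_t)&value, sizeof(value));

int owner = i % world_size;     /* application-defined distribution */
starpu_data_set_rank(handle, owner);
starpu_data_set_tag(handle, i); /* tags are typically distinct per handle */
@end smallexample
@end cartouche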
@defmac STARPU_EXECUTE_ON_NODE
This macro is used when calling @code{starpu_mpi_insert_task}, and
must be followed by an integer value which specifies the node on which
to execute the codelet.
@end defmac

@defmac STARPU_EXECUTE_ON_DATA
This macro is used when calling @code{starpu_mpi_insert_task}, and
must be followed by a data handle to specify that the node owning the
given data will execute the codelet.
@end defmac
@deftypefun int starpu_mpi_insert_task (MPI_Comm @var{comm}, struct starpu_codelet *@var{codelet}, ...)
Creates and submits a task corresponding to @var{codelet} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet are of the same types as for the
function @code{starpu_insert_task} defined in @ref{Insert Task
Utility}. The extra argument @code{STARPU_EXECUTE_ON_NODE} followed by an
integer allows specifying the MPI node on which to execute the codelet. It is also
possible to specify that the node owning a specific data will execute
the codelet, by using @code{STARPU_EXECUTE_ON_DATA} followed by a data
handle.

The internal algorithm is as follows:

@enumerate
@item Find out whether we (as an MPI node) are to execute the codelet
because we own the data to be written to. If different nodes own data
to be written to, the argument @code{STARPU_EXECUTE_ON_NODE} or
@code{STARPU_EXECUTE_ON_DATA} has to be used to specify which MPI node will
execute the task.
@item Send and receive data as requested. Nodes owning data which need to be
read by the task send them to the MPI node which will execute it. The
latter receives them.
@item Execute the codelet. This is done by the MPI node selected in the
first step of the algorithm.
@item In the case when different MPI nodes own data to be written to, send the
written data back to their owners.
@end enumerate

The algorithm also includes a cache mechanism which avoids sending the same
data twice to the same MPI node, unless the data has been modified.
@end deftypefun
@deftypefun void starpu_mpi_get_data_on_node (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle}, int @var{node})
Transfers the data @var{data_handle} to MPI node @var{node}, sending it from its
owner if needed. At least the target node and the owner have to call the
function.
@end deftypefun
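
This is typically used to bring a result back to one node for inspection. A
minimal sketch, assuming all nodes execute it collectively and that node 0 is
the one gathering the value; the handle and variable names are assumptions:

@cartouche
@smallexample
/* Sketch: bring a handle to node 0 and print it there.
   result_handle, result and the choice of node 0 are assumptions. */
starpu_mpi_get_data_on_node(MPI_COMM_WORLD, result_handle, 0);

if (rank == 0)
@{
    starpu_data_acquire(result_handle, STARPU_R);
    fprintf(stdout, "result = %u\n", result);
    starpu_data_release(result_handle);
@}
@end smallexample
@end cartouche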
Here is a stencil example showing how to use @code{starpu_mpi_insert_task}. One
first needs to define a distribution function which specifies the
locality of the data. Note that the distribution information needs to
be given to StarPU by calling @code{starpu_data_set_rank}.

@cartouche
@smallexample
/* Returns the MPI node number where data is */
int my_distrib(int x, int y, int nb_nodes) @{
    /* Block distrib */
    return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;

    // /* Other examples useful for other kinds of computations */
    // /* / distrib */
    // return (x+y) % nb_nodes;

    // /* Block cyclic distrib */
    // unsigned side = sqrt(nb_nodes);
    // return x % side + (y % side) * side;
@}
@end smallexample
@end cartouche
Now the data can be registered within StarPU. Data which are not
owned but will be needed for computations can be registered through
the lazy allocation mechanism, i.e. with a @code{home_node} set to -1.
StarPU will automatically allocate the memory when it is used for the
first time.

One can note an optimization here (the @code{else if} test): we only register
data which will be needed by the tasks that we will execute.

@cartouche
@smallexample
unsigned matrix[X][Y];
starpu_data_handle_t data_handles[X][Y];

for(x = 0; x < X; x++) @{
    for (y = 0; y < Y; y++) @{
        int mpi_rank = my_distrib(x, y, size);
        if (mpi_rank == my_rank)
            /* Owning data */
            starpu_variable_data_register(&data_handles[x][y], 0,
                                          (uintptr_t)&(matrix[x][y]), sizeof(unsigned));
        else if (my_rank == my_distrib(x+1, y, size) || my_rank == my_distrib(x-1, y, size)
              || my_rank == my_distrib(x, y+1, size) || my_rank == my_distrib(x, y-1, size))
            /* I don't own that index, but will need it for my computations */
            starpu_variable_data_register(&data_handles[x][y], -1,
                                          (uintptr_t)NULL, sizeof(unsigned));
        else
            /* I know it's useless to allocate anything for this */
            data_handles[x][y] = NULL;
        if (data_handles[x][y])
            starpu_data_set_rank(data_handles[x][y], mpi_rank);
    @}
@}
@end smallexample
@end cartouche
Now @code{starpu_mpi_insert_task()} can be called for the different
steps of the application.

@cartouche
@smallexample
for(loop=0 ; loop<niter; loop++)
    for (x = 1; x < X-1; x++)
        for (y = 1; y < Y-1; y++)
            starpu_mpi_insert_task(MPI_COMM_WORLD, &stencil5_cl,
                                   STARPU_RW, data_handles[x][y],
                                   STARPU_R, data_handles[x-1][y],
                                   STARPU_R, data_handles[x+1][y],
                                   STARPU_R, data_handles[x][y-1],
                                   STARPU_R, data_handles[x][y+1],
                                   0);

starpu_task_wait_for_all();
@end smallexample
@end cartouche
That is, all MPI nodes process the whole task graph, but as mentioned above, for
each task, only the MPI node which owns the data being written to (here,
@code{data_handles[x][y]}) will actually run the task. The other MPI nodes will
automatically send the required data.

@node MPI Collective Operations
@section MPI Collective Operations

@deftypefun int starpu_mpi_scatter_detached (starpu_data_handle_t *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm})
Scatters data among the processes of the communicator based on the ownership of
the data. For each data of the array @var{data_handles}, the
process @var{root} sends the data to the process owning this data.
Processes receiving data must have valid data handles to receive them.
@end deftypefun

@deftypefun int starpu_mpi_gather_detached (starpu_data_handle_t *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm})
Gathers data from the different processes of the communicator onto the
process @var{root}. Each process owning a data handle in the array
@var{data_handles} will send it to the process @var{root}. The
process @var{root} must have valid data handles to receive the data.
@end deftypefun
@page
@cartouche
@smallexample
if (rank == root)
@{
    /* Allocate the vector */
    vector = malloc(nblocks * sizeof(float *));
    for(x=0 ; x<nblocks ; x++)
    @{
        starpu_malloc((void **)&vector[x], block_size*sizeof(float));
    @}
@}

/* Allocate data handles and register data to StarPU */
data_handles = malloc(nblocks*sizeof(starpu_data_handle_t *));
for(x = 0; x < nblocks ; x++)
@{
    int mpi_rank = my_distrib(x, nodes);
    if (rank == root) @{
        starpu_vector_data_register(&data_handles[x], 0, (uintptr_t)vector[x],
                                    block_size, sizeof(float));
    @}
    else if ((mpi_rank == rank) || (rank == mpi_rank+1 || rank == mpi_rank-1)) @{
        /* I own that index, or I will need it for my computations */
        starpu_vector_data_register(&data_handles[x], -1, (uintptr_t)NULL,
                                    block_size, sizeof(float));
    @}
    else @{
        /* I know it's useless to allocate anything for this */
        data_handles[x] = NULL;
    @}
    if (data_handles[x]) @{
        starpu_data_set_rank(data_handles[x], mpi_rank);
    @}
@}

/* Scatter the matrix among the nodes */
starpu_mpi_scatter_detached(data_handles, nblocks, root, MPI_COMM_WORLD);

/* Calculation */
for(x = 0; x < nblocks ; x++) @{
    if (data_handles[x]) @{
        int owner = starpu_data_get_rank(data_handles[x]);
        if (owner == rank) @{
            starpu_insert_task(&cl, STARPU_RW, data_handles[x], 0);
        @}
    @}
@}

/* Gather the matrix back onto the root node */
starpu_mpi_gather_detached(data_handles, nblocks, root, MPI_COMM_WORLD);
@end smallexample
@end cartouche