
@c -*-texinfo-*-
@c This file is part of the StarPU Handbook.
@c Copyright (C) 2009--2011 Universit@'e de Bordeaux 1
@c Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
@c Copyright (C) 2011 Institut National de Recherche en Informatique et Automatique
@c See the file starpu.texi for copying conditions.
The integration of MPI transfers within task parallelism is done in a
very natural way by means of asynchronous interactions between the
application and StarPU. This is implemented in a separate libstarpumpi library
which basically provides "StarPU" equivalents of @code{MPI_*} functions, where
@code{void *} buffers are replaced with @code{starpu_data_handle_t}s, and all
GPU-RAM-NIC transfers are handled efficiently by StarPU-MPI. The user has to
use the usual @code{mpirun} command of the MPI implementation to start StarPU on
the different MPI nodes.

An MPI Insert Task function provides an even more seamless transition to a
distributed application, by automatically issuing all required data transfers
according to the task graph and an application-provided data distribution.

@menu
* The API::
* Simple Example::
* Exchanging User Defined Data Interface::
* MPI Insert Task Utility::
* MPI Collective Operations::
@end menu
@node The API
@section The API

@menu
* Compilation::
* Initialisation::
* Communication::
* Communication cache::
@end menu

@node Compilation
@subsection Compilation

The flags required to compile or link against the MPI layer are
accessible with the following commands:

@example
% pkg-config --cflags starpumpi-1.0  # options for the compiler
% pkg-config --libs starpumpi-1.0    # options for the linker
@end example

Also pass the @code{--static} option if the application is to be linked statically.
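
For instance, a program stored in a hypothetical @code{ring.c} file (the file
name and the use of the MPI compiler wrapper are only illustrative) could be
built along the following lines:

@example
% mpicc ring.c -o ring $(pkg-config --cflags starpumpi-1.0) \
               $(pkg-config --libs starpumpi-1.0)
@end example
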
@node Initialisation
@subsection Initialisation

@deftypefun int starpu_mpi_init (int *@var{argc}, char ***@var{argv}, int @var{initialize_mpi})
Initializes the starpumpi library. @var{initialize_mpi} indicates whether
MPI should be initialized by StarPU. If the value is not @code{0},
MPI will be initialized by calling @code{MPI_Init_thread(argc, argv,
MPI_THREAD_SERIALIZED, ...)}.
@end deftypefun
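
A minimal initialization and termination sequence could thus look as follows.
This is only a sketch, not taken from an actual StarPU example; it lets
StarPU-MPI initialize MPI itself by passing a non-zero last argument:

@cartouche
@smallexample
int main(int argc, char **argv)
@{
    int ret;

    /* Initialize StarPU itself */
    ret = starpu_init(NULL);
    if (ret != 0) return 1;

    /* Let StarPU-MPI initialize MPI (non-zero third argument) */
    ret = starpu_mpi_init(&argc, &argv, 1);
    if (ret != 0) return 1;

    /* ... register data, submit tasks and communications here ... */

    /* starpu_mpi_shutdown() must come before starpu_shutdown() */
    starpu_mpi_shutdown();
    starpu_shutdown();
    return 0;
@}
@end smallexample
@end cartouche
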
@deftypefun int starpu_mpi_initialize (void)
This function is deprecated. One should instead use the function
@code{starpu_mpi_init()} defined above.
This function does not call @code{MPI_Init()}; the application should do so
beforehand.
@end deftypefun

@deftypefun int starpu_mpi_initialize_extended (int *@var{rank}, int *@var{world_size})
This function is deprecated. One should instead use the function
@code{starpu_mpi_init()} defined above.
MPI will be initialized by starpumpi by calling @code{MPI_Init_thread(argc, argv,
MPI_THREAD_SERIALIZED, ...)}.
@end deftypefun

@deftypefun int starpu_mpi_shutdown (void)
Cleans up the starpumpi library. This must be called after the last use of
@code{starpu_mpi} functions and before @code{starpu_shutdown()}.
@code{MPI_Finalize()} will be called if StarPU-MPI has been initialized
by @code{starpu_mpi_init()}.
@end deftypefun
@deftypefun void starpu_mpi_comm_amounts_retrieve (size_t *@var{comm_amounts})
Retrieves the current amount of communications from the current node into
the array @var{comm_amounts}, which must have a size greater than or equal
to the world size. Communication statistics must be enabled
(@pxref{STARPU_COMM_STATS}).
@end deftypefun
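
As a sketch, assuming communication statistics have been enabled and using
plain MPI to query the world size, the per-node amounts could be retrieved and
printed as follows (the unit of the reported values is not detailed here):

@cartouche
@smallexample
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);

/* One entry per node in the communicator */
size_t *comm_amounts = calloc(world_size, sizeof(size_t));
starpu_mpi_comm_amounts_retrieve(comm_amounts);

int i;
for (i = 0; i < world_size; i++)
    fprintf(stderr, "communication amount with node %d: %zu\n",
            i, comm_amounts[i]);
free(comm_amounts);
@end smallexample
@end cartouche
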
@node Communication
@subsection Communication

The standard point-to-point communications of MPI have been
implemented. The semantics are similar to those of MPI, but adapted to
the DSM provided by StarPU. An MPI request will only be submitted when
the data is available in the main memory of the node submitting the
request.

@deftypefun int starpu_mpi_send (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Performs a standard-mode, blocking send of @var{data_handle} to the
node @var{dest} using the message tag @var{mpi_tag} within the
communicator @var{comm}.
@end deftypefun

@deftypefun int starpu_mpi_recv (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, MPI_Status *@var{status})
Performs a standard-mode, blocking receive in @var{data_handle} from the
node @var{source} using the message tag @var{mpi_tag} within the
communicator @var{comm}.
@end deftypefun

@deftypefun int starpu_mpi_isend (starpu_data_handle_t @var{data_handle}, starpu_mpi_req *@var{req}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @var{mpi_tag} within the
communicator @var{comm}. After the call, the pointer to the request
@var{req} can be used to test the completion of the communication.
@end deftypefun

@deftypefun int starpu_mpi_irecv (starpu_data_handle_t @var{data_handle}, starpu_mpi_req *@var{req}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @var{mpi_tag} within the
communicator @var{comm}. After the call, the pointer to the request
@var{req} can be used to test the completion of the communication.
@end deftypefun

@deftypefun int starpu_mpi_isend_detached (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @var{mpi_tag} within the
communicator @var{comm}. On completion, the @var{callback} function is
called with the argument @var{arg}.
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @var{mpi_tag} within the
communicator @var{comm}. On completion, the @var{callback} function is
called with the argument @var{arg}.
@end deftypefun
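
The detached calls return immediately; the callback, if any, is invoked once
the communication has completed. A minimal sketch, with an illustrative
destination node, tag value and callback, could be:

@cartouche
@smallexample
/* Hypothetical completion callback: simply report the tag that finished */
static void send_done(void *arg)
@{
    fprintf(stderr, "send with tag %d completed\n", *(int *) arg);
@}

static int tag = 42;    /* illustrative tag value */

void send_token(starpu_data_handle_t data_handle)
@{
    /* Post the send to node 1 and return immediately; send_done() will be
     * called with &tag once the communication is over */
    starpu_mpi_isend_detached(data_handle, 1, tag, MPI_COMM_WORLD,
                              send_done, &tag);
@}
@end smallexample
@end cartouche
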
@deftypefun int starpu_mpi_wait (starpu_mpi_req *@var{req}, MPI_Status *@var{status})
Returns when the operation identified by request @var{req} is complete.
@end deftypefun

@deftypefun int starpu_mpi_test (starpu_mpi_req *@var{req}, int *@var{flag}, MPI_Status *@var{status})
If the operation identified by @var{req} is complete, sets @var{flag}
to 1. The @var{status} object is set to contain information on the
completed operation.
@end deftypefun
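
The request-based variants can be combined with @code{starpu_mpi_wait} as in
the following sketch (the destination node and tag values are illustrative):

@cartouche
@smallexample
starpu_mpi_req req;
MPI_Status status;

/* Post a non-blocking send of the handle to node 1 with tag 42 */
starpu_mpi_isend(data_handle, &req, 1, 42, MPI_COMM_WORLD);

/* ... some independent work can be done here ... */

/* Block until the communication is complete */
starpu_mpi_wait(&req, &status);
@end smallexample
@end cartouche
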
@deftypefun int starpu_mpi_barrier (MPI_Comm @var{comm})
Blocks the caller until all group members of the communicator
@var{comm} have called it.
@end deftypefun

@deftypefun int starpu_mpi_isend_detached_unlock_tag (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @var{mpi_tag} within the
communicator @var{comm}. On completion, @var{tag} is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached_unlock_tag (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @var{mpi_tag} within the
communicator @var{comm}. On completion, @var{tag} is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_isend_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle_t *@var{data_handle}, int *@var{dest}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Posts @var{array_size} standard-mode, non-blocking sends. The n-th post
sends the n-th data of the array @var{data_handle} to the n-th node of
the array @var{dest} using the n-th message tag of the array @var{mpi_tag}
within the n-th communicator of the array @var{comm}.
On completion of all the requests, @var{tag} is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_irecv_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle_t *@var{data_handle}, int *@var{source}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Posts @var{array_size} non-blocking receives. The n-th post receives in the
n-th data of the array @var{data_handle} from the n-th
node of the array @var{source} using the n-th message tag of the array
@var{mpi_tag} within the n-th communicator of the array @var{comm}.
On completion of all the requests, @var{tag} is unlocked.
@end deftypefun

@node Communication cache
@subsection Communication cache

@deftypefun void starpu_mpi_cache_flush (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle})
Clears the send and receive communication cache for the data
@var{data_handle}. The function has to be called synchronously by all
the MPI nodes.
The function does nothing if the cache mechanism is disabled (@pxref{STARPU_MPI_CACHE}).
@end deftypefun

@deftypefun void starpu_mpi_cache_flush_all_data (MPI_Comm @var{comm})
Clears the send and receive communication cache for all data. The
function has to be called synchronously by all the MPI nodes.
The function does nothing if the cache mechanism is disabled (@pxref{STARPU_MPI_CACHE}).
@end deftypefun
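
For instance, once a piece of data (here an already registered
@code{data_handle}, assumed to exist) is known not to be exchanged anymore,
its cached copies can be discarded; all MPI nodes have to issue the call:

@cartouche
@smallexample
/* Called synchronously on every MPI node once data_handle will no longer
 * be sent again; the entries kept by the communication cache are dropped */
starpu_mpi_cache_flush(MPI_COMM_WORLD, data_handle);
@end smallexample
@end cartouche
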
@page
@node Simple Example
@section Simple Example

@cartouche
@smallexample
void increment_token(void)
@{
    struct starpu_task *task = starpu_task_create();

    task->cl = &increment_cl;
    task->handles[0] = token_handle;

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche

@cartouche
@smallexample
int main(int argc, char **argv)
@{
    int rank, size;

    starpu_init(NULL);
    starpu_mpi_initialize_extended(&rank, &size);

    starpu_vector_data_register(&token_handle, 0, (uintptr_t)&token, 1,
                                sizeof(unsigned));

    unsigned nloops = NITER;
    unsigned loop;

    unsigned last_loop = nloops - 1;
    unsigned last_rank = size - 1;
@end smallexample
@end cartouche

@cartouche
@smallexample
    for (loop = 0; loop < nloops; loop++) @{
        int tag = loop*size + rank;

        if (loop == 0 && rank == 0)
        @{
            token = 0;
            fprintf(stdout, "Start with token value %d\n", token);
        @}
        else
        @{
            starpu_mpi_irecv_detached(token_handle, (rank+size-1)%size, tag,
                                      MPI_COMM_WORLD, NULL, NULL);
        @}

        increment_token();

        if (loop == last_loop && rank == last_rank)
        @{
            starpu_data_acquire(token_handle, STARPU_R);
            fprintf(stdout, "Finished: token value %d\n", token);
            starpu_data_release(token_handle);
        @}
        else
        @{
            starpu_mpi_isend_detached(token_handle, (rank+1)%size, tag+1,
                                      MPI_COMM_WORLD, NULL, NULL);
        @}
    @}

    starpu_task_wait_for_all();
@end smallexample
@end cartouche

@cartouche
@smallexample
    starpu_mpi_shutdown();
    starpu_shutdown();

    if (rank == last_rank)
    @{
        fprintf(stderr, "[%d] token = %d == %d * %d ?\n", rank, token, nloops, size);
        STARPU_ASSERT(token == nloops*size);
    @}
@end smallexample
@end cartouche
@page
@node Exchanging User Defined Data Interface
@section Exchanging User Defined Data Interface

New data interfaces defined as explained in @ref{An example
of data interface} can also be used within StarPU-MPI and exchanged
between nodes. Two functions need to be defined through
the type @code{struct starpu_data_interface_ops} (@pxref{Data
Interface API}). The pack function takes a handle and returns a
contiguous memory buffer, along with its size, into which the data to be
conveyed to another node has been copied. The reverse operation is
implemented in the unpack function, which takes a contiguous memory
buffer and recreates the data handle.
@cartouche
@smallexample
static int complex_pack_data(starpu_data_handle_t handle, uint32_t node, void **ptr, size_t *count)
@{
    STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

    struct starpu_complex_interface *complex_interface =
        (struct starpu_complex_interface *) starpu_data_get_interface_on_node(handle, node);

    *count = complex_get_size(handle);
    *ptr = malloc(*count);
    memcpy(*ptr, complex_interface->real, complex_interface->nx*sizeof(double));
    memcpy((char *)*ptr + complex_interface->nx*sizeof(double), complex_interface->imaginary,
           complex_interface->nx*sizeof(double));

    return 0;
@}
@end smallexample
@end cartouche

@cartouche
@smallexample
static int complex_unpack_data(starpu_data_handle_t handle, uint32_t node, void *ptr, size_t count)
@{
    STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

    struct starpu_complex_interface *complex_interface =
        (struct starpu_complex_interface *) starpu_data_get_interface_on_node(handle, node);

    memcpy(complex_interface->real, ptr, complex_interface->nx*sizeof(double));
    memcpy(complex_interface->imaginary, (char *)ptr + complex_interface->nx*sizeof(double),
           complex_interface->nx*sizeof(double));

    return 0;
@}
@end smallexample
@end cartouche

@cartouche
@smallexample
static struct starpu_data_interface_ops interface_complex_ops =
@{
    ...
    .pack_data = complex_pack_data,
    .unpack_data = complex_unpack_data
@};
@end smallexample
@end cartouche
@page
@node MPI Insert Task Utility
@section MPI Insert Task Utility

To save the programmer from having to make all communications explicit, StarPU
provides an "MPI Insert Task Utility". The principle is that the application
decides a distribution of the data over the MPI nodes by allocating it and
notifying StarPU of that decision, i.e. it tells StarPU which MPI node "owns"
which data. It also decides, for each handle, an MPI tag which will be used to
exchange the content of the handle. All MPI nodes then process the whole task
graph, and StarPU automatically determines which node actually executes which
task, and triggers the required MPI transfers.
@deftypefun int starpu_data_set_tag (starpu_data_handle_t @var{handle}, int @var{tag})
Tells StarPU-MPI which MPI tag to use when exchanging the data.
@end deftypefun

@deftypefun int starpu_data_get_tag (starpu_data_handle_t @var{handle})
Returns the MPI tag to be used when exchanging the data.
@end deftypefun

@deftypefun int starpu_data_set_rank (starpu_data_handle_t @var{handle}, int @var{rank})
Tells StarPU-MPI which MPI node "owns" a given data, that is, the node which will
always keep an up-to-date value, and will by default execute tasks which write
to it.
@end deftypefun

@deftypefun int starpu_data_get_rank (starpu_data_handle_t @var{handle})
Returns the last value set by @code{starpu_data_set_rank}.
@end deftypefun
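
For instance, registering a variable and declaring both its owner and the tag
used to exchange it could look like the following sketch (the rank and tag
values are illustrative):

@cartouche
@smallexample
unsigned value;
starpu_data_handle_t handle;

starpu_variable_data_register(&handle, 0, (uintptr_t)&value, sizeof(value));

/* Node 2 owns this piece of data; tag 10 will be used to exchange it */
starpu_data_set_rank(handle, 2);
starpu_data_set_tag(handle, 10);
@end smallexample
@end cartouche
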
@defmac STARPU_EXECUTE_ON_NODE
This macro is used when calling @code{starpu_mpi_insert_task}, and
must be followed by an integer value which specifies the node on which
to execute the codelet.
@end defmac

@defmac STARPU_EXECUTE_ON_DATA
This macro is used when calling @code{starpu_mpi_insert_task}, and
must be followed by a data handle to specify that the node owning the
given data will execute the codelet.
@end defmac
@deftypefun int starpu_mpi_insert_task (MPI_Comm @var{comm}, struct starpu_codelet *@var{codelet}, ...)
Creates and submits a task corresponding to @var{codelet} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet are of the same types as for the
function @code{starpu_insert_task} defined in @ref{Insert Task
Utility}. The extra argument @code{STARPU_EXECUTE_ON_NODE} followed by an
integer allows one to specify the MPI node on which to execute the codelet.
It is also possible to specify that the node owning a specific data will
execute the codelet, by using @code{STARPU_EXECUTE_ON_DATA} followed by a
data handle.

The internal algorithm is as follows:
@enumerate
@item Find out whether we (as an MPI node) are to execute the codelet
because we own the data to be written to. If different nodes own data
to be written to, the argument @code{STARPU_EXECUTE_ON_NODE} or
@code{STARPU_EXECUTE_ON_DATA} has to be used to specify which MPI node will
execute the task.
@item Send and receive data as requested. Nodes owning data which needs to be
read by the task send it to the MPI node which will execute it. The
latter receives it.
@item Execute the codelet. This is done by the MPI node selected in the
first step of the algorithm.
@item In the case where different MPI nodes own data to be written to, send
the written data back to their owners.
@end enumerate

The algorithm also includes a communication cache mechanism that
avoids sending data twice to the same MPI node, unless the data
has been modified. The cache can be disabled
(@pxref{STARPU_MPI_CACHE}).
@end deftypefun
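
As a sketch, assuming a codelet @code{cl} and an already registered handle
@code{handle} (both illustrative), the default owner-executes rule can be
overridden as follows:

@cartouche
@smallexample
/* Default rule: the owner of handle executes the task */
starpu_mpi_insert_task(MPI_COMM_WORLD, &cl, STARPU_RW, handle, 0);

/* Force execution on MPI node 0, whatever the owner of handle is */
starpu_mpi_insert_task(MPI_COMM_WORLD, &cl,
                       STARPU_RW, handle,
                       STARPU_EXECUTE_ON_NODE, 0,
                       0);
@end smallexample
@end cartouche
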
@deftypefun void starpu_mpi_get_data_on_node (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle}, int @var{node})
Transfers data @var{data_handle} to MPI node @var{node}, sending it from its
owner if needed. At least the target node and the owner have to call the
function.
@end deftypefun
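
For instance, the following sketch brings a handle back to node 0, e.g. to
read its content there; @code{rank} and @code{data_handle} are assumed to be
defined as in the surrounding examples, and here every node issues the call,
which satisfies the requirement that at least the owner and the target
participate:

@cartouche
@smallexample
/* Bring the up-to-date value of data_handle to node 0 */
starpu_mpi_get_data_on_node(MPI_COMM_WORLD, data_handle, 0);

if (rank == 0)
@{
    starpu_data_acquire(data_handle, STARPU_R);
    /* ... read the local copy, e.g. print it ... */
    starpu_data_release(data_handle);
@}
@end smallexample
@end cartouche
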
Here is a stencil example showing how to use @code{starpu_mpi_insert_task}. One
first needs to define a distribution function which specifies the
locality of the data. Note that the distribution information needs to
be given to StarPU by calling @code{starpu_data_set_rank}.
@cartouche
@smallexample
/* Returns the MPI node number where data is */
int my_distrib(int x, int y, int nb_nodes) @{
    /* Block distrib */
    return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;

    // /* Other examples useful for other kinds of computations */
    // /* / distrib */
    // return (x+y) % nb_nodes;

    // /* Block cyclic distrib */
    // unsigned side = sqrt(nb_nodes);
    // return x % side + (y % side) * side;
@}
@end smallexample
@end cartouche
Now the data can be registered within StarPU. Data which are not
owned but will be needed for computations can be registered through
the lazy allocation mechanism, i.e. with a @code{home_node} set to -1.
StarPU will automatically allocate the memory when it is used for the
first time.

One can note an optimization here (the @code{else if} test): we only register
data which will be needed by the tasks that we will execute.
@cartouche
@smallexample
unsigned matrix[X][Y];
starpu_data_handle_t data_handles[X][Y];

for(x = 0; x < X; x++) @{
    for (y = 0; y < Y; y++) @{
        int mpi_rank = my_distrib(x, y, size);
        if (mpi_rank == my_rank)
            /* Owning data */
            starpu_variable_data_register(&data_handles[x][y], 0,
                                          (uintptr_t)&(matrix[x][y]), sizeof(unsigned));
        else if (my_rank == my_distrib(x+1, y, size) || my_rank == my_distrib(x-1, y, size)
              || my_rank == my_distrib(x, y+1, size) || my_rank == my_distrib(x, y-1, size))
            /* I don't own that index, but will need it for my computations */
            starpu_variable_data_register(&data_handles[x][y], -1,
                                          (uintptr_t)NULL, sizeof(unsigned));
        else
            /* I know it's useless to allocate anything for this */
            data_handles[x][y] = NULL;

        if (data_handles[x][y])
            starpu_data_set_rank(data_handles[x][y], mpi_rank);
    @}
@}
@end smallexample
@end cartouche
Now @code{starpu_mpi_insert_task()} can be called for the different
steps of the application.

@cartouche
@smallexample
for(loop=0 ; loop<niter; loop++)
    for (x = 1; x < X-1; x++)
        for (y = 1; y < Y-1; y++)
            starpu_mpi_insert_task(MPI_COMM_WORLD, &stencil5_cl,
                                   STARPU_RW, data_handles[x][y],
                                   STARPU_R, data_handles[x-1][y],
                                   STARPU_R, data_handles[x+1][y],
                                   STARPU_R, data_handles[x][y-1],
                                   STARPU_R, data_handles[x][y+1],
                                   0);

starpu_task_wait_for_all();
@end smallexample
@end cartouche
I.e. all MPI nodes process the whole task graph, but as mentioned above, for
each task, only the MPI node which owns the data being written to (here,
@code{data_handles[x][y]}) will actually run the task. The other MPI nodes will
automatically send the required data.

Submitting every task on every node can become a concern as the number of
nodes grows. To avoid this, the application can prune the task-submission
loops according to the data distribution, so as to only submit tasks on the
nodes which have to care about them (either to execute them, or to send the
required data), as sketched below.
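
A possible pruning of the stencil loop above, reusing the @code{my_distrib}
function, could test which nodes are concerned before submitting; this is only
a sketch of the idea:

@cartouche
@smallexample
for (x = 1; x < X-1; x++)
    for (y = 1; y < Y-1; y++)
    @{
        /* Submit only if we execute the task (we own the written data) or
         * if we own one of the pieces of data it reads and thus have to
         * send it; other nodes can skip this task entirely. */
        if (my_rank == my_distrib(x, y, size)
         || my_rank == my_distrib(x-1, y, size) || my_rank == my_distrib(x+1, y, size)
         || my_rank == my_distrib(x, y-1, size) || my_rank == my_distrib(x, y+1, size))
            starpu_mpi_insert_task(MPI_COMM_WORLD, &stencil5_cl,
                                   STARPU_RW, data_handles[x][y],
                                   STARPU_R, data_handles[x-1][y],
                                   STARPU_R, data_handles[x+1][y],
                                   STARPU_R, data_handles[x][y-1],
                                   STARPU_R, data_handles[x][y+1],
                                   0);
    @}
@end smallexample
@end cartouche
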
@node MPI Collective Operations
@section MPI Collective Operations

@deftypefun int starpu_mpi_scatter_detached (starpu_data_handle_t *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm}, {void (*}@var{scallback})(void *), {void *}@var{sarg}, {void (*}@var{rcallback})(void *), {void *}@var{rarg})
Scatters data among the processes of the communicator based on the ownership
of the data. For each data of the array @var{data_handles}, the
process @var{root} sends the data to the process owning this data.
Processes receiving data must have valid data handles to receive them.
On completion of the collective communication, the @var{scallback} function is
called with the argument @var{sarg} on the process @var{root}, and the @var{rcallback} function is
called with the argument @var{rarg} on any other process.
@end deftypefun

@deftypefun int starpu_mpi_gather_detached (starpu_data_handle_t *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm}, {void (*}@var{scallback})(void *), {void *}@var{sarg}, {void (*}@var{rcallback})(void *), {void *}@var{rarg})
Gathers data from the different processes of the communicator onto the
process @var{root}. Each process owning a data handle in the array
@var{data_handles} will send it to the process @var{root}. The
process @var{root} must have valid data handles to receive the data.
On completion of the collective communication, the @var{rcallback} function is
called with the argument @var{rarg} on the process @var{root}, and the @var{scallback} function is
called with the argument @var{sarg} on any other process.
@end deftypefun
  451. @page
  452. @cartouche
  453. @smallexample
  454. if (rank == root)
  455. @{
  456. /* Allocate the vector */
  457. vector = malloc(nblocks * sizeof(float *));
  458. for(x=0 ; x<nblocks ; x++)
  459. @{
  460. starpu_malloc((void **)&vector[x], block_size*sizeof(float));
  461. @}
  462. @}
  463. /* Allocate data handles and register data to StarPU */
  464. data_handles = malloc(nblocks*sizeof(starpu_data_handle_t *));
  465. for(x = 0; x < nblocks ; x++)
  466. @{
  467. int mpi_rank = my_distrib(x, nodes);
  468. if (rank == root) @{
  469. starpu_vector_data_register(&data_handles[x], 0, (uintptr_t)vector[x],
  470. blocks_size, sizeof(float));
  471. @}
  472. else if ((mpi_rank == rank) || ((rank == mpi_rank+1 || rank == mpi_rank-1))) @{
  473. /* I own that index, or i will need it for my computations */
  474. starpu_vector_data_register(&data_handles[x], -1, (uintptr_t)NULL,
  475. block_size, sizeof(float));
  476. @}
  477. else @{
  478. /* I know it's useless to allocate anything for this */
  479. data_handles[x] = NULL;
  480. @}
  481. if (data_handles[x]) @{
  482. starpu_data_set_rank(data_handles[x], mpi_rank);
  483. @}
  484. @}
  485. /* Scatter the matrix among the nodes */
  486. starpu_mpi_scatter_detached(data_handles, nblocks, root, MPI_COMM_WORLD);
  487. /* Calculation */
  488. for(x = 0; x < nblocks ; x++) @{
  489. if (data_handles[x]) @{
  490. int owner = starpu_data_get_rank(data_handles[x]);
  491. if (owner == rank) @{
  492. starpu_insert_task(&cl, STARPU_RW, data_handles[x], 0);
  493. @}
  494. @}
  495. @}
  496. /* Gather the matrix on main node */
  497. starpu_mpi_gather_detached(data_handles, nblocks, 0, MPI_COMM_WORLD);
  498. @end smallexample
  499. @end cartouche