starpu_mpi.h
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #ifndef __STARPU_MPI_H__
  17. #define __STARPU_MPI_H__
  18. #include <starpu.h>
  19. #if defined(STARPU_USE_MPI)
  20. #include <mpi.h>
  21. #include <stdint.h>
  22. //TODO: #if defined(STARPU_USE_MPI_FT)
  23. #include <starpu_mpi_ft.h>
  24. //#endif
  25. #ifdef __cplusplus
  26. extern "C"
  27. {
  28. #endif
  29. /**
  30. @defgroup API_MPI_Support MPI Support
  31. @{
  32. */
  33. /**
  34. @name Initialisation
  35. @{
  36. */
  37. /**
  38. Initialize the StarPU library with the given \p conf, and
  39. initialize the StarPU-MPI library with the given MPI communicator
  40. \p comm. \p initialize_mpi indicates if MPI should be initialized
  41. or not by StarPU. StarPU-MPI takes the opportunity to modify \p
  42. conf to either reserve a core for its MPI thread (by default), or
  43. execute MPI calls on the CPU driver 0 between tasks.
  44. */
  45. int starpu_mpi_init_conf(int *argc, char ***argv, int initialize_mpi, MPI_Comm comm, struct starpu_conf *conf);
  46. /**
  47. Same as starpu_mpi_init_conf(), except that this does not
  48. initialize the StarPU library. The caller thus has to call
  49. starpu_init() before this.
  50. */
  51. int starpu_mpi_init_comm(int *argc, char ***argv, int initialize_mpi, MPI_Comm comm);
  52. /**
  53. Call starpu_mpi_init_comm() with the MPI communicator \c MPI_COMM_WORLD.
  54. */
  55. int starpu_mpi_init(int *argc, char ***argv, int initialize_mpi);
  56. /**
  57. @deprecated
  58. This function has been made deprecated. One should use instead the
  59. function starpu_mpi_init(). This function does not call \c
  60. MPI_Init(), it should be called beforehand.
  61. */
  62. int starpu_mpi_initialize(void) STARPU_DEPRECATED;
  63. /**
  64. @deprecated
  65. This function has been made deprecated. One should use instead the
  66. function starpu_mpi_init(). MPI will be initialized by starpumpi by
  67. calling <c>MPI_Init_Thread(argc, argv, MPI_THREAD_SERIALIZED,
  68. ...)</c>.
  69. */
  70. int starpu_mpi_initialize_extended(int *rank, int *world_size) STARPU_DEPRECATED;
  71. /**
  72. Clean the starpumpi library. This must be called after calling any
  73. \c starpu_mpi functions and before the call to starpu_shutdown(),
  74. if any. \c MPI_Finalize() will be called if StarPU-MPI has been
  75. initialized by starpu_mpi_init().
  76. */
  77. int starpu_mpi_shutdown(void);
  78. /**
  79. Retrieve the current amount of communications from the current node
  80. in the array \p comm_amounts which must have a size greater or
  81. equal to the world size. Communications statistics must be enabled
  82. (see \ref STARPU_COMM_STATS).
  83. */
  84. void starpu_mpi_comm_amounts_retrieve(size_t *comm_amounts);
  85. /**
  86. Return in \p size the size of the communicator \p comm
  87. */
  88. int starpu_mpi_comm_size(MPI_Comm comm, int *size);
  89. /**
  90. Return in \p rank the rank of the calling process in the
  91. communicator \p comm
  92. */
  93. int starpu_mpi_comm_rank(MPI_Comm comm, int *rank);
  94. /**
  95. Return the rank of the calling process in the communicator \c
  96. MPI_COMM_WORLD
  97. */
  98. int starpu_mpi_world_rank(void);
  99. /**
  100. Return the size of the communicator \c MPI_COMM_WORLD
  101. */
  102. int starpu_mpi_world_size(void);
  103. /**
  104. When given to the function starpu_mpi_comm_get_attr(), retrieve the
  105. value for the upper bound for tag value.
  106. */
  107. #define STARPU_MPI_TAG_UB MPI_TAG_UB
  108. /**
  109. Retrieve an attribute value by key, similarly to the MPI function
  110. \c MPI_Comm_get_attr(), except that the value is a pointer to
  111. int64_t instead of int. If an attribute is attached on \p comm to
  112. \p keyval, then the call returns \p flag equal to \c 1, and the
  113. attribute value in \p attribute_val. Otherwise, \p flag is set to
  114. \c 0.
  115. */
  116. int starpu_mpi_comm_get_attr(MPI_Comm comm, int keyval, void *attribute_val, int *flag);
  117. int starpu_mpi_get_communication_tag(void);
  118. void starpu_mpi_set_communication_tag(int tag);
  119. /** @} */
  120. /**
  121. @name Communication
  122. \anchor MPIPtpCommunication
  123. @{
  124. */
  125. /**
  126. Opaque type for communication request
  127. */
  128. typedef void *starpu_mpi_req;
  129. /**
  130. Type of the message tag.
  131. */
  132. typedef int64_t starpu_mpi_tag_t;
  133. /**
  134. Post a standard-mode, non blocking send of \p data_handle to the
  135. node \p dest using the message tag \p data_tag within the
  136. communicator \p comm. After the call, the pointer to the request \p
  137. req can be used to test or to wait for the completion of the
  138. communication.
  139. */
  140. int starpu_mpi_isend(starpu_data_handle_t data_handle, starpu_mpi_req *req, int dest, starpu_mpi_tag_t data_tag, MPI_Comm comm);
  141. /**
  142. Similar to starpu_mpi_isend(), but take a priority \p prio.
  143. */
  144. int starpu_mpi_isend_prio(starpu_data_handle_t data_handle, starpu_mpi_req *req, int dest, starpu_mpi_tag_t data_tag, int prio, MPI_Comm comm);
  145. /**
  146. Post a nonblocking receive in \p data_handle from the node \p
  147. source using the message tag \p data_tag within the communicator \p
  148. comm. After the call, the pointer to the request \p req can be used
  149. to test or to wait for the completion of the communication.
  150. */
  151. int starpu_mpi_irecv(starpu_data_handle_t data_handle, starpu_mpi_req *req, int source, starpu_mpi_tag_t data_tag, MPI_Comm comm);
  152. /**
  153. Perform a standard-mode, blocking send of \p data_handle to the
  154. node \p dest using the message tag \p data_tag within the
  155. communicator \p comm.
  156. */
  157. int starpu_mpi_send(starpu_data_handle_t data_handle, int dest, starpu_mpi_tag_t data_tag, MPI_Comm comm);
  158. /**
  159. Similar to starpu_mpi_send(), but take a priority \p prio.
  160. */
  161. int starpu_mpi_send_prio(starpu_data_handle_t data_handle, int dest, starpu_mpi_tag_t data_tag, int prio, MPI_Comm comm);
  162. /**
  163. Perform a standard-mode, blocking receive in \p data_handle from
  164. the node \p source using the message tag \p data_tag within the
  165. communicator \p comm.
  166. */
  167. int starpu_mpi_recv(starpu_data_handle_t data_handle, int source, starpu_mpi_tag_t data_tag, MPI_Comm comm, MPI_Status *status);
  168. /**
  169. Post a standard-mode, non blocking send of \p data_handle to the
  170. node \p dest using the message tag \p data_tag within the
  171. communicator \p comm. On completion, the \p callback function is
  172. called with the argument \p arg.
  173. Similarly to the pthread detached functionality, when a detached
  174. communication completes, its resources are automatically released
  175. back to the system, there is no need to test or to wait for the
  176. completion of the request.
  177. */
  178. int starpu_mpi_isend_detached(starpu_data_handle_t data_handle, int dest, starpu_mpi_tag_t data_tag, MPI_Comm comm, void (*callback)(void *), void *arg);
  179. /**
  180. Similar to starpu_mpi_isend_detached(), but take a priority \p prio.
  181. */
  182. int starpu_mpi_isend_detached_prio(starpu_data_handle_t data_handle, int dest, starpu_mpi_tag_t data_tag, int prio, MPI_Comm comm, void (*callback)(void *), void *arg);
  183. /**
  184. Post a nonblocking receive in \p data_handle from the node \p
  185. source using the message tag \p data_tag within the communicator \p
  186. comm. On completion, the \p callback function is called with the
  187. argument \p arg.
  188. Similarly to the pthread detached functionality, when a detached
  189. communication completes, its resources are automatically released
  190. back to the system, there is no need to test or to wait for the
  191. completion of the request.
  192. */
  193. int starpu_mpi_irecv_detached(starpu_data_handle_t data_handle, int source, starpu_mpi_tag_t data_tag, MPI_Comm comm, void (*callback)(void *), void *arg);
  194. /**
  195. Post a nonblocking receive in \p data_handle from the node \p
  196. source using the message tag \p data_tag within the communicator \p
  197. comm. On completion, the \p callback function is called with the
  198. argument \p arg.
  199. The parameter \p sequential_consistency allows to enable or disable
  200. the sequential consistency for \p data handle (sequential
  201. consistency will be enabled or disabled based on the value of the
  202. parameter \p sequential_consistency and the value of the sequential
  203. consistency defined for \p data_handle).
  204. Similarly to the pthread detached functionality, when a detached
  205. communication completes, its resources are automatically released
  206. back to the system, there is no need to test or to wait for the
  207. completion of the request.
  208. */
  209. int starpu_mpi_irecv_detached_sequential_consistency(starpu_data_handle_t data_handle, int source, starpu_mpi_tag_t data_tag, MPI_Comm comm, void (*callback)(void *), void *arg, int sequential_consistency);
  210. /**
  211. Perform a synchronous-mode, non-blocking send of \p data_handle to
  212. the node \p dest using the message tag \p data_tag within the
  213. communicator \p comm.
  214. */
  215. int starpu_mpi_issend(starpu_data_handle_t data_handle, starpu_mpi_req *req, int dest, starpu_mpi_tag_t data_tag, MPI_Comm comm);
  216. /**
  217. Similar to starpu_mpi_issend(), but take a priority \p prio.
  218. */
  219. int starpu_mpi_issend_prio(starpu_data_handle_t data_handle, starpu_mpi_req *req, int dest, starpu_mpi_tag_t data_tag, int prio, MPI_Comm comm);
  220. /**
  221. Perform a synchronous-mode, non-blocking send of \p data_handle to
  222. the node \p dest using the message tag \p data_tag within the
  223. communicator \p comm. On completion, the \p callback function is
  224. called with the argument \p arg.
  225. Similarly to the pthread detached functionality, when a detached
  226. communication completes, its resources are automatically released
  227. back to the system, there is no need to test or to wait for the
  228. completion of the request.
  229. */
  230. int starpu_mpi_issend_detached(starpu_data_handle_t data_handle, int dest, starpu_mpi_tag_t data_tag, MPI_Comm comm, void (*callback)(void *), void *arg);
  231. /**
  232. Similar to starpu_mpi_issend_detached(), but take a priority \p prio.
  233. */
  234. int starpu_mpi_issend_detached_prio(starpu_data_handle_t data_handle, int dest, starpu_mpi_tag_t data_tag, int prio, MPI_Comm comm, void (*callback)(void *), void *arg);
  235. /**
  236. Return when the operation identified by request \p req is complete.
  237. */
  238. int starpu_mpi_wait(starpu_mpi_req *req, MPI_Status *status);
  239. /**
  240. If the operation identified by \p req is complete, set \p flag to
  241. 1. The \p status object is set to contain information on the
  242. completed operation.
  243. */
  244. int starpu_mpi_test(starpu_mpi_req *req, int *flag, MPI_Status *status);
  245. /**
  246. Block the caller until all group members of the communicator \p
  247. comm have called it.
  248. */
  249. int starpu_mpi_barrier(MPI_Comm comm);
  250. /**
  251. Wait until all StarPU tasks and communications for the given
  252. communicator are completed.
  253. */
  254. int starpu_mpi_wait_for_all(MPI_Comm comm);
  255. /**
  256. Post a standard-mode, non blocking send of \p data_handle to the
  257. node \p dest using the message tag \p data_tag within the
  258. communicator \p comm. On completion, \p tag is unlocked.
  259. */
  260. int starpu_mpi_isend_detached_unlock_tag(starpu_data_handle_t data_handle, int dest, starpu_mpi_tag_t data_tag, MPI_Comm comm, starpu_tag_t tag);
  261. /**
  262. Similar to starpu_mpi_isend_detached_unlock_tag(), but take a
  263. priority \p prio.
  264. */
  265. int starpu_mpi_isend_detached_unlock_tag_prio(starpu_data_handle_t data_handle, int dest, starpu_mpi_tag_t data_tag, int prio, MPI_Comm comm, starpu_tag_t tag);
  266. /**
  267. Post a nonblocking receive in \p data_handle from the node \p
  268. source using the message tag \p data_tag within the communicator \p
  269. comm. On completion, \p tag is unlocked.
  270. */
  271. int starpu_mpi_irecv_detached_unlock_tag(starpu_data_handle_t data_handle, int source, starpu_mpi_tag_t data_tag, MPI_Comm comm, starpu_tag_t tag);
  272. /**
  273. Post \p array_size standard-mode, non blocking send. Each post
  274. sends the n-th data of the array \p data_handle to the n-th node of
  275. the array \p dest using the n-th message tag of the array \p
  276. data_tag within the n-th communicator of the array \p comm. On
  277. completion of all the requests, \p tag is unlocked.
  278. */
  279. int starpu_mpi_isend_array_detached_unlock_tag(unsigned array_size, starpu_data_handle_t *data_handle, int *dest, starpu_mpi_tag_t *data_tag, MPI_Comm *comm, starpu_tag_t tag);
  280. /**
  281. Similar to starpu_mpi_isend_array_detached_unlock_tag(), but take a
  282. priority \p prio.
  283. */
  284. int starpu_mpi_isend_array_detached_unlock_tag_prio(unsigned array_size, starpu_data_handle_t *data_handle, int *dest, starpu_mpi_tag_t *data_tag, int *prio, MPI_Comm *comm, starpu_tag_t tag);
  285. /**
  286. Post \p array_size nonblocking receive. Each post receives in the
  287. n-th data of the array \p data_handle from the n-th node of the
  288. array \p source using the n-th message tag of the array \p data_tag
  289. within the n-th communicator of the array \p comm. On completion of
  290. all the requests, \p tag is unlocked.
  291. */
  292. int starpu_mpi_irecv_array_detached_unlock_tag(unsigned array_size, starpu_data_handle_t *data_handle, int *source, starpu_mpi_tag_t *data_tag, MPI_Comm *comm, starpu_tag_t tag);
  293. typedef int (*starpu_mpi_datatype_allocate_func_t)(starpu_data_handle_t, MPI_Datatype *);
  294. typedef void (*starpu_mpi_datatype_free_func_t)(MPI_Datatype *);
  295. /**
  296. Register functions to create and free a MPI datatype for the given
  297. handle.
  298. It is important that the function is called before any
  299. communication can take place for a data with the given handle. See
  300. \ref ExchangingUserDefinedDataInterface for an example.
  301. */
  302. int starpu_mpi_datatype_register(starpu_data_handle_t handle, starpu_mpi_datatype_allocate_func_t allocate_datatype_func, starpu_mpi_datatype_free_func_t free_datatype_func);
  303. /**
  304. Register functions to create and free a MPI datatype for the given
  305. interface id.
  306. Similar to starpu_mpi_datatype_register().
  307. It is important that the function is called before any
  308. communication can take place for a data with the given handle. See
  309. \ref ExchangingUserDefinedDataInterface for an example.
  310. */
  311. int starpu_mpi_interface_datatype_register(enum starpu_data_interface_id id, starpu_mpi_datatype_allocate_func_t allocate_datatype_func, starpu_mpi_datatype_free_func_t free_datatype_func);
  312. /**
  313. Unregister the MPI datatype functions stored for the interface of
  314. the given handle.
  315. */
  316. int starpu_mpi_datatype_unregister(starpu_data_handle_t handle);
  317. /**
  318. Unregister the MPI datatype functions stored for the interface of
  319. the given interface id. Similar to starpu_mpi_datatype_unregister().
  320. */
  321. int starpu_mpi_interface_datatype_unregister(enum starpu_data_interface_id id);
  322. /** @} */
  323. /**
  324. @name Communication Cache
  325. @{
  326. */
  327. /**
  328. Return 1 if the communication cache is enabled, 0 otherwise
  329. */
  330. int starpu_mpi_cache_is_enabled();
  331. /**
  332. If \p enabled is 1, enable the communication cache. Otherwise,
  333. clean the cache if it was enabled and disable it.
  334. */
  335. int starpu_mpi_cache_set(int enabled);
  336. /**
  337. Clear the send and receive communication cache for the data \p
  338. data_handle and invalidate the value. The function has to be called
  339. at the same point of task graph submission by all the MPI nodes on
  340. which the handle was registered. The function does nothing if the
  341. cache mechanism is disabled (see \ref STARPU_MPI_CACHE).
  342. */
  343. void starpu_mpi_cache_flush(MPI_Comm comm, starpu_data_handle_t data_handle);
  344. /**
  345. Clear the send and receive communication cache for all data and
  346. invalidate their values. The function has to be called at the same
  347. point of task graph submission by all the MPI nodes. The function
  348. does nothing if the cache mechanism is disabled (see \ref
  349. STARPU_MPI_CACHE).
  350. */
  351. void starpu_mpi_cache_flush_all_data(MPI_Comm comm);
  352. /**
  353. Test whether \p data_handle is cached for reception, i.e. the value
  354. was previously received from the owner node, and not flushed since
  355. then.
  356. */
  357. int starpu_mpi_cached_receive(starpu_data_handle_t data_handle);
  358. /**
  359. Test whether \p data_handle is cached for emission to node \p dest,
  360. i.e. the value was previously sent to \p dest, and not flushed
  361. since then.
  362. */
  363. int starpu_mpi_cached_send(starpu_data_handle_t data_handle, int dest);
  364. /** @} */
  365. /**
  366. @name MPI Insert Task
  367. \anchor MPIInsertTask
  368. @{
  369. */
  370. /**
  371. Can be used as rank when calling starpu_mpi_data_register() and
  372. alike, to specify that the data is per-node: each node will have
  373. its own value. Tasks writing to such data will be replicated on all
  374. nodes (and all parameters then have to be per-node). Tasks not
  375. writing to such data will just take the node-local value without
  376. any MPI communication.
  377. */
  378. #define STARPU_MPI_PER_NODE -2
  379. /**
  380. Register to MPI a StarPU data handle with the given tag, rank and
  381. MPI communicator. It also automatically clears the MPI
  382. communication cache when unregistering the data.
  383. */
  384. void starpu_mpi_data_register_comm(starpu_data_handle_t data_handle, starpu_mpi_tag_t data_tag, int rank, MPI_Comm comm);
  385. /**
  386. Register to MPI a StarPU data handle with the given tag, rank and
  387. the MPI communicator \c MPI_COMM_WORLD.
  388. It also automatically clears the MPI communication cache when
  389. unregistering the data.
  390. */
  391. #define starpu_mpi_data_register(data_handle, data_tag, rank) starpu_mpi_data_register_comm(data_handle, data_tag, rank, MPI_COMM_WORLD)
  392. /**
  393. Register to MPI a StarPU data handle with the given tag. No rank
  394. will be defined.
  395. It also automatically clears the MPI communication cache when
  396. unregistering the data.
  397. */
  398. void starpu_mpi_data_set_tag(starpu_data_handle_t handle, starpu_mpi_tag_t data_tag);
  399. /**
  400. Symbol kept for backward compatibility. Call function starpu_mpi_data_set_tag()
  401. */
  402. #define starpu_data_set_tag starpu_mpi_data_set_tag
  403. /**
  404. Register to MPI a StarPU data handle with the given rank and given
  405. communicator. No tag will be defined.
  406. It also automatically clears the MPI communication cache when
  407. unregistering the data.
  408. */
  409. void starpu_mpi_data_set_rank_comm(starpu_data_handle_t handle, int rank, MPI_Comm comm);
  410. /**
  411. Register to MPI a StarPU data handle with the given rank and the
  412. MPI communicator \c MPI_COMM_WORLD. No tag will be defined.
  413. It also automatically clears the MPI communication cache when
  414. unregistering the data.
  415. */
  416. #define starpu_mpi_data_set_rank(handle, rank) starpu_mpi_data_set_rank_comm(handle, rank, MPI_COMM_WORLD)
  417. /**
  418. Symbol kept for backward compatibility. Call function starpu_mpi_data_set_rank()
  419. */
  420. #define starpu_data_set_rank starpu_mpi_data_set_rank
  421. /**
  422. Return the rank of the given data.
  423. */
  424. int starpu_mpi_data_get_rank(starpu_data_handle_t handle);
  425. /**
  426. Symbol kept for backward compatibility. Call function starpu_mpi_data_get_rank()
  427. */
  428. #define starpu_data_get_rank starpu_mpi_data_get_rank
  429. /**
  430. Return the tag of the given data.
  431. */
  432. starpu_mpi_tag_t starpu_mpi_data_get_tag(starpu_data_handle_t handle);
  433. /**
  434. Symbol kept for backward compatibility. Call function starpu_mpi_data_get_tag()
  435. */
  436. #define starpu_data_get_tag starpu_mpi_data_get_tag
  437. /**
  438. Create and submit a task corresponding to codelet with the
  439. following arguments. The argument list must be zero-terminated.
  440. The arguments following the codelet are the same types as for the
  441. function starpu_task_insert().
  442. Access modes for data can also be
  443. set with ::STARPU_SSEND to specify the data has to be sent using a
  444. synchronous and non-blocking mode (see starpu_mpi_issend()).
  445. The extra argument ::STARPU_EXECUTE_ON_NODE followed by an integer
  446. allows to specify the MPI node to execute the codelet. It is also
  447. possible to specify that the node owning a specific data will
  448. execute the codelet, by using ::STARPU_EXECUTE_ON_DATA followed by
  449. a data handle.
  450. The internal algorithm is as follows:
  451. <ol>
  452. <li>
  453. Find out which MPI node is going to execute the codelet.
  454. <ul>
  455. <li>
  456. If there is only one node owning data in ::STARPU_W mode, it
  457. will be selected;
  458. <li>
  459. If there are several nodes owning data in ::STARPU_W mode, a
  460. node will be selected according to a given node selection
  461. policy (see ::STARPU_NODE_SELECTION_POLICY or
  462. starpu_mpi_node_selection_set_current_policy())
  463. <li>
  464. The argument ::STARPU_EXECUTE_ON_NODE followed by an integer
  465. can be used to specify the node;
  466. <li>
  467. The argument ::STARPU_EXECUTE_ON_DATA followed by a data handle can be used to specify that the node owning the given data will execute the codelet.
  468. </ul>
  469. </li>
  470. <li>
  471. Send and receive data as requested. Nodes owning data which need to
  472. be read by the task are sending them to the MPI node which will
  473. execute it. The latter receives them.
  474. </li>
  475. <li>
  476. Execute the codelet. This is done by the MPI node selected in the
  477. 1st step of the algorithm.
  478. </li>
  479. <li>
  480. If several MPI nodes own data to be written to, send written data
  481. back to their owners.
  482. </li>
  483. </ol>
  484. The algorithm also includes a communication cache mechanism that
  485. allows not to send data twice to the same MPI node, unless the data
  486. has been modified. The cache can be disabled (see \ref
  487. STARPU_MPI_CACHE).
  488. */
  489. int starpu_mpi_task_insert(MPI_Comm comm, struct starpu_codelet *codelet, ...);
  490. /**
  491. Call starpu_mpi_task_insert(). Symbol kept for backward compatibility.
  492. */
  493. int starpu_mpi_insert_task(MPI_Comm comm, struct starpu_codelet *codelet, ...);
  494. /**
  495. Create a task corresponding to \p codelet with the following given
  496. arguments. The argument list must be zero-terminated. The function
  497. performs the first two steps of the function
  498. starpu_mpi_task_insert(), i.e. submitting the MPI communications
  499. needed before the execution of the task, and the creation of the
  500. task on one node. Only the MPI node selected in the first step of
  501. the algorithm will return a valid task structure which can then be
  502. submitted, others will return <c>NULL</c>. The function
  503. starpu_mpi_task_post_build() MUST be called after that on all
  504. nodes, and after the submission of the task on the node which
  505. creates it, with the SAME list of arguments.
  506. */
  507. struct starpu_task *starpu_mpi_task_build(MPI_Comm comm, struct starpu_codelet *codelet, ...);
  508. /**
  509. MUST be called after a call to starpu_mpi_task_build(),
  510. with the SAME list of arguments. Perform the fourth -- last -- step of
  511. the algorithm described in starpu_mpi_task_insert().
  512. */
  513. int starpu_mpi_task_post_build(MPI_Comm comm, struct starpu_codelet *codelet, ...);
  514. /**
  515. Transfer data \p data_handle to MPI node \p node, sending it from
  516. its owner if needed. At least the target node and the owner have to
  517. call the function.
  518. */
  519. void starpu_mpi_get_data_on_node(MPI_Comm comm, starpu_data_handle_t data_handle, int node);
  520. /**
  521. Transfer data \p data_handle to MPI node \p node, sending it from
  522. its owner if needed. At least the target node and the owner have to
  523. call the function. On reception, the \p callback function is called
  524. with the argument \p arg.
  525. */
  526. void starpu_mpi_get_data_on_node_detached(MPI_Comm comm, starpu_data_handle_t data_handle, int node, void (*callback)(void*), void *arg);
  527. /**
  528. Transfer data \p data_handle to all MPI nodes, sending it from its
  529. owner if needed. All nodes have to call the function.
  530. */
  531. void starpu_mpi_get_data_on_all_nodes_detached(MPI_Comm comm, starpu_data_handle_t data_handle);
  532. /**
  533. Submit migration of the data onto the \p new_rank MPI node. This
  534. means both submitting the transfer of the data to node \p new_rank
  535. if it hasn't been submitted already, and setting the home node of
  536. the data to the new node. Further data transfers submitted by
  537. starpu_mpi_task_insert() will be done from that new node. This
  538. function thus needs to be called on all nodes which have registered
  539. the data at the same point of tasks submissions. This also flushes
  540. the cache for this data to avoid incoherencies.
  541. */
  542. void starpu_mpi_data_migrate(MPI_Comm comm, starpu_data_handle_t handle, int new_rank);
  543. /** @} */
  544. /**
  545. @name Node Selection Policy
  546. \anchor MPINodeSelectionPolicy
  547. @{
  548. */
  549. /**
  550. Define the current policy
  551. */
  552. #define STARPU_MPI_NODE_SELECTION_CURRENT_POLICY -1
  553. /**
  554. Define the policy in which the selected node is the one having the
  555. most data in ::STARPU_R mode
  556. */
  557. #define STARPU_MPI_NODE_SELECTION_MOST_R_DATA 0
  558. typedef int (*starpu_mpi_select_node_policy_func_t)(int me, int nb_nodes, struct starpu_data_descr *descr, int nb_data);
  559. /**
  560. Register a new policy which can then be used when there are several
  561. nodes owning data in ::STARPU_W mode.
  562. Here an example of function defining a node selection policy.
  563. The codelet will be executed on the node owning the first data with
  564. a size bigger than 1M, or on the node 0 if no data fits the given
  565. size.
  566. \code{.c}
  567. int my_node_selection_policy(int me, int nb_nodes, struct starpu_data_descr *descr, int nb_data)
  568. {
  569. // me is the current MPI rank
  570. // nb_nodes is the number of MPI nodes
  571. // descr is the description of the data specified when calling starpu_mpi_task_insert
  572. // nb_data is the number of data in descr
  573. int i;
  574. for(i= 0 ; i<nb_data ; i++)
  575. {
  576. starpu_data_handle_t data = descr[i].handle;
  577. enum starpu_data_access_mode mode = descr[i].mode;
  578. if (mode & STARPU_R)
  579. {
  580. int rank = starpu_data_get_rank(data);
  581. size_t size = starpu_data_get_size(data);
  582. if (size > 1024*1024) return rank;
  583. }
  584. }
  585. return 0;
  586. }
  587. \endcode
  588. */
  589. int starpu_mpi_node_selection_register_policy(starpu_mpi_select_node_policy_func_t policy_func);
  590. /**
  591. Unregister a previously registered policy.
  592. */
  593. int starpu_mpi_node_selection_unregister_policy(int policy);
  594. /**
  595. Return the current policy used to select the node which will
  596. execute the codelet
  597. */
  598. int starpu_mpi_node_selection_get_current_policy();
  599. /**
  600. Set the current policy used to select the node which will execute
  601. the codelet. The policy ::STARPU_MPI_NODE_SELECTION_MOST_R_DATA
  602. selects the node having the most data in ::STARPU_R mode so as to
  603. minimize the amount of data to be transfered.
  604. */
  605. int starpu_mpi_node_selection_set_current_policy(int policy);
  606. /** @} */
  607. /**
  608. @name Collective Operations
  609. \anchor MPICollectiveOperations
  610. @{
  611. */
  612. /**
  613. Perform a reduction on the given data \p handle. All nodes send the
  614. data to its owner node which will perform a reduction.
  615. */
  616. void starpu_mpi_redux_data(MPI_Comm comm, starpu_data_handle_t data_handle);
  617. /**
  618. Similar to starpu_mpi_redux_data(), but take a priority \p prio.
  619. */
  620. void starpu_mpi_redux_data_prio(MPI_Comm comm, starpu_data_handle_t data_handle, int prio);
  621. /**
  622. Scatter data among processes of the communicator based on the
  623. ownership of the data. For each data of the array \p data_handles,
  624. the process \p root sends the data to the process owning this data.
  625. Processes receiving data must have valid data handles to receive
  626. them. On completion of the collective communication, the \p
  627. scallback function is called with the argument \p sarg on the
  628. process \p root, the \p rcallback function is called with the
  629. argument \p rarg on any other process.
  630. */
  631. int starpu_mpi_scatter_detached(starpu_data_handle_t *data_handles, int count, int root, MPI_Comm comm, void (*scallback)(void *), void *sarg, void (*rcallback)(void *), void *rarg);
  632. /**
  633. Gather data from the different processes of the communicator onto
  634. the process \p root. Each process owning data handle in the array
  635. \p data_handles will send them to the process \p root. The process
  636. \p root must have valid data handles to receive the data. On
  637. completion of the collective communication, the \p rcallback
  638. function is called with the argument \p rarg on the process root,
  639. the \p scallback function is called with the argument \p sarg on
  640. any other process.
  641. */
  642. int starpu_mpi_gather_detached(starpu_data_handle_t *data_handles, int count, int root, MPI_Comm comm, void (*scallback)(void *), void *sarg, void (*rcallback)(void *), void *rarg);
  643. /** @} */
  644. int starpu_mpi_pre_submit_hook_register(void (*f)(struct starpu_task *));
  645. int starpu_mpi_pre_submit_hook_unregister();
  646. /** @} */
  647. #ifdef __cplusplus
  648. }
  649. #endif
  650. #endif // STARPU_USE_MPI
  651. #endif // __STARPU_MPI_H__