starpu_mpi.c

  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009, 2010-2015 Université de Bordeaux
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include <stdlib.h>
  18. #include <starpu_mpi.h>
  19. #include <starpu_mpi_datatype.h>
  20. #include <starpu_mpi_private.h>
  21. #include <starpu_mpi_cache.h>
  22. #include <starpu_profiling.h>
  23. #include <starpu_mpi_stats.h>
  25. #include <starpu_mpi_sync_data.h>
  26. #include <starpu_mpi_early_data.h>
  27. #include <starpu_mpi_early_request.h>
  28. #include <starpu_mpi_select_node.h>
  29. #include <starpu_mpi_tag.h>
  30. #include <starpu_mpi_comm.h>
  31. #include <common/config.h>
  32. #include <common/thread.h>
  33. #include <datawizard/interfaces/data_interface.h>
  34. #include <datawizard/coherency.h>
  35. #include <core/simgrid.h>
  36. static void _starpu_mpi_add_sync_point_in_fxt(void);
  37. static void _starpu_mpi_submit_ready_request(void *arg);
  38. static void _starpu_mpi_handle_ready_request(struct _starpu_mpi_req *req);
  39. static void _starpu_mpi_handle_request_termination(struct _starpu_mpi_req *req);
  40. #ifdef STARPU_VERBOSE
  41. static char *_starpu_mpi_request_type(enum _starpu_mpi_request_type request_type);
  42. #endif
  43. static struct _starpu_mpi_req *_starpu_mpi_isend_common(starpu_data_handle_t data_handle,
  44. int dest, int data_tag, MPI_Comm comm,
  45. unsigned detached, unsigned sync, void (*callback)(void *), void *arg,
  46. int sequential_consistency);
  47. static struct _starpu_mpi_req *_starpu_mpi_irecv_common(starpu_data_handle_t data_handle,
  48. int source, int data_tag, MPI_Comm comm,
  49. unsigned detached, unsigned sync, void (*callback)(void *), void *arg,
  50. int sequential_consistency, int is_internal_req,
  51. starpu_ssize_t count);
  52. static void _starpu_mpi_handle_detached_request(struct _starpu_mpi_req *req);
  53. static void _starpu_mpi_early_data_cb(void* arg);
  54. /* The list of ready requests */
  55. static struct _starpu_mpi_req_list *ready_requests;
  56. /* The list of detached requests that have already been submitted to MPI */
  57. static struct _starpu_mpi_req_list *detached_requests;
  58. static starpu_pthread_mutex_t detached_requests_mutex;
  59. /* Condition to wake up progression thread */
  60. static starpu_pthread_cond_t cond_progression;
  61. /* Condition to wake up threads waiting for all current MPI requests to finish */
  62. static starpu_pthread_cond_t cond_finished;
  63. static starpu_pthread_mutex_t mutex;
  64. static starpu_pthread_t progress_thread;
  65. static int running = 0;
  66. #ifdef STARPU_SIMGRID
  67. static int _mpi_world_size;
  68. static int _mpi_world_rank;
  69. #endif
  70. /* Count requests posted by the application and not yet submitted to MPI */
  71. static starpu_pthread_mutex_t mutex_posted_requests;
  72. static int posted_requests = 0, newer_requests, barrier_running = 0;
  73. #define _STARPU_MPI_INC_POSTED_REQUESTS(value) { STARPU_PTHREAD_MUTEX_LOCK(&mutex_posted_requests); posted_requests += value; STARPU_PTHREAD_MUTEX_UNLOCK(&mutex_posted_requests); }
  74. #pragma weak smpi_simulated_main_
  75. extern int smpi_simulated_main_(int argc, char *argv[]);
  76. static void _starpu_mpi_request_init(struct _starpu_mpi_req **req)
  77. {
  78. *req = calloc(1, sizeof(struct _starpu_mpi_req));
  79. STARPU_MPI_ASSERT_MSG(*req, "Invalid request");
  80. /* Initialize the request structure */
  81. (*req)->data_handle = NULL;
  82. (*req)->datatype = 0;
  83. (*req)->ptr = NULL;
  84. (*req)->count = -1;
  85. (*req)->user_datatype = -1;
  86. (*req)->node_tag.rank = -1;
  87. (*req)->node_tag.data_tag = -1;
  88. (*req)->node_tag.comm = NULL;
  89. (*req)->func = NULL;
  90. (*req)->status = NULL;
  91. (*req)->request = 0;
  92. (*req)->flag = NULL;
  93. (*req)->ret = -1;
  94. STARPU_PTHREAD_MUTEX_INIT(&((*req)->req_mutex), NULL);
  95. STARPU_PTHREAD_COND_INIT(&((*req)->req_cond), NULL);
  96. STARPU_PTHREAD_MUTEX_INIT(&((*req)->posted_mutex), NULL);
  97. STARPU_PTHREAD_COND_INIT(&((*req)->posted_cond), NULL);
  98. (*req)->request_type = UNKNOWN_REQ;
  99. (*req)->submitted = 0;
  100. (*req)->completed = 0;
  101. (*req)->posted = 0;
  102. (*req)->other_request = NULL;
  103. (*req)->sync = 0;
  104. (*req)->detached = -1;
  105. (*req)->callback = NULL;
  106. (*req)->callback_arg = NULL;
  107. (*req)->size_req = 0;
  108. (*req)->internal_req = NULL;
  109. (*req)->is_internal_req = 0;
  110. (*req)->envelope = NULL;
  111. (*req)->sequential_consistency = 1;
  112. }
  113. /********************************************************/
  114. /* */
  115. /* Send/Receive functionalities */
  116. /* */
  117. /********************************************************/
  118. struct _starpu_mpi_early_data_cb_args
  119. {
  120. starpu_data_handle_t data_handle;
  121. starpu_data_handle_t early_handle;
  122. struct _starpu_mpi_req *req;
  123. void *buffer;
  124. };
  125. static void _starpu_mpi_submit_ready_request(void *arg)
  126. {
  127. _STARPU_MPI_LOG_IN();
  128. struct _starpu_mpi_req *req = arg;
  129. _STARPU_MPI_INC_POSTED_REQUESTS(-1);
  130. _STARPU_MPI_DEBUG(3, "new req %p srcdst %d tag %d and type %s %d\n", req, req->node_tag.rank, req->node_tag.data_tag, _starpu_mpi_request_type(req->request_type), req->is_internal_req);
  131. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  132. if (req->request_type == RECV_REQ)
  133. {
  134. /* Case: the request is the internal receive request submitted
  135. * by StarPU-MPI to receive incoming data without a matching
  136. * early_request from the application. We immediately allocate the
  137. * pointer associated with the data_handle, and push it into the
  138. * ready_requests list, so that the real MPI request can be submitted
  139. * before the next submission of the envelope-catching request. */
  140. if (req->is_internal_req)
  141. {
  142. _starpu_mpi_handle_allocate_datatype(req->data_handle, &req->datatype, &req->user_datatype);
  143. if (req->user_datatype == 0)
  144. {
  145. req->count = 1;
  146. req->ptr = starpu_data_get_local_ptr(req->data_handle);
  147. }
  148. else
  149. {
  150. STARPU_ASSERT(req->count);
  151. req->ptr = malloc(req->count);
  152. STARPU_MPI_ASSERT_MSG(req->ptr, "cannot allocate message of size %ld\n", req->count);
  153. }
  154. _STARPU_MPI_DEBUG(3, "Pushing internal starpu_mpi_irecv request %p type %s tag %d src %d data %p ptr %p datatype '%s' count %d user_datatype %d \n",
  155. req, _starpu_mpi_request_type(req->request_type), req->node_tag.data_tag, req->node_tag.rank, req->data_handle, req->ptr,
  156. _starpu_mpi_datatype(req->datatype), (int)req->count, req->user_datatype);
  157. _starpu_mpi_req_list_push_front(ready_requests, req);
  158. /* inform the starpu mpi thread that the request has been pushed in the ready_requests list */
  159. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  160. STARPU_PTHREAD_MUTEX_LOCK(&req->posted_mutex);
  161. req->posted = 1;
  162. STARPU_PTHREAD_COND_BROADCAST(&req->posted_cond);
  163. STARPU_PTHREAD_MUTEX_UNLOCK(&req->posted_mutex);
  164. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  165. }
  166. else
  167. {
  168. /* test whether some data with the given tag and source has already been received by StarPU-MPI */
  169. struct _starpu_mpi_early_data_handle *early_data_handle = _starpu_mpi_early_data_find(&req->node_tag);
  170. /* Case: data with the given tag and source has already been received
  171. * early by StarPU-MPI. Asynchronously request read permission on the temporary handle,
  172. * so that when the internal receive is completed, the _starpu_mpi_early_data_cb function
  173. * is called to bring the data back to the original data handle associated with the request. */
  174. if (early_data_handle)
  175. {
  176. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  177. STARPU_PTHREAD_MUTEX_LOCK(&(early_data_handle->req_mutex));
  178. while (!(early_data_handle->req_ready))
  179. STARPU_PTHREAD_COND_WAIT(&(early_data_handle->req_cond), &(early_data_handle->req_mutex));
  180. STARPU_PTHREAD_MUTEX_UNLOCK(&(early_data_handle->req_mutex));
  181. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  182. _STARPU_MPI_DEBUG(3, "The RECV request %p with tag %d has already been received, copying previously received data into handle's pointer..\n", req, req->node_tag.data_tag);
  183. STARPU_ASSERT(req->data_handle != early_data_handle->handle);
  184. req->internal_req = early_data_handle->req;
  185. struct _starpu_mpi_early_data_cb_args *cb_args = malloc(sizeof(struct _starpu_mpi_early_data_cb_args));
  186. cb_args->data_handle = req->data_handle;
  187. cb_args->early_handle = early_data_handle->handle;
  188. cb_args->buffer = early_data_handle->buffer;
  189. cb_args->req = req;
  190. _STARPU_MPI_DEBUG(3, "Calling data_acquire_cb on starpu_mpi_copy_cb..\n");
  191. starpu_data_acquire_cb(early_data_handle->handle,STARPU_R,_starpu_mpi_early_data_cb,(void*) cb_args);
  192. }
  193. /* Case: no matching data has been received. Store the receive request as an early_request. */
  194. else
  195. {
  196. struct _starpu_mpi_req *sync_req = _starpu_mpi_sync_data_find(req->node_tag.data_tag, req->node_tag.rank, req->node_tag.comm);
  197. _STARPU_MPI_DEBUG(3, "----------> Looking for sync data for tag %d and src %d = %p\n", req->node_tag.data_tag, req->node_tag.rank, sync_req);
  198. if (sync_req)
  199. {
  200. req->sync = 1;
  201. _starpu_mpi_handle_allocate_datatype(req->data_handle, &req->datatype, &req->user_datatype);
  202. if (req->user_datatype == 0)
  203. {
  204. req->count = 1;
  205. req->ptr = starpu_data_get_local_ptr(req->data_handle);
  206. }
  207. else
  208. {
  209. req->count = sync_req->count;
  210. STARPU_ASSERT(req->count);
  211. req->ptr = malloc(req->count);
  212. STARPU_MPI_ASSERT_MSG(req->ptr, "cannot allocate message of size %ld\n", req->count);
  213. }
  214. _starpu_mpi_req_list_push_front(ready_requests, req);
  215. free(sync_req);
  216. }
  217. else
  218. {
  219. _STARPU_MPI_DEBUG(3, "Adding the pending receive request %p (srcdst %d tag %d) into the request hashmap\n", req, req->node_tag.rank, req->node_tag.data_tag);
  220. _starpu_mpi_early_request_add(req);
  221. }
  222. }
  223. }
  224. }
  225. else
  226. {
  227. _starpu_mpi_req_list_push_front(ready_requests, req);
  228. _STARPU_MPI_DEBUG(3, "Pushing new request %p type %s tag %d src %d data %p ptr %p datatype '%s' count %d user_datatype %d \n",
  229. req, _starpu_mpi_request_type(req->request_type), req->node_tag.data_tag, req->node_tag.rank, req->data_handle, req->ptr, _starpu_mpi_datatype(req->datatype), (int)req->count, req->user_datatype);
  230. }
  231. newer_requests = 1;
  232. STARPU_PTHREAD_COND_BROADCAST(&cond_progression);
  233. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  234. _STARPU_MPI_LOG_OUT();
  235. }
  236. static struct _starpu_mpi_req *_starpu_mpi_isend_irecv_common(starpu_data_handle_t data_handle,
  237. int srcdst, int data_tag, MPI_Comm comm,
  238. unsigned detached, unsigned sync, void (*callback)(void *), void *arg,
  239. enum _starpu_mpi_request_type request_type, void (*func)(struct _starpu_mpi_req *),
  240. enum starpu_data_access_mode mode,
  241. int sequential_consistency,
  242. int is_internal_req,
  243. starpu_ssize_t count)
  244. {
  245. struct _starpu_mpi_req *req;
  246. _STARPU_MPI_LOG_IN();
  247. _STARPU_MPI_INC_POSTED_REQUESTS(1);
  248. /* Initialize the request structure */
  249. _starpu_mpi_request_init(&req);
  250. req->request_type = request_type;
  251. req->data_handle = data_handle;
  252. req->node_tag.rank = srcdst;
  253. req->node_tag.data_tag = data_tag;
  254. req->node_tag.comm = comm;
  255. req->detached = detached;
  256. req->sync = sync;
  257. req->callback = callback;
  258. req->callback_arg = arg;
  259. req->func = func;
  260. req->sequential_consistency = sequential_consistency;
  261. req->is_internal_req = is_internal_req;
  262. req->count = count;
  263. /* Asynchronously request StarPU to fetch the data in main memory: when
  264. * it is available in main memory, _starpu_mpi_submit_ready_request(req) is called and
  265. * the request is actually submitted */
  266. starpu_data_acquire_cb_sequential_consistency(data_handle, mode, _starpu_mpi_submit_ready_request, (void *)req, sequential_consistency);
  267. _STARPU_MPI_LOG_OUT();
  268. return req;
  269. }
  270. /********************************************************/
  271. /* */
  272. /* Send functionalities */
  273. /* */
  274. /********************************************************/
  275. static void _starpu_mpi_isend_data_func(struct _starpu_mpi_req *req)
  276. {
  277. _STARPU_MPI_LOG_IN();
  278. _STARPU_MPI_DEBUG(20, "post MPI isend request %p type %s tag %d src %d data %p datasize %ld ptr %p datatype '%s' count %d user_datatype %d sync %d\n", req, _starpu_mpi_request_type(req->request_type), req->node_tag.data_tag, req->node_tag.rank, req->data_handle, starpu_data_get_size(req->data_handle), req->ptr, _starpu_mpi_datatype(req->datatype), (int)req->count, req->user_datatype, req->sync);
  279. _starpu_mpi_comm_amounts_inc(req->node_tag.comm, req->node_tag.rank, req->datatype, req->count);
  280. _STARPU_MPI_TRACE_ISEND_SUBMIT_BEGIN(req->node_tag.rank, req->node_tag.data_tag, 0);
  281. if (req->sync == 0)
  282. {
  283. _STARPU_MPI_COMM_TO_DEBUG(req->count, req->datatype, req->node_tag.rank, _STARPU_MPI_TAG_DATA, req->node_tag.data_tag);
  284. req->ret = MPI_Isend(req->ptr, req->count, req->datatype, req->node_tag.rank, _STARPU_MPI_TAG_DATA, req->node_tag.comm, &req->request);
  285. STARPU_MPI_ASSERT_MSG(req->ret == MPI_SUCCESS, "MPI_Isend returning %s", _starpu_mpi_get_mpi_code(req->ret));
  286. }
  287. else
  288. {
  289. _STARPU_MPI_COMM_TO_DEBUG(req->count, req->datatype, req->node_tag.rank, _STARPU_MPI_TAG_SYNC_DATA, req->node_tag.data_tag);
  290. req->ret = MPI_Issend(req->ptr, req->count, req->datatype, req->node_tag.rank, _STARPU_MPI_TAG_SYNC_DATA, req->node_tag.comm, &req->request);
  291. STARPU_MPI_ASSERT_MSG(req->ret == MPI_SUCCESS, "MPI_Issend returning %s", _starpu_mpi_get_mpi_code(req->ret));
  292. }
  293. _STARPU_MPI_TRACE_ISEND_SUBMIT_END(req->node_tag.rank, req->node_tag.data_tag, 0);
  294. /* somebody is perhaps waiting for the MPI request to be posted */
  295. STARPU_PTHREAD_MUTEX_LOCK(&req->req_mutex);
  296. req->submitted = 1;
  297. STARPU_PTHREAD_COND_BROADCAST(&req->req_cond);
  298. STARPU_PTHREAD_MUTEX_UNLOCK(&req->req_mutex);
  299. _starpu_mpi_handle_detached_request(req);
  300. _STARPU_MPI_LOG_OUT();
  301. }
  302. static void _starpu_mpi_isend_size_func(struct _starpu_mpi_req *req)
  303. {
  304. _starpu_mpi_handle_allocate_datatype(req->data_handle, &req->datatype, &req->user_datatype);
  305. req->envelope = calloc(1,sizeof(struct _starpu_mpi_envelope));
  306. req->envelope->mode = _STARPU_MPI_ENVELOPE_DATA;
  307. req->envelope->data_tag = req->node_tag.data_tag;
  308. req->envelope->sync = req->sync;
  309. if (req->user_datatype == 0)
  310. {
  311. int size;
  312. req->count = 1;
  313. req->ptr = starpu_data_get_local_ptr(req->data_handle);
  314. MPI_Type_size(req->datatype, &size);
  315. req->envelope->size = (starpu_ssize_t)req->count * size;
  316. _STARPU_MPI_DEBUG(20, "Post MPI isend count (%ld) datatype_size %ld request to %d\n",req->count,starpu_data_get_size(req->data_handle), req->node_tag.rank);
  317. _STARPU_MPI_COMM_TO_DEBUG(sizeof(struct _starpu_mpi_envelope), MPI_BYTE, req->node_tag.rank, _STARPU_MPI_TAG_ENVELOPE, _STARPU_MPI_TAG_ENVELOPE);
  318. MPI_Isend(req->envelope, sizeof(struct _starpu_mpi_envelope), MPI_BYTE, req->node_tag.rank, _STARPU_MPI_TAG_ENVELOPE, req->node_tag.comm, &req->size_req);
  319. }
  320. else
  321. {
  322. int ret;
  323. // Do not pack the data, just try to find out the size
  324. starpu_data_pack(req->data_handle, NULL, &(req->envelope->size));
  325. if (req->envelope->size != -1)
  326. {
  327. // We already know the size of the data, let's send it to overlap with the packing of the data
  328. _STARPU_MPI_DEBUG(20, "Sending size %ld (%ld %s) to node %d (first call to pack)\n", req->envelope->size, sizeof(req->count), _starpu_mpi_datatype(MPI_BYTE), req->node_tag.rank);
  329. req->count = req->envelope->size;
  330. _STARPU_MPI_COMM_TO_DEBUG(sizeof(struct _starpu_mpi_envelope), MPI_BYTE, req->node_tag.rank, _STARPU_MPI_TAG_ENVELOPE, _STARPU_MPI_TAG_ENVELOPE);
  331. ret = MPI_Isend(req->envelope, sizeof(struct _starpu_mpi_envelope), MPI_BYTE, req->node_tag.rank, _STARPU_MPI_TAG_ENVELOPE, req->node_tag.comm, &req->size_req);
  332. STARPU_MPI_ASSERT_MSG(ret == MPI_SUCCESS, "when sending size, MPI_Isend returning %s", _starpu_mpi_get_mpi_code(ret));
  333. }
  334. // Pack the data
  335. starpu_data_pack(req->data_handle, &req->ptr, &req->count);
  336. if (req->envelope->size == -1)
  337. {
  338. // We know the size now, let's send it
  339. _STARPU_MPI_DEBUG(20, "Sending size %ld (%ld %s) to node %d (second call to pack)\n", req->envelope->size, sizeof(req->count), _starpu_mpi_datatype(MPI_BYTE), req->node_tag.rank);
  340. _STARPU_MPI_COMM_TO_DEBUG(sizeof(struct _starpu_mpi_envelope), MPI_BYTE, req->node_tag.rank, _STARPU_MPI_TAG_ENVELOPE, _STARPU_MPI_TAG_ENVELOPE);
  341. ret = MPI_Isend(req->envelope, sizeof(struct _starpu_mpi_envelope), MPI_BYTE, req->node_tag.rank, _STARPU_MPI_TAG_ENVELOPE, req->node_tag.comm, &req->size_req);
  342. STARPU_MPI_ASSERT_MSG(ret == MPI_SUCCESS, "when sending size, MPI_Isend returning %s", _starpu_mpi_get_mpi_code(ret));
  343. }
  344. else
  345. {
  346. // We check that the size returned by the two calls to pack is the same
  347. STARPU_MPI_ASSERT_MSG(req->count == req->envelope->size, "Calls to pack_data returned different sizes %ld != %ld", req->count, req->envelope->size);
  348. }
  349. // We can send the data now
  350. }
  351. if (req->sync)
  352. {
  353. // If the data is to be sent in synchronous mode, we need to wait for the receiver ready message
  354. _starpu_mpi_sync_data_add(req);
  355. }
  356. else
  357. {
  358. // Otherwise we can send the data
  359. _starpu_mpi_isend_data_func(req);
  360. }
  361. }
  362. static struct _starpu_mpi_req *_starpu_mpi_isend_common(starpu_data_handle_t data_handle,
  363. int dest, int data_tag, MPI_Comm comm,
  364. unsigned detached, unsigned sync, void (*callback)(void *), void *arg,
  365. int sequential_consistency)
  366. {
  367. return _starpu_mpi_isend_irecv_common(data_handle, dest, data_tag, comm, detached, sync, callback, arg, SEND_REQ, _starpu_mpi_isend_size_func, STARPU_R, sequential_consistency, 0, 0);
  368. }
  369. int starpu_mpi_isend(starpu_data_handle_t data_handle, starpu_mpi_req *public_req, int dest, int data_tag, MPI_Comm comm)
  370. {
  371. _STARPU_MPI_LOG_IN();
  372. STARPU_MPI_ASSERT_MSG(public_req, "starpu_mpi_isend needs a valid starpu_mpi_req");
  373. struct _starpu_mpi_req *req;
  374. _STARPU_MPI_TRACE_ISEND_COMPLETE_BEGIN(dest, data_tag, 0);
  375. req = _starpu_mpi_isend_common(data_handle, dest, data_tag, comm, 0, 0, NULL, NULL, 1);
  376. _STARPU_MPI_TRACE_ISEND_COMPLETE_END(dest, data_tag, 0);
  377. STARPU_MPI_ASSERT_MSG(req, "Invalid return for _starpu_mpi_isend_common");
  378. *public_req = req;
  379. _STARPU_MPI_LOG_OUT();
  380. return 0;
  381. }
  382. int starpu_mpi_isend_detached(starpu_data_handle_t data_handle,
  383. int dest, int data_tag, MPI_Comm comm, void (*callback)(void *), void *arg)
  384. {
  385. _STARPU_MPI_LOG_IN();
  386. _starpu_mpi_isend_common(data_handle, dest, data_tag, comm, 1, 0, callback, arg, 1);
  387. _STARPU_MPI_LOG_OUT();
  388. return 0;
  389. }
  390. int starpu_mpi_send(starpu_data_handle_t data_handle, int dest, int data_tag, MPI_Comm comm)
  391. {
  392. starpu_mpi_req req;
  393. MPI_Status status;
  394. _STARPU_MPI_LOG_IN();
  395. memset(&status, 0, sizeof(MPI_Status));
  396. starpu_mpi_isend(data_handle, &req, dest, data_tag, comm);
  397. starpu_mpi_wait(&req, &status);
  398. _STARPU_MPI_LOG_OUT();
  399. return 0;
  400. }
  401. int starpu_mpi_issend(starpu_data_handle_t data_handle, starpu_mpi_req *public_req, int dest, int data_tag, MPI_Comm comm)
  402. {
  403. _STARPU_MPI_LOG_IN();
  404. STARPU_MPI_ASSERT_MSG(public_req, "starpu_mpi_issend needs a valid starpu_mpi_req");
  405. struct _starpu_mpi_req *req;
  406. req = _starpu_mpi_isend_common(data_handle, dest, data_tag, comm, 0, 1, NULL, NULL, 1);
  407. STARPU_MPI_ASSERT_MSG(req, "Invalid return for _starpu_mpi_isend_common");
  408. *public_req = req;
  409. _STARPU_MPI_LOG_OUT();
  410. return 0;
  411. }
  412. int starpu_mpi_issend_detached(starpu_data_handle_t data_handle, int dest, int data_tag, MPI_Comm comm, void (*callback)(void *), void *arg)
  413. {
  414. _STARPU_MPI_LOG_IN();
  415. _starpu_mpi_isend_common(data_handle, dest, data_tag, comm, 1, 1, callback, arg, 1);
  416. _STARPU_MPI_LOG_OUT();
  417. return 0;
  418. }
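/* Illustrative usage sketch (not part of the original file): how an
 * application typically drives the send functions above. The names
 * `handle`, `dest` and `tag` are hypothetical; the block is guarded by
 * #if 0 so it is never compiled. */
#if 0
static void example_send_patterns(starpu_data_handle_t handle, int dest, int tag)
{
	starpu_mpi_req req;
	MPI_Status status;

	/* Blocking send: internally an isend followed by a wait (see starpu_mpi_send above). */
	starpu_mpi_send(handle, dest, tag, MPI_COMM_WORLD);

	/* Non-blocking send: keep the request and wait for its completion later. */
	starpu_mpi_isend(handle, &req, dest, tag, MPI_COMM_WORLD);
	/* ... submit tasks or other communications here ... */
	starpu_mpi_wait(&req, &status);

	/* Detached send: completion is handled by the progression thread,
	 * the optional callback (here NULL) would be executed on completion. */
	starpu_mpi_isend_detached(handle, dest, tag, MPI_COMM_WORLD, NULL, NULL);
}
#endif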
  419. /********************************************************/
  420. /* */
  421. /* receive functionalities */
  422. /* */
  423. /********************************************************/
  424. static void _starpu_mpi_irecv_data_func(struct _starpu_mpi_req *req)
  425. {
  426. _STARPU_MPI_LOG_IN();
  427. _STARPU_MPI_DEBUG(20, "post MPI irecv request %p type %s tag %d src %d data %p ptr %p datatype '%s' count %d user_datatype %d \n", req, _starpu_mpi_request_type(req->request_type), req->node_tag.data_tag, req->node_tag.rank, req->data_handle, req->ptr, _starpu_mpi_datatype(req->datatype), (int)req->count, req->user_datatype);
  428. _STARPU_MPI_TRACE_IRECV_SUBMIT_BEGIN(req->node_tag.rank, req->node_tag.data_tag);
  429. if (req->sync)
  430. {
  431. struct _starpu_mpi_envelope *_envelope = calloc(1,sizeof(struct _starpu_mpi_envelope));
  432. _envelope->mode = _STARPU_MPI_ENVELOPE_SYNC_READY;
  433. _envelope->data_tag = req->node_tag.data_tag;
  434. _STARPU_MPI_DEBUG(20, "Telling node %d it can send the data and waiting for the data back ...\n", req->node_tag.rank);
  435. _STARPU_MPI_COMM_TO_DEBUG(sizeof(struct _starpu_mpi_envelope), MPI_BYTE, req->node_tag.rank, _STARPU_MPI_TAG_ENVELOPE, _STARPU_MPI_TAG_ENVELOPE);
  436. req->ret = MPI_Send(_envelope, sizeof(struct _starpu_mpi_envelope), MPI_BYTE, req->node_tag.rank, _STARPU_MPI_TAG_ENVELOPE, req->node_tag.comm);
  437. STARPU_MPI_ASSERT_MSG(req->ret == MPI_SUCCESS, "MPI_Send returning %s", _starpu_mpi_get_mpi_code(req->ret));
  438. free(_envelope);
  439. }
  440. if (req->sync)
  441. {
  442. _STARPU_MPI_COMM_FROM_DEBUG(req->count, req->datatype, req->node_tag.rank, _STARPU_MPI_TAG_SYNC_DATA, req->node_tag.data_tag);
  443. req->ret = MPI_Irecv(req->ptr, req->count, req->datatype, req->node_tag.rank, _STARPU_MPI_TAG_SYNC_DATA, req->node_tag.comm, &req->request);
  444. }
  445. else
  446. {
  447. _STARPU_MPI_COMM_FROM_DEBUG(req->count, req->datatype, req->node_tag.rank, _STARPU_MPI_TAG_DATA, req->node_tag.data_tag);
  448. req->ret = MPI_Irecv(req->ptr, req->count, req->datatype, req->node_tag.rank, _STARPU_MPI_TAG_DATA, req->node_tag.comm, &req->request);
  449. }
  450. STARPU_MPI_ASSERT_MSG(req->ret == MPI_SUCCESS, "MPI_IRecv returning %s", _starpu_mpi_get_mpi_code(req->ret));
  451. _STARPU_MPI_TRACE_IRECV_SUBMIT_END(req->node_tag.rank, req->node_tag.data_tag);
  452. /* somebody is perhaps waiting for the MPI request to be posted */
  453. STARPU_PTHREAD_MUTEX_LOCK(&req->req_mutex);
  454. req->submitted = 1;
  455. STARPU_PTHREAD_COND_BROADCAST(&req->req_cond);
  456. STARPU_PTHREAD_MUTEX_UNLOCK(&req->req_mutex);
  457. _starpu_mpi_handle_detached_request(req);
  458. _STARPU_MPI_LOG_OUT();
  459. }
  460. static struct _starpu_mpi_req *_starpu_mpi_irecv_common(starpu_data_handle_t data_handle, int source, int data_tag, MPI_Comm comm, unsigned detached, unsigned sync, void (*callback)(void *), void *arg, int sequential_consistency, int is_internal_req, starpu_ssize_t count)
  461. {
  462. return _starpu_mpi_isend_irecv_common(data_handle, source, data_tag, comm, detached, sync, callback, arg, RECV_REQ, _starpu_mpi_irecv_data_func, STARPU_W, sequential_consistency, is_internal_req, count);
  463. }
  464. int starpu_mpi_irecv(starpu_data_handle_t data_handle, starpu_mpi_req *public_req, int source, int data_tag, MPI_Comm comm)
  465. {
  466. _STARPU_MPI_LOG_IN();
  467. STARPU_MPI_ASSERT_MSG(public_req, "starpu_mpi_irecv needs a valid starpu_mpi_req");
  468. // // We check if a tag is defined for the data handle, if not,
  469. // // we define the one given for the communication.
  470. // // A tag is necessary for the internal mpi engine.
  471. // int tag = starpu_data_get_tag(data_handle);
  472. // if (tag == -1)
  473. // starpu_data_set_tag(data_handle, data_tag);
  474. struct _starpu_mpi_req *req;
  475. _STARPU_MPI_TRACE_IRECV_COMPLETE_BEGIN(source, data_tag);
  476. req = _starpu_mpi_irecv_common(data_handle, source, data_tag, comm, 0, 0, NULL, NULL, 1, 0, 0);
  477. _STARPU_MPI_TRACE_IRECV_COMPLETE_END(source, data_tag);
  478. STARPU_MPI_ASSERT_MSG(req, "Invalid return for _starpu_mpi_irecv_common");
  479. *public_req = req;
  480. _STARPU_MPI_LOG_OUT();
  481. return 0;
  482. }
  483. int starpu_mpi_irecv_detached(starpu_data_handle_t data_handle, int source, int data_tag, MPI_Comm comm, void (*callback)(void *), void *arg)
  484. {
  485. _STARPU_MPI_LOG_IN();
  486. // // We check if a tag is defined for the data handle, if not,
  487. // // we define the one given for the communication.
  488. // // A tag is necessary for the internal mpi engine.
  489. // int tag = starpu_data_get_tag(data_handle);
  490. // if (tag == -1)
  491. // starpu_data_set_tag(data_handle, data_tag);
  492. _starpu_mpi_irecv_common(data_handle, source, data_tag, comm, 1, 0, callback, arg, 1, 0, 0);
  493. _STARPU_MPI_LOG_OUT();
  494. return 0;
  495. }
  496. int starpu_mpi_irecv_detached_sequential_consistency(starpu_data_handle_t data_handle, int source, int data_tag, MPI_Comm comm, void (*callback)(void *), void *arg, int sequential_consistency)
  497. {
  498. _STARPU_MPI_LOG_IN();
  499. // // We check if a tag is defined for the data handle, if not,
  500. // // we define the one given for the communication.
  501. // // A tag is necessary for the internal mpi engine.
  502. // int tag = starpu_data_get_tag(data_handle);
  503. // if (tag == -1)
  504. // starpu_data_set_tag(data_handle, data_tag);
  505. _starpu_mpi_irecv_common(data_handle, source, data_tag, comm, 1, 0, callback, arg, sequential_consistency, 0, 0);
  506. _STARPU_MPI_LOG_OUT();
  507. return 0;
  508. }
  509. int starpu_mpi_recv(starpu_data_handle_t data_handle, int source, int data_tag, MPI_Comm comm, MPI_Status *status)
  510. {
  511. starpu_mpi_req req;
  512. _STARPU_MPI_LOG_IN();
  513. // // We check if a tag is defined for the data handle, if not,
  514. // // we define the one given for the communication.
  515. // // A tag is necessary for the internal mpi engine.
  516. // int tag = starpu_data_get_tag(data_handle);
  517. // if (tag == -1)
  518. // starpu_data_set_tag(data_handle, data_tag);
  519. starpu_mpi_irecv(data_handle, &req, source, data_tag, comm);
  520. starpu_mpi_wait(&req, status);
  521. _STARPU_MPI_LOG_OUT();
  522. return 0;
  523. }
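/* Illustrative usage sketch (not part of the original file): the receive-side
 * counterparts of the functions above. The names `handle`, `source` and `tag`
 * are hypothetical; the block is guarded by #if 0 so it is never compiled. */
#if 0
static void example_recv_patterns(starpu_data_handle_t handle, int source, int tag)
{
	MPI_Status status;

	/* Blocking receive: internally an irecv followed by a wait (see starpu_mpi_recv above). */
	starpu_mpi_recv(handle, source, tag, MPI_COMM_WORLD, &status);

	/* Detached receive: the progression thread completes the request;
	 * a callback (here NULL) may be given to be notified of completion. */
	starpu_mpi_irecv_detached(handle, source, tag, MPI_COMM_WORLD, NULL, NULL);
}
#endif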
  524. /********************************************************/
  525. /* */
  526. /* Wait functionalities */
  527. /* */
  528. /********************************************************/
  529. static void _starpu_mpi_wait_func(struct _starpu_mpi_req *waiting_req)
  530. {
  531. _STARPU_MPI_LOG_IN();
  532. /* Which MPI request are we waiting for? */
  533. struct _starpu_mpi_req *req = waiting_req->other_request;
  534. _STARPU_MPI_TRACE_UWAIT_BEGIN(req->node_tag.rank, req->node_tag.data_tag);
  535. req->ret = MPI_Wait(&req->request, waiting_req->status);
  536. STARPU_MPI_ASSERT_MSG(req->ret == MPI_SUCCESS, "MPI_Wait returning %s", _starpu_mpi_get_mpi_code(req->ret));
  537. _STARPU_MPI_TRACE_UWAIT_END(req->node_tag.rank, req->node_tag.data_tag);
  538. _starpu_mpi_handle_request_termination(req);
  539. _STARPU_MPI_LOG_OUT();
  540. }
  541. int starpu_mpi_wait(starpu_mpi_req *public_req, MPI_Status *status)
  542. {
  543. int ret;
  544. struct _starpu_mpi_req *req = *public_req;
  545. struct _starpu_mpi_req *waiting_req;
  546. _STARPU_MPI_LOG_IN();
  547. _STARPU_MPI_INC_POSTED_REQUESTS(1);
  548. /* We cannot try to complete a MPI request that was not actually posted
  549. * to MPI yet. */
  550. STARPU_PTHREAD_MUTEX_LOCK(&(req->req_mutex));
  551. while (!(req->submitted))
  552. STARPU_PTHREAD_COND_WAIT(&(req->req_cond), &(req->req_mutex));
  553. STARPU_PTHREAD_MUTEX_UNLOCK(&(req->req_mutex));
  554. /* Initialize the request structure */
  555. _starpu_mpi_request_init(&waiting_req);
  556. waiting_req->status = status;
  557. waiting_req->other_request = req;
  558. waiting_req->func = _starpu_mpi_wait_func;
  559. waiting_req->request_type = WAIT_REQ;
  560. _starpu_mpi_submit_ready_request(waiting_req);
  561. /* We wait for the MPI request to finish */
  562. STARPU_PTHREAD_MUTEX_LOCK(&req->req_mutex);
  563. while (!req->completed)
  564. STARPU_PTHREAD_COND_WAIT(&req->req_cond, &req->req_mutex);
  565. STARPU_PTHREAD_MUTEX_UNLOCK(&req->req_mutex);
  566. ret = req->ret;
  567. /* The internal request structure was automatically allocated */
  568. *public_req = NULL;
  569. if (req->internal_req)
  570. {
  571. free(req->internal_req); req->internal_req = NULL;
  572. }
  573. free(req);
  574. free(waiting_req);
  575. _STARPU_MPI_LOG_OUT();
  576. return ret;
  577. }
  578. /********************************************************/
  579. /* */
  580. /* Test functionalities */
  581. /* */
  582. /********************************************************/
  583. static void _starpu_mpi_test_func(struct _starpu_mpi_req *testing_req)
  584. {
  585. _STARPU_MPI_LOG_IN();
  586. /* Which MPI request are we testing? */
  587. struct _starpu_mpi_req *req = testing_req->other_request;
  588. _STARPU_MPI_DEBUG(2, "Test request %p type %s tag %d src %d data %p ptr %p datatype '%s' count %d user_datatype %d \n",
  589. req, _starpu_mpi_request_type(req->request_type), req->node_tag.data_tag, req->node_tag.rank, req->data_handle, req->ptr, _starpu_mpi_datatype(req->datatype), (int)req->count, req->user_datatype);
  590. _STARPU_MPI_TRACE_UTESTING_BEGIN(req->node_tag.rank, req->node_tag.data_tag);
  591. req->ret = MPI_Test(&req->request, testing_req->flag, testing_req->status);
  592. STARPU_MPI_ASSERT_MSG(req->ret == MPI_SUCCESS, "MPI_Test returning %s", _starpu_mpi_get_mpi_code(req->ret));
  593. _STARPU_MPI_TRACE_UTESTING_END(req->node_tag.rank, req->node_tag.data_tag);
  594. if (*testing_req->flag)
  595. {
  596. testing_req->ret = req->ret;
  597. _starpu_mpi_handle_request_termination(req);
  598. }
  599. STARPU_PTHREAD_MUTEX_LOCK(&testing_req->req_mutex);
  600. testing_req->completed = 1;
  601. STARPU_PTHREAD_COND_SIGNAL(&testing_req->req_cond);
  602. STARPU_PTHREAD_MUTEX_UNLOCK(&testing_req->req_mutex);
  603. _STARPU_MPI_LOG_OUT();
  604. }
  605. int starpu_mpi_test(starpu_mpi_req *public_req, int *flag, MPI_Status *status)
  606. {
  607. _STARPU_MPI_LOG_IN();
  608. int ret = 0;
  609. STARPU_MPI_ASSERT_MSG(public_req, "starpu_mpi_test needs a valid starpu_mpi_req");
  610. struct _starpu_mpi_req *req = *public_req;
  611. STARPU_MPI_ASSERT_MSG(!req->detached, "MPI_Test cannot be called on a detached request");
  612. STARPU_PTHREAD_MUTEX_LOCK(&req->req_mutex);
  613. unsigned submitted = req->submitted;
  614. STARPU_PTHREAD_MUTEX_UNLOCK(&req->req_mutex);
  615. if (submitted)
  616. {
  617. struct _starpu_mpi_req *testing_req;
  618. _starpu_mpi_request_init(&testing_req);
  619. /* Initialize the request structure */
  620. STARPU_PTHREAD_MUTEX_INIT(&(testing_req->req_mutex), NULL);
  621. STARPU_PTHREAD_COND_INIT(&(testing_req->req_cond), NULL);
  622. testing_req->flag = flag;
  623. testing_req->status = status;
  624. testing_req->other_request = req;
  625. testing_req->func = _starpu_mpi_test_func;
  626. testing_req->completed = 0;
  627. testing_req->request_type = TEST_REQ;
  628. _STARPU_MPI_INC_POSTED_REQUESTS(1);
  629. _starpu_mpi_submit_ready_request(testing_req);
  630. /* We wait for the test request to finish */
  631. STARPU_PTHREAD_MUTEX_LOCK(&(testing_req->req_mutex));
  632. while (!(testing_req->completed))
  633. STARPU_PTHREAD_COND_WAIT(&(testing_req->req_cond), &(testing_req->req_mutex));
  634. STARPU_PTHREAD_MUTEX_UNLOCK(&(testing_req->req_mutex));
  635. ret = testing_req->ret;
  636. if (*(testing_req->flag))
  637. {
  638. /* The request was completed so we free the internal
  639. * request structure which was automatically allocated
  640. * */
  641. *public_req = NULL;
  642. if (req->internal_req)
  643. {
  644. free(req->internal_req); req->internal_req = NULL;
  645. }
  646. free(req);
  647. }
  648. free(testing_req);
  649. }
  650. else
  651. {
  652. *flag = 0;
  653. }
  654. _STARPU_MPI_LOG_OUT();
  655. return ret;
  656. }
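/* Illustrative usage sketch (not part of the original file): polling a
 * non-detached request with starpu_mpi_test, assuming `req` was obtained
 * from starpu_mpi_isend or starpu_mpi_irecv. Guarded by #if 0 so it is
 * never compiled. */
#if 0
static void example_test_polling(starpu_mpi_req *req)
{
	int flag = 0;
	MPI_Status status;
	while (!flag)
	{
		/* Returns immediately; once the request has completed,
		 * flag is set to 1 and *req is freed and set to NULL. */
		starpu_mpi_test(req, &flag, &status);
		/* ... do some useful work between two polls ... */
	}
}
#endif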
  657. /********************************************************/
  658. /* */
  659. /* Barrier functionalities */
  660. /* */
  661. /********************************************************/
  662. static void _starpu_mpi_barrier_func(struct _starpu_mpi_req *barrier_req)
  663. {
  664. _STARPU_MPI_LOG_IN();
  665. barrier_req->ret = MPI_Barrier(barrier_req->node_tag.comm);
  666. STARPU_MPI_ASSERT_MSG(barrier_req->ret == MPI_SUCCESS, "MPI_Barrier returning %s", _starpu_mpi_get_mpi_code(barrier_req->ret));
  667. _starpu_mpi_handle_request_termination(barrier_req);
  668. _STARPU_MPI_LOG_OUT();
  669. }
  670. int starpu_mpi_barrier(MPI_Comm comm)
  671. {
  672. int ret;
  673. struct _starpu_mpi_req *barrier_req;
  674. _STARPU_MPI_LOG_IN();
  675. _starpu_mpi_request_init(&barrier_req);
  676. /* First wait for *both* all tasks and MPI requests to finish, in case
  677. * some tasks generate MPI requests, MPI requests generate tasks, etc.
  678. */
  679. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  680. STARPU_MPI_ASSERT_MSG(!barrier_running, "Concurrent starpu_mpi_barrier is not implemented, even on different communicators");
  681. barrier_running = 1;
  682. do
  683. {
  684. while (posted_requests)
  685. /* Wait for all current MPI requests to finish */
  686. STARPU_PTHREAD_COND_WAIT(&cond_finished, &mutex);
  687. /* No current request, clear flag */
  688. newer_requests = 0;
  689. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  690. /* Now wait for all tasks */
  691. starpu_task_wait_for_all();
  692. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  693. /* Check newer_requests again, in case some MPI requests
  694. * completed and triggered new tasks between the time
  695. * wait_for_all finished and the time we took the lock */
  696. } while (posted_requests || newer_requests);
  697. barrier_running = 0;
  698. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  699. /* Initialize the request structure */
  700. STARPU_PTHREAD_MUTEX_INIT(&(barrier_req->req_mutex), NULL);
  701. STARPU_PTHREAD_COND_INIT(&(barrier_req->req_cond), NULL);
  702. barrier_req->func = _starpu_mpi_barrier_func;
  703. barrier_req->request_type = BARRIER_REQ;
  704. barrier_req->node_tag.comm = comm;
  705. _STARPU_MPI_INC_POSTED_REQUESTS(1);
  706. _starpu_mpi_submit_ready_request(barrier_req);
  707. /* We wait for the MPI request to finish */
  708. STARPU_PTHREAD_MUTEX_LOCK(&barrier_req->req_mutex);
  709. while (!barrier_req->completed)
  710. STARPU_PTHREAD_COND_WAIT(&barrier_req->req_cond, &barrier_req->req_mutex);
  711. STARPU_PTHREAD_MUTEX_UNLOCK(&barrier_req->req_mutex);
  712. ret = barrier_req->ret;
  713. free(barrier_req);
  714. _STARPU_MPI_LOG_OUT();
  715. return ret;
  716. }
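/* Illustrative usage sketch (not part of the original file): as the code
 * above shows, starpu_mpi_barrier first waits for all tasks and all posted
 * MPI requests to complete before performing the MPI_Barrier itself, so it
 * can serve as a global synchronization point between application phases.
 * Guarded by #if 0 so it is never compiled. */
#if 0
static void example_barrier_usage(void)
{
	/* ... detached sends/receives and task submissions ... */
	starpu_mpi_barrier(MPI_COMM_WORLD);
	/* All tasks and communications submitted above have now completed on this node. */
}
#endif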
  717. /********************************************************/
  718. /* */
  719. /* Progression */
  720. /* */
  721. /********************************************************/
  722. #ifdef STARPU_VERBOSE
  723. static char *_starpu_mpi_request_type(enum _starpu_mpi_request_type request_type)
  724. {
  725. switch (request_type)
  726. {
  727. case SEND_REQ: return "SEND_REQ";
  728. case RECV_REQ: return "RECV_REQ";
  729. case WAIT_REQ: return "WAIT_REQ";
  730. case TEST_REQ: return "TEST_REQ";
  731. case BARRIER_REQ: return "BARRIER_REQ";
  732. case UNKNOWN_REQ: return "UNSET_REQ";
  733. default: return "unknown request type";
  734. }
  735. }
  736. #endif
  737. static void _starpu_mpi_handle_request_termination(struct _starpu_mpi_req *req)
  738. {
  739. _STARPU_MPI_LOG_IN();
  740. _STARPU_MPI_DEBUG(2, "complete MPI request %p type %s tag %d src %d data %p ptr %p datatype '%s' count %d user_datatype %d internal_req %p\n",
  741. req, _starpu_mpi_request_type(req->request_type), req->node_tag.data_tag, req->node_tag.rank, req->data_handle, req->ptr,
  742. _starpu_mpi_datatype(req->datatype), (int)req->count, req->user_datatype, req->internal_req);
  743. if (req->internal_req)
  744. {
  745. struct _starpu_mpi_early_data_handle *early_data_handle = _starpu_mpi_early_data_find(&req->node_tag);
  746. STARPU_MPI_ASSERT_MSG(early_data_handle, "Could not find a copy data handle with the tag %d and the node %d\n", req->node_tag.data_tag, req->node_tag.rank);
  747. _STARPU_MPI_DEBUG(3, "Handling deleting of early_data structure from the hashmap..\n");
  748. _starpu_mpi_early_data_delete(early_data_handle);
  749. free(early_data_handle);
  750. }
  751. else
  752. {
  753. if (req->request_type == RECV_REQ || req->request_type == SEND_REQ)
  754. {
  755. if (req->user_datatype == 1)
  756. {
  757. if (req->request_type == SEND_REQ)
  758. {
  759. // We need to make sure the communication sending the size
  760. // has completed: as MPI can re-order messages, we call
  761. // MPI_Wait to make sure the data has been sent
  762. int ret;
  763. ret = MPI_Wait(&req->size_req, MPI_STATUS_IGNORE);
  764. STARPU_MPI_ASSERT_MSG(ret == MPI_SUCCESS, "MPI_Wait returning %s", _starpu_mpi_get_mpi_code(ret));
  765. free(req->ptr);
  766. }
  767. else if (req->request_type == RECV_REQ)
  768. {
  769. // req->ptr is freed by starpu_data_unpack
  770. starpu_data_unpack(req->data_handle, req->ptr, req->count);
  771. }
  772. }
  773. else
  774. {
  775. _starpu_mpi_handle_free_datatype(req->data_handle, &req->datatype);
  776. }
  777. }
  778. }
  779. if (req->data_handle)
  780. starpu_data_release(req->data_handle);
  781. if (req->envelope)
  782. {
  783. free(req->envelope);
  784. req->envelope = NULL;
  785. }
  786. /* Execute the specified callback, if any */
  787. if (req->callback)
  788. req->callback(req->callback_arg);
  789. /* tell anyone potentially waiting on the request that it is
  790. * terminated now */
  791. STARPU_PTHREAD_MUTEX_LOCK(&req->req_mutex);
  792. req->completed = 1;
  793. STARPU_PTHREAD_COND_BROADCAST(&req->req_cond);
  794. STARPU_PTHREAD_MUTEX_UNLOCK(&req->req_mutex);
  795. _STARPU_MPI_LOG_OUT();
  796. }
  797. static void _starpu_mpi_early_data_cb(void* arg)
  798. {
  799. struct _starpu_mpi_early_data_cb_args *args = arg;
  800. // We store in the application request the internal MPI
  801. // request so that it can be used by starpu_mpi_wait
  802. args->req->request = args->req->internal_req->request;
  803. args->req->submitted = 1;
  804. if (args->buffer)
  805. {
  806. /* Data has been received as raw memory, it has to be unpacked */
  807. struct starpu_data_interface_ops *itf_src = starpu_data_get_interface_ops(args->early_handle);
  808. struct starpu_data_interface_ops *itf_dst = starpu_data_get_interface_ops(args->data_handle);
  809. STARPU_MPI_ASSERT_MSG(itf_dst->unpack_data, "The data interface does not define an unpack function\n");
  810. itf_dst->unpack_data(args->data_handle, STARPU_MAIN_RAM, args->buffer, itf_src->get_size(args->early_handle));
  811. free(args->buffer);
  812. }
  813. else
  814. {
  815. struct starpu_data_interface_ops *itf = starpu_data_get_interface_ops(args->early_handle);
  816. void* itf_src = starpu_data_get_interface_on_node(args->early_handle, STARPU_MAIN_RAM);
  817. void* itf_dst = starpu_data_get_interface_on_node(args->data_handle, STARPU_MAIN_RAM);
  818. if (!itf->copy_methods->ram_to_ram)
  819. {
  820. _STARPU_MPI_DEBUG(3, "Initiating any_to_any copy..\n");
  821. itf->copy_methods->any_to_any(itf_src, STARPU_MAIN_RAM, itf_dst, STARPU_MAIN_RAM, NULL);
  822. }
  823. else
  824. {
  825. _STARPU_MPI_DEBUG(3, "Initiating ram_to_ram copy..\n");
  826. itf->copy_methods->ram_to_ram(itf_src, STARPU_MAIN_RAM, itf_dst, STARPU_MAIN_RAM);
  827. }
  828. }
  829. _STARPU_MPI_DEBUG(3, "Done, handling release of early_handle..\n");
  830. starpu_data_release(args->early_handle);
  831. _STARPU_MPI_DEBUG(3, "Done, handling unregister of early_handle..\n");
  832. starpu_data_unregister_submit(args->early_handle);
  833. _STARPU_MPI_DEBUG(3, "Done, handling request %p termination of the already received request\n",args->req);
  834. // If the request is detached, we need to call _starpu_mpi_handle_request_termination ourselves,
  835. // as it will not be called automatically since the request is not in the detached_requests list
  836. if (args->req->detached)
  837. _starpu_mpi_handle_request_termination(args->req);
  838. // else: If the request is not detached its termination will
  839. // be handled when calling starpu_mpi_wait
  840. free(args);
  841. }
  842. #ifdef STARPU_MPI_ACTIVITY
  843. static unsigned _starpu_mpi_progression_hook_func(void *arg STARPU_ATTRIBUTE_UNUSED)
  844. {
  845. unsigned may_block = 1;
  846. STARPU_PTHREAD_MUTEX_LOCK(&detached_requests_mutex);
  847. if (!_starpu_mpi_req_list_empty(detached_requests))
  848. {
  849. STARPU_PTHREAD_MUTEX_UNLOCK(&detached_requests_mutex);
  850. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  851. STARPU_PTHREAD_COND_SIGNAL(&cond_progression);
  852. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  853. may_block = 0;
  854. }
  855. else
  856. STARPU_PTHREAD_MUTEX_UNLOCK(&detached_requests_mutex);
  857. return may_block;
  858. }
  859. #endif /* STARPU_MPI_ACTIVITY */
  860. static void _starpu_mpi_test_detached_requests(void)
  861. {
  862. _STARPU_MPI_LOG_IN();
  863. int flag;
  864. MPI_Status status;
  865. struct _starpu_mpi_req *req, *next_req;
  866. STARPU_PTHREAD_MUTEX_LOCK(&detached_requests_mutex);
  867. for (req = _starpu_mpi_req_list_begin(detached_requests);
  868. req != _starpu_mpi_req_list_end(detached_requests);
  869. req = next_req)
  870. {
  871. next_req = _starpu_mpi_req_list_next(req);
  872. STARPU_PTHREAD_MUTEX_UNLOCK(&detached_requests_mutex);
  873. //_STARPU_MPI_DEBUG(3, "Test detached request %p - mpitag %d - TYPE %s %d\n", &req->request, req->node_tag.data_tag, _starpu_mpi_request_type(req->request_type), req->node_tag.rank);
  874. req->ret = MPI_Test(&req->request, &flag, &status);
  875. STARPU_MPI_ASSERT_MSG(req->ret == MPI_SUCCESS, "MPI_Test returning %s", _starpu_mpi_get_mpi_code(req->ret));
  876. if (flag)
  877. {
  878. if (req->request_type == RECV_REQ)
  879. {
  880. _STARPU_MPI_TRACE_IRECV_COMPLETE_BEGIN(req->node_tag.rank, req->node_tag.data_tag);
  881. }
  882. else if (req->request_type == SEND_REQ)
  883. {
  884. _STARPU_MPI_TRACE_ISEND_COMPLETE_BEGIN(req->node_tag.rank, req->node_tag.data_tag, 0);
  885. }
  886. _starpu_mpi_handle_request_termination(req);
  887. if (req->request_type == RECV_REQ)
  888. {
  889. _STARPU_MPI_TRACE_IRECV_COMPLETE_END(req->node_tag.rank, req->node_tag.data_tag);
  890. }
  891. else if (req->request_type == SEND_REQ)
  892. {
  893. _STARPU_MPI_TRACE_ISEND_COMPLETE_END(req->node_tag.rank, req->node_tag.data_tag, 0);
  894. }
  895. }
  896. STARPU_PTHREAD_MUTEX_LOCK(&detached_requests_mutex);
  897. if (flag)
  898. {
  899. _starpu_mpi_req_list_erase(detached_requests, req);
  900. #ifdef STARPU_DEVEL
  901. #warning FIXME: when do we free internal requests
  902. #endif
  903. if (!req->is_internal_req)
  904. free(req);
  905. }
  906. }
  907. STARPU_PTHREAD_MUTEX_UNLOCK(&detached_requests_mutex);
  908. _STARPU_MPI_LOG_OUT();
  909. }
  910. static void _starpu_mpi_handle_detached_request(struct _starpu_mpi_req *req)
  911. {
  912. if (req->detached)
  913. {
  914. /* put the submitted request into the list of pending requests
  915. * so that it can be handled by the progression mechanisms */
  916. STARPU_PTHREAD_MUTEX_LOCK(&detached_requests_mutex);
  917. _starpu_mpi_req_list_push_front(detached_requests, req);
  918. STARPU_PTHREAD_MUTEX_UNLOCK(&detached_requests_mutex);
  919. starpu_wake_all_blocked_workers();
  920. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  921. STARPU_PTHREAD_COND_SIGNAL(&cond_progression);
  922. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  923. }
  924. }
  925. static void _starpu_mpi_handle_ready_request(struct _starpu_mpi_req *req)
  926. {
  927. _STARPU_MPI_LOG_IN();
  928. STARPU_MPI_ASSERT_MSG(req, "Invalid request");
  929. /* submit the request to MPI */
  930. _STARPU_MPI_DEBUG(2, "Handling new request %p type %s tag %d src %d data %p ptr %p datatype '%s' count %d user_datatype %d \n",
  931. req, _starpu_mpi_request_type(req->request_type), req->node_tag.data_tag, req->node_tag.rank, req->data_handle, req->ptr, _starpu_mpi_datatype(req->datatype), (int)req->count, req->user_datatype);
  932. req->func(req);
  933. _STARPU_MPI_LOG_OUT();
  934. }
  935. struct _starpu_mpi_argc_argv
  936. {
  937. int initialize_mpi;
  938. int *argc;
  939. char ***argv;
  940. MPI_Comm comm;
  941. };
  942. static void _starpu_mpi_print_thread_level_support(int thread_level, char *msg)
  943. {
  944. switch (thread_level)
  945. {
  946. case MPI_THREAD_SERIALIZED:
  947. {
  948. _STARPU_DISP("MPI%s MPI_THREAD_SERIALIZED; Multiple threads may make MPI calls, but only one at a time.\n", msg);
  949. break;
  950. }
  951. case MPI_THREAD_FUNNELED:
  952. {
  953. _STARPU_DISP("MPI%s MPI_THREAD_FUNNELED; The application can safely make calls to StarPU-MPI functions, but should not call directly MPI communication functions.\n", msg);
  954. break;
  955. }
  956. case MPI_THREAD_SINGLE:
  957. {
  958. _STARPU_DISP("MPI%s MPI_THREAD_SINGLE; MPI does not have multi-thread support, this might cause problems. The application can make calls to StarPU-MPI functions, but not call directly MPI Communication functions.\n", msg);
  959. break;
  960. }
  961. }
  962. }
  963. static void _starpu_mpi_receive_early_data(struct _starpu_mpi_envelope *envelope, MPI_Status status, MPI_Comm comm)
  964. {
  965. _STARPU_MPI_DEBUG(20, "Request with tag %d and source %d not found, creating a early_handle to receive incoming data..\n", envelope->data_tag, status.MPI_SOURCE);
  966. _STARPU_MPI_DEBUG(20, "Request sync %d\n", envelope->sync);
  967. struct _starpu_mpi_early_data_handle* early_data_handle = _starpu_mpi_early_data_create(envelope, status.MPI_SOURCE, comm);
  968. starpu_data_handle_t data_handle = NULL;
  969. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  970. data_handle = _starpu_mpi_data_get_data_handle_from_tag(envelope->data_tag);
  971. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  972. if (data_handle && starpu_data_get_interface_id(data_handle) < STARPU_MAX_INTERFACE_ID)
  973. {
  974. /* We know which data handle will receive it and we won't have to unpack, so just register the same kind of data. */
  975. early_data_handle->buffer = NULL;
  976. starpu_data_register_same(&early_data_handle->handle, data_handle);
  977. _starpu_mpi_early_data_add(early_data_handle);
  978. }
  979. else
  980. {
  981. /* The application has not yet registered a data handle for the tag,
  982. * so we receive the data as raw memory, and give it
  983. * to the application when it posts a receive for this tag
  984. */
  985. _STARPU_MPI_DEBUG(3, "Posting a receive for a data of size %d which has not yet been registered\n", (int)early_data_handle->env->size);
  986. early_data_handle->buffer = malloc(early_data_handle->env->size);
  987. starpu_variable_data_register(&early_data_handle->handle, STARPU_MAIN_RAM, (uintptr_t) early_data_handle->buffer, early_data_handle->env->size);
  988. _starpu_mpi_early_data_add(early_data_handle);
  989. }
  990. _STARPU_MPI_DEBUG(20, "Posting internal detached irecv on early_handle with tag %d from comm %p src %d ..\n", early_data_handle->node_tag.data_tag, comm, status.MPI_SOURCE);
  991. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  992. early_data_handle->req = _starpu_mpi_irecv_common(early_data_handle->handle, status.MPI_SOURCE,
  993. early_data_handle->node_tag.data_tag, comm, 1, 0,
  994. NULL, NULL, 1, 1, envelope->size);
  995. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  996. // We wait until the request has been pushed into the
  997. // ready_requests list; this ensures that the next loop
  998. // iteration will call _starpu_mpi_handle_ready_request
  999. // on the request and post the corresponding MPI_Irecv,
  1000. // otherwise incoming data may be read as an envelope
  1001. STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
  1002. STARPU_PTHREAD_MUTEX_LOCK(&(early_data_handle->req->posted_mutex));
  1003. while (!(early_data_handle->req->posted))
  1004. STARPU_PTHREAD_COND_WAIT(&(early_data_handle->req->posted_cond), &(early_data_handle->req->posted_mutex));
  1005. STARPU_PTHREAD_MUTEX_UNLOCK(&(early_data_handle->req->posted_mutex));
  1006. STARPU_PTHREAD_MUTEX_LOCK(&early_data_handle->req_mutex);
  1007. early_data_handle->req_ready = 1;
  1008. STARPU_PTHREAD_COND_BROADCAST(&early_data_handle->req_cond);
  1009. STARPU_PTHREAD_MUTEX_UNLOCK(&early_data_handle->req_mutex);
  1010. STARPU_PTHREAD_MUTEX_LOCK(&mutex);
  1011. }
static void *_starpu_mpi_progress_thread_func(void *arg)
{
	struct _starpu_mpi_argc_argv *argc_argv = (struct _starpu_mpi_argc_argv *) arg;
	int rank, worldsize;

	if (argc_argv->initialize_mpi)
	{
		int thread_support;
		_STARPU_DEBUG("Calling MPI_Init_thread\n");
		if (MPI_Init_thread(argc_argv->argc, argc_argv->argv, MPI_THREAD_SERIALIZED, &thread_support) != MPI_SUCCESS)
		{
			_STARPU_ERROR("MPI_Init_thread failed\n");
		}
		_starpu_mpi_print_thread_level_support(thread_support, "_Init_thread level =");
	}
	else
	{
		int provided;
		MPI_Query_thread(&provided);
		_starpu_mpi_print_thread_level_support(provided, " has been initialized with");
	}

	MPI_Comm_rank(argc_argv->comm, &rank);
	MPI_Comm_size(argc_argv->comm, &worldsize);
	MPI_Comm_set_errhandler(argc_argv->comm, MPI_ERRORS_RETURN);

#ifdef STARPU_SIMGRID
	_mpi_world_size = worldsize;
	_mpi_world_rank = rank;
	/* Now that MPI is set up, let the rest of simgrid get initialized */
	MSG_process_create_with_arguments("main", smpi_simulated_main_, NULL, _starpu_simgrid_get_host_by_name("MAIN"), *(argc_argv->argc), *(argc_argv->argv));
#endif

	{
		_STARPU_MPI_TRACE_START(rank, worldsize);
#ifdef STARPU_USE_FXT
		starpu_profiling_set_id(rank);
#endif //STARPU_USE_FXT
	}

	_starpu_mpi_add_sync_point_in_fxt();
	_starpu_mpi_comm_amounts_init(argc_argv->comm);
	_starpu_mpi_cache_init(argc_argv->comm);
	_starpu_mpi_select_node_init();
	_starpu_mpi_tag_init();
	_starpu_mpi_comm_init(argc_argv->comm);

	_starpu_mpi_early_request_init();
	_starpu_mpi_early_data_init();
	_starpu_mpi_sync_data_init();

	/* notify the main thread that the progression thread is ready */
	STARPU_PTHREAD_MUTEX_LOCK(&mutex);
	running = 1;
	STARPU_PTHREAD_COND_SIGNAL(&cond_progression);
	STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);

	STARPU_PTHREAD_MUTEX_LOCK(&mutex);

	int envelope_request_submitted = 0;

	while (running || posted_requests || !(_starpu_mpi_req_list_empty(ready_requests)) || !(_starpu_mpi_req_list_empty(detached_requests)))// || !(_starpu_mpi_early_request_count()) || !(_starpu_mpi_sync_data_count()))
	{
		/* shall we block ? */
		unsigned block = _starpu_mpi_req_list_empty(ready_requests) && _starpu_mpi_early_request_count() == 0 && _starpu_mpi_sync_data_count() == 0;

#ifndef STARPU_MPI_ACTIVITY
		STARPU_PTHREAD_MUTEX_LOCK(&detached_requests_mutex);
		block = block && _starpu_mpi_req_list_empty(detached_requests);
		STARPU_PTHREAD_MUTEX_UNLOCK(&detached_requests_mutex);
#endif /* STARPU_MPI_ACTIVITY */

		if (block)
		{
			_STARPU_MPI_DEBUG(3, "NO MORE REQUESTS TO HANDLE\n");

			_STARPU_MPI_TRACE_SLEEP_BEGIN();

			if (barrier_running)
				/* Tell mpi_barrier */
				STARPU_PTHREAD_COND_SIGNAL(&cond_finished);

			STARPU_PTHREAD_COND_WAIT(&cond_progression, &mutex);

			_STARPU_MPI_TRACE_SLEEP_END();
		}

		/* get one request */
		struct _starpu_mpi_req *req;
		while (!_starpu_mpi_req_list_empty(ready_requests))
		{
			req = _starpu_mpi_req_list_pop_back(ready_requests);

			/* handling a request is likely to block for a while
			 * (on a sync_data_with_mem call), we want to let the
			 * application submit requests in the meantime, so we
			 * release the lock. */
			STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
			_starpu_mpi_handle_ready_request(req);
			STARPU_PTHREAD_MUTEX_LOCK(&mutex);
		}

		/* If there is no envelope request currently submitted to catch
		 * envelopes from senders, and there are some pending receive
		 * requests on our side, we resubmit a header request. */
		if (((_starpu_mpi_early_request_count() > 0) || (_starpu_mpi_sync_data_count() > 0)) && (envelope_request_submitted == 0))// && (HASH_COUNT(_starpu_mpi_early_data_handle_hashmap) == 0))
		{
			_starpu_mpi_comm_post_recv();
			envelope_request_submitted = 1;
		}

		/* test whether there are some terminated "detached requests" */
		STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
		_starpu_mpi_test_detached_requests();
		STARPU_PTHREAD_MUTEX_LOCK(&mutex);

		if (envelope_request_submitted == 1)
		{
			int flag;
			struct _starpu_mpi_envelope *envelope;
			MPI_Status envelope_status;
			MPI_Comm envelope_comm;

			/* test whether an envelope has arrived. */
			flag = _starpu_mpi_comm_test_recv(&envelope_status, &envelope, &envelope_comm);

			if (flag)
			{
				_STARPU_MPI_DEBUG(4, "Envelope received with mode %d\n", envelope->mode);
				if (envelope->mode == _STARPU_MPI_ENVELOPE_SYNC_READY)
				{
					struct _starpu_mpi_req *_sync_req = _starpu_mpi_sync_data_find(envelope->data_tag, envelope_status.MPI_SOURCE, envelope_comm);
					_STARPU_MPI_DEBUG(20, "Sending data with tag %d to node %d\n", _sync_req->node_tag.data_tag, envelope_status.MPI_SOURCE);
					STARPU_MPI_ASSERT_MSG(envelope->data_tag == _sync_req->node_tag.data_tag, "Tag mismatch (envelope %d != req %d)\n", envelope->data_tag, _sync_req->node_tag.data_tag);
					STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
					_starpu_mpi_isend_data_func(_sync_req);
					STARPU_PTHREAD_MUTEX_LOCK(&mutex);
				}
				else
				{
					_STARPU_MPI_DEBUG(3, "Searching for application request with tag %d and source %d (size %ld)\n", envelope->data_tag, envelope_status.MPI_SOURCE, envelope->size);

					struct _starpu_mpi_req *early_request = _starpu_mpi_early_request_find(envelope->data_tag, envelope_status.MPI_SOURCE, envelope_comm);

					/* Case: the data will arrive before a matching receive is
					 * posted by the application. Create a temporary handle to
					 * store the incoming data, submit an internal detached
					 * receive on this handle, and store it as early_data.
					 */
					if (early_request == NULL)
					{
						if (envelope->sync)
						{
							_STARPU_MPI_DEBUG(2000, "-------------------------> adding request for tag %d\n", envelope->data_tag);
							struct _starpu_mpi_req *new_req;
#ifdef STARPU_DEVEL
#warning creating a request is not really useful.
#endif
							/* Initialize the request structure */
							_starpu_mpi_request_init(&new_req);
							new_req->request_type = RECV_REQ;
							new_req->data_handle = NULL;
							new_req->node_tag.rank = envelope_status.MPI_SOURCE;
							new_req->node_tag.data_tag = envelope->data_tag;
							new_req->node_tag.comm = envelope_comm;
							new_req->detached = 1;
							new_req->sync = 1;
							new_req->callback = NULL;
							new_req->callback_arg = NULL;
							new_req->func = _starpu_mpi_irecv_data_func;
							new_req->sequential_consistency = 1;
							new_req->is_internal_req = 0; // ????
							new_req->count = envelope->size;

							_starpu_mpi_sync_data_add(new_req);
						}
						else
						{
							_starpu_mpi_receive_early_data(envelope, envelope_status, envelope_comm);
						}
					}
					/* Case: a matching application request has been found for
					 * the incoming data, we handle the correct allocation
					 * of the pointer associated to the data handle, then
					 * submit the corresponding receive with
					 * _starpu_mpi_handle_ready_request. */
					else
					{
						_STARPU_MPI_DEBUG(2000, "A matching application request has been found for the incoming data with tag %d\n", envelope->data_tag);
						_STARPU_MPI_DEBUG(2000, "Request sync %d\n", envelope->sync);

						_starpu_mpi_early_request_delete(early_request);

						early_request->sync = envelope->sync;
						_starpu_mpi_handle_allocate_datatype(early_request->data_handle, &early_request->datatype, &early_request->user_datatype);
						if (early_request->user_datatype == 0)
						{
							early_request->count = 1;
							early_request->ptr = starpu_data_get_local_ptr(early_request->data_handle);
						}
						else
						{
							early_request->count = envelope->size;
							early_request->ptr = malloc(early_request->count);
							STARPU_MPI_ASSERT_MSG(early_request->ptr, "cannot allocate message of size %ld\n", early_request->count);
						}

						_STARPU_MPI_DEBUG(3, "Handling new request... \n");
						/* handling a request is likely to block for a while
						 * (on a sync_data_with_mem call), we want to let the
						 * application submit requests in the meantime, so we
						 * release the lock. */
						STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
						_starpu_mpi_handle_ready_request(early_request);
						STARPU_PTHREAD_MUTEX_LOCK(&mutex);
					}
				}
				envelope_request_submitted = 0;
			}
			else
			{
				//_STARPU_MPI_DEBUG(4, "Nothing received, continue ..\n");
			}
		}
	}

	if (envelope_request_submitted)
	{
		_starpu_mpi_comm_cancel_recv();
		envelope_request_submitted = 0;
	}

	STARPU_MPI_ASSERT_MSG(_starpu_mpi_req_list_empty(detached_requests), "List of detached requests not empty");
	STARPU_MPI_ASSERT_MSG(_starpu_mpi_req_list_empty(ready_requests), "List of ready requests not empty");
	STARPU_MPI_ASSERT_MSG(posted_requests == 0, "Number of posted requests is not zero");
	_starpu_mpi_early_request_check_termination();
	_starpu_mpi_early_data_check_termination();
	_starpu_mpi_sync_data_check_termination();

	if (argc_argv->initialize_mpi)
	{
		_STARPU_MPI_DEBUG(3, "Calling MPI_Finalize()\n");
		MPI_Finalize();
	}

	STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);

	_starpu_mpi_sync_data_free();
	_starpu_mpi_early_data_free();
	_starpu_mpi_early_request_free();
	free(argc_argv);

	return NULL;
}
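
/* Illustrative sketch (simplified, not part of this file): the loop above
 * implements the receiving side of a two-message protocol in which every data
 * transfer is preceded by a small fixed-size "envelope" describing it (tag,
 * size, sync mode). From the sender's side, the idea is roughly:
 *
 *	struct envelope env = { .mode = DATA, .data_tag = tag, .size = size, .sync = 0 };
 *	MPI_Send(&env, sizeof(env), MPI_BYTE, dst, ENVELOPE_TAG, comm);	// header first
 *	MPI_Send(ptr, size, MPI_BYTE, dst, tag, comm);				// then the payload
 *
 * struct envelope, DATA, ENVELOPE_TAG, dst, tag, size and ptr are assumptions
 * used only for illustration; the real implementation uses the
 * _starpu_mpi_envelope structure and non-blocking sends.
 */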
/********************************************************/
/*                                                      */
/*           (De)Initialization methods                 */
/*                                                      */
/********************************************************/
#ifdef STARPU_MPI_ACTIVITY
static int hookid = -1;
#endif /* STARPU_MPI_ACTIVITY */

static void _starpu_mpi_add_sync_point_in_fxt(void)
{
#ifdef STARPU_USE_FXT
	int rank;
	int worldsize;
	int ret;

	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &worldsize);

	ret = MPI_Barrier(MPI_COMM_WORLD);
	STARPU_MPI_ASSERT_MSG(ret == MPI_SUCCESS, "MPI_Barrier returning %s", _starpu_mpi_get_mpi_code(ret));

	/* We generate a "unique" key so that we can make sure that different
	 * FxT traces come from the same MPI run. */
	int random_number;

	/* XXX perhaps we don't want to generate a new seed if the application
	 * specified some reproducible behaviour ? */
	if (rank == 0)
	{
		srand(time(NULL));
		random_number = rand();
	}

	ret = MPI_Bcast(&random_number, 1, MPI_INT, 0, MPI_COMM_WORLD);
	STARPU_MPI_ASSERT_MSG(ret == MPI_SUCCESS, "MPI_Bcast returning %s", _starpu_mpi_get_mpi_code(ret));

	_STARPU_MPI_TRACE_BARRIER(rank, worldsize, random_number);

	_STARPU_MPI_DEBUG(3, "unique key %x\n", random_number);
#endif
}
static
int _starpu_mpi_initialize(int *argc, char ***argv, int initialize_mpi, MPI_Comm comm)
{
	STARPU_PTHREAD_MUTEX_INIT(&mutex, NULL);
	STARPU_PTHREAD_COND_INIT(&cond_progression, NULL);
	STARPU_PTHREAD_COND_INIT(&cond_finished, NULL);
	ready_requests = _starpu_mpi_req_list_new();

	STARPU_PTHREAD_MUTEX_INIT(&detached_requests_mutex, NULL);
	detached_requests = _starpu_mpi_req_list_new();

	STARPU_PTHREAD_MUTEX_INIT(&mutex_posted_requests, NULL);

	struct _starpu_mpi_argc_argv *argc_argv = malloc(sizeof(struct _starpu_mpi_argc_argv));
	argc_argv->initialize_mpi = initialize_mpi;
	argc_argv->argc = argc;
	argc_argv->argv = argv;
	argc_argv->comm = comm;

#ifdef STARPU_MPI_ACTIVITY
	hookid = starpu_progression_hook_register(_starpu_mpi_progression_hook_func, NULL);
	STARPU_MPI_ASSERT_MSG(hookid >= 0, "starpu_progression_hook_register failed");
#endif /* STARPU_MPI_ACTIVITY */

#ifdef STARPU_SIMGRID
	_starpu_mpi_progress_thread_func(argc_argv);
	return 0;
#else
	STARPU_PTHREAD_CREATE(&progress_thread, NULL, _starpu_mpi_progress_thread_func, argc_argv);

	STARPU_PTHREAD_MUTEX_LOCK(&mutex);
	while (!running)
		STARPU_PTHREAD_COND_WAIT(&cond_progression, &mutex);
	STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);

	return 0;
#endif
}
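
/* Illustrative sketch (standalone, not part of this file): the startup
 * handshake used above -- create the progression thread, then block until it
 * sets a flag under the shared mutex and signals the condition variable --
 * follows the usual pthread pattern:
 *
 *	pthread_create(&tid, NULL, worker, NULL);	// worker locks m, sets ready = 1, signals c
 *	pthread_mutex_lock(&m);
 *	while (!ready)
 *		pthread_cond_wait(&c, &m);
 *	pthread_mutex_unlock(&m);
 *
 * m, c, tid, worker and ready are assumptions used only for illustration.
 */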
#ifdef STARPU_SIMGRID
/* This is called before the application's main, to initialize SMPI before we
 * can create MSG processes to run the application's main */
int _starpu_mpi_simgrid_init(int argc, char *argv[])
{
	return _starpu_mpi_initialize(&argc, &argv, 1, MPI_COMM_WORLD);
}
#endif

int starpu_mpi_init_comm(int *argc, char ***argv, int initialize_mpi, MPI_Comm comm)
{
#ifdef STARPU_SIMGRID
	STARPU_MPI_ASSERT_MSG(initialize_mpi, "application has to let StarPU initialize MPI");
	return 0;
#else
	return _starpu_mpi_initialize(argc, argv, initialize_mpi, comm);
#endif
}

int starpu_mpi_init(int *argc, char ***argv, int initialize_mpi)
{
	return starpu_mpi_init_comm(argc, argv, initialize_mpi, MPI_COMM_WORLD);
}
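
/* Illustrative sketch (hypothetical application code, not part of this file):
 * a typical program initializes StarPU first, then StarPU-MPI with
 * initialize_mpi set to 1 so that MPI_Init_thread is called by the
 * progression thread, and shuts both down in reverse order:
 *
 *	int main(int argc, char **argv)
 *	{
 *		int ret = starpu_init(NULL);
 *		if (ret != 0) return 1;
 *		ret = starpu_mpi_init(&argc, &argv, 1);
 *		if (ret != 0) return 1;
 *
 *		// ... register data, submit tasks and communications ...
 *
 *		starpu_mpi_shutdown();
 *		starpu_shutdown();
 *		return 0;
 *	}
 */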
int starpu_mpi_initialize(void)
{
#ifdef STARPU_SIMGRID
	STARPU_MPI_ASSERT_MSG(0, "application has to let StarPU initialize MPI");
	return 0;
#else
	return _starpu_mpi_initialize(NULL, NULL, 0, MPI_COMM_WORLD);
#endif
}

int starpu_mpi_initialize_extended(int *rank, int *world_size)
{
#ifdef STARPU_SIMGRID
	*world_size = _mpi_world_size;
	*rank = _mpi_world_rank;
	return 0;
#else
	int ret;

	ret = _starpu_mpi_initialize(NULL, NULL, 1, MPI_COMM_WORLD);
	if (ret == 0)
	{
		_STARPU_DEBUG("Calling MPI_Comm_rank\n");
		MPI_Comm_rank(MPI_COMM_WORLD, rank);
		MPI_Comm_size(MPI_COMM_WORLD, world_size);
	}
	return ret;
#endif
}
int starpu_mpi_shutdown(void)
{
	void *value;
	int rank, world_size;

	/* We need to get the rank before calling MPI_Finalize to pass to _starpu_mpi_comm_amounts_display() */
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &world_size);

	/* kill the progression thread */
	STARPU_PTHREAD_MUTEX_LOCK(&mutex);
	running = 0;
	STARPU_PTHREAD_COND_BROADCAST(&cond_progression);
	STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);

	starpu_pthread_join(progress_thread, &value);

#ifdef STARPU_MPI_ACTIVITY
	starpu_progression_hook_deregister(hookid);
#endif /* STARPU_MPI_ACTIVITY */

	_STARPU_MPI_TRACE_STOP(rank, world_size);

	/* free the request queues */
	_starpu_mpi_req_list_delete(detached_requests);
	_starpu_mpi_req_list_delete(ready_requests);

	_starpu_mpi_comm_amounts_display(rank);
	_starpu_mpi_comm_amounts_free();
	_starpu_mpi_cache_free(world_size);
	_starpu_mpi_tag_free();
	_starpu_mpi_comm_free();

	return 0;
}

void _starpu_mpi_clear_cache(starpu_data_handle_t data_handle)
{
	_starpu_mpi_data_release_tag(data_handle);
	struct _starpu_mpi_node_tag *mpi_data = data_handle->mpi_data;
	_starpu_mpi_cache_flush(mpi_data->comm, data_handle);
	free(data_handle->mpi_data);
}
void starpu_mpi_data_register_comm(starpu_data_handle_t data_handle, int tag, int rank, MPI_Comm comm)
{
	struct _starpu_mpi_node_tag *mpi_data;
	if (data_handle->mpi_data)
	{
		mpi_data = data_handle->mpi_data;
	}
	else
	{
		mpi_data = calloc(1, sizeof(struct _starpu_mpi_node_tag));
		data_handle->mpi_data = mpi_data;
		_starpu_mpi_data_register_tag(data_handle, tag);
		_starpu_data_set_unregister_hook(data_handle, _starpu_mpi_clear_cache);
	}

	if (tag != -1)
	{
		mpi_data->data_tag = tag;
	}
	if (rank != -1)
	{
		mpi_data->rank = rank;
		mpi_data->comm = comm;
		_starpu_mpi_comm_register(comm);
	}
}
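
/* Illustrative sketch (hypothetical application code, not part of this file):
 * after registering a piece of data with StarPU, the application attaches an
 * MPI tag and an owner rank to it so that StarPU-MPI can route transfers:
 *
 *	starpu_data_handle_t handle;
 *	starpu_vector_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)vec, NX, sizeof(float));
 *	starpu_mpi_data_register_comm(handle, 42, owner_rank, MPI_COMM_WORLD);
 *
 * vec, NX, owner_rank and the tag value 42 are assumptions used only for
 * illustration.
 */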
void starpu_mpi_data_set_rank_comm(starpu_data_handle_t handle, int rank, MPI_Comm comm)
{
	starpu_mpi_data_register_comm(handle, -1, rank, comm);
}

void starpu_mpi_data_set_tag(starpu_data_handle_t handle, int tag)
{
	starpu_mpi_data_register_comm(handle, tag, -1, MPI_COMM_WORLD);
}
int starpu_mpi_data_get_rank(starpu_data_handle_t data)
{
	STARPU_ASSERT_MSG(data->mpi_data, "starpu_mpi_data_register MUST be called for data %p\n", data);
	return ((struct _starpu_mpi_node_tag *)(data->mpi_data))->rank;
}

int starpu_mpi_data_get_tag(starpu_data_handle_t data)
{
	STARPU_ASSERT_MSG(data->mpi_data, "starpu_mpi_data_register MUST be called for data %p\n", data);
	return ((struct _starpu_mpi_node_tag *)(data->mpi_data))->data_tag;
}
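
/* Illustrative sketch (hypothetical application code, not part of this file):
 * the stored rank and tag can be queried to decide, on each node, which side
 * of a point-to-point transfer to perform:
 *
 *	int me, owner = starpu_mpi_data_get_rank(handle);
 *	int tag = starpu_mpi_data_get_tag(handle);
 *	starpu_mpi_comm_rank(MPI_COMM_WORLD, &me);
 *	if (me == 0 && owner != 0)
 *		starpu_mpi_recv(handle, owner, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
 *	else if (me == owner && owner != 0)
 *		starpu_mpi_send(handle, 0, tag, MPI_COMM_WORLD);
 *
 * handle and the choice of node 0 as the destination are assumptions used
 * only for illustration.
 */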
int starpu_mpi_comm_size(MPI_Comm comm, int *size)
{
#ifdef STARPU_SIMGRID
	STARPU_MPI_ASSERT_MSG(comm == MPI_COMM_WORLD, "StarPU-SMPI only works with MPI_COMM_WORLD for now");
	*size = _mpi_world_size;
	return 0;
#else
	return MPI_Comm_size(comm, size);
#endif
}

int starpu_mpi_comm_rank(MPI_Comm comm, int *rank)
{
#ifdef STARPU_SIMGRID
	STARPU_MPI_ASSERT_MSG(comm == MPI_COMM_WORLD, "StarPU-SMPI only works with MPI_COMM_WORLD for now");
	*rank = _mpi_world_rank;
	return 0;
#else
	return MPI_Comm_rank(comm, rank);
#endif
}

int starpu_mpi_world_rank(void)
{
	int rank;
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	return rank;
}
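
/* Illustrative sketch (hypothetical application code, not part of this file):
 * these wrappers behave like MPI_Comm_rank/MPI_Comm_size but, as the #ifdef
 * branches above show, also work when StarPU is built with SimGrid support:
 *
 *	int rank, size;
 *	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
 *	starpu_mpi_comm_size(MPI_COMM_WORLD, &size);
 *	printf("node %d out of %d\n", rank, size);
 */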