driver_mpi_common.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2015 Mathieu Lirzin <mthl@openmailbox.org>
 * Copyright (C) 2016 Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <mpi.h>

#include <core/workers.h>
#include <core/perfmodel/perfmodel.h>

#include "driver_mpi_common.h"

#define NITER 32
#define SIZE_BANDWIDTH (1024*1024)

#define SYNC_TAG 42

#define DRIVER_MPI_MASTER_NODE_DEFAULT 0

static int mpi_initialized = 0;
static int extern_initialized = 0;
static int src_node_id;
static void _starpu_mpi_set_src_node_id()
{
	int node_id = starpu_get_env_number("STARPU_MPI_MASTER_NODE");

	if (node_id != -1)
	{
		int nb_proc, id_proc;
		MPI_Comm_size(MPI_COMM_WORLD, &nb_proc);
		MPI_Comm_rank(MPI_COMM_WORLD, &id_proc);

		if (node_id < nb_proc)
		{
			src_node_id = node_id;
			return;
		}
		else if (id_proc == DRIVER_MPI_MASTER_NODE_DEFAULT)
		{
			/* Only one node prints the error message. */
			fprintf(stderr, "The node you specified as the master (%d) is out of range: there are only %d nodes.\n"
					"Using node %d by default...\n", node_id, nb_proc, DRIVER_MPI_MASTER_NODE_DEFAULT);
		}
	}

	/* Default node. */
	src_node_id = DRIVER_MPI_MASTER_NODE_DEFAULT;
}
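/* Illustration (not from this file): the master rank is selected only through
 * the STARPU_MPI_MASTER_NODE environment variable read above; the launcher
 * and binary name below are hypothetical placeholders.
 *
 *   # Run 4 MPI processes and make rank 2 the StarPU master (source) node:
 *   #   STARPU_MPI_MASTER_NODE=2 mpirun -np 4 ./a.out
 *
 * Any value outside the range of available ranks falls back to
 * DRIVER_MPI_MASTER_NODE_DEFAULT, as handled above.
 */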
int _starpu_mpi_common_mp_init()
{
	/* Here we assume the programmer called starpu_init() twice. */
	if (mpi_initialized)
		return -ENODEV;

	mpi_initialized = 1;

	if (MPI_Initialized(&extern_initialized) != MPI_SUCCESS)
		STARPU_ABORT_MSG("Cannot check whether MPI is already initialized!");

	/* If MPI_Init or MPI_Init_thread has already been called outside StarPU, do not call it again. */
	if (!extern_initialized)
	{
#if defined(STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD)
		int required = MPI_THREAD_MULTIPLE;
#else
		int required = MPI_THREAD_FUNNELED;
#endif

		int thread_support;
		STARPU_ASSERT(MPI_Init_thread(_starpu_get_argc(), _starpu_get_argv(), required, &thread_support) == MPI_SUCCESS);

		if (thread_support < required)
		{
			if (required == MPI_THREAD_MULTIPLE)
				fprintf(stderr, "MPI does not support MPI_THREAD_MULTIPLE. MPI Master-Slave may misbehave if multiple slaves are launched.\n");
			if (required == MPI_THREAD_FUNNELED)
				fprintf(stderr, "MPI does not support MPI_THREAD_FUNNELED. Many errors can occur.\n");
		}
	}

	/* Find out which node is the master. */
	_starpu_mpi_set_src_node_id();

	return 1;
}
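/* Illustrative sketch (not part of this file): an application may initialize
 * MPI itself before starpu_init(). In that case MPI_Initialized() sets
 * extern_initialized above, MPI_Init_thread() is skipped here, and
 * _starpu_mpi_common_mp_deinit() below leaves MPI_Finalize() to the
 * application:
 *
 *   int provided;
 *   MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
 *   starpu_init(NULL);   // detects that MPI is already initialized
 *   ...                  // submit tasks
 *   starpu_shutdown();   // does not finalize MPI
 *   MPI_Finalize();      // the application keeps ownership of MPI
 */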
void _starpu_mpi_common_mp_deinit()
{
	if (!extern_initialized)
		MPI_Finalize();
}

int _starpu_mpi_common_is_src_node()
{
	int id_proc;
	MPI_Comm_rank(MPI_COMM_WORLD, &id_proc);
	return id_proc == src_node_id;
}

int _starpu_mpi_common_get_src_node()
{
	return src_node_id;
}

int _starpu_mpi_common_is_mp_initialized()
{
	return mpi_initialized;
}
/* Common part to initialize a source or a sink node */
void _starpu_mpi_common_mp_initialize_src_sink(struct _starpu_mp_node *node)
{
	struct _starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;

	node->nb_cores = topology->nhwcpus;
}

int _starpu_mpi_common_recv_is_ready(const struct _starpu_mp_node *mp_node)
{
	int res, source;
	int flag = 0;
	int id_proc;
	MPI_Comm_rank(MPI_COMM_WORLD, &id_proc);

	if (id_proc == src_node_id)
	{
		/* The source knows its peer, so probe only that remote node. */
		source = mp_node->mp_connection.mpi_remote_nodeid;
	}
	else
	{
		/* A sink can also receive sink-to-sink messages, so probe any source. */
		source = MPI_ANY_SOURCE;
	}

	res = MPI_Iprobe(source, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, MPI_STATUS_IGNORE);
	STARPU_ASSERT_MSG(res == MPI_SUCCESS, "MPI Master/Slave cannot test whether a message has arrived!");

	return flag;
}
/* SEND to the remote node of this connection */
void _starpu_mpi_common_send(const struct _starpu_mp_node *node, void *msg, int len)
{
	int res;
	int id_proc;
	MPI_Comm_rank(MPI_COMM_WORLD, &id_proc);

	printf("send %d B to %d\n", len, node->mp_connection.mpi_remote_nodeid);

	res = MPI_Send(msg, len, MPI_BYTE, node->mp_connection.mpi_remote_nodeid, SYNC_TAG, MPI_COMM_WORLD);
	STARPU_ASSERT_MSG(res == MPI_SUCCESS, "MPI Master/Slave cannot send a msg of %d bytes!", len);
}
/* RECV from the remote node of this connection */
void _starpu_mpi_common_recv(const struct _starpu_mp_node *node, void *msg, int len)
{
	int res;
	int id_proc;
	MPI_Status s;
	MPI_Comm_rank(MPI_COMM_WORLD, &id_proc);

	printf("recv %d B from %d in %p\n", len, node->mp_connection.mpi_remote_nodeid, msg);

	res = MPI_Recv(msg, len, MPI_BYTE, node->mp_connection.mpi_remote_nodeid, SYNC_TAG, MPI_COMM_WORLD, &s);
	STARPU_ASSERT_MSG(res == MPI_SUCCESS, "MPI Master/Slave cannot receive a msg of %d bytes!", len);

	int num_received;
	MPI_Get_count(&s, MPI_BYTE, &num_received);
	STARPU_ASSERT_MSG(num_received == len, "MPI Master/Slave received a msg of %d bytes (expected %d bytes)!", num_received, len);
}
/* SEND to any node */
void _starpu_mpi_common_send_to_device(const struct _starpu_mp_node *node, int dst_devid, void *msg, int len)
{
	int res;
	int id_proc;
	MPI_Comm_rank(MPI_COMM_WORLD, &id_proc);

	printf("send %d B to %d from %p\n", len, dst_devid, msg);

	res = MPI_Send(msg, len, MPI_BYTE, dst_devid, SYNC_TAG, MPI_COMM_WORLD);
	STARPU_ASSERT_MSG(res == MPI_SUCCESS, "MPI Master/Slave cannot send a msg of %d bytes!", len);
}
/* RECV from any node */
void _starpu_mpi_common_recv_from_device(const struct _starpu_mp_node *node, int src_devid, void *msg, int len)
{
	int res;
	int id_proc;
	MPI_Comm_rank(MPI_COMM_WORLD, &id_proc);

	printf("recv %d B from %d\n", len, src_devid);

	res = MPI_Recv(msg, len, MPI_BYTE, src_devid, SYNC_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	STARPU_ASSERT_MSG(res == MPI_SUCCESS, "MPI Master/Slave cannot receive a msg of %d bytes!", len);
}
void _starpu_mpi_common_barrier(void)
{
	MPI_Barrier(MPI_COMM_WORLD);
}
/* Compute bandwidth and latency between the source node and each sink node.
 * The source node ends up with the complete set of measurements.
 */
void _starpu_mpi_common_measure_bandwidth_latency(double *bandwidth_htod, double *bandwidth_dtoh, double *latency_htod, double *latency_dtoh)
{
	int ret;
	unsigned iter;
	int nb_proc, id_proc;
	MPI_Comm_rank(MPI_COMM_WORLD, &id_proc);
	MPI_Comm_size(MPI_COMM_WORLD, &nb_proc);

	char *buf = malloc(SIZE_BANDWIDTH);
	STARPU_ASSERT(buf != NULL);
	memset(buf, 0, SIZE_BANDWIDTH);

	int node;
	unsigned id = 0;
	for (node = 0; node < nb_proc; node++)
	{
		MPI_Barrier(MPI_COMM_WORLD);

		/* Don't measure the master <-> master link. */
		if (node == src_node_id)
			continue;

		if (_starpu_mpi_common_is_src_node())
		{
			double start, end;

			/* measure bandwidth host to device */
			start = starpu_timing_now();
			for (iter = 0; iter < NITER; iter++)
			{
				ret = MPI_Send(buf, SIZE_BANDWIDTH, MPI_BYTE, node, node, MPI_COMM_WORLD);
				STARPU_ASSERT_MSG(ret == MPI_SUCCESS, "Bandwidth of MPI Master/Slave cannot be measured!");
			}
			end = starpu_timing_now();
			bandwidth_htod[id] = (NITER*1000000)/(end - start);

			/* measure bandwidth device to host */
			start = starpu_timing_now();
			for (iter = 0; iter < NITER; iter++)
			{
				ret = MPI_Recv(buf, SIZE_BANDWIDTH, MPI_BYTE, node, node, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
				STARPU_ASSERT_MSG(ret == MPI_SUCCESS, "Bandwidth of MPI Master/Slave cannot be measured!");
			}
			end = starpu_timing_now();
			bandwidth_dtoh[id] = (NITER*1000000)/(end - start);

			/* measure latency host to device */
			start = starpu_timing_now();
			for (iter = 0; iter < NITER; iter++)
			{
				ret = MPI_Send(buf, 1, MPI_BYTE, node, node, MPI_COMM_WORLD);
				STARPU_ASSERT_MSG(ret == MPI_SUCCESS, "Latency of MPI Master/Slave cannot be measured!");
			}
			end = starpu_timing_now();
			latency_htod[id] = (end - start)/NITER;

			/* measure latency device to host */
			start = starpu_timing_now();
			for (iter = 0; iter < NITER; iter++)
			{
				ret = MPI_Recv(buf, 1, MPI_BYTE, node, node, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
				STARPU_ASSERT_MSG(ret == MPI_SUCCESS, "Latency of MPI Master/Slave cannot be measured!");
			}
			end = starpu_timing_now();
			latency_dtoh[id] = (end - start)/NITER;
		}
		else if (node == id_proc) /* we are the sink node being evaluated */
		{
			/* measure bandwidth host to device */
			for (iter = 0; iter < NITER; iter++)
			{
				ret = MPI_Recv(buf, SIZE_BANDWIDTH, MPI_BYTE, src_node_id, node, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
				STARPU_ASSERT_MSG(ret == MPI_SUCCESS, "Bandwidth of MPI Master/Slave cannot be measured!");
			}

			/* measure bandwidth device to host */
			for (iter = 0; iter < NITER; iter++)
			{
				ret = MPI_Send(buf, SIZE_BANDWIDTH, MPI_BYTE, src_node_id, node, MPI_COMM_WORLD);
				STARPU_ASSERT_MSG(ret == MPI_SUCCESS, "Bandwidth of MPI Master/Slave cannot be measured!");
			}

			/* measure latency host to device */
			for (iter = 0; iter < NITER; iter++)
			{
				ret = MPI_Recv(buf, 1, MPI_BYTE, src_node_id, node, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
				STARPU_ASSERT_MSG(ret == MPI_SUCCESS, "Latency of MPI Master/Slave cannot be measured!");
			}

			/* measure latency device to host */
			for (iter = 0; iter < NITER; iter++)
			{
				ret = MPI_Send(buf, 1, MPI_BYTE, src_node_id, node, MPI_COMM_WORLD);
				STARPU_ASSERT_MSG(ret == MPI_SUCCESS, "Latency of MPI Master/Slave cannot be measured!");
			}
		}

		id++;
	}

	free(buf);
}
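/* Worked example of the units above (a sketch, assuming starpu_timing_now()
 * reports microseconds, as elsewhere in StarPU): each bandwidth round sends
 * NITER = 32 messages of SIZE_BANDWIDTH = 1 MiB, so
 *
 *   bandwidth[id] = (NITER * 1000000) / (end - start)
 *                 = messages per second
 *                 = MiB/s, since each message is exactly 1 MiB;
 *
 * and each latency round sends NITER 1-byte messages, so
 *
 *   latency[id] = (end - start) / NITER
 *
 * is the average time of one 1-byte transfer, in microseconds.
 */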