/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This test sends many communications simultaneously, with various configurations.
 *
 * The overall purpose is to observe the behaviour through execution traces.
 */
#include <starpu_mpi.h>
#include "helper.h"

#if defined(STARPU_SIMGRID) || defined(STARPU_QUICK_CHECK)
#define NB_REQUESTS 10
#else
#define NB_REQUESTS 500
#endif

#define NX_ARRAY (320 * 320)
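
/* Each message is NX_ARRAY floats, i.e. about 400 KB: presumably large enough
 * for most MPI implementations to use a rendezvous protocol rather than an
 * eager send. */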

static starpu_pthread_mutex_t mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
static starpu_pthread_cond_t cond = STARPU_PTHREAD_COND_INITIALIZER;
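
/* Callback for detached receives: flag that a reception happened and wake up
 * the main thread, which may be blocked on the condition variable. */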
void recv_callback(void* arg)
{
	int* received = arg;

	STARPU_PTHREAD_MUTEX_LOCK(&mutex);
	*received = 1;
	STARPU_PTHREAD_COND_SIGNAL(&cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
}

int main(int argc, char **argv)
{
	int ret, rank, size, mpi_init, other_rank;
	starpu_data_handle_t recv_handles[NB_REQUESTS];
	starpu_data_handle_t send_handles[NB_REQUESTS];
	float* recv_buffers[NB_REQUESTS];
	float* send_buffers[NB_REQUESTS];
	starpu_mpi_req recv_reqs[NB_REQUESTS];
	starpu_mpi_req send_reqs[NB_REQUESTS];

	MPI_INIT_THREAD(&argc, &argv, MPI_THREAD_SERIALIZED, &mpi_init);

	ret = starpu_mpi_init_conf(&argc, &argv, mpi_init, MPI_COMM_WORLD, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");

	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &size);
	other_rank = (rank == 0) ? 1 : 0;
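
	/* Only ranks 0 and 1 take part in the exchanges, so the test implicitly
	 * assumes at least two MPI processes; any further ranks just go through
	 * the barriers. Each of the two active ranks registers NB_REQUESTS send
	 * and receive vectors with StarPU before communicating. */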
	if (rank == 0 || rank == 1)
	{
		for (int i = 0; i < NB_REQUESTS; i++)
		{
			send_buffers[i] = malloc(NX_ARRAY * sizeof(float));
			memset(send_buffers[i], 0, NX_ARRAY * sizeof(float));
			starpu_vector_data_register(&send_handles[i], STARPU_MAIN_RAM, (uintptr_t) send_buffers[i], NX_ARRAY, sizeof(float));

			recv_buffers[i] = malloc(NX_ARRAY * sizeof(float));
			memset(recv_buffers[i], 0, NX_ARRAY * sizeof(float));
			starpu_vector_data_register(&recv_handles[i], STARPU_MAIN_RAM, (uintptr_t) recv_buffers[i], NX_ARRAY, sizeof(float));
		}
	}

	{
		/* Simultaneous burst from both nodes: 0 and 1 post all the recvs, synchronise, and then post all the sends */
		FPRINTF(stderr, "Simultaneous....start (rank %d)\n", rank);

		if (rank == 0 || rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				recv_reqs[i] = NULL;
				starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
			}
		}

		starpu_mpi_barrier(MPI_COMM_WORLD);
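
		/* Each request uses its index i both as the MPI tag and as its
		 * StarPU priority, so requests posted later are given a higher
		 * priority. */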
		if (rank == 0 || rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				send_reqs[i] = NULL;
				starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
			}
		}

		if (rank == 0 || rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				if (recv_reqs[i]) starpu_mpi_wait(&recv_reqs[i], MPI_STATUS_IGNORE);
				if (send_reqs[i]) starpu_mpi_wait(&send_reqs[i], MPI_STATUS_IGNORE);
			}
		}

		starpu_mpi_wait_for_all(MPI_COMM_WORLD);
		FPRINTF(stderr, "Simultaneous....end (rank %d)\n", rank);
		starpu_mpi_barrier(MPI_COMM_WORLD);
	}

	{
		/* Burst from 0 to 1: rank 1 posts all the recvs, barrier, then rank 0 posts all the sends */
		FPRINTF(stderr, "0 -> 1...start (rank %d)\n", rank);

		if (rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				recv_reqs[i] = NULL;
				starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
			}
		}

		starpu_mpi_barrier(MPI_COMM_WORLD);

		if (rank == 0)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				send_reqs[i] = NULL;
				starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
			}
		}

		if (rank == 0 || rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				if (rank == 1 && recv_reqs[i]) starpu_mpi_wait(&recv_reqs[i], MPI_STATUS_IGNORE);
				if (rank == 0 && send_reqs[i]) starpu_mpi_wait(&send_reqs[i], MPI_STATUS_IGNORE);
			}
		}

		starpu_mpi_wait_for_all(MPI_COMM_WORLD);
		FPRINTF(stderr, "0 -> 1...done (rank %d)\n", rank);
		starpu_mpi_barrier(MPI_COMM_WORLD);
	}

	{
		/* Burst from 1 to 0: rank 0 posts all the recvs, barrier, then rank 1 posts all the sends */
		FPRINTF(stderr, "1 -> 0...start (rank %d)\n", rank);

		if (rank == 0)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				recv_reqs[i] = NULL;
				starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
			}
		}

		starpu_mpi_barrier(MPI_COMM_WORLD);

		if (rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				send_reqs[i] = NULL;
				starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
			}
		}

		if (rank == 0 || rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				if (rank == 0 && recv_reqs[i]) starpu_mpi_wait(&recv_reqs[i], MPI_STATUS_IGNORE);
				if (rank == 1 && send_reqs[i]) starpu_mpi_wait(&send_reqs[i], MPI_STATUS_IGNORE);
			}
		}

		starpu_mpi_wait_for_all(MPI_COMM_WORLD);
		FPRINTF(stderr, "1 -> 0...done (rank %d)\n", rank);
		starpu_mpi_barrier(MPI_COMM_WORLD);
	}

	{
		/* Half burst from both nodes; the second half of the burst is triggered after some requests have completed. */
		FPRINTF(stderr, "Half/half burst...start (rank %d)\n", rank);

		int received = 0;
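
		/* Odd-indexed receives are posted as detached requests, completed
		 * through recv_callback(); even-indexed ones are regular requests
		 * waited on explicitly below. */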
		if (rank == 0 || rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				recv_reqs[i] = NULL;
				if (i % 2)
				{
					starpu_mpi_irecv_detached(recv_handles[i], other_rank, i, MPI_COMM_WORLD, recv_callback, &received);
				}
				else
				{
					starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
				}
			}
		}

		starpu_mpi_barrier(MPI_COMM_WORLD);
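
		/* Post only the first half of the sends for now. */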
		if (rank == 0 || rank == 1)
		{
			for (int i = 0; i < (NB_REQUESTS / 2); i++)
			{
				send_reqs[i] = NULL;
				starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
			}
		}

		if (rank == 0 || rank == 1)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&mutex);
			while (!received)
				STARPU_PTHREAD_COND_WAIT(&cond, &mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
		}
		if (rank == 0 || rank == 1)
		{
			for (int i = (NB_REQUESTS / 2); i < NB_REQUESTS; i++)
			{
				send_reqs[i] = NULL;
				starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
			}
		}

		if (rank == 0 || rank == 1)
		{
			for (int i = 0; i < NB_REQUESTS; i++)
			{
				if (recv_reqs[i]) starpu_mpi_wait(&recv_reqs[i], MPI_STATUS_IGNORE);
				if (send_reqs[i]) starpu_mpi_wait(&send_reqs[i], MPI_STATUS_IGNORE);
			}
		}

		starpu_mpi_wait_for_all(MPI_COMM_WORLD);
		FPRINTF(stderr, "Half/half burst...done (rank %d)\n", rank);
		starpu_mpi_barrier(MPI_COMM_WORLD);
	}

	/* Clean up */
	if (rank == 0 || rank == 1)
	{
		for (int i = 0; i < NB_REQUESTS; i++)
		{
			starpu_data_unregister(send_handles[i]);
			free(send_buffers[i]);
			starpu_data_unregister(recv_handles[i]);
			free(recv_buffers[i]);
		}
	}

	starpu_mpi_shutdown();
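
	/* Following helper.h's convention, mpi_init indicates whether StarPU was
	 * asked to initialize (and thus finalize) MPI itself; otherwise the test
	 * initialized MPI and has to finalize it here. */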
	if (!mpi_init)
		MPI_Finalize();

	return 0;
}