burst.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * This test sends many communications simultaneously, with various configurations.
 *
 * The overall purpose is to watch the behaviour with traces.
 */

#include <starpu_mpi.h>
#include "helper.h"

#define NB_REQUESTS 500
#define NX_ARRAY (320 * 320)
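
/* Each request transfers a vector of NX_ARRAY floats (320*320 elements, i.e. about
 * 400 KB), and NB_REQUESTS of them are posted at once to form a burst. */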

static starpu_pthread_mutex_t mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
static starpu_pthread_cond_t cond = STARPU_PTHREAD_COND_INITIALIZER;
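
/* recv_callback() is called by StarPU-MPI when the detached receive of the last phase
 * completes: it raises the flag passed as argument and wakes up the main thread, which
 * waits on the condition variable. */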
void recv_callback(void* arg)
{
	int* received = arg;

	STARPU_PTHREAD_MUTEX_LOCK(&mutex);
	*received = 1;
	STARPU_PTHREAD_COND_SIGNAL(&cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
}

int main(int argc, char **argv)
{
	int ret, rank, size, mpi_init, other_rank;
	starpu_data_handle_t recv_handles[NB_REQUESTS];
	starpu_data_handle_t send_handles[NB_REQUESTS];
	float* recv_buffers[NB_REQUESTS];
	float* send_buffers[NB_REQUESTS];
	starpu_mpi_req recv_reqs[NB_REQUESTS];
	starpu_mpi_req send_reqs[NB_REQUESTS];
	MPI_Status status;

	MPI_INIT_THREAD(&argc, &argv, MPI_THREAD_SERIALIZED, &mpi_init);
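
	/* mpi_init tells starpu_mpi_init_conf() whether StarPU has to initialize MPI
	 * itself; when it does, starpu_mpi_shutdown() also finalizes MPI, which is why
	 * MPI_Finalize() below is only called when mpi_init is zero. */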
	ret = starpu_mpi_init_conf(&argc, &argv, mpi_init, MPI_COMM_WORLD, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");

	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &size);
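
	/* Only ranks 0 and 1 exchange data. Any extra rank simply matches the
	 * barrier/wait_for_all pairs of the four phases below, then leaves. */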
	if (rank > 1)
	{
		starpu_mpi_barrier(MPI_COMM_WORLD);
		starpu_mpi_wait_for_all(MPI_COMM_WORLD);
		starpu_mpi_barrier(MPI_COMM_WORLD);
		starpu_mpi_wait_for_all(MPI_COMM_WORLD);
		starpu_mpi_barrier(MPI_COMM_WORLD);
		starpu_mpi_wait_for_all(MPI_COMM_WORLD);
		starpu_mpi_barrier(MPI_COMM_WORLD);
		starpu_mpi_wait_for_all(MPI_COMM_WORLD);

		starpu_mpi_shutdown();
		if (!mpi_init)
			MPI_Finalize();

		return 0;
	}

	other_rank = (rank == 0) ? 1 : 0;

	/* Simultaneous burst from both nodes */
	if (rank == 0)
	{
		printf("Simultaneous....\n");
	}
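
	/* Register one send and one receive vector per request, and post every receive
	 * in advance so that both nodes flood each other as soon as they pass the barrier.
	 * The loop index doubles as the MPI tag of each request. */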
	for (int i = 0; i < NB_REQUESTS; i++)
	{
		send_buffers[i] = malloc(NX_ARRAY * sizeof(float));
		memset(send_buffers[i], 0, NX_ARRAY * sizeof(float));
		starpu_vector_data_register(&send_handles[i], STARPU_MAIN_RAM, (uintptr_t) send_buffers[i], NX_ARRAY, sizeof(float));

		recv_buffers[i] = malloc(NX_ARRAY * sizeof(float));
		memset(recv_buffers[i], 0, NX_ARRAY * sizeof(float));
		starpu_vector_data_register(&recv_handles[i], STARPU_MAIN_RAM, (uintptr_t) recv_buffers[i], NX_ARRAY, sizeof(float));

		starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
	}

	starpu_mpi_barrier(MPI_COMM_WORLD);
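
	/* Each send is given its index as priority (in StarPU, a larger value means a
	 * higher priority), so the effect of priorities on the actual send order can be
	 * observed in traces. */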
	for (int i = 0; i < NB_REQUESTS; i++)
	{
		starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
	}

	starpu_mpi_wait_for_all(MPI_COMM_WORLD);

	/* Burst from 0 to 1 */
	if (rank == 0)
	{
		printf("Done.\n");
		printf("0 -> 1...\n");
	}
	else
	{
		for (int i = 0; i < NB_REQUESTS; i++)
		{
			starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
		}
	}

	starpu_mpi_barrier(MPI_COMM_WORLD);

	if (rank == 0)
	{
		for (int i = 0; i < NB_REQUESTS; i++)
		{
			starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
		}
	}

	starpu_mpi_wait_for_all(MPI_COMM_WORLD);

	/* Burst from 1 to 0 */
	if (rank == 0)
	{
		printf("Done.\n");
		printf("1 -> 0...\n");

		for (int i = 0; i < NB_REQUESTS; i++)
		{
			starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
		}
	}

	starpu_mpi_barrier(MPI_COMM_WORLD);

	if (rank == 1)
	{
		for (int i = 0; i < NB_REQUESTS; i++)
		{
			starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
		}
	}

	starpu_mpi_wait_for_all(MPI_COMM_WORLD);

	/* Half burst from both nodes; the second half is triggered after some requests have finished. */
	if (rank == 0)
	{
		printf("Done.\n");
		printf("Half/half burst...\n");
	}

	int received = 0;
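
	/* Post all the receives; the one at index NB_REQUESTS / 4 is detached and will run
	 * recv_callback() once its data has arrived, signalling the main thread. */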
	for (int i = 0; i < NB_REQUESTS; i++)
	{
		if (i == (NB_REQUESTS / 4))
		{
			starpu_mpi_irecv_detached(recv_handles[i], other_rank, i, MPI_COMM_WORLD, recv_callback, &received);
		}
		else
		{
			starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
		}
	}

	starpu_mpi_barrier(MPI_COMM_WORLD);

	for (int i = 0; i < (NB_REQUESTS / 2); i++)
	{
		starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
	}
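
	/* Wait until the detached receive has completed before firing the second half of the burst. */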
	STARPU_PTHREAD_MUTEX_LOCK(&mutex);
	while (!received)
		STARPU_PTHREAD_COND_WAIT(&cond, &mutex);
	STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);

	for (int i = (NB_REQUESTS / 2); i < NB_REQUESTS; i++)
	{
		starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
	}

	starpu_mpi_wait_for_all(MPI_COMM_WORLD);

	if (rank == 0)
	{
		printf("Done.\n");
	}
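
	/* Unregister the data handles and release the buffers. */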
	for (int i = 0; i < NB_REQUESTS; i++)
	{
		starpu_data_unregister(send_handles[i]);
		free(send_buffers[i]);
		starpu_data_unregister(recv_handles[i]);
		free(recv_buffers[i]);
	}

	starpu_mpi_shutdown();
	if (!mpi_init)
		MPI_Finalize();

	return 0;
}