burst_helper.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu_mpi.h>
#include "helper.h"
#include "burst_helper.h"
#if defined(STARPU_SIMGRID) || defined(STARPU_QUICK_CHECK)
#define NB_REQUESTS 10
#else
#define NB_REQUESTS 50
#endif
#define NX_ARRAY (320 * 320)

static starpu_data_handle_t* recv_handles;
static starpu_data_handle_t* send_handles;
static float** recv_buffers;
static float** send_buffers;
static starpu_mpi_req* recv_reqs;
static starpu_mpi_req* send_reqs;

int burst_nb_requests = NB_REQUESTS;
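
/* Allocate the send/receive buffers on ranks 0 and 1 and register them as
 * StarPU vector handles; the other ranks do not take part in the bursts. */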
void burst_init_data(int rank)
{
	if (rank == 0 || rank == 1)
	{
		recv_handles = malloc(burst_nb_requests * sizeof(starpu_data_handle_t));
		send_handles = malloc(burst_nb_requests * sizeof(starpu_data_handle_t));
		recv_buffers = malloc(burst_nb_requests * sizeof(float*));
		send_buffers = malloc(burst_nb_requests * sizeof(float*));
		recv_reqs = malloc(burst_nb_requests * sizeof(starpu_mpi_req));
		send_reqs = malloc(burst_nb_requests * sizeof(starpu_mpi_req));

		int i = 0;
		for (i = 0; i < burst_nb_requests; i++)
		{
			send_buffers[i] = malloc(NX_ARRAY * sizeof(float));
			memset(send_buffers[i], 0, NX_ARRAY * sizeof(float));
			starpu_vector_data_register(&send_handles[i], STARPU_MAIN_RAM, (uintptr_t) send_buffers[i], NX_ARRAY, sizeof(float));

			recv_buffers[i] = malloc(NX_ARRAY * sizeof(float));
			memset(recv_buffers[i], 0, NX_ARRAY * sizeof(float));
			starpu_vector_data_register(&recv_handles[i], STARPU_MAIN_RAM, (uintptr_t) recv_buffers[i], NX_ARRAY, sizeof(float));
		}
	}
}
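
/* Unregister the handles and free the buffers allocated by burst_init_data(). */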
void burst_free_data(int rank)
{
	if (rank == 0 || rank == 1)
	{
		int i = 0;
		for (i = 0; i < burst_nb_requests; i++)
		{
			starpu_data_unregister(send_handles[i]);
			free(send_buffers[i]);

			starpu_data_unregister(recv_handles[i]);
			free(recv_buffers[i]);
		}

		free(recv_handles);
		free(send_handles);
		free(recv_buffers);
		free(send_buffers);
		free(recv_reqs);
		free(send_reqs);
	}
}

/* Simultaneous burst from both nodes: 0 and 1 post all the recvs, synchronise, and then post all the sends */
void burst_bidir(int rank)
{
	int other_rank = (rank == 0) ? 1 : 0;
	int i = 0;

	FPRINTF(stderr, "Simultaneous....start (rank %d)\n", rank);

	if (rank == 0 || rank == 1)
	{
		for (i = 0; i < burst_nb_requests; i++)
		{
			recv_reqs[i] = NULL;
			starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
		}
	}
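
	/* Make sure both ranks have posted all their receives before any send is
	 * issued, so the sends below can be matched immediately. */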
	starpu_mpi_barrier(MPI_COMM_WORLD);

	if (rank == 0 || rank == 1)
	{
		for (i = 0; i < burst_nb_requests; i++)
		{
			send_reqs[i] = NULL;
			starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
		}

		for (i = 0; i < burst_nb_requests; i++)
		{
			if (recv_reqs[i]) starpu_mpi_wait(&recv_reqs[i], MPI_STATUS_IGNORE);
			if (send_reqs[i]) starpu_mpi_wait(&send_reqs[i], MPI_STATUS_IGNORE);
		}
	}

	FPRINTF(stderr, "Simultaneous....end (rank %d)\n", rank);
	starpu_mpi_barrier(MPI_COMM_WORLD);
}
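
/* Unidirectional burst: the receiver posts all the recvs, both ranks synchronise, then the sender posts all the sends */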
void burst_unidir(int sender, int receiver, int rank)
{
	FPRINTF(stderr, "%d -> %d... start (rank %d)\n", sender, receiver, rank);

	int i = 0;

	if (rank == receiver)
	{
		for (i = 0; i < burst_nb_requests; i++)
		{
			recv_reqs[i] = NULL;
			starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], sender, i, MPI_COMM_WORLD);
		}
	}
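
	/* Make sure the receiver has posted all its receives before the sender starts sending. */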
	starpu_mpi_barrier(MPI_COMM_WORLD);

	if (rank == sender)
	{
		for (i = 0; i < burst_nb_requests; i++)
		{
			send_reqs[i] = NULL;
			starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], receiver, i, i, MPI_COMM_WORLD);
		}
	}

	if (rank == sender || rank == receiver)
	{
		for (i = 0; i < burst_nb_requests; i++)
		{
			if (rank != sender && recv_reqs[i]) starpu_mpi_wait(&recv_reqs[i], MPI_STATUS_IGNORE);
			if (rank == sender && send_reqs[i]) starpu_mpi_wait(&send_reqs[i], MPI_STATUS_IGNORE);
		}
	}

	FPRINTF(stderr, "%d -> %d... end (rank %d)\n", sender, receiver, rank);
	starpu_mpi_barrier(MPI_COMM_WORLD);
}

/* Half burst from both nodes; the second half of the burst is only triggered after some of the requests have finished. */
void burst_bidir_half_postponed(int rank)
{
	int other_rank = (rank == 0) ? 1 : 0;
	int i = 0;

	FPRINTF(stderr, "Half/half burst...start (rank %d)\n", rank);

	if (rank == 0 || rank == 1)
	{
		for (i = 0; i < burst_nb_requests; i++)
		{
			recv_reqs[i] = NULL;
			starpu_mpi_irecv(recv_handles[i], &recv_reqs[i], other_rank, i, MPI_COMM_WORLD);
		}
	}

	starpu_mpi_barrier(MPI_COMM_WORLD);

	if (rank == 0 || rank == 1)
	{
		for (i = 0; i < (burst_nb_requests / 2); i++)
		{
			send_reqs[i] = NULL;
			starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
		}
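
		/* Wait for one of the first receives to complete before posting the
		 * second half of the sends, so new requests are submitted while
		 * communications are still in flight. */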
		if (recv_reqs[burst_nb_requests / 4]) starpu_mpi_wait(&recv_reqs[burst_nb_requests / 4], MPI_STATUS_IGNORE);

		for (i = (burst_nb_requests / 2); i < burst_nb_requests; i++)
		{
			send_reqs[i] = NULL;
			starpu_mpi_isend_prio(send_handles[i], &send_reqs[i], other_rank, i, i, MPI_COMM_WORLD);
		}

		for (i = 0; i < burst_nb_requests; i++)
		{
			if (recv_reqs[i]) starpu_mpi_wait(&recv_reqs[i], MPI_STATUS_IGNORE);
			if (send_reqs[i]) starpu_mpi_wait(&send_reqs[i], MPI_STATUS_IGNORE);
		}
	}

	FPRINTF(stderr, "Half/half burst...done (rank %d)\n", rank);
	starpu_mpi_barrier(MPI_COMM_WORLD);
}
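
/* Run all the burst patterns in sequence and report the total elapsed time. */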
void burst_all(int rank)
{
	double start, end;

	start = starpu_timing_now();

	/* Simultaneous burst from both nodes: 0 and 1 post all the recvs, synchronise, and then post all the sends */
	burst_bidir(rank);

	/* Burst from 0 to 1: rank 1 posts all the recvs, barrier, then rank 0 posts all the sends */
	burst_unidir(0, 1, rank);

	/* Burst from 1 to 0: rank 0 posts all the recvs, barrier, then rank 1 posts all the sends */
	burst_unidir(1, 0, rank);

	/* Half burst from both nodes; the second half of the burst is only triggered after some of the requests have finished. */
	burst_bidir_half_postponed(rank);

	end = starpu_timing_now();

	FPRINTF(stderr, "All bursts took %.0f ms\n", (end - start) / 1000.0);
}
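
/* For illustration only: a minimal, hypothetical driver sketch showing how
 * these helpers could be called from a test program. It assumes a StarPU-MPI
 * build; the actual drivers shipped with StarPU may differ.
 *
 *   int main(int argc, char **argv)
 *   {
 *       int rank, ret;
 *
 *       ret = starpu_mpi_init_conf(&argc, &argv, 1, MPI_COMM_WORLD, NULL);
 *       STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");
 *       starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
 *
 *       burst_init_data(rank);   // allocate and register the vectors on ranks 0 and 1
 *       burst_all(rank);         // run all the burst patterns
 *       burst_free_data(rank);   // unregister handles and free buffers
 *
 *       starpu_mpi_shutdown();
 *       return 0;
 *   }
 */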