sendrecv_parallel_tasks_bench.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2020-2021  Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * sendrecv benchmark from different tasks, executed simultaneously on several
 * workers.
 * Largely inspired by NewMadeleine's examples/piom/nm_piom_pingpong.c
 *
 * The goal is to measure the impact of calls to starpu_mpi_* from different threads.
 *
 * Use STARPU_NCPU to set the number of parallel ping-pongs.
 *
 * Note: this currently cannot work with the MPI backend with more than 1 CPU,
 * since with big sizes the MPI_Wait call in the MPI thread may block waiting
 * for the peer to call MPI_Recv+Wait, and there is no guarantee that the peer
 * will call MPI_Recv+Wait for the same data, since tasks can proceed in any
 * order.
 */
#include <starpu_mpi.h>
#include "helper.h"
#include "bench_helper.h"

#define NB_WARMUP_PINGPONGS 10

/* We reduce NX_MAX, since some NICs don't support exchanging such an amount of memory simultaneously */
#undef NX_MAX
#ifdef STARPU_QUICK_CHECK
#define NX_MAX (1024)
#else
#define NX_MAX (64 * 1024 * 1024)
#endif
void cpu_task(void* descr[], void* args)
{
	int mpi_rank;
	uint64_t iterations =
#ifdef STARPU_QUICK_CHECK
		10;
#else
		LOOPS_DEFAULT / 100;
#endif
	uint64_t s;
	starpu_data_handle_t handle_send, handle_recv;
	double t1, t2;
	int asked_worker;
	int current_worker = starpu_worker_get_id();
	uint64_t j = 0;
	uint64_t k = 0;

	starpu_codelet_unpack_args(args, &mpi_rank, &asked_worker, &s, &handle_send, &handle_recv);

	STARPU_ASSERT(asked_worker == current_worker);

	iterations = bench_nb_iterations(iterations, s);

	double* lats = malloc(sizeof(double) * iterations);
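
	/* Warm-up ping-pongs: exchange a few messages first so that startup
	 * effects do not pollute the measured iterations. */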
	for (j = 0; j < NB_WARMUP_PINGPONGS; j++)
	{
		if (mpi_rank == 0)
		{
			starpu_mpi_send(handle_send, 1, 0, MPI_COMM_WORLD);
			starpu_mpi_recv(handle_recv, 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		}
		else
		{
			starpu_mpi_recv(handle_recv, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
			starpu_mpi_send(handle_send, 0, 1, MPI_COMM_WORLD);
		}
	}
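
	/* Measured ping-pongs: rank 0 times a full send+recv round trip and
	 * halves it to get the one-way latency; the other rank just receives
	 * and replies. */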
	for (j = 0; j < iterations; j++)
	{
		if (mpi_rank == 0)
		{
			t1 = starpu_timing_now();
			starpu_mpi_send(handle_send, 1, 0, MPI_COMM_WORLD);
			starpu_mpi_recv(handle_recv, 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
			t2 = starpu_timing_now();

			lats[j] = (t2 - t1) / 2;
		}
		else
		{
			starpu_mpi_recv(handle_recv, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
			starpu_mpi_send(handle_send, 0, 1, MPI_COMM_WORLD);
		}
	}
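
	/* Only rank 0 gathered timings: sort them to extract the minimum, maximum,
	 * median, first and ninth deciles, compute the average, and derive the
	 * bandwidth from the best (minimum) latency. */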
	if (mpi_rank == 0)
	{
		qsort(lats, iterations, sizeof(double), &comp_double);

		const double min_lat = lats[0];
		const double max_lat = lats[iterations - 1];
		const double med_lat = lats[(iterations - 1) / 2];
		const double d1_lat = lats[(iterations - 1) / 10];
		const double d9_lat = lats[9 * (iterations - 1) / 10];
		double avg_lat = 0.0;

		for (k = 0; k < iterations; k++)
		{
			avg_lat += lats[k];
		}

		avg_lat /= iterations;

		const double bw_million_byte = s / min_lat;
		const double bw_mbyte = bw_million_byte / 1.048576;

		printf("%2d\t\t%9lld\t%9.3lf\t%9.3f\t%9.3f\t%9.3lf\t%9.3lf\t%9.3lf\t%9.3lf\t%9.3lf\n",
			current_worker, (long long) s, min_lat, bw_million_byte, bw_mbyte, d1_lat, med_lat, avg_lat, d9_lat, max_lat);
		fflush(stdout);
	}

	free(lats);
}
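
/* The codelet takes no data buffers: the data handles are passed as plain
 * codelet arguments, and the task itself performs the blocking
 * starpu_mpi_send/starpu_mpi_recv calls on them. */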
static struct starpu_codelet cl =
{
	.cpu_funcs = { cpu_task },
	.cpu_funcs_name = { "cpu_task" },
	.nbuffers = 0
};
int main(int argc, char **argv)
{
	int ret, rank, worldsize;

	ret = starpu_mpi_init_conf(&argc, &argv, 1, MPI_COMM_WORLD, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");

	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &worldsize);

	if (worldsize < 2)
	{
		if (rank == 0)
			FPRINTF(stderr, "We need 2 processes.\n");

		starpu_mpi_shutdown();

		return STARPU_TEST_SKIPPED;
	}

	if (rank == 0)
	{
		printf("Times in us\n");
		printf("# worker | size (Bytes)\t| latency \t| 10^6 B/s \t| MB/s \t| d1 \t|median \t| avg \t| d9 \t| max\n");
	}
	else if (rank >= 2)
	{
		starpu_mpi_shutdown();
		return 0;
	}
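
	/* Only ranks 0 and 1 take part in the ping-pongs; any other rank has
	 * already shut down and returned above. Allocate one send/recv buffer
	 * and handle pair per CPU worker: each worker runs its own ping-pong
	 * between the two ranks. */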
	unsigned cpu_count = starpu_cpu_worker_get_count();
	unsigned* mpi_tags = malloc(cpu_count * sizeof(unsigned));
	unsigned tag = 0;
	uint64_t s = 0;
	unsigned i = 0;

	int* workers = malloc(cpu_count * sizeof(int));
	float** vectors_send = malloc(cpu_count * sizeof(float*));
	float** vectors_recv = malloc(cpu_count * sizeof(float*));
	starpu_data_handle_t* handles_send = malloc(cpu_count * sizeof(starpu_data_handle_t));
	starpu_data_handle_t* handles_recv = malloc(cpu_count * sizeof(starpu_data_handle_t));
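
	/* For each message size: pause StarPU while registering the buffers and
	 * submitting one task per CPU worker, then resume so that all workers
	 * start their ping-pongs at the same time. */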
	for (s = NX_MIN; s <= NX_MAX; s = bench_next_size(s))
	{
		starpu_pause();

		for (i = 0; i < cpu_count; i++)
		{
			workers[i] = i;
			vectors_send[i] = malloc(s);
			vectors_recv[i] = malloc(s);
			memset(vectors_send[i], 0, s);
			memset(vectors_recv[i], 0, s);

			starpu_vector_data_register(&handles_send[i], STARPU_MAIN_RAM, (uintptr_t) vectors_send[i], s, 1);
			starpu_vector_data_register(&handles_recv[i], STARPU_MAIN_RAM, (uintptr_t) vectors_recv[i], s, 1);

			starpu_task_insert(&cl,
					STARPU_EXECUTE_ON_WORKER, workers[i],
					STARPU_VALUE, &rank, sizeof(int),
					STARPU_VALUE, workers + i, sizeof(int),
					STARPU_VALUE, &s, sizeof(uint64_t),
					STARPU_VALUE, &handles_send[i], sizeof(starpu_data_handle_t),
					STARPU_VALUE, &handles_recv[i], sizeof(starpu_data_handle_t), 0);
		}

		starpu_resume();
		starpu_task_wait_for_all();
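
		/* All per-worker ping-pongs for this size are done: release the
		 * handles and buffers before moving on to the next message size. */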
		for (i = 0; i < cpu_count; i++)
		{
			starpu_data_unregister(handles_send[i]);
			starpu_data_unregister(handles_recv[i]);

			free(vectors_send[i]);
			free(vectors_recv[i]);
		}
	}
	free(workers);
	free(vectors_send);
	free(vectors_recv);
	free(handles_send);
	free(handles_recv);
	free(mpi_tags);

	starpu_mpi_shutdown();

	return 0;
}