abstract_sendrecv_bench.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2020-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include "bench_helper.h"
#include "abstract_sendrecv_bench.h"

void sendrecv_bench(int mpi_rank, starpu_pthread_barrier_t* thread_barrier, int bidir)
{
	uint64_t iterations = LOOPS_DEFAULT;
	uint64_t s = 0;
	uint64_t j = 0;
	uint64_t k = 0;

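	/* Ranks other than 0 and 1 do not take part in the ping-pong: they pause
	 * StarPU task processing and only match the MPI barriers of the two
	 * active ranks, so that collective calls stay balanced. */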
	if (mpi_rank >= 2)
	{
		starpu_pause();
		if (thread_barrier != NULL)
		{
			STARPU_PTHREAD_BARRIER_WAIT(thread_barrier);
		}

		for (s = NX_MIN; s <= NX_MAX; s = bench_next_size(s))
		{
			iterations = bench_nb_iterations(iterations, s);
			starpu_mpi_barrier(MPI_COMM_WORLD);
			for (j = 0; j < iterations; j++)
			{
				starpu_mpi_barrier(MPI_COMM_WORLD);
			}
		}

		starpu_resume();
		return;
	}

	if (mpi_rank == 0)
	{
		printf("Times in us\n");
		printf("# size (Bytes)\t| latency \t| 10^6 B/s \t| MB/s \t| d1 \t|median \t| avg \t| d9 \t| max\n");
	}

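	/* Send/receive buffers, their StarPU data handles, and one latency sample
	 * per iteration (only ranks 0 and 1 get here). */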
	int array_size = 0;
	starpu_data_handle_t handle_send, handle_recv;
	float* vector_send = NULL;
	float* vector_recv = NULL;
	double t1, t2, global_tstart, global_tend;
	double* lats = malloc(sizeof(double) * LOOPS_DEFAULT);
	starpu_mpi_req send_req, recv_req;

	if (thread_barrier != NULL)
	{
		STARPU_PTHREAD_BARRIER_WAIT(thread_barrier);
	}

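	/* Main measurement loop: sweep message sizes from NX_MIN to NX_MAX, the
	 * iteration count being adjusted per size by bench_nb_iterations(). */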
	global_tstart = starpu_timing_now();
	for (s = NX_MIN; s <= NX_MAX; s = bench_next_size(s))
	{
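		/* Allocate and register plain byte buffers of the current size s. */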
		vector_send = malloc(s);
		vector_recv = malloc(s);
		memset(vector_send, 0, s);
		memset(vector_recv, 0, s);
		starpu_vector_data_register(&handle_send, STARPU_MAIN_RAM, (uintptr_t) vector_send, s, 1);
		starpu_vector_data_register(&handle_recv, STARPU_MAIN_RAM, (uintptr_t) vector_recv, s, 1);

		iterations = bench_nb_iterations(iterations, s);
		starpu_mpi_barrier(MPI_COMM_WORLD);

		for (j = 0; j < iterations; j++)
		{
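			/* Rank 0 times the exchange with rank 1: concurrent isend/irecv in
			 * bidirectional mode, blocking send then receive otherwise. Half of
			 * the measured round-trip time is recorded as the latency. */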
			if (mpi_rank == 0)
			{
				t1 = starpu_timing_now();
				if (bidir)
				{
					starpu_mpi_isend(handle_send, &send_req, 1, 0, MPI_COMM_WORLD);
					starpu_mpi_irecv(handle_recv, &recv_req, 1, 1, MPI_COMM_WORLD);
					starpu_mpi_wait(&send_req, MPI_STATUS_IGNORE);
					starpu_mpi_wait(&recv_req, MPI_STATUS_IGNORE);
				}
				else
				{
					starpu_mpi_send(handle_send, 1, 0, MPI_COMM_WORLD);
					starpu_mpi_recv(handle_recv, 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
				}
				t2 = starpu_timing_now();

				const double t = (t2 - t1) / 2;
				lats[j] = t;
			}
			else
			{
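				/* Rank 1 mirrors rank 0's pattern (receive on tag 0, reply on
				 * tag 1) but does not time anything. */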
				if (bidir)
				{
					starpu_mpi_irecv(handle_recv, &recv_req, 0, 0, MPI_COMM_WORLD);
					starpu_mpi_isend(handle_send, &send_req, 0, 1, MPI_COMM_WORLD);
					starpu_mpi_wait(&recv_req, MPI_STATUS_IGNORE);
					starpu_mpi_wait(&send_req, MPI_STATUS_IGNORE);
				}
				else
				{
					starpu_mpi_recv(handle_recv, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
					starpu_mpi_send(handle_send, 0, 1, MPI_COMM_WORLD);
				}
			}

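			/* Keep both ranks in lockstep between iterations. */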
			starpu_mpi_barrier(MPI_COMM_WORLD);
		}

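		/* Rank 0 sorts the samples and reports min, first/last decile, median,
		 * average and max latency, plus the bandwidth derived from the minimum
		 * latency. */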
		if (mpi_rank == 0)
		{
			qsort(lats, iterations, sizeof(double), &comp_double);

			const double min_lat = lats[0];
			const double max_lat = lats[iterations - 1];
			const double med_lat = lats[(iterations - 1) / 2];
			const double d1_lat = lats[(iterations - 1) / 10];
			const double d9_lat = lats[9 * (iterations - 1) / 10];

			double avg_lat = 0.0;
			for (k = 0; k < iterations; k++)
			{
				avg_lat += lats[k];
			}
			avg_lat /= iterations;

			const double bw_million_byte = s / min_lat;
			const double bw_mbyte = bw_million_byte / 1.048576;

			printf("%9lld\t%9.3lf\t%9.3f\t%9.3f\t%9.3lf\t%9.3lf\t%9.3lf\t%9.3lf\t%9.3lf\n",
			       (long long)s, min_lat, bw_million_byte, bw_mbyte, d1_lat, med_lat, avg_lat, d9_lat, max_lat);
			fflush(stdout);
		}

		starpu_data_unregister(handle_recv);
		starpu_data_unregister(handle_send);
		free(vector_send);
		free(vector_recv);
	}

	global_tend = starpu_timing_now();

	if (mpi_rank == 0)
	{
		printf("Comm bench took %9.3lf ms\n", (global_tend - global_tstart) / 1000);
	}

	free(lats);
}