/* sendrecv_bench.c (4.6 KB) — extraction artifact: page line-number gutter removed */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2019 Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 *
 * Basic send receive benchmark.
 * Inspired a lot from NewMadeleine examples/benchmarks/nm_bench_sendrecv.c
 */
#include <math.h>
#include <starpu_mpi.h>
#include "helper.h"

/* Maximum message size in BYTES (512 MB): 's' is passed straight to malloc()
 * and matches 'max_data' in _iterations(). Original comment said "kB" —
 * NOTE(review): that looks incorrect; confirm intended unit. */
#define NX_MAX (512 * 1024 * 1024)
#define NX_MIN 0              /* starting message size, in bytes */
#define MULT_DEFAULT 2        /* default geometric growth factor of the size loop */
#define INCR_DEFAULT 0        /* default additive increment of the size loop */
#define NX_STEP 1.4           /* multiplication step — not referenced in this file */
#define LOOPS_DEFAULT 10000   /* requested ping-pong iterations per message size */

/* NOTE(review): times_nb_nodes and times_size are never referenced in this
 * file — presumably kept for other benchmark variants; confirm before removal. */
int times_nb_nodes;
int times_size;
int worldsize;                /* number of MPI processes (benchmark requires 2) */
  31. static int comp_double(const void*_a, const void*_b)
  32. {
  33. const double* a = _a;
  34. const double* b = _b;
  35. if(*a < *b)
  36. return -1;
  37. else if(*a > *b)
  38. return 1;
  39. else
  40. return 0;
  41. }
  42. static inline uint64_t _next(uint64_t len, double multiplier, uint64_t increment)
  43. {
  44. uint64_t next = len * multiplier + increment;
  45. if(next <= len)
  46. next++;
  47. return next;
  48. }
  49. static inline uint64_t _iterations(int iterations, uint64_t len)
  50. {
  51. const uint64_t max_data = 512 * 1024 * 1024;
  52. if(len <= 0)
  53. len = 1;
  54. uint64_t data_size = ((uint64_t)iterations * (uint64_t)len);
  55. if(data_size > max_data)
  56. {
  57. iterations = (max_data / (uint64_t)len);
  58. if(iterations < 2)
  59. iterations = 2;
  60. }
  61. return iterations;
  62. }
  63. int main(int argc, char **argv)
  64. {
  65. int ret, rank;
  66. starpu_data_handle_t handle_send, handle_recv;
  67. int mpi_init;
  68. float* vector_send = NULL;
  69. float* vector_recv = NULL;
  70. double t1, t2;
  71. double* lats = malloc(sizeof(double) * LOOPS_DEFAULT);
  72. uint64_t iterations = LOOPS_DEFAULT;
  73. double multiplier = MULT_DEFAULT;
  74. uint64_t increment = INCR_DEFAULT;
  75. MPI_INIT_THREAD(&argc, &argv, MPI_THREAD_SERIALIZED, &mpi_init);
  76. ret = starpu_mpi_init_conf(&argc, &argv, mpi_init, MPI_COMM_WORLD, NULL);
  77. STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");
  78. starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
  79. starpu_mpi_comm_size(MPI_COMM_WORLD, &worldsize);
  80. if (worldsize != 2)
  81. {
  82. if (rank == 0)
  83. FPRINTF(stderr, "We need 2 processes.\n");
  84. starpu_mpi_shutdown();
  85. if (!mpi_init)
  86. MPI_Finalize();
  87. return STARPU_TEST_SKIPPED;
  88. }
  89. if (rank == 0)
  90. {
  91. printf("Times in us\n");
  92. printf("# size (Bytes)\t| latency \t| 10^6 B/s \t| MB/s \t| d1 \t|median \t| avg \t| d9 \t| max\n");
  93. }
  94. int array_size = 0;
  95. for (uint64_t s = NX_MIN; s <= NX_MAX; s = _next(s, multiplier, increment))
  96. {
  97. vector_send = malloc(s);
  98. vector_recv = malloc(s);
  99. memset(vector_send, 0, s);
  100. memset(vector_recv, 0, s);
  101. starpu_vector_data_register(&handle_send, STARPU_MAIN_RAM, (uintptr_t) vector_send, s, 1);
  102. starpu_vector_data_register(&handle_recv, STARPU_MAIN_RAM, (uintptr_t) vector_recv, s, 1);
  103. iterations = _iterations(iterations, s);
  104. starpu_mpi_barrier(MPI_COMM_WORLD);
  105. for (uint64_t j = 0; j < iterations; j++)
  106. {
  107. if (rank == 0)
  108. {
  109. t1 = starpu_timing_now();
  110. starpu_mpi_send(handle_send, 1, 0, MPI_COMM_WORLD);
  111. starpu_mpi_recv(handle_recv, 1, 1, MPI_COMM_WORLD, NULL);
  112. t2 = starpu_timing_now();
  113. const double delay = t2 - t1;
  114. const double t = delay / 2;
  115. lats[j] = t;
  116. }
  117. else
  118. {
  119. starpu_mpi_recv(handle_recv, 0, 0, MPI_COMM_WORLD, NULL);
  120. starpu_mpi_send(handle_send, 0, 1, MPI_COMM_WORLD);
  121. }
  122. starpu_mpi_barrier(MPI_COMM_WORLD);
  123. }
  124. if (rank == 0)
  125. {
  126. qsort(lats, iterations, sizeof(double), &comp_double);
  127. const double min_lat = lats[0];
  128. const double max_lat = lats[iterations - 1];
  129. const double med_lat = lats[(iterations - 1) / 2];
  130. const double d1_lat = lats[(iterations - 1) / 10];
  131. const double d9_lat = lats[9 * (iterations - 1) / 10];
  132. double avg_lat = 0.0;
  133. for(uint64_t k = 0; k < iterations; k++)
  134. {
  135. avg_lat += lats[k];
  136. }
  137. avg_lat /= iterations;
  138. const double bw_million_byte = s / min_lat;
  139. const double bw_mbyte = bw_million_byte / 1.048576;
  140. printf("%9lld\t%9.3lf\t%9.3f\t%9.3f\t%9.3lf\t%9.3lf\t%9.3lf\t%9.3lf\t%9.3lf\n",
  141. (long long)s, min_lat, bw_million_byte, bw_mbyte, d1_lat, med_lat, avg_lat, d9_lat, max_lat);
  142. fflush(stdout);
  143. }
  144. starpu_data_unregister(handle_recv);
  145. starpu_data_unregister(handle_send);
  146. free(vector_send);
  147. free(vector_recv);
  148. }
  149. starpu_mpi_shutdown();
  150. return 0;
  151. }