/* mpi_reduction.c */

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2013, 2015 Université de Bordeaux
 * Copyright (C) 2012, 2013, 2014, 2015, 2016, 2017 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu_mpi.h>
#include <math.h>
#include "helper.h"

extern void init_cpu_func(void *descr[], void *cl_arg);
extern void redux_cpu_func(void *descr[], void *cl_arg);
extern void dot_cpu_func(void *descr[], void *cl_arg);
extern void display_cpu_func(void *descr[], void *cl_arg);
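
/* The four CPU kernels are implemented in a companion source file (in the
 * StarPU test suite, presumably mpi_reduction_kernels.c). */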

#ifdef STARPU_SIMGRID
/* Dummy cost function for simgrid */
static double cost_function(struct starpu_task *task STARPU_ATTRIBUTE_UNUSED, unsigned nimpl STARPU_ATTRIBUTE_UNUSED)
{
	return 0.000001;
}

static struct starpu_perfmodel dumb_model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};
#endif
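
/* Reduction set-up: init_codelet writes the neutral element of the reduction
 * (zero, for a sum) into a fresh per-node copy of the accumulator, and
 * redux_codelet folds one contribution (STARPU_R) into another (STARPU_RW).
 * StarPU invokes both implicitly for handles accessed in STARPU_REDUX mode. */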
static struct starpu_codelet init_codelet =
{
	.cpu_funcs = {init_cpu_func},
	.nbuffers = 1,
	.modes = {STARPU_W},
#ifdef STARPU_SIMGRID
	.model = &dumb_model,
#endif
	.name = "init_codelet"
};

static struct starpu_codelet redux_codelet =
{
	.cpu_funcs = {redux_cpu_func},
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
#ifdef STARPU_SIMGRID
	.model = &dumb_model,
#endif
	.name = "redux_codelet"
};
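
/* dot_codelet accumulates the dot product of one vector block into the
 * accumulator. The STARPU_REDUX access mode lets tasks on different nodes
 * run concurrently on private copies that are merged afterwards. */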
static struct starpu_codelet dot_codelet =
{
	.cpu_funcs = {dot_cpu_func},
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_REDUX},
#ifdef STARPU_SIMGRID
	.model = &dumb_model,
#endif
	.name = "dot_codelet"
};

static struct starpu_codelet display_codelet =
{
	.cpu_funcs = {display_cpu_func},
	.nbuffers = 1,
	.modes = {STARPU_R},
#ifdef STARPU_SIMGRID
	.model = &dumb_model,
#endif
	.name = "display_codelet"
};

/* Returns the rank of the MPI node that owns block number x */
int my_distrib(int x, int nb_nodes)
{
	return x % nb_nodes;
}

int main(int argc, char **argv)
{
	int my_rank, size, x, y, i;
	long int *vector;
	long int dot, sum = 0;
	starpu_data_handle_t *handles;
	starpu_data_handle_t dot_handle;
	int nb_elements, step, loops;

	STARPU_SKIP_IF_VALGRIND_RETURN_SKIP;

	/* Not supported yet */
	if (starpu_get_env_number_default("STARPU_GLOBAL_ARBITER", 0) > 0)
		return STARPU_TEST_SKIPPED;
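
	/* Initialize StarPU first, then its MPI layer; the final argument 1
	 * asks starpu_mpi_init() to call MPI_Init() on our behalf. */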
	int ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_mpi_init(&argc, &argv, 1);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &my_rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &size);

	if (starpu_cpu_worker_get_count() == 0)
	{
		if (my_rank == 0)
			FPRINTF(stderr, "We need at least 1 CPU worker.\n");
		starpu_mpi_shutdown();
		starpu_shutdown();
		return STARPU_TEST_SKIPPED;
	}
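
	/* The vector is split into blocks of `step` elements, distributed
	 * round-robin over the MPI nodes by my_distrib(); each rank allocates
	 * the whole array but only fills the blocks it owns. */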
	nb_elements = size*8000;
	step = 4;
	loops = 5;

	vector = (long int *) malloc(nb_elements*sizeof(vector[0]));
	for(x = 0; x < nb_elements; x += step)
	{
		int mpi_rank = my_distrib(x/step, size);
		if (mpi_rank == my_rank)
		{
			for(y = 0; y < step; y++)
			{
				vector[x+y] = x+y+1;
			}
		}
	}
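
	/* Rank 0 owns the accumulator. It starts from 14 (an arbitrary
	 * non-neutral value) and precomputes the expected final result:
	 * vector[i] == i+1, so each of the `loops` passes adds
	 * 1 + 2 + ... + nb_elements = nb_elements*(nb_elements+1)/2.
	 * Other ranks register the variable with home node -1, i.e. without
	 * a local copy until StarPU allocates one. */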
	if (my_rank == 0)
	{
		dot = 14;
		sum = (nb_elements * (nb_elements + 1)) / 2;
		sum *= loops;
		sum += dot;
		starpu_variable_data_register(&dot_handle, STARPU_MAIN_RAM, (uintptr_t)&dot, sizeof(dot));
	}
	else
	{
		starpu_variable_data_register(&dot_handle, -1, (uintptr_t)NULL, sizeof(dot));
	}

	handles = (starpu_data_handle_t *) malloc(nb_elements*sizeof(handles[0]));
	for(x = 0; x < nb_elements; x += step)
	{
		handles[x] = NULL;
		int mpi_rank = my_distrib(x/step, size);
		if (mpi_rank == my_rank)
		{
			/* This rank owns the block: register it from local memory */
			starpu_vector_data_register(&handles[x], STARPU_MAIN_RAM, (uintptr_t)&(vector[x]), step, sizeof(vector[0]));
		}
		else
		{
			starpu_vector_data_register(&handles[x], -1, (uintptr_t)NULL, step, sizeof(vector[0]));
		}
		if (handles[x])
		{
			starpu_mpi_data_register(handles[x], x, mpi_rank);
		}
	}
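
	/* The accumulator also needs an MPI tag and an owner (rank 0); the
	 * reduction methods tell StarPU how to initialize and merge the
	 * per-node contributions made through STARPU_REDUX accesses. */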
	starpu_mpi_data_register(dot_handle, nb_elements+1, 0);
	starpu_data_set_reduction_methods(dot_handle, &redux_codelet, &init_codelet);
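
	/* Main loop: one dot task per block, executed on the node that owns
	 * the block. starpu_mpi_redux_data() then collapses the per-node
	 * contributions back onto the owner of dot_handle before a display
	 * task prints the running total. */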
	for (i = 0; i < loops; i++)
	{
		for (x = 0; x < nb_elements; x += step)
		{
			starpu_mpi_task_insert(MPI_COMM_WORLD,
					       &dot_codelet,
					       STARPU_R, handles[x],
					       STARPU_REDUX, dot_handle,
					       0);
		}
		starpu_mpi_redux_data(MPI_COMM_WORLD, dot_handle);
		starpu_mpi_task_insert(MPI_COMM_WORLD, &display_codelet, STARPU_R, dot_handle, 0);
	}
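
	/* Wait until every locally submitted task has completed, then release
	 * the handles; unregistering dot_handle writes the final value back
	 * into `dot` on rank 0. */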
	FPRINTF_MPI(stderr, "Waiting ...\n");
	starpu_task_wait_for_all();

	for(x = 0; x < nb_elements; x += step)
	{
		if (handles[x])
			starpu_data_unregister(handles[x]);
	}
	starpu_data_unregister(dot_handle);

	free(vector);
	free(handles);

	starpu_mpi_shutdown();
	starpu_shutdown();

	if (my_rank == 0)
	{
		FPRINTF(stderr, "[%d] sum=%ld\n", my_rank, sum);
	}
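
	/* Under simgrid the kernels are only simulated, not actually executed,
	 * so no meaningful value is computed and the check is compiled out. */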
#ifndef STARPU_SIMGRID
	if (my_rank == 0)
	{
		FPRINTF(stderr, "[%d] dot=%ld\n", my_rank, dot);
		FPRINTF(stderr, "%s when computing reduction\n", (sum == dot) ? "Success" : "Error");
		if (sum != dot)
			return 1;
	}
#endif
	return 0;
}