mpi_scatter_gather.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012-2013 Inria
 * Copyright (C) 2011-2018 CNRS
 * Copyright (C) 2013-2015,2017 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <stdlib.h>
#include <starpu_mpi.h>
#include "helper.h"
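
/*
 * This example scatters a vector of integers from node 0 to all the MPI
 * nodes, has each node scale the elements it owns, then gathers the results
 * back on node 0 and checks them. Index x of the vector is owned by node
 * my_distrib(x, nb_nodes) = x % nb_nodes.
 */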

/* Return the number of the MPI node that owns data index x */
int my_distrib(int x, int nb_nodes)
{
    return x % nb_nodes;
}

/* Codelet: multiply each element of the vector by (rank + 2) */
void cpu_codelet(void *descr[], void *_args)
{
    int *vector = (int *)STARPU_VECTOR_GET_PTR(descr[0]);
    unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
    unsigned i;
    int rank;

    starpu_codelet_unpack_args(_args, &rank);
    for (i = 0; i < nx; i++)
    {
        //fprintf(stderr, "rank %d v[%d] = %d\n", rank, i, vector[i]);
        vector[i] *= rank + 2;
    }
}

static struct starpu_codelet cl =
{
    .cpu_funcs = {cpu_codelet},
    .nbuffers = 1,
    .modes = {STARPU_RW},
#ifdef STARPU_SIMGRID
    .model = &starpu_perfmodel_nop,
#endif
};

/* Callbacks invoked when a detached send (resp. reception) completes */
void scallback(void *arg)
{
    char *msg = arg;
    FPRINTF_MPI(stderr, "Sending completed for <%s>\n", msg);
}

void rcallback(void *arg)
{
    char *msg = arg;
    FPRINTF_MPI(stderr, "Reception completed for <%s>\n", msg);
}

int main(int argc, char **argv)
{
    int rank, nodes, ret, x;
    int *vector = NULL;
    starpu_data_handle_t *data_handles;
    int size = 10;

    ret = starpu_init(NULL);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
    ret = starpu_mpi_init(&argc, &argv, 1);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
    starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
    starpu_mpi_comm_size(MPI_COMM_WORLD, &nodes);

    if (starpu_cpu_worker_get_count() == 0)
    {
        if (rank == 0)
            FPRINTF(stderr, "We need at least 1 CPU worker.\n");
        starpu_mpi_shutdown();
        starpu_shutdown();
        return STARPU_TEST_SKIPPED;
    }

    if (rank == 0)
    {
        /* Allocate and fill the vector, then print it */
        vector = malloc(size * sizeof(int));
        for (x = 0; x < size; x++)
            vector[x] = x + 10;

        FPRINTF_MPI(stderr, " Input vector: ");
        for (x = 0; x < size; x++)
        {
            FPRINTF(stderr, "%d\t", vector[x]);
        }
        FPRINTF(stderr, "\n");
    }

    /* Allocate data handles and register data with StarPU */
    data_handles = (starpu_data_handle_t *) calloc(size, sizeof(starpu_data_handle_t));
    for (x = 0; x < size; x++)
    {
        int mpi_rank = my_distrib(x, nodes);
        if (rank == 0)
        {
            /* Node 0 holds the initial data for every index */
            starpu_vector_data_register(&data_handles[x], 0, (uintptr_t)&vector[x], 1, sizeof(int));
        }
        else if (mpi_rank == rank)
        {
            /* This node does not hold the initial data, but will need this
             * index for its computations: register a handle without a local
             * buffer */
            starpu_vector_data_register(&data_handles[x], -1, (uintptr_t)NULL, 1, sizeof(int));
        }
        else
        {
            /* This node neither holds nor computes on this index, so there
             * is no point in allocating anything for it */
            data_handles[x] = NULL;
        }
        if (data_handles[x])
        {
            starpu_mpi_data_register(data_handles[x], x, 0);
        }
    }

    /* Scatter the vector among the nodes: mark each handle as owned by the
     * node that will compute on it */
    for (x = 0; x < size; x++)
    {
        if (data_handles[x])
        {
            int mpi_rank = my_distrib(x, nodes);
            starpu_mpi_data_set_rank(data_handles[x], mpi_rank);
        }
    }
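
    /* starpu_mpi_scatter_detached() transfers each handle from the root
     * (node 0) to the owner set just above. The call is detached: it returns
     * without waiting, and the send/reception callbacks given below are
     * invoked as the individual transfers complete. Here only a send
     * callback is registered, with the string "scatter" as its argument. */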
    starpu_mpi_scatter_detached(data_handles, size, 0, MPI_COMM_WORLD, scallback, "scatter", NULL, NULL);

    /* Calculation: each node inserts tasks for the indexes it owns */
    for (x = 0; x < size; x++)
    {
        if (data_handles[x])
        {
            int owner = starpu_mpi_data_get_rank(data_handles[x]);
            if (owner == rank)
            {
                FPRINTF_MPI(stderr, "Computing on data[%d]\n", x);
                starpu_task_insert(&cl,
                                   STARPU_VALUE, &rank, sizeof(rank),
                                   STARPU_RW, data_handles[x],
                                   0);
            }
        }
    }

    /* Gather the vector back on the root node */
    starpu_mpi_gather_detached(data_handles, size, 0, MPI_COMM_WORLD, scallback, "gather", rcallback, "gather");

    /* After the gather, node 0 holds all the data again: update the rank
     * recorded in each handle accordingly */
    for (x = 0; x < size; x++)
    {
        if (data_handles[x])
        {
            starpu_mpi_data_set_rank(data_handles[x], 0);
        }
    }

    /* Unregister the vector from StarPU; on node 0 this writes the latest
     * values back into the vector buffer */
    for (x = 0; x < size; x++)
    {
        if (data_handles[x])
        {
            starpu_data_unregister(data_handles[x]);
        }
    }

    /* Print the output vector and check each element against the expected
     * value (x + 10) scaled by (owner rank + 2) */
    if (rank == 0)
    {
        FPRINTF_MPI(stderr, "Output vector: ");
        for (x = 0; x < size; x++)
        {
            FPRINTF(stderr, "%d\t", vector[x]);
        }
        FPRINTF(stderr, "\n");
        for (x = 0; x < size; x++)
        {
            int mpi_rank = my_distrib(x, nodes);
            if (vector[x] != (x + 10) * (mpi_rank + 2))
            {
                FPRINTF_MPI(stderr, "Incorrect value for vector[%d]. computed %d != expected %d\n", x, vector[x], (x + 10) * (mpi_rank + 2));
                ret = 1;
            }
        }
        free(vector);
    }

    /* Free memory and shut down StarPU */
    free(data_handles);
    starpu_mpi_shutdown();
    starpu_shutdown();

    return (rank == 0) ? ret : 0;
}
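
/* A typical invocation (the binary name and node count here are just an
 * illustration; adjust them to your build):
 *
 *   $ mpirun -np 3 ./mpi_scatter_gather
 *
 * With 3 nodes, index x is owned by node x % 3: for instance vector[4]
 * (initially 4 + 10 = 14) is scaled by (1 + 2) on node 1 and gathered back
 * on node 0 as 42. */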