mpi_scatter_gather.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2012, 2013, 2014, 2015, 2016 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu_mpi.h>
#include "helper.h"

/* Return the MPI rank which owns the data element at index x */
int my_distrib(int x, int nb_nodes)
{
	return x % nb_nodes;
}
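
/* Task kernel: multiply each element of the vector handle by (rank + 2);
 * the rank is unpacked from the codelet arguments. */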
void cpu_codelet(void *descr[], void *_args)
{
	int *vector = (int *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
	unsigned i;
	int rank;

	starpu_codelet_unpack_args(_args, &rank);
	for (i = 0; i < nx; i++)
	{
		//fprintf(stderr,"rank %d v[%d] = %d\n", rank, i, vector[i]);
		vector[i] *= rank+2;
	}
}
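
/* In SimGrid simulation mode tasks are not actually executed, so the codelet
 * needs a (dummy) performance model to provide the simulator with a duration. */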
#ifdef STARPU_SIMGRID
/* Dummy cost function for simgrid */
static double cost_function(struct starpu_task *task STARPU_ATTRIBUTE_UNUSED, unsigned nimpl STARPU_ATTRIBUTE_UNUSED)
{
	return 0.000001;
}

static struct starpu_perfmodel dumb_model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};
#endif

static struct starpu_codelet cl =
{
	.cpu_funcs = {cpu_codelet},
	.nbuffers = 1,
	.modes = {STARPU_RW},
#ifdef STARPU_SIMGRID
	.model = &dumb_model,
#endif
};
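
/* Callbacks used to report completion of the detached (non-blocking)
 * scatter/gather communications. */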
void scallback(void *arg)
{
	char *msg = arg;
	FPRINTF_MPI(stderr, "Sending completed for <%s>\n", msg);
}

void rcallback(void *arg)
{
	char *msg = arg;
	FPRINTF_MPI(stderr, "Reception completed for <%s>\n", msg);
}
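
/* Node 0 initializes the whole vector; each element is then scattered to the
 * node chosen by my_distrib(), every node computes on the elements it owns,
 * and the results are gathered back on node 0 and checked. */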
int main(int argc, char **argv)
{
	int rank, nodes, ret, x;
	int *vector = NULL;
	starpu_data_handle_t *data_handles;
	int size = 10;

	ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_mpi_init(&argc, &argv, 1);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &nodes);

	if (starpu_cpu_worker_get_count() == 0)
	{
		if (rank == 0)
			FPRINTF(stderr, "We need at least 1 CPU worker.\n");
		starpu_mpi_shutdown();
		starpu_shutdown();
		return STARPU_TEST_SKIPPED;
	}

	if (rank == 0)
	{
		/* Allocate the vector */
		vector = malloc(size * sizeof(int));
		for(x=0 ; x<size ; x++)
			vector[x] = x+10;

		// Print vector
		FPRINTF_MPI(stderr, " Input vector: ");
		for(x=0 ; x<size ; x++)
		{
			FPRINTF(stderr, "%d\t", vector[x]);
		}
		FPRINTF(stderr,"\n");
	}

	/* Allocate data handles and register data to StarPU */
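	/* One handle is registered per vector element: node 0 registers the real
	 * memory behind every element, the other nodes register a memory-less
	 * handle (home node -1) for the elements they will own after the scatter,
	 * and keep a NULL handle for the elements they never touch. */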
	data_handles = (starpu_data_handle_t *) calloc(size, sizeof(starpu_data_handle_t));
	for(x = 0; x < size ; x++)
	{
		int mpi_rank = my_distrib(x, nodes);
		if (rank == 0)
		{
			starpu_vector_data_register(&data_handles[x], 0, (uintptr_t)&vector[x], 1, sizeof(int));
		}
		else if (mpi_rank == rank)
		{
			/* I do not own that index yet, but I will need it for my computations */
			starpu_vector_data_register(&data_handles[x], -1, (uintptr_t)NULL, 1, sizeof(int));
		}
		else
		{
			/* I know it's useless to allocate anything for this */
			data_handles[x] = NULL;
		}
		if (data_handles[x])
		{
			starpu_mpi_data_register(data_handles[x], x, 0);
		}
	}

	/* Scatter the vector among the nodes */
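	/* Ownership of each element is first moved from node 0 to its computing
	 * node; starpu_mpi_scatter_detached() then posts the corresponding
	 * transfers from the root (node 0) and returns immediately, the callbacks
	 * reporting completion of the sends and receptions. */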
	for(x = 0; x < size ; x++)
	{
		if (data_handles[x])
		{
			int mpi_rank = my_distrib(x, nodes);
			starpu_mpi_data_set_rank(data_handles[x], mpi_rank);
		}
	}
	starpu_mpi_scatter_detached(data_handles, size, 0, MPI_COMM_WORLD, scallback, "scatter", NULL, NULL);

	/* Calculation */
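	/* Each node submits one task per element it owns; no task is submitted
	 * for the other elements, so the computation itself triggers no extra
	 * communication. */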
	for(x = 0; x < size ; x++)
	{
		if (data_handles[x])
		{
			int owner = starpu_mpi_data_get_rank(data_handles[x]);
			if (owner == rank)
			{
				FPRINTF_MPI(stderr,"Computing on data[%d]\n", x);
				starpu_task_insert(&cl,
						   STARPU_VALUE, &rank, sizeof(rank),
						   STARPU_RW, data_handles[x],
						   0);
			}
		}
	}

	/* Gather the vector back on the main node */
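	/* starpu_mpi_gather_detached() brings every element back to the root
	 * (node 0); ownership is then moved back to node 0, and unregistering the
	 * handles below writes the gathered values back into 'vector' on node 0. */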
	starpu_mpi_gather_detached(data_handles, size, 0, MPI_COMM_WORLD, scallback, "gather", rcallback, "gather");
	for(x = 0; x < size ; x++)
	{
		if (data_handles[x])
		{
			starpu_mpi_data_set_rank(data_handles[x], 0);
		}
	}

	/* Unregister the vector from StarPU */
	for(x=0 ; x<size ; x++)
	{
		if (data_handles[x])
		{
			starpu_data_unregister(data_handles[x]);
		}
	}
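
	/* The expected result for element x is (x+10) * (owner_rank+2), since the
	 * task that processed it ran on node my_distrib(x, nodes). */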
	// Print vector
	if (rank == 0)
	{
		FPRINTF_MPI(stderr, "Output vector: ");
		for(x=0 ; x<size ; x++)
		{
			FPRINTF(stderr, "%d\t", vector[x]);
		}
		FPRINTF(stderr,"\n");

		for(x=0 ; x<size ; x++)
		{
			int mpi_rank = my_distrib(x, nodes);
			if (vector[x] != (x+10) * (mpi_rank+2))
			{
				FPRINTF_MPI(stderr, "Incorrect value for vector[%d]. computed %d != expected %d\n", x, vector[x], (x+10) * (mpi_rank+2));
				ret = 1;
			}
		}
		free(vector);
	}

	// Free memory
	free(data_handles);

	starpu_mpi_shutdown();
	starpu_shutdown();

	return (rank == 0) ? ret : 0;
}