/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2013, 2015  Université Bordeaux
 * Copyright (C) 2011, 2012, 2013, 2014, 2015, 2016  CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/* Standard headers for fprintf, getenv, strcmp, strtol and time */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>

#include <starpu_mpi.h>

/* Print helpers, silenced when the STARPU_SSILENT environment variable is set */
#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) { fprintf(ofile, fmt, ## __VA_ARGS__); } } while(0)
#define FPRINTF_MPI(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) { \
		int _disp_rank; starpu_mpi_comm_rank(MPI_COMM_WORLD, &_disp_rank); \
		fprintf(ofile, "[%d][starpu_mpi][%s] " fmt, _disp_rank, __starpu_func__, ## __VA_ARGS__); \
		fflush(ofile); } } while(0)
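
/* CPU kernel: replace the value at (x, y) with the average of itself and its
 * four direct neighbours. The buffers arrive in descr[] in the order given at
 * task insertion: centre first, then x-1, x+1, y-1, y+1. */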
void stencil5_cpu(void *descr[], STARPU_ATTRIBUTE_UNUSED void *_args)
{
	float *xy = (float *)STARPU_VARIABLE_GET_PTR(descr[0]);
	float *xm1y = (float *)STARPU_VARIABLE_GET_PTR(descr[1]);
	float *xp1y = (float *)STARPU_VARIABLE_GET_PTR(descr[2]);
	float *xym1 = (float *)STARPU_VARIABLE_GET_PTR(descr[3]);
	float *xyp1 = (float *)STARPU_VARIABLE_GET_PTR(descr[4]);

	// fprintf(stdout, "VALUES: %2.2f %2.2f %2.2f %2.2f %2.2f\n", *xy, *xm1y, *xp1y, *xym1, *xyp1);
	*xy = (*xy + *xm1y + *xp1y + *xym1 + *xyp1) / 5;
	// fprintf(stdout, "VALUES: %2.2f %2.2f %2.2f %2.2f %2.2f\n", *xy, *xm1y, *xp1y, *xym1, *xyp1);
}
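
/* The codelet ties the kernel to its five buffers: the centre element is
 * read-write, the four neighbours are read-only. */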
struct starpu_codelet stencil5_cl =
{
	.cpu_funcs = {stencil5_cpu},
	.nbuffers = 5,
	.modes = {STARPU_RW, STARPU_R, STARPU_R, STARPU_R, STARPU_R}
};

#ifdef STARPU_QUICK_CHECK
#  define NITER_DEF 10
#  define X         5
#  define Y         5
#else
#  define NITER_DEF 100
#  define X         20
#  define Y         20
#endif

int display = 0;
int niter = NITER_DEF;

/* Returns the MPI node that owns the data element at index (x, y) */
int my_distrib(int x, int y, int nb_nodes)
{
	/* Block distribution */
	return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;
}

/* Shifted distribution, for the migration example */
int my_distrib2(int x, int y, int nb_nodes)
{
	return (my_distrib(x, y, nb_nodes) + 1) % nb_nodes;
}
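
/* The program runs the stencil once with my_distrib as the owner map, then
 * migrates every element to the node given by my_distrib2 and runs the
 * stencil again, illustrating how ownership can be changed while the data
 * stays registered. */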

static void parse_args(int argc, char **argv)
{
	int i;
	for (i = 1; i < argc; i++)
	{
		/* Guard against "-iter" being the last argument */
		if (strcmp(argv[i], "-iter") == 0 && i + 1 < argc)
		{
			char *argptr;
			niter = strtol(argv[++i], &argptr, 10);
		}
		if (strcmp(argv[i], "-display") == 0)
		{
			display = 1;
		}
	}
}
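
/* Typical invocation (the launcher name depends on the MPI implementation,
 * e.g. mpirun or mpiexec):
 *
 *     mpirun -np 4 ./stencil5 -iter 100 -display
 */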
int main(int argc, char **argv)
{
	int my_rank, size, x, y, loop;
	float mean = 0;
	float matrix[X][Y];
	starpu_data_handle_t data_handles[X][Y];

	int ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_mpi_init(&argc, &argv, 1);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &my_rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &size);

	if (starpu_cpu_worker_get_count() == 0)
	{
		FPRINTF(stderr, "We need at least 1 CPU worker.\n");
		starpu_mpi_shutdown();
		starpu_shutdown();
		/* 77 is the exit code test harnesses such as automake treat as "skipped" */
		return 77;
	}

	parse_args(argc, argv);

	/* Initial data values */
	starpu_srand48((long int)time(NULL));
	for(x = 0; x < X; x++)
	{
		for (y = 0; y < Y; y++)
		{
			matrix[x][y] = (float)starpu_drand48();
			mean += matrix[x][y];
		}
	}
	mean /= (X*Y);

	if (display)
	{
		FPRINTF_MPI(stdout, "mean=%2.2f\n", mean);
		for(x = 0; x < X; x++)
		{
			fprintf(stdout, "[%d] ", my_rank);
			for (y = 0; y < Y; y++)
			{
				fprintf(stdout, "%2.2f ", matrix[x][y]);
			}
			fprintf(stdout, "\n");
		}
	}

	/* Initial distribution */
	for(x = 0; x < X; x++)
	{
		for (y = 0; y < Y; y++)
		{
			int mpi_rank = my_distrib(x, y, size);
			if (mpi_rank == my_rank)
			{
				/* This node owns the element: register the user-provided buffer */
				//FPRINTF(stderr, "[%d] Owning data[%d][%d]\n", my_rank, x, y);
				starpu_variable_data_register(&data_handles[x][y], 0, (uintptr_t)&(matrix[x][y]), sizeof(float));
			}
			else if (my_rank == my_distrib(x+1, y, size) || my_rank == my_distrib(x-1, y, size)
				 || my_rank == my_distrib(x, y+1, size) || my_rank == my_distrib(x, y-1, size))
			{
				/* I don't own this index, but will need it for my computations */
				//FPRINTF(stderr, "[%d] Neighbour of data[%d][%d]\n", my_rank, x, y);
				starpu_variable_data_register(&data_handles[x][y], -1, (uintptr_t)NULL, sizeof(float));
			}
			else
			{
				/* This node never touches this element: no need to allocate anything */
				data_handles[x][y] = NULL;
			}
			if (data_handles[x][y])
			{
				/* Give the handle a unique tag and tell StarPU-MPI which node owns it */
				starpu_mpi_data_register(data_handles[x][y], (y*X)+x, mpi_rank);
			}
		}
	}
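
	/* starpu_mpi_task_insert() runs each task on the node that owns the data
	 * accessed in STARPU_RW mode and exchanges the STARPU_R neighbours
	 * automatically, so no explicit send/receive calls are needed. */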
	/* First computation with initial distribution */
	for(loop = 0; loop < niter; loop++)
	{
		for (x = 1; x < X-1; x++)
		{
			for (y = 1; y < Y-1; y++)
			{
				starpu_mpi_task_insert(MPI_COMM_WORLD, &stencil5_cl,
						       STARPU_RW, data_handles[x][y],
						       STARPU_R, data_handles[x-1][y], STARPU_R, data_handles[x+1][y],
						       STARPU_R, data_handles[x][y-1], STARPU_R, data_handles[x][y+1],
						       0);
			}
		}
	}

	FPRINTF(stderr, "Waiting ...\n");
	starpu_task_wait_for_all();

	/* Now migrate the data to the new distribution: first register any
	 * newly-needed handles */
	for(x = 0; x < X; x++)
	{
		for (y = 0; y < Y; y++)
		{
			int mpi_rank = my_distrib2(x, y, size);
			if (!data_handles[x][y] && (mpi_rank == my_rank
				 || my_rank == my_distrib2(x+1, y, size) || my_rank == my_distrib2(x-1, y, size)
				 || my_rank == my_distrib2(x, y+1, size) || my_rank == my_distrib2(x, y-1, size)))
			{
				/* Register newly-needed data */
				starpu_variable_data_register(&data_handles[x][y], -1, (uintptr_t)NULL, sizeof(float));
				starpu_mpi_data_register(data_handles[x][y], (y*X)+x, mpi_rank);
			}
			if (data_handles[x][y] && mpi_rank != starpu_mpi_data_get_rank(data_handles[x][y]))
			{
				/* Migrate the data to its new owner */
				starpu_mpi_get_data_on_node_detached(MPI_COMM_WORLD, data_handles[x][y], mpi_rank, NULL, NULL);
				/* And record the new rank of the handle */
				starpu_mpi_data_set_rank(data_handles[x][y], mpi_rank);
			}
		}
	}
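
	/* Migration idiom: the asynchronous (detached) fetch above moves the
	 * latest value to the new owner without blocking, and
	 * starpu_mpi_data_set_rank() updates the ownership metadata so that
	 * later task insertions use the new mapping. */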

	/* Second computation with the new distribution */
	for(loop = 0; loop < niter; loop++)
	{
		for (x = 1; x < X-1; x++)
		{
			for (y = 1; y < Y-1; y++)
			{
				starpu_mpi_task_insert(MPI_COMM_WORLD, &stencil5_cl,
						       STARPU_RW, data_handles[x][y],
						       STARPU_R, data_handles[x-1][y], STARPU_R, data_handles[x+1][y],
						       STARPU_R, data_handles[x][y-1], STARPU_R, data_handles[x][y+1],
						       0);
			}
		}
	}
	FPRINTF(stderr, "Waiting ...\n");
	starpu_task_wait_for_all();

	/* Unregister the data */
	for(x = 0; x < X; x++)
	{
		for (y = 0; y < Y; y++)
		{
			if (data_handles[x][y])
			{
				int mpi_rank = my_distrib(x, y, size);
				/* Bring the data back to its original place, where the user-provided buffer is */
				starpu_mpi_get_data_on_node_detached(MPI_COMM_WORLD, data_handles[x][y], mpi_rank, NULL, NULL);
				/* Restore the original rank (not strictly needed, but keeps the metadata consistent) */
				starpu_mpi_data_set_rank(data_handles[x][y], mpi_rank);
				/* And unregister it */
				starpu_data_unregister(data_handles[x][y]);
			}
		}
	}

	starpu_mpi_shutdown();
	starpu_shutdown();

	if (display)
	{
		FPRINTF(stdout, "[%d] mean=%2.2f\n", my_rank, mean);
		for(x = 0; x < X; x++)
		{
			FPRINTF(stdout, "[%d] ", my_rank);
			for (y = 0; y < Y; y++)
			{
				FPRINTF(stdout, "%2.2f ", matrix[x][y]);
			}
			FPRINTF(stdout, "\n");
		}
	}
	return 0;
}