stencil5_lb.c
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2013, 2015-2016 Université Bordeaux
 * Copyright (C) 2011, 2012, 2013, 2014, 2015, 2016, 2017 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <starpu_mpi.h>
#include <starpu_mpi_lb.h>

#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) { fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)
#define FPRINTF_MPI(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) { \
		int _disp_rank; starpu_mpi_comm_rank(MPI_COMM_WORLD, &_disp_rank); \
		fprintf(ofile, "[%d][starpu_mpi][%s] " fmt, _disp_rank, __starpu_func__, ## __VA_ARGS__); \
		fflush(ofile); }} while(0)
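
/* 5-point stencil kernel: replace the centre value with the average of
 * itself and its four direct neighbours. */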
void stencil5_cpu(void *descr[], STARPU_ATTRIBUTE_UNUSED void *_args)
{
	float *xy = (float *)STARPU_VARIABLE_GET_PTR(descr[0]);
	float *xm1y = (float *)STARPU_VARIABLE_GET_PTR(descr[1]);
	float *xp1y = (float *)STARPU_VARIABLE_GET_PTR(descr[2]);
	float *xym1 = (float *)STARPU_VARIABLE_GET_PTR(descr[3]);
	float *xyp1 = (float *)STARPU_VARIABLE_GET_PTR(descr[4]);

	// fprintf(stdout, "VALUES: %2.2f %2.2f %2.2f %2.2f %2.2f\n", *xy, *xm1y, *xp1y, *xym1, *xyp1);
	*xy = (*xy + *xm1y + *xp1y + *xym1 + *xyp1) / 5;
	// fprintf(stdout, "VALUES: %2.2f %2.2f %2.2f %2.2f %2.2f\n", *xy, *xm1y, *xp1y, *xym1, *xyp1);
}
struct starpu_codelet stencil5_cl =
{
	.cpu_funcs = {stencil5_cpu},
	.nbuffers = 5,
	.modes = {STARPU_RW, STARPU_R, STARPU_R, STARPU_R, STARPU_R}
};

#ifdef STARPU_QUICK_CHECK
#  define NITER_DEF 10
#  define X 2
#  define Y 2
#elif !defined(STARPU_LONG_CHECK)
#  define NITER_DEF 10
#  define X 5
#  define Y 5
#else
#  define NITER_DEF 100
#  define X 20
#  define Y 20
#endif

int display = 0;
int niter = NITER_DEF;
/* Returns the MPI rank which owns the data at index (x, y) */
int my_distrib(int x, int y, int nb_nodes)
{
	/* Block distribution */
	return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;
}
/* Shifted distribution, for the migration example */
int my_distrib2(int x, int y, int nb_nodes)
{
	return (my_distrib(x, y, nb_nodes) + 1) % nb_nodes;
}
static void parse_args(int argc, char **argv)
{
	int i;
	for (i = 1; i < argc; i++)
	{
		if (strcmp(argv[i], "-iter") == 0)
		{
			char *argptr;
			niter = strtol(argv[++i], &argptr, 10);
		}
		if (strcmp(argv[i], "-display") == 0)
		{
			display = 1;
		}
	}
}
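
/* Load-balancer callback: report which ranks this node may offload work to.
 * Here each rank has a single neighbour, the next rank in a ring. */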
void get_neighbors(int **neighbor_ids, int *nneighbors)
{
	int rank, size;
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &size);
	*nneighbors = 1;
	*neighbor_ids = malloc(sizeof(int));
	(*neighbor_ids)[0] = rank == size-1 ? 0 : rank+1;
}
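
/* Per-cell bookkeeping: the registered StarPU handle and the rank that
 * currently owns the cell. */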
struct data_node
{
	starpu_data_handle_t data_handle;
	int node;
};

struct data_node data_nodes[X][Y];
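
/* Load-balancer callback: choose which data to move to dst_node. This simple
 * policy hands over the first cell still owned by the local rank. */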
void get_data_unit_to_migrate(starpu_data_handle_t **handle_unit, int *nhandles, int dst_node)
{
	int rank, x, y;
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	fprintf(stderr, "Looking to move data from %d to %d\n", rank, dst_node);
	for (x = 0; x < X; x++)
	{
		for (y = 0; y < Y; y++)
		{
			if (data_nodes[x][y].node == rank)
			{
				*handle_unit = malloc(sizeof(starpu_data_handle_t));
				(*handle_unit)[0] = data_nodes[x][y].data_handle;
				*nhandles = 1;
				data_nodes[x][y].node = dst_node;
				return;
			}
		}
	}
	*nhandles = 0;
}
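
/* Register the matrix cells with an initial block distribution, run niter
 * stencil iterations under the "heat" load-balancing policy, then migrate
 * every piece of data back to its original rank before unregistering it. */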
int main(int argc, char **argv)
{
	int my_rank, size, x, y, loop;
	float mean = 0;
	float matrix[X][Y];
	struct starpu_mpi_lb_conf itf;
	int ret;

	itf.get_neighbors = get_neighbors;
	itf.get_data_unit_to_migrate = get_data_unit_to_migrate;

	ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_mpi_init(&argc, &argv, 1);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &my_rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &size);

	if (starpu_cpu_worker_get_count() == 0)
	{
		FPRINTF(stderr, "We need at least 1 CPU worker.\n");
		starpu_mpi_shutdown();
		starpu_shutdown();
		/* 77 tells the test harness the test was skipped */
		return 77;
	}

	/* Start the "heat" load-balancing policy with our callbacks */
	setenv("LB_HEAT_SLEEP_THRESHOLD", "5", 1);
	starpu_mpi_lb_init("heat", &itf);

	parse_args(argc, argv);
	/* Initial data values */
	starpu_srand48((long int)time(NULL));
	for (x = 0; x < X; x++)
	{
		for (y = 0; y < Y; y++)
		{
			matrix[x][y] = (float)starpu_drand48();
			mean += matrix[x][y];
		}
	}
	mean /= (X*Y);
	if (display)
	{
		FPRINTF_MPI(stdout, "mean=%2.2f\n", mean);
		for (x = 0; x < X; x++)
		{
			fprintf(stdout, "[%d] ", my_rank);
			for (y = 0; y < Y; y++)
			{
				fprintf(stdout, "%2.2f ", matrix[x][y]);
			}
			fprintf(stdout, "\n");
		}
	}
	/* Initial distribution */
	for (x = 0; x < X; x++)
	{
		for (y = 0; y < Y; y++)
		{
			data_nodes[x][y].node = my_distrib(x, y, size);
			if (data_nodes[x][y].node == my_rank)
			{
				//FPRINTF(stderr, "[%d] Owning data[%d][%d]\n", my_rank, x, y);
				starpu_variable_data_register(&data_nodes[x][y].data_handle, 0, (uintptr_t)&(matrix[x][y]), sizeof(float));
			}
			else if (my_rank == my_distrib(x+1, y, size) || my_rank == my_distrib(x-1, y, size)
				 || my_rank == my_distrib(x, y+1, size) || my_rank == my_distrib(x, y-1, size))
			{
				/* I don't own this index, but will need it for my computations */
				//FPRINTF(stderr, "[%d] Neighbour of data[%d][%d]\n", my_rank, x, y);
				starpu_variable_data_register(&data_nodes[x][y].data_handle, -1, (uintptr_t)NULL, sizeof(float));
			}
			else
			{
				/* Neither mine nor a neighbour's: no need to allocate anything */
				data_nodes[x][y].data_handle = NULL;
			}
			if (data_nodes[x][y].data_handle)
			{
				starpu_mpi_data_register(data_nodes[x][y].data_handle, (y*X)+x, data_nodes[x][y].node);
			}
		}
	}
	/* First computation with the initial distribution */
	for (loop = 0; loop < niter; loop++)
	{
		for (x = 1; x < X-1; x++)
		{
			for (y = 1; y < Y-1; y++)
			{
				starpu_mpi_task_insert(MPI_COMM_WORLD, &stencil5_cl,
						       STARPU_RW, data_nodes[x][y].data_handle,
						       STARPU_R, data_nodes[x-1][y].data_handle, STARPU_R, data_nodes[x+1][y].data_handle,
						       STARPU_R, data_nodes[x][y-1].data_handle, STARPU_R, data_nodes[x][y+1].data_handle,
						       STARPU_TAG_ONLY, ((starpu_tag_t)X)*x + y,
						       0);
			}
		}
	}
	FPRINTF(stderr, "Waiting ...\n");
	starpu_task_wait_for_all();

	/* The load balancer must be shut down before unregistering data, as it still needs access to them */
	starpu_mpi_lb_shutdown();
	/* Unregister data */
	for (x = 0; x < X; x++)
	{
		for (y = 0; y < Y; y++)
		{
			if (data_nodes[x][y].data_handle)
			{
				int mpi_rank = my_distrib(x, y, size);
				/* Move the data back to its original place, where the user-provided buffer is... */
				starpu_mpi_data_migrate(MPI_COMM_WORLD, data_nodes[x][y].data_handle, mpi_rank);
				/* ... and unregister it */
				starpu_data_unregister(data_nodes[x][y].data_handle);
			}
		}
	}
	starpu_mpi_shutdown();
	starpu_shutdown();

	if (display)
	{
		FPRINTF(stdout, "[%d] mean=%2.2f\n", my_rank, mean);
		for (x = 0; x < X; x++)
		{
			FPRINTF(stdout, "[%d] ", my_rank);
			for (y = 0; y < Y; y++)
			{
				FPRINTF(stdout, "%2.2f ", matrix[x][y]);
			}
			FPRINTF(stdout, "\n");
		}
	}
	return 0;
}
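
/* Build/run sketch (an assumption, not part of this file: the pkg-config
 * package name's version suffix depends on the installed StarPU release):
 *
 *   mpicc stencil5_lb.c -o stencil5_lb \
 *         $(pkg-config --cflags starpumpi-1.3) $(pkg-config --libs starpumpi-1.3)
 *   mpirun -np 4 ./stencil5_lb -iter 100 -display
 */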