stencil5_lb.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2018 CNRS
 * Copyright (C) 2018 Inria
 * Copyright (C) 2011,2013,2015-2018 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
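
/* Example: 5-point stencil over an X x Y matrix, distributed with StarPU-MPI
 * and rebalanced at runtime by the "heat" load balancing policy.
 *
 * Build/run sketch (assumptions: StarPU installed with MPI support, and a
 * pkg-config module named starpumpi-1.3; adjust to your installation):
 *
 *   mpicc stencil5_lb.c -o stencil5_lb $(pkg-config --cflags --libs starpumpi-1.3)
 *   mpirun -np 2 ./stencil5_lb -iter 10 -display
 */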
#include <stdio.h>
#include <stdlib.h> /* for getenv(), setenv(), malloc(), strtol() */
#include <string.h> /* for strcmp() */
#include <time.h>   /* for time() */
#include <math.h>
#include <starpu_mpi.h>
#include <starpu_mpi_lb.h>

#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) { fprintf(ofile, fmt, ## __VA_ARGS__); } } while(0)
#define FPRINTF_MPI(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) { \
    int _disp_rank; starpu_mpi_comm_rank(MPI_COMM_WORLD, &_disp_rank); \
    fprintf(ofile, "[%d][starpu_mpi][%s] " fmt, _disp_rank, __starpu_func__, ## __VA_ARGS__); \
    fflush(ofile); } } while(0)
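
/* CPU kernel: replace the centre value with the average of itself and its
 * four neighbours (a 5-point stencil). */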
void stencil5_cpu(void *descr[], void *_args)
{
    (void)_args;
    float *xy = (float *)STARPU_VARIABLE_GET_PTR(descr[0]);
    float *xm1y = (float *)STARPU_VARIABLE_GET_PTR(descr[1]);
    float *xp1y = (float *)STARPU_VARIABLE_GET_PTR(descr[2]);
    float *xym1 = (float *)STARPU_VARIABLE_GET_PTR(descr[3]);
    float *xyp1 = (float *)STARPU_VARIABLE_GET_PTR(descr[4]);

    // fprintf(stdout, "VALUES: %2.2f %2.2f %2.2f %2.2f %2.2f\n", *xy, *xm1y, *xp1y, *xym1, *xyp1);
    *xy = (*xy + *xm1y + *xp1y + *xym1 + *xyp1) / 5;
    // fprintf(stdout, "VALUES: %2.2f %2.2f %2.2f %2.2f %2.2f\n", *xy, *xm1y, *xp1y, *xym1, *xyp1);
}
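
/* Codelet: one CPU implementation and five buffers, the centre value in
 * read-write mode and the four neighbours read-only; the predefined nop
 * performance model is used since no history-based scheduling is needed. */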
struct starpu_codelet stencil5_cl =
{
    .cpu_funcs = {stencil5_cpu},
    .nbuffers = 5,
    .modes = {STARPU_RW, STARPU_R, STARPU_R, STARPU_R, STARPU_R},
    .model = &starpu_perfmodel_nop,
};
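
/* Problem size and iteration count: small presets for quick checks, larger
 * ones for long checks. */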
#ifdef STARPU_QUICK_CHECK
#  define NITER_DEF 5
#  define X 4
#  define Y 4
#elif !defined(STARPU_LONG_CHECK)
#  define NITER_DEF 10
#  define X 5
#  define Y 5
#else
#  define NITER_DEF 100
#  define X 20
#  define Y 20
#endif
int display = 0;
int niter = NITER_DEF;
/* Returns the MPI node number owning the data at index (x, y) */
int my_distrib(int x, int y, int nb_nodes)
{
    /* Block distribution */
    return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;
}
static void parse_args(int argc, char **argv)
{
    int i;
    for (i = 1; i < argc; i++)
    {
        if (strcmp(argv[i], "-iter") == 0)
        {
            char *argptr;
            niter = strtol(argv[++i], &argptr, 10);
        }
        if (strcmp(argv[i], "-display") == 0)
        {
            display = 1;
        }
    }
}
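
/* Load-balancer callback: report the ranks this node may migrate data to.
 * With 1 or 2 nodes the only neighbour is the next rank (wrapping around);
 * with more nodes each rank gets its two ring neighbours. */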
void get_neighbors(int **neighbor_ids, int *nneighbors)
{
    int rank, size;
    starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
    starpu_mpi_comm_size(MPI_COMM_WORLD, &size);

    if (size <= 2)
    {
        *nneighbors = 1;
        *neighbor_ids = malloc(sizeof(int));
        (*neighbor_ids)[0] = rank==size-1?0:rank+1;
        fprintf(stderr, "rank %d has neighbor %d\n", rank, (*neighbor_ids)[0]);
    }
    else
    {
        *nneighbors = 2;
        *neighbor_ids = malloc(2*sizeof(int));
        (*neighbor_ids)[0] = rank==size-1?0:rank+1;
        (*neighbor_ids)[1] = rank==0?size-1:rank-1;
        fprintf(stderr, "rank %d has neighbors %d and %d\n", rank, (*neighbor_ids)[0], (*neighbor_ids)[1]);
    }
}
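
/* Each matrix cell is tracked together with the rank that currently owns it;
 * the owner field is updated whenever the balancer migrates the data. */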
struct data_node
{
    starpu_data_handle_t data_handle;
    int node;
};

struct data_node data_nodes[X][Y];
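
/* Load-balancer callback: pick one piece of data to move to dst_node. This
 * simple policy hands over the first cell still owned by the local rank and
 * records the new owner; if nothing is owned locally, nothing is moved. */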
void get_data_unit_to_migrate(starpu_data_handle_t **handle_unit, int *nhandles, int dst_node)
{
    int rank, x, y;
    starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
    fprintf(stderr, "Looking to move data from %d to %d\n", rank, dst_node);
    for(x = 0; x < X; x++)
    {
        for (y = 0; y < Y; y++)
        {
            if (data_nodes[x][y].node == rank)
            {
                *handle_unit = malloc(sizeof(starpu_data_handle_t));
                (*handle_unit)[0] = data_nodes[x][y].data_handle;
                *nhandles = 1;
                data_nodes[x][y].node = dst_node;
                return;
            }
        }
    }
    *nhandles = 0;
}
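
/* main: initialise StarPU-MPI, register the "heat" load balancer with the two
 * callbacks above, distribute the matrix over the nodes, run niter stencil
 * iterations, then shut everything down in the reverse order. */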
int main(int argc, char **argv)
{
    int my_rank, size, x, y, loop;
    float mean=0;
    float matrix[X][Y];
    struct starpu_mpi_lb_conf itf;
    int ret;

    itf.get_neighbors = get_neighbors;
    itf.get_data_unit_to_migrate = get_data_unit_to_migrate;

    ret = starpu_mpi_init_conf(&argc, &argv, 1, MPI_COMM_WORLD, NULL);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");
    starpu_mpi_comm_rank(MPI_COMM_WORLD, &my_rank);
    starpu_mpi_comm_size(MPI_COMM_WORLD, &size);

    if (size > 2)
    {
        FPRINTF(stderr, "Only works with at most 2 nodes\n");
        starpu_mpi_shutdown();
        return 77;
    }
    if (starpu_cpu_worker_get_count() == 0)
    {
        FPRINTF(stderr, "We need at least 1 CPU worker.\n");
        starpu_mpi_shutdown();
        return 77;
    }
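
    /* Tune the "heat" policy through its LB_HEAT_SLEEP_THRESHOLD environment
     * variable before initialising the balancer (here the threshold is simply
     * tied to the problem size Y). */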
    {
        char sleep_thr[10];
        snprintf(sleep_thr, 10, "%d", Y);
        setenv("LB_HEAT_SLEEP_THRESHOLD", sleep_thr, 1);
    }
    starpu_mpi_lb_init("heat", &itf);

    parse_args(argc, argv);

    /* Initial data values */
    starpu_srand48((long int)time(NULL));
    for(x = 0; x < X; x++)
    {
        for (y = 0; y < Y; y++)
        {
            matrix[x][y] = (float)starpu_drand48();
            mean += matrix[x][y];
        }
    }
    mean /= (X*Y);

    if (display)
    {
        FPRINTF_MPI(stdout, "mean=%2.2f\n", mean);
        for(x = 0; x < X; x++)
        {
            fprintf(stdout, "[%d] ", my_rank);
            for (y = 0; y < Y; y++)
            {
                fprintf(stdout, "%2.2f ", matrix[x][y]);
            }
            fprintf(stdout, "\n");
        }
    }
    /* Initial distribution */
    for(x = 0; x < X; x++)
    {
        for (y = 0; y < Y; y++)
        {
            data_nodes[x][y].node = my_distrib(x, y, size);
            if (data_nodes[x][y].node == my_rank)
            {
                //FPRINTF(stderr, "[%d] Owning data[%d][%d]\n", my_rank, x, y);
                starpu_variable_data_register(&data_nodes[x][y].data_handle, 0, (uintptr_t)&(matrix[x][y]), sizeof(float));
            }
            else if (my_rank == my_distrib(x+1, y, size) || my_rank == my_distrib(x-1, y, size)
                     || my_rank == my_distrib(x, y+1, size) || my_rank == my_distrib(x, y-1, size))
            {
                /* I don't own this index, but will need it for my computations */
                //FPRINTF(stderr, "[%d] Neighbour of data[%d][%d]\n", my_rank, x, y);
                starpu_variable_data_register(&data_nodes[x][y].data_handle, -1, (uintptr_t)NULL, sizeof(float));
            }
            else
            {
                /* I know it's useless to allocate anything for this */
                data_nodes[x][y].data_handle = NULL;
            }
            if (data_nodes[x][y].data_handle)
            {
                starpu_data_set_coordinates(data_nodes[x][y].data_handle, 2, x, y);
                starpu_mpi_data_register(data_nodes[x][y].data_handle, (y*X)+x, data_nodes[x][y].node);
            }
        }
    }
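
    /* Each task updates one interior cell from its four neighbours;
     * starpu_mpi_task_insert() picks the executing node from the data
     * ownership and generates the required MPI transfers automatically. */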
    /* First computation with initial distribution */
    for(loop=0 ; loop<niter; loop++)
    {
        starpu_iteration_push(loop);
        for (x = 1; x < X-1; x++)
        {
            for (y = 1; y < Y-1; y++)
            {
                starpu_mpi_task_insert(MPI_COMM_WORLD, &stencil5_cl,
                                       STARPU_RW, data_nodes[x][y].data_handle,
                                       STARPU_R, data_nodes[x-1][y].data_handle, STARPU_R, data_nodes[x+1][y].data_handle,
                                       STARPU_R, data_nodes[x][y-1].data_handle, STARPU_R, data_nodes[x][y+1].data_handle,
                                       STARPU_TAG_ONLY, ((starpu_tag_t)Y)*x + y,
                                       0);
            }
        }
        starpu_iteration_pop();
    }
    FPRINTF(stderr, "Waiting ...\n");
    starpu_task_wait_for_all();

    // The load balancer needs to be shut down before unregistering data, as it needs access to them
    starpu_mpi_lb_shutdown();

    /* Unregister data */
    for(x = 0; x < X; x++)
    {
        for (y = 0; y < Y; y++)
        {
            if (data_nodes[x][y].data_handle)
            {
                starpu_data_unregister(data_nodes[x][y].data_handle);
            }
        }
    }
    starpu_mpi_shutdown();

    if (display)
    {
        FPRINTF(stdout, "[%d] mean=%2.2f\n", my_rank, mean);
        for(x = 0; x < X; x++)
        {
            FPRINTF(stdout, "[%d] ", my_rank);
            for (y = 0; y < Y; y++)
            {
                FPRINTF(stdout, "%2.2f ", matrix[x][y]);
            }
            FPRINTF(stdout, "\n");
        }
    }
    return 0;
}