mpi_scatter_gather.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <stdlib.h>
#include <starpu_mpi.h>
/* Returns the rank of the MPI node that owns the block at coordinates (x, y) */
int my_distrib(int x, int y, int nb_nodes)
{
    return (x+y) % nb_nodes;
}
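/* For example, with nb_nodes = 2 and the 4x4 block grid used in main() below,
 * this mapping spreads the blocks over the two nodes in a checkerboard pattern:
 *
 *     (x+y) % 2:   0 1 0 1
 *                  1 0 1 0
 *                  0 1 0 1
 *                  1 0 1 0
 */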
/* Codelet: multiply every element of the block by the block's own first element */
void cpu_codelet(void *descr[], void *_args)
{
    float *block;
    unsigned nx = STARPU_MATRIX_GET_NY(descr[0]); /* blocks are square, so one dimension is enough */
    unsigned ld = STARPU_MATRIX_GET_LD(descr[0]);
    unsigned i, j;
    int rank;
    float factor;

    block = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
    starpu_unpack_cl_args(_args, &rank);
    factor = block[0];

    //fprintf(stderr, "rank %d factor %f\n", rank, factor);
    for (j = 0; j < nx; j++)
    {
        for (i = 0; i < nx; i++)
        {
            //fprintf(stderr, "rank %d factor %f --> %f %f\n", rank, factor, block[j+i*ld], block[j+i*ld]*factor);
            block[j+i*ld] *= factor;
        }
    }
}
static starpu_codelet cl =
{
    .where = STARPU_CPU,
    .cpu_func = cpu_codelet,
    .nbuffers = 1
};
/* Scatter the data handles from the root node to their owners: the root posts
 * a detached send for every block it does not own, and every other node posts
 * a detached receive for the blocks it owns. */
int starpu_mpi_scatter(starpu_data_handle *data_handles, int count, int root, MPI_Comm comm)
{
    int rank;
    int x;

    MPI_Comm_rank(comm, &rank);
    if (rank == root)
    {
        for(x = 0; x < count; x++)
        {
            if (data_handles[x])
            {
                int owner = starpu_data_get_rank(data_handles[x]);
                if (owner != root)
                {
                    //fprintf(stderr, "[%d] Sending data[%d] to %d\n", rank, x, owner);
                    starpu_mpi_isend_detached(data_handles[x], owner, owner, comm, NULL, NULL);
                }
            }
        }
    }
    else
    {
        for(x = 0; x < count; x++)
        {
            if (data_handles[x])
            {
                int owner = starpu_data_get_rank(data_handles[x]);
                if (owner == rank)
                {
                    //fprintf(stderr, "[%d] Receiving data[%d] from %d\n", rank, x, root);
                    starpu_mpi_irecv_detached(data_handles[x], root, rank, comm, NULL, NULL);
                }
            }
        }
    }
    return 0;
}
/* Gather the data handles back on the root node: the root posts a detached
 * receive for every block it does not own, and each owner posts a detached
 * send back to the root. */
int starpu_mpi_gather(starpu_data_handle *data_handles, int count, int root, MPI_Comm comm)
{
    int rank;
    int x;

    MPI_Comm_rank(comm, &rank);
    if (rank == root)
    {
        for(x = 0; x < count; x++)
        {
            if (data_handles[x])
            {
                int owner = starpu_data_get_rank(data_handles[x]);
                if (owner != root)
                {
                    //fprintf(stderr, "[%d] Receiving data[%d] from %d\n", rank, x, owner);
                    starpu_mpi_irecv_detached(data_handles[x], owner, owner, comm, NULL, NULL);
                }
            }
        }
    }
    else
    {
        for(x = 0; x < count; x++)
        {
            if (data_handles[x])
            {
                int owner = starpu_data_get_rank(data_handles[x]);
                if (owner == rank)
                {
                    //fprintf(stderr, "[%d] Sending data[%d] to %d\n", rank, x, root);
                    starpu_mpi_isend_detached(data_handles[x], root, rank, comm, NULL, NULL);
                }
            }
        }
    }
    return 0;
}
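/* Note on the two helpers above: the detached send/receive calls only post the
 * communications and return immediately; completion is handled by StarPU-MPI in
 * the background, which is why no explicit wait appears here. Later accesses to
 * the same handles (task submission, unregistering) are expected to be ordered
 * after the pending transfers. */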
int main(int argc, char **argv)
{
    int rank, nodes;
    float ***bmat = NULL;
    starpu_data_handle *data_handles;
    unsigned i, j, x, y;
    unsigned nblocks = 4;
    unsigned block_size = 2;
    unsigned size = nblocks*block_size;
    unsigned ld = size / nblocks;

    starpu_init(NULL);
    starpu_mpi_initialize_extended(&rank, &nodes);

    if (rank == 0)
    {
        /* Allocate the matrix as nblocks x nblocks blocks of block_size x block_size floats */
        int block_number = 10;
        bmat = malloc(nblocks * sizeof(float **));
        for(x = 0; x < nblocks; x++)
        {
            bmat[x] = malloc(nblocks * sizeof(float *));
            for(y = 0; y < nblocks; y++)
            {
                float value = 0.0;
                starpu_malloc((void **)&bmat[x][y], block_size*block_size*sizeof(float));
                for (i = 0; i < block_size; i++)
                {
                    for (j = 0; j < block_size; j++)
                    {
                        bmat[x][y][j + i*block_size] = block_number + value;
                        value++;
                    }
                }
                block_number += 10;
            }
        }
    }
#if 0
    // Print matrix
    if (rank == 0)
    {
        fprintf(stderr, "Input matrix\n");
        for(x = 0; x < nblocks; x++)
        {
            for(y = 0; y < nblocks; y++)
            {
                for (j = 0; j < block_size; j++)
                {
                    for (i = 0; i < block_size; i++)
                    {
                        fprintf(stderr, "%2.2f\t", bmat[x][y][j+i*block_size]);
                    }
                    fprintf(stderr, "\n");
                }
                fprintf(stderr, "\n");
            }
        }
    }
#endif
    /* Allocate data handles and register data with StarPU */
    data_handles = malloc(nblocks*nblocks*sizeof(starpu_data_handle));
    for(x = 0; x < nblocks; x++)
    {
        for (y = 0; y < nblocks; y++)
        {
            int mpi_rank = my_distrib(x, y, nodes);
            if (rank == 0)
            {
                /* The root node registers every block with its actual memory */
                starpu_matrix_data_register(&data_handles[x+y*nblocks], 0, (uintptr_t)bmat[x][y],
                                            ld, size/nblocks, size/nblocks, sizeof(float));
            }
            else if (mpi_rank == rank || rank == mpi_rank+1 || rank == mpi_rank-1)
            {
                /* I own that block, or I will need it for my computations */
                //fprintf(stderr, "[%d] Owning or neighbor of data[%d][%d]\n", rank, x, y);
                starpu_matrix_data_register(&data_handles[x+y*nblocks], -1, (uintptr_t)NULL,
                                            ld, size/nblocks, size/nblocks, sizeof(float));
            }
            else
            {
                /* I know it is useless to allocate anything for this block */
                data_handles[x+y*nblocks] = NULL;
            }
            if (data_handles[x+y*nblocks])
            {
                starpu_data_set_rank(data_handles[x+y*nblocks], mpi_rank);
            }
        }
    }
    /* Scatter the matrix among the nodes */
    starpu_mpi_scatter(data_handles, nblocks*nblocks, 0, MPI_COMM_WORLD);

    /* Calculation: each node runs the codelet on the blocks it owns */
    for(x = 0; x < nblocks*nblocks; x++)
    {
        if (data_handles[x])
        {
            int owner = starpu_data_get_rank(data_handles[x]);
            if (owner == rank)
            {
                //fprintf(stderr, "[%d] Computing on data[%d]\n", rank, x);
                starpu_insert_task(&cl,
                                   STARPU_VALUE, &rank, sizeof(rank),
                                   STARPU_RW, data_handles[x],
                                   0);
            }
        }
    }
    /* Wait for all submitted tasks before gathering the results */
    starpu_task_wait_for_all();
    /* Gather the matrix back on the main node */
    starpu_mpi_gather(data_handles, nblocks*nblocks, 0, MPI_COMM_WORLD);

    /* Unregister the matrix from StarPU */
    for(x = 0; x < nblocks*nblocks; x++)
    {
        if (data_handles[x])
        {
            starpu_data_unregister(data_handles[x]);
        }
    }
#if 0
    // Print matrix
    if (rank == 0)
    {
        fprintf(stderr, "Output matrix\n");
        for(x = 0; x < nblocks; x++)
        {
            for(y = 0; y < nblocks; y++)
            {
                for (j = 0; j < block_size; j++)
                {
                    for (i = 0; i < block_size; i++)
                    {
                        fprintf(stderr, "%2.2f\t", bmat[x][y][j+i*block_size]);
                    }
                    fprintf(stderr, "\n");
                }
                fprintf(stderr, "\n");
            }
        }
    }
#endif
    // Free memory
    free(data_handles);
    if (rank == 0)
    {
        for(x = 0; x < nblocks; x++)
        {
            for(y = 0; y < nblocks; y++)
            {
                starpu_free((void *)bmat[x][y]);
            }
            free(bmat[x]);
        }
        free(bmat);
    }

    starpu_mpi_shutdown();
    starpu_shutdown();

    return 0;
}
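/* Build/run sketch (assumed commands; adjust to your MPI wrapper and StarPU
 * installation, as the pkg-config module name varies across StarPU versions):
 *
 *   mpicc mpi_scatter_gather.c -o mpi_scatter_gather \
 *         $(pkg-config --cflags --libs libstarpumpi)
 *   mpirun -np 4 ./mpi_scatter_gather
 */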