mpi_scatter_gather.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
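
/*
 * This example scatters a square block matrix from rank 0 to the other MPI
 * nodes with starpu_mpi_send()/starpu_mpi_recv(), scales each block in a CPU
 * codelet on its owner node, then gathers the blocks back on rank 0 and
 * prints the matrix before and after the computation.
 *
 * Run it through your MPI launcher, for instance (the exact command and the
 * binary name depend on your installation): mpirun -np 4 ./mpi_scatter_gather
 */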
#include <starpu_mpi.h>

/* Returns the MPI node that owns the block at coordinates (x, y) */
int my_distrib(int x, int y, int nb_nodes)
{
	return (x+y) % nb_nodes;
}
void cpu_codelet(void *descr[], void *_args)
{
	float *block;
	unsigned nx = STARPU_MATRIX_GET_NX(descr[0]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[0]);
	unsigned ld = STARPU_MATRIX_GET_LD(descr[0]);
	unsigned i, j;
	int rank;
	float factor;

	block = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
	starpu_unpack_cl_args(_args, &rank, &factor);
	fprintf(stderr, "rank %d factor %f\n", rank, factor);

	/* Scale every element of the block by the given factor */
	for (i = 0; i < ny; i++)
	{
		for (j = 0; j < nx; j++)
		{
			block[j+i*ld] *= factor;
		}
	}
}
static starpu_codelet cl =
{
	.where = STARPU_CPU,
	.cpu_func = cpu_codelet,
	.nbuffers = 1
};
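
/*
 * main() proceeds in five phases:
 *   1. rank 0 allocates and fills the block matrix,
 *   2. every node registers handles for the blocks it will deal with,
 *   3. rank 0 scatters each block to its owner node,
 *   4. each node scales the blocks it owns through the codelet above,
 *   5. the blocks are gathered back on rank 0, printed and freed.
 */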
int main(int argc, char **argv)
{
	int rank, nodes;
	float ***bmat;
	starpu_data_handle **data_handles;
	unsigned i, j, x, y;
	unsigned nblocks = 2;               /* blocks per dimension */
	unsigned block_size = 1;            /* elements per block dimension */
	unsigned size = nblocks*block_size; /* full matrix dimension */
	unsigned ld = size / nblocks;       /* leading dimension of one block */

	starpu_init(NULL);
	/* Initializes MPI as well and fills in this node's rank and the node count */
	starpu_mpi_initialize_extended(&rank, &nodes);
	if (rank == 0)
	{
		/* Allocate and fill the matrix: each block gets a distinct base value
		 * (100, 200, ...) so blocks are easy to tell apart in the output */
		int block_number = 100;
		bmat = malloc(nblocks * sizeof(float **));
		for(x=0 ; x<nblocks ; x++)
		{
			bmat[x] = malloc(nblocks * sizeof(float *));
			for(y=0 ; y<nblocks ; y++)
			{
				float value = 1.0;
				starpu_malloc((void **)&bmat[x][y], block_size*block_size*sizeof(float));
				for (i = 0; i < block_size; i++)
				{
					for (j = 0; j < block_size; j++)
					{
						bmat[x][y][j+i*block_size] = block_number + value;
						value++;
					}
				}
				block_number += 100;
			}
		}
	}
	// Print matrix
	if (rank == 0)
	{
		for(x=0 ; x<nblocks ; x++)
		{
			for(y=0 ; y<nblocks ; y++)
			{
				for (j = 0; j < block_size; j++)
				{
					for (i = 0; i < block_size; i++)
					{
						fprintf(stderr, "%2.2f\t", bmat[x][y][j+i*block_size]);
					}
					fprintf(stderr, "\n");
				}
				fprintf(stderr, "\n");
			}
		}
	}
	/* Allocate data handles and register data to StarPU */
	data_handles = malloc(nblocks*sizeof(starpu_data_handle *));
	for(x = 0; x < nblocks ; x++)
	{
		data_handles[x] = malloc(nblocks*sizeof(starpu_data_handle));
		for (y = 0; y < nblocks; y++)
		{
			int mpi_rank = my_distrib(x, y, nodes);
			if (rank == 0)
			{
				/* Rank 0 registers every block with its actual memory */
				starpu_matrix_data_register(&data_handles[x][y], 0, (uintptr_t)bmat[x][y],
							    ld, size/nblocks, size/nblocks, sizeof(float));
			}
			else if ((mpi_rank == rank) || (rank == mpi_rank+1) || (rank == mpi_rank-1))
			{
				/* I own that block, or I will need it for my computations */
				fprintf(stderr, "[%d] Owning or neighbor of data[%d][%d]\n", rank, x, y);
				starpu_matrix_data_register(&data_handles[x][y], -1, (uintptr_t)NULL,
							    ld, size/nblocks, size/nblocks, sizeof(float));
			}
			else
			{
				/* I know it's useless to allocate anything for this */
				data_handles[x][y] = NULL;
			}
			if (data_handles[x][y])
				starpu_data_set_rank(data_handles[x][y], mpi_rank);
		}
	}
	/* Scatter the matrix among the nodes */
	if (rank == 0)
	{
		/* Send each block to its owner (blocks owned by rank 0 stay in place) */
		for(x = 0; x < nblocks ; x++)
		{
			for (y = 0; y < nblocks; y++)
			{
				int mpi_rank = my_distrib(x, y, nodes);
				if (mpi_rank)
				{
					fprintf(stderr, "[%d] Sending data[%d][%d] to %d\n", rank, x, y, mpi_rank);
					starpu_mpi_send(data_handles[x][y], mpi_rank, mpi_rank, MPI_COMM_WORLD);
				}
			}
		}
	}
	else
	{
		/* Receive only the blocks this node owns */
		for(x = 0; x < nblocks ; x++)
		{
			for (y = 0; y < nblocks; y++)
			{
				int mpi_rank = my_distrib(x, y, nodes);
				if (mpi_rank == rank)
				{
					MPI_Status status;
					fprintf(stderr, "[%d] Receiving data[%d][%d] from %d\n", rank, x, y, 0);
					starpu_mpi_recv(data_handles[x][y], 0, mpi_rank, MPI_COMM_WORLD, &status);
				}
			}
		}
	}
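
	/*
	 * Each node now works only on the blocks it owns. The factor is advanced
	 * for every block on every node, so a given block always sees the same
	 * factor regardless of which node computes it.
	 */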
	/* Calculation */
	float factor = 10.0;
	for(x = 0; x < nblocks ; x++)
	{
		for (y = 0; y < nblocks; y++)
		{
			int mpi_rank = my_distrib(x, y, nodes);
			if (mpi_rank == rank)
			{
				fprintf(stderr, "[%d] Computing on data[%d][%d]\n", rank, x, y);
				starpu_insert_task(&cl,
						   STARPU_VALUE, &rank, sizeof(rank),
						   STARPU_VALUE, &factor, sizeof(factor),
						   STARPU_RW, data_handles[x][y],
						   0);
				starpu_task_wait_for_all();
			}
			factor += 10.0;
		}
	}
	/* Gather the matrix on main node */
	if (rank == 0)
	{
		/* Receive each remote block back from its owner */
		for(x = 0; x < nblocks ; x++)
		{
			for (y = 0; y < nblocks; y++)
			{
				int mpi_rank = my_distrib(x, y, nodes);
				if (mpi_rank)
				{
					MPI_Status status;
					fprintf(stderr, "[%d] Receiving data[%d][%d] from %d\n", rank, x, y, mpi_rank);
					starpu_mpi_recv(data_handles[x][y], mpi_rank, mpi_rank, MPI_COMM_WORLD, &status);
				}
			}
		}
	}
	else
	{
		/* Send back only the blocks this node owns */
		for(x = 0; x < nblocks ; x++)
		{
			for (y = 0; y < nblocks; y++)
			{
				int mpi_rank = my_distrib(x, y, nodes);
				if (mpi_rank == rank)
				{
					fprintf(stderr, "[%d] Sending data[%d][%d] to %d\n", rank, x, y, 0);
					starpu_mpi_send(data_handles[x][y], 0, mpi_rank, MPI_COMM_WORLD);
				}
			}
		}
	}
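
	/*
	 * Unregistering a handle on rank 0 writes the up-to-date block back into
	 * bmat, so the values printed below reflect the computation.
	 */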
	// Print matrix
	if (rank == 0)
	{
		for(x=0 ; x<nblocks ; x++)
		{
			for(y=0 ; y<nblocks ; y++)
			{
				starpu_data_unregister(data_handles[x][y]);
				for (j = 0; j < block_size; j++)
				{
					for (i = 0; i < block_size; i++)
					{
						fprintf(stderr, "%2.2f\t", bmat[x][y][j+i*block_size]);
					}
					fprintf(stderr, "\n");
				}
				fprintf(stderr, "\n");
			}
		}
	}
	// Free memory
	for(x = 0; x < nblocks ; x++)
	{
		free(data_handles[x]);
	}
	free(data_handles);
	if (rank == 0)
	{
		for(x=0 ; x<nblocks ; x++)
		{
			for(y=0 ; y<nblocks ; y++)
			{
				starpu_free((void *)bmat[x][y]);
			}
			free(bmat[x]);
		}
		free(bmat);
	}

	starpu_mpi_shutdown();
	starpu_shutdown();

	return 0;
}