mpi_scatter_gather.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2012 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <stdlib.h>
#include <starpu_mpi.h>
/* Returns the rank of the MPI node that owns the block at index (x,y) */
int my_distrib(int x, int y, int nb_nodes)
{
    return (x+y) % nb_nodes;
}
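
/* CPU implementation of the codelet: scale every element of the block by the
 * block's first element. The block is square (block_size x block_size), so the
 * same extent is used for both loop bounds. */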
void cpu_codelet(void *descr[], void *_args)
{
    float *block;
    unsigned nx = STARPU_MATRIX_GET_NY(descr[0]);
    unsigned ld = STARPU_MATRIX_GET_LD(descr[0]);
    unsigned i,j;
    int rank;
    float factor;

    block = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
    starpu_codelet_unpack_args(_args, &rank);
    factor = block[0];

    //fprintf(stderr,"rank %d factor %f\n", rank, factor);
    for (j = 0; j < nx; j++)
    {
        for (i = 0; i < nx; i++)
        {
            //fprintf(stderr,"rank %d factor %f --> %f %f\n", rank, factor, block[j+i*ld], block[j+i*ld]*factor);
            block[j+i*ld] *= factor;
        }
    }
}
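
/* Codelet descriptor: a CPU-only task that accesses a single matrix block in
 * read-write mode. */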
static struct starpu_codelet cl =
{
    .where = STARPU_CPU,
    .cpu_funcs = {cpu_codelet, NULL},
    .nbuffers = 1,
    .modes = {STARPU_RW},
};
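
/* Callbacks fired when a detached send (scallback) or receive (rcallback)
 * posted by the scatter/gather operations completes. */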
void scallback(void *arg)
{
    char *msg = arg;
    fprintf(stderr, "Sending completed for <%s>\n", msg);
}

void rcallback(void *arg)
{
    char *msg = arg;
    fprintf(stderr, "Reception completed for <%s>\n", msg);
}
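
/* Overall flow: rank 0 allocates and initializes the whole matrix as a grid of
 * blocks, every rank registers the handles it needs, the blocks are scattered
 * to their owners, each rank runs the codelet on the blocks it owns, and the
 * results are gathered back on rank 0. */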
int main(int argc, char **argv)
{
    int rank, nodes;
    float ***bmat = NULL;
    starpu_data_handle_t *data_handles;
    unsigned i,j,x,y;

    unsigned nblocks=4;
    unsigned block_size=2;
    unsigned size = nblocks*block_size;
    unsigned ld = size / nblocks;

    int ret = starpu_init(NULL);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
    ret = starpu_mpi_init(&argc, &argv, 1);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nodes);
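
    /* Only the root rank holds the full matrix; the other ranks will let
     * StarPU allocate the blocks they need when the handles are registered. */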
    if (rank == 0)
    {
        /* Allocate and fill the matrix, block by block */
        int block_number=10;
        bmat = malloc(nblocks * sizeof(float **));
        for(x=0 ; x<nblocks ; x++)
        {
            bmat[x] = malloc(nblocks * sizeof(float *));
            for(y=0 ; y<nblocks ; y++)
            {
                float value=0.0;
                starpu_malloc((void **)&bmat[x][y], block_size*block_size*sizeof(float));
                for (i = 0; i < block_size; i++)
                {
                    for (j = 0; j < block_size; j++)
                    {
                        bmat[x][y][j+i*block_size] = block_number + value;
                        value++;
                    }
                }
                block_number += 10;
            }
        }
    }
#if 0
    // Print matrix
    if (rank == 0)
    {
        fprintf(stderr, "Input matrix\n");
        for(x=0 ; x<nblocks ; x++)
        {
            for(y=0 ; y<nblocks ; y++)
            {
                for (j = 0; j < block_size; j++)
                {
                    for (i = 0; i < block_size; i++)
                    {
                        fprintf(stderr, "%2.2f\t", bmat[x][y][j+i*block_size]);
                    }
                    fprintf(stderr,"\n");
                }
                fprintf(stderr,"\n");
            }
        }
    }
#endif

    /* Allocate data handles and register data to StarPU */
    data_handles = malloc(nblocks*nblocks*sizeof(starpu_data_handle_t));
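    /* Registration policy: rank 0 registers every block with its real buffer,
     * the other ranks register a StarPU-allocated placeholder only for the
     * blocks they own or neighbor, and each handle is tagged with its owner
     * rank and an MPI tag so the scatter/gather know where to route it. */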
    for(x = 0; x < nblocks ; x++)
    {
        for (y = 0; y < nblocks; y++)
        {
            int mpi_rank = my_distrib(x, y, nodes);
            if (rank == 0)
            {
                starpu_matrix_data_register(&data_handles[x+y*nblocks], 0, (uintptr_t)bmat[x][y],
                                            ld, size/nblocks, size/nblocks, sizeof(float));
            }
            else if ((mpi_rank == rank) || ((rank == mpi_rank+1 || rank == mpi_rank-1)))
            {
                /* I own this block, or I will need it for my computations */
                //fprintf(stderr, "[%d] Owning or neighbor of data[%d][%d]\n", rank, x, y);
                starpu_matrix_data_register(&data_handles[x+y*nblocks], -1, (uintptr_t)NULL,
                                            ld, size/nblocks, size/nblocks, sizeof(float));
            }
            else
            {
                /* This block is useless to this rank, so do not allocate anything for it */
                data_handles[x+y*nblocks] = NULL;
            }
            if (data_handles[x+y*nblocks])
            {
                starpu_data_set_rank(data_handles[x+y*nblocks], mpi_rank);
                starpu_data_set_tag(data_handles[x+y*nblocks], (y*nblocks)+x);
            }
        }
    }

    /* Scatter the matrix among the nodes: the root posts detached sends of each
     * block to its owner. scallback reports completion of the sends; no
     * receive-side callback is requested here. */
    starpu_mpi_scatter_detached(data_handles, nblocks*nblocks, 0, MPI_COMM_WORLD, scallback, "scatter", NULL, NULL);

    /* Calculation: each rank submits a task only for the blocks it owns */
    for(x = 0; x < nblocks*nblocks ; x++)
    {
        if (data_handles[x])
        {
            int owner = starpu_data_get_rank(data_handles[x]);
            if (owner == rank)
            {
                //fprintf(stderr,"[%d] Computing on data[%d]\n", rank, x);
                starpu_insert_task(&cl,
                                   STARPU_VALUE, &rank, sizeof(rank),
                                   STARPU_RW, data_handles[x],
                                   0);
            }
        }
    }
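
    /* The gather below is posted on the same data handles the tasks modify, so
     * StarPU orders it after the tasks; no explicit barrier is needed here. */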
    /* Gather the matrix back on the root node; scallback reports completed
     * sends on the owners and rcallback completed receptions on the root. */
    starpu_mpi_gather_detached(data_handles, nblocks*nblocks, 0, MPI_COMM_WORLD, scallback, "gather", rcallback, "gather");

    /* Unregister the blocks from StarPU; on rank 0 this writes the results back
     * into bmat */
    for(x=0 ; x<nblocks*nblocks ; x++)
    {
        if (data_handles[x])
        {
            starpu_data_unregister(data_handles[x]);
        }
    }
#if 0
    // Print matrix
    if (rank == 0)
    {
        fprintf(stderr, "Output matrix\n");
        for(x=0 ; x<nblocks ; x++)
        {
            for(y=0 ; y<nblocks ; y++)
            {
                for (j = 0; j < block_size; j++)
                {
                    for (i = 0; i < block_size; i++)
                    {
                        fprintf(stderr, "%2.2f\t", bmat[x][y][j+i*block_size]);
                    }
                    fprintf(stderr,"\n");
                }
                fprintf(stderr,"\n");
            }
        }
    }
#endif

    // Free memory
    free(data_handles);
    if (rank == 0)
    {
        for(x=0 ; x<nblocks ; x++)
        {
            for(y=0 ; y<nblocks ; y++)
            {
                starpu_free((void *)bmat[x][y]);
            }
            free(bmat[x]);
        }
        free(bmat);
    }

    starpu_mpi_shutdown();
    starpu_shutdown();

    return 0;
}