mpi_cholesky_distributed.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2011 Université de Bordeaux 1
 * Copyright (C) 2010 Mehdi Juhoor <mjuhoor@gmail.com>
 * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu_mpi.h>
#include "mpi_cholesky.h"
#include "mpi_cholesky_models.h"

/*
 * Create the codelets
 */
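/* The three codelets correspond to the three kernels of the tiled Cholesky
 * factorization: cl11 factorizes a diagonal block (POTRF), cl21 applies a
 * triangular solve to the off-diagonal blocks of the current panel (TRSM),
 * and cl22 updates the trailing submatrix (GEMM/SYRK). */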
static struct starpu_codelet cl11 =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {chol_cpu_codelet_update_u11, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {chol_cublas_codelet_update_u11, NULL},
#endif
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.model = &chol_model_11
};

static struct starpu_codelet cl21 =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {chol_cpu_codelet_update_u21, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {chol_cublas_codelet_update_u21, NULL},
#endif
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_RW},
	.model = &chol_model_21
};

static struct starpu_codelet cl22 =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {chol_cpu_codelet_update_u22, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {chol_cublas_codelet_update_u22, NULL},
#endif
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_R, STARPU_RW},
	.model = &chol_model_22
};
/* Returns the MPI node that owns the data block at index (x, y):
 * a cyclic distribution along the anti-diagonals */
int my_distrib(int x, int y, int nb_nodes)
{
	return (x+y) % nb_nodes;
}
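/* With 3 nodes, for example, block (1, 2) lives on node (1+2) % 3 = 0. */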
/*
 * Code to bootstrap the factorization
 * and construct the DAG
 */
static void dw_cholesky(float ***matA, unsigned size, unsigned ld, unsigned nblocks, int rank, int nodes)
{
	struct timeval start;
	struct timeval end;
	starpu_data_handle_t **data_handles;
	int x, y;

	/* create all the DAG nodes */
	unsigned i, j, k;

	data_handles = malloc(nblocks*sizeof(starpu_data_handle_t *));
	for (x = 0; x < nblocks; x++)
		data_handles[x] = malloc(nblocks*sizeof(starpu_data_handle_t));

	starpu_mpi_barrier(MPI_COMM_WORLD);
	gettimeofday(&start, NULL);

	for (x = 0; x < nblocks; x++)
	{
		for (y = 0; y < nblocks; y++)
		{
			int mpi_rank = my_distrib(x, y, nodes);
			if (mpi_rank == rank)
			{
				//fprintf(stderr, "[%d] Owning data[%d][%d]\n", rank, x, y);
				starpu_matrix_data_register(&data_handles[x][y], 0, (uintptr_t)matA[x][y],
							    ld, size/nblocks, size/nblocks, sizeof(float));
			}
			/* TODO: a finer test could register only the blocks this node actually needs */
			else
			{
				/* I don't own this block, but will need it for my computations */
				//fprintf(stderr, "[%d] Neighbour of data[%d][%d]\n", rank, x, y);
				starpu_matrix_data_register(&data_handles[x][y], -1, (uintptr_t)NULL,
							    ld, size/nblocks, size/nblocks, sizeof(float));
			}
			if (data_handles[x][y])
			{
				starpu_data_set_rank(data_handles[x][y], mpi_rank);
				starpu_data_set_tag(data_handles[x][y], (y*nblocks)+x);
			}
		}
	}
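	/* Submit the whole task graph. Every node runs these same loops;
	 * starpu_mpi_insert_task() executes each task on the node that owns the
	 * data it writes to, and posts the required MPI transfers automatically. */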
	for (k = 0; k < nblocks; k++)
	{
		int prio = STARPU_DEFAULT_PRIO;
		if (!noprio) prio = STARPU_MAX_PRIO;

		starpu_mpi_insert_task(MPI_COMM_WORLD, &cl11,
				       STARPU_PRIORITY, prio,
				       STARPU_RW, data_handles[k][k],
				       0);

		for (j = k+1; j < nblocks; j++)
		{
			prio = STARPU_DEFAULT_PRIO;
			if (!noprio && (j == k+1)) prio = STARPU_MAX_PRIO;

			starpu_mpi_insert_task(MPI_COMM_WORLD, &cl21,
					       STARPU_PRIORITY, prio,
					       STARPU_R, data_handles[k][k],
					       STARPU_RW, data_handles[k][j],
					       0);

			for (i = k+1; i < nblocks; i++)
			{
				if (i <= j)
				{
					prio = STARPU_DEFAULT_PRIO;
					if (!noprio && (i == k+1) && (j == k+1)) prio = STARPU_MAX_PRIO;

					starpu_mpi_insert_task(MPI_COMM_WORLD, &cl22,
							       STARPU_PRIORITY, prio,
							       STARPU_R, data_handles[k][i],
							       STARPU_R, data_handles[k][j],
							       STARPU_RW, data_handles[i][j],
							       0);
				}
			}
		}
	}
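	/* Task submission is asynchronous; wait for the locally-executed tasks
	 * to complete before unregistering the data handles. */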
	starpu_task_wait_for_all();

	for (x = 0; x < nblocks; x++)
	{
		for (y = 0; y < nblocks; y++)
		{
			if (data_handles[x][y])
				starpu_data_unregister(data_handles[x][y]);
		}
		free(data_handles[x]);
	}
	free(data_handles);

	starpu_mpi_barrier(MPI_COMM_WORLD);
	gettimeofday(&end, NULL);

	if (rank == 0)
	{
		double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
		fprintf(stderr, "Computation took (in ms)\n");
		fprintf(stdout, "%2.2f\n", timing/1000);

		/* a Cholesky factorization performs about n^3/3 floating-point operations */
		double flop = (1.0f*size*size*size)/3.0f;
		fprintf(stderr, "Synthetic GFlops : %2.2f\n", (flop/timing/1000.0f));
	}
}
int main(int argc, char **argv)
{
	/* create a simple symmetric positive definite matrix example
	 *
	 * Hilbert matrix: h(i,j) = 1/(i+j+1)
	 * (the diagonal is shifted by 'size' to make the matrix diagonally
	 * dominant, hence positive definite)
	 */
	float ***bmat;
	int rank, nodes;

	parse_args(argc, argv);

	struct starpu_conf conf;
	starpu_conf_init(&conf);
	conf.sched_policy_name = "heft";
	conf.calibrate = 1;
	starpu_init(&conf);
	starpu_mpi_initialize_extended(&rank, &nodes);
	starpu_helper_cublas_init();

	unsigned i, j, x, y;
	bmat = malloc(nblocks * sizeof(float **));
	for (x = 0; x < nblocks; x++)
	{
		bmat[x] = malloc(nblocks * sizeof(float *));
		for (y = 0; y < nblocks; y++)
		{
			int mpi_rank = my_distrib(x, y, nodes);
			if (mpi_rank == rank)
			{
				/* only allocate and fill the blocks owned by this node */
				starpu_malloc((void **)&bmat[x][y], BLOCKSIZE*BLOCKSIZE*sizeof(float));
				for (i = 0; i < BLOCKSIZE; i++)
				{
					for (j = 0; j < BLOCKSIZE; j++)
					{
						bmat[x][y][j + i*BLOCKSIZE] = (1.0f/(1.0f+(i+(x*BLOCKSIZE)+j+(y*BLOCKSIZE)))) + ((i+(x*BLOCKSIZE) == j+(y*BLOCKSIZE))?1.0f*size:0.0f);
						//mat[j + i*size] = ((i == j)?1.0f*size:0.0f);
					}
				}
			}
		}
	}
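	/* Blocks are stored contiguously, so the leading dimension passed to
	 * dw_cholesky is the block size, size/nblocks (assumed to equal BLOCKSIZE). */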
	dw_cholesky(bmat, size, size/nblocks, nblocks, rank, nodes);

	starpu_mpi_shutdown();

	for (x = 0; x < nblocks; x++)
	{
		for (y = 0; y < nblocks; y++)
		{
			int mpi_rank = my_distrib(x, y, nodes);
			if (mpi_rank == rank)
			{
				starpu_free((void *)bmat[x][y]);
			}
		}
		free(bmat[x]);
	}
	free(bmat);

	starpu_helper_cublas_shutdown();
	starpu_shutdown();

	return 0;
}
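/* Note: this example is meant to be launched with one MPI process per node,
 * e.g. "mpirun -np 4 ./mpi_cholesky_distributed" (binary name assumed). */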