mm.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2016 Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This example illustrates how to distribute a pre-existing data structure to
 * a set of computing nodes using StarPU-MPI routines.
 */
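/* A typical invocation might look as follows (the launcher and binary name are
 * assumptions depending on the local build and MPI setup, shown only as an
 * illustration):
 *   mpirun -np 4 ./mm 32 8
 * where the optional arguments override the matrix size N and block size BS. */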
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <starpu.h>
#include <starpu_mpi.h>
#include "helper.h"

#define VERBOSE 0
static int N = 16; /* Matrix size */
static int BS = 4; /* Block size */
#define NB ((N)/(BS)) /* Number of blocks */
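/* N must be a multiple of BS (checked in main). With the default values
 * N = 16 and BS = 4, each matrix is cut into NB = 4 blocks per partitioned
 * dimension. */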
/* Matrices. Will be allocated as regular, linearized C arrays. */
static double *A = NULL; /* A will be partitioned as BS rows x N cols blocks */
static double *B = NULL; /* B will be partitioned as N rows x BS cols blocks */
static double *C = NULL; /* C will be partitioned as BS rows x BS cols blocks */

/* Arrays of data handles for managing matrix blocks */
static starpu_data_handle_t *A_h;
static starpu_data_handle_t *B_h;
static starpu_data_handle_t *C_h;

static int comm_rank; /* MPI rank of the process */
static int comm_size; /* size of the MPI session */
static void alloc_matrices(void)
{
    /* Regular 'malloc' could be used instead; however, starpu_malloc makes
     * sure that the area is allocated in suitably pinned memory to improve
     * data transfers, especially with CUDA. */
    starpu_malloc((void **)&A, N*N*sizeof(double));
    starpu_malloc((void **)&B, N*N*sizeof(double));
    starpu_malloc((void **)&C, N*N*sizeof(double));
}
static void free_matrices(void)
{
    starpu_free(A);
    starpu_free(B);
    starpu_free(C);
}
static void init_matrices(void)
{
    int row,col;
    for (row = 0; row < N; row++)
    {
        for (col = 0; col < N; col++)
        {
            A[row*N+col] = (row==col)?2:0;
            B[row*N+col] = row*N+col;
            C[row*N+col] = 0;
        }
    }
}
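/* Since A is initialized to twice the identity matrix, the expected product is
 * C = A*B = 2*B, which is exactly what check_result() below verifies. The 1.0
 * tolerance there leaves room for the fractional rank marker that cpu_mult
 * adds to each cell in VERBOSE builds. */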
#if VERBOSE
static void disp_matrix(double *m)
{
    int row,col;
    for (row = 0; row < N; row++)
    {
        for (col = 0; col < N; col++)
        {
            printf("\t%.2lf", m[row*N+col]);
        }
        printf("\n");
    }
}
#endif
static void check_result(void)
{
    int row,col;
    for (row = 0; row < N; row++)
    {
        for (col = 0; col < N; col++)
        {
            if (fabs(C[row*N+col] - 2*(row*N+col)) > 1.0)
            {
                fprintf(stderr, "check failed\n");
                exit(1);
            }
        }
    }
#if VERBOSE
    printf("success\n");
#endif
}
/* Register the matrix blocks to StarPU and to StarPU-MPI */
static void register_matrices(void)
{
    A_h = calloc(NB, sizeof(starpu_data_handle_t));
    B_h = calloc(NB, sizeof(starpu_data_handle_t));
    C_h = calloc(NB*NB, sizeof(starpu_data_handle_t));

    /* Memory region where the data being registered resides.
     * In this example, all blocks are allocated by node 0, thus:
     * - node 0 specifies STARPU_MAIN_RAM to indicate that it owns the block in its main memory;
     * - the other nodes specify -1 to indicate that they don't have a copy of the block initially.
     */
    int mr = (comm_rank == 0) ? STARPU_MAIN_RAM : -1;

    /* MPI tag used for the block */
    int tag = 0;
    int b_row,b_col;
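    /* Reminder on the starpu_matrix_data_register arguments, in order: the
     * handle, the memory node holding the initial copy, the pointer to the
     * data (0 on non-owning nodes), the leading dimension (ld), the number of
     * elements per row (nx), the number of rows (ny), and the element size. */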
    for (b_row = 0; b_row < NB; b_row++)
    {
        /* Register a block to StarPU */
        starpu_matrix_data_register(&A_h[b_row], mr,
                (comm_rank == 0)?(uintptr_t)(A+b_row*BS*N):0, N, N, BS,
                sizeof(double));
        /* Register a block to StarPU-MPI, specifying the MPI tag to use for
         * transferring the block and the rank of the owner node.
         *
         * Note: StarPU-MPI is an autonomous layer built on top of StarPU,
         * hence the two separate registration steps. */
        starpu_mpi_data_register(A_h[b_row], tag++, 0);
    }
    for (b_col = 0; b_col < NB; b_col++)
    {
        starpu_matrix_data_register(&B_h[b_col], mr,
                (comm_rank == 0)?(uintptr_t)(B+b_col*BS):0, N, BS, N,
                sizeof(double));
        starpu_mpi_data_register(B_h[b_col], tag++, 0);
    }
    for (b_row = 0; b_row < NB; b_row++)
    {
        for (b_col = 0; b_col < NB; b_col++)
        {
            starpu_matrix_data_register(&C_h[b_row*NB+b_col], mr,
                    (comm_rank == 0)?(uintptr_t)(C+b_row*BS*N+b_col*BS):0, N, BS, BS,
                    sizeof(double));
            starpu_mpi_data_register(C_h[b_row*NB+b_col], tag++, 0);
        }
    }
}
/* Transfer ownership of the C matrix blocks following some user-defined distribution over the nodes.
 * Note: since C will be Write-accessed, this implicitly defines which node performs the task
 * associated with a given block. */
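/* The (b_row+b_col)%comm_size mapping used below yields a checkerboard-like
 * layout: with comm_size = 2, for instance, block ownership alternates between
 * rank 0 and rank 1 along both the rows and the columns of blocks. */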
static void distribute_matrix_C(void)
{
    int b_row,b_col;
    for (b_row = 0; b_row < NB; b_row++)
    {
        for (b_col = 0; b_col < NB; b_col++)
        {
            starpu_data_handle_t h = C_h[b_row*NB+b_col];
            /* Select the node where the block should be computed. */
            int target_rank = (b_row+b_col)%comm_size;
            /* Move the block to its new owner. */
            starpu_mpi_data_migrate(MPI_COMM_WORLD, h, target_rank);
        }
    }
}
/* Transfer ownership of the C matrix blocks back to node 0, for display purposes. This is not mandatory. */
static void undistribute_matrix_C(void)
{
    int b_row,b_col;
    for (b_row = 0; b_row < NB; b_row++)
    {
        for (b_col = 0; b_col < NB; b_col++)
        {
            starpu_data_handle_t h = C_h[b_row*NB+b_col];
            starpu_mpi_data_migrate(MPI_COMM_WORLD, h, 0);
        }
    }
}
/* Unregister matrices from StarPU management. */
static void unregister_matrices(void)
{
    int b_row,b_col;
    for (b_row = 0; b_row < NB; b_row++)
    {
        starpu_data_unregister(A_h[b_row]);
    }
    for (b_col = 0; b_col < NB; b_col++)
    {
        starpu_data_unregister(B_h[b_col]);
    }
    for (b_row = 0; b_row < NB; b_row++)
    {
        for (b_col = 0; b_col < NB; b_col++)
        {
            starpu_data_unregister(C_h[b_row*NB+b_col]);
        }
    }
    free(A_h);
    free(B_h);
    free(C_h);
}
/* Perform the actual computation. In a real-life case, this would call a BLAS
 * 'gemm' routine instead. */
static void cpu_mult(void *handles[], STARPU_ATTRIBUTE_UNUSED void *arg)
{
    double *block_A = (double *)STARPU_MATRIX_GET_PTR(handles[0]);
    double *block_B = (double *)STARPU_MATRIX_GET_PTR(handles[1]);
    double *block_C = (double *)STARPU_MATRIX_GET_PTR(handles[2]);
    unsigned n_col_A = STARPU_MATRIX_GET_NX(handles[0]);
    unsigned n_col_B = STARPU_MATRIX_GET_NX(handles[1]);
    unsigned n_col_C = STARPU_MATRIX_GET_NX(handles[2]);
    unsigned n_row_A = STARPU_MATRIX_GET_NY(handles[0]);
    unsigned n_row_B = STARPU_MATRIX_GET_NY(handles[1]);
    unsigned n_row_C = STARPU_MATRIX_GET_NY(handles[2]);
    unsigned ld_A = STARPU_MATRIX_GET_LD(handles[0]);
    unsigned ld_B = STARPU_MATRIX_GET_LD(handles[1]);
    unsigned ld_C = STARPU_MATRIX_GET_LD(handles[2]);
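    /* The leading dimension (LD) is the stride, in elements, between two
     * consecutive rows. Since each block registered above is a sub-block of a
     * linearized N x N array, every LD here is N rather than the block width,
     * which is why the indexing below uses k*ld+i instead of k*width+i. */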
    /* Sanity check; not needed in a real-life case. */
    assert(n_col_C == n_col_B);
    assert(n_row_C == n_row_A);
    assert(n_col_A == n_row_B);

    unsigned i,j,k;
    for (k = 0; k < n_row_C; k++)
    {
        for (j = 0; j < n_col_C; j++)
        {
            for (i = 0; i < n_col_A; i++)
            {
                block_C[k*ld_C+j] += block_A[k*ld_A+i] * block_B[i*ld_B+j];
            }
#if VERBOSE
            /* For illustration purposes, show which node computed
             * the block in the decimal part of the cell. */
            block_C[k*ld_C+j] += comm_rank / 100.0;
#endif
        }
    }
}
/* Define a StarPU 'codelet' structure for the matrix multiply kernel above.
 * This structure enables specifying multiple implementations for the kernel
 * (such as CUDA or OpenCL versions). */
static struct starpu_codelet gemm_cl =
{
    .cpu_funcs = {cpu_mult}, /* CPU implementation(s) of the routine */
    .nbuffers = 3, /* number of data handles referenced by this routine */
    .modes = {STARPU_R, STARPU_R, STARPU_RW} /* access modes for each data handle */
};
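/* For instance, a GPU version could be plugged in alongside the CPU one by also
 * setting the codelet's .cuda_funcs field, e.g. .cuda_funcs = {cuda_mult},
 * where 'cuda_mult' is a hypothetical CUDA kernel wrapper not defined here;
 * StarPU would then pick an implementation at runtime per task. */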
int main(int argc, char *argv[])
{
    /* Initializes the StarPU core */
    int ret = starpu_init(NULL);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

    /* Initializes the StarPU-MPI layer */
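    /* The third argument tells StarPU-MPI to initialize MPI itself (here, 1);
     * passing 0 instead would mean the application has already called
     * MPI_Init on its own. */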
    ret = starpu_mpi_init(&argc, &argv, 1);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");

    if (starpu_cpu_worker_get_count() == 0)
    {
        FPRINTF(stderr, "We need at least 1 CPU worker.\n");
        starpu_mpi_shutdown();
        starpu_shutdown();
        return STARPU_TEST_SKIPPED;
    }
    /* Parse the optional matrix size and block size arguments */
    if (argc > 1)
    {
        N = atoi(argv[1]);
        if (N < 1)
        {
            fprintf(stderr, "invalid matrix size\n");
            exit(1);
        }
        if (argc > 2)
        {
            BS = atoi(argv[2]);
        }
        if (BS < 1 || N % BS != 0)
        {
            fprintf(stderr, "invalid block size\n");
            exit(1);
        }
    }
    /* Get the process rank and session size */
    starpu_mpi_comm_rank(MPI_COMM_WORLD, &comm_rank);
    starpu_mpi_comm_size(MPI_COMM_WORLD, &comm_size);

    if (comm_rank == 0)
    {
#if VERBOSE
        printf("N = %d\n", N);
        printf("BS = %d\n", BS);
        printf("NB = %d\n", NB);
        printf("comm_size = %d\n", comm_size);
#endif
        /* In this example, node 0 performs all the memory allocations and
         * initializations, and the blocks are later distributed to the other
         * nodes. This is not mandatory, however; blocks could be allocated on
         * other nodes right from the beginning, depending on the application's
         * needs (in particular when the session-wide data footprint is larger
         * than a single node's available memory). */
        alloc_matrices();
        init_matrices();
    }
    /* Register matrices to StarPU and StarPU-MPI */
    register_matrices();

    /* Distribute the C blocks */
    distribute_matrix_C();

    int b_row,b_col;
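    /* starpu_mpi_task_insert() decides on which node each task runs (by
     * default, the node owning the data accessed in write mode, i.e. the C
     * block) and automatically posts the MPI transfers needed for the
     * read-only A and B blocks, so no explicit send/receive calls appear here. */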
    for (b_row = 0; b_row < NB; b_row++)
    {
        for (b_col = 0; b_col < NB; b_col++)
        {
            starpu_mpi_task_insert(MPI_COMM_WORLD, &gemm_cl,
                    STARPU_R, A_h[b_row],
                    STARPU_R, B_h[b_col],
                    STARPU_RW, C_h[b_row*NB+b_col],
                    0);
        }
    }
    /* Wait for all submitted tasks to complete */
    starpu_task_wait_for_all();

    undistribute_matrix_C();
    unregister_matrices();

    if (comm_rank == 0)
    {
#if VERBOSE
        disp_matrix(C);
#endif
        check_result();
        free_matrices();
    }

    starpu_mpi_shutdown();
    starpu_shutdown();
    return 0;
}