mm.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2016  Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * This example illustrates how to distribute a pre-existing data structure to
 * a set of computing nodes using StarPU-MPI routines.
 */

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <starpu.h>
#include <starpu_mpi.h>
#include "helper.h"

#define VERBOSE 0

static int N  = 16; /* Matrix size */
static int BS =  4; /* Block size */
#define NB ((N)/(BS)) /* Number of blocks */

/* Matrices. Will be allocated as regular, linearized C arrays */
static double *A = NULL; /* A will be partitioned as BS rows x N cols blocks */
static double *B = NULL; /* B will be partitioned as N rows x BS cols blocks */
static double *C = NULL; /* C will be partitioned as BS rows x BS cols blocks */

/* Arrays of data handles for managing matrix blocks */
static starpu_data_handle_t *A_h;
static starpu_data_handle_t *B_h;
static starpu_data_handle_t *C_h;

static int comm_rank; /* mpi rank of the process */
static int comm_size; /* size of the mpi session */

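/*
 * Illustration (not part of the original example): with the default N = 16 and
 * BS = 4, NB = 4, so C is cut into a 4 x 4 grid of 4 x 4 blocks. Block
 * (b_row, b_col) of C starts at C + b_row*BS*N + b_col*BS in the linearized
 * array and keeps the row stride N of the full matrix; A is cut into NB
 * horizontal slabs of BS rows, and B into NB vertical slabs of BS columns.
 */
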
static void alloc_matrices(void)
{
	/* A regular 'malloc' could also be used instead; however, starpu_malloc makes sure
	 * that the area is allocated in suitably pinned memory to improve data transfers,
	 * especially with CUDA */
	starpu_malloc((void **)&A, N*N*sizeof(double));
	starpu_malloc((void **)&B, N*N*sizeof(double));
	starpu_malloc((void **)&C, N*N*sizeof(double));
}

static void free_matrices(void)
{
	starpu_free(A);
	starpu_free(B);
	starpu_free(C);
}

static void init_matrices(void)
{
	int row,col;
	for (row = 0; row < N; row++)
	{
		for (col = 0; col < N; col++)
		{
			A[row*N+col] = (row==col)?2:0;
			B[row*N+col] = row*N+col;
			C[row*N+col] = 0;
		}
	}
}

#if VERBOSE
static void disp_matrix(double *m)
{
	int row,col;
	for (row = 0; row < N; row++)
	{
		for (col = 0; col < N; col++)
		{
			printf("\t%.2lf", m[row*N+col]);
		}
		printf("\n");
	}
}
#endif

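/*
 * Why 2*(row*N+col) below: A is initialized as 2 on the diagonal and 0
 * elsewhere (A = 2*I) and B[row*N+col] = row*N+col, so the product C = A*B
 * must satisfy C[row*N+col] = 2*(row*N+col). The 1.0 tolerance also absorbs
 * the small 'comm_rank / 100.0' tag added to each cell in VERBOSE mode.
 */
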
static void check_result(void) {
	int row,col;
	for (row = 0; row < N; row++)
	{
		for (col = 0; col < N; col++)
		{
			if (fabs(C[row*N+col] - 2*(row*N+col)) > 1.0)
			{
				fprintf(stderr, "check failed\n");
				exit(1);
			}
		}
	}
#if VERBOSE
	printf("success\n");
#endif
}

/* Register the matrix blocks to StarPU and to StarPU-MPI */
static void register_matrices(void)
{
	A_h = calloc(NB, sizeof(starpu_data_handle_t));
	B_h = calloc(NB, sizeof(starpu_data_handle_t));
	C_h = calloc(NB*NB, sizeof(starpu_data_handle_t));

	/* Memory region where the data being registered resides.
	 * In this example, all blocks are allocated by node 0, thus
	 * - node 0 specifies STARPU_MAIN_RAM to indicate that it owns the block in its main memory
	 * - nodes != 0 specify -1 to indicate that they don't have a copy of the block initially
	 */
	int mr = (comm_rank == 0) ? STARPU_MAIN_RAM : -1;

	/* mpi tag used for the block */
	int tag = 0;

	int b_row,b_col;

	for (b_row = 0; b_row < NB; b_row++) {
		/* Register a block to StarPU */
		starpu_matrix_data_register(&A_h[b_row],
				mr,
				(comm_rank == 0)?(uintptr_t)(A+b_row*BS*N):0, N, N, BS,
				sizeof(double));

		/* Register a block to StarPU-MPI, specifying the mpi tag to use for transferring the block
		 * and the rank of the owner node.
		 *
		 * Note: StarPU-MPI is an autonomous layer built on top of StarPU, hence the two separate
		 * registration steps.
		 */
		starpu_mpi_data_register(A_h[b_row], tag++, 0);
	}

	for (b_col = 0; b_col < NB; b_col++) {
		starpu_matrix_data_register(&B_h[b_col],
				mr,
				(comm_rank == 0)?(uintptr_t)(B+b_col*BS):0, N, BS, N,
				sizeof(double));
		starpu_mpi_data_register(B_h[b_col], tag++, 0);
	}

	for (b_row = 0; b_row < NB; b_row++) {
		for (b_col = 0; b_col < NB; b_col++) {
			starpu_matrix_data_register(&C_h[b_row*NB+b_col],
					mr,
					(comm_rank == 0)?(uintptr_t)(C+b_row*BS*N+b_col*BS):0, N, BS, BS,
					sizeof(double));
			starpu_mpi_data_register(C_h[b_row*NB+b_col], tag++, 0);
		}
	}
}

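/*
 * Note on the registration calls above, assuming the usual argument order
 * starpu_matrix_data_register(handle, home_node, ptr, ld, nx, ny, elemsize):
 * every block keeps the leading dimension N of the full matrix it is carved
 * from; e.g. a slab of A spans BS rows of N contiguous elements, hence
 * ld = N, nx = N, ny = BS. Ranks other than 0 pass a null pointer since they
 * hold no initial copy of the block.
 */
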
/* Transfer ownership of the C matrix blocks following some user-defined distribution over the nodes.
 * Note: since C will be Write-accessed, this also implicitly defines which node performs the task
 * associated with a given block. */
static void distribute_matrix_C(void)
{
	int b_row,b_col;
	for (b_row = 0; b_row < NB; b_row++)
	{
		for (b_col = 0; b_col < NB; b_col++)
		{
			starpu_data_handle_t h = C_h[b_row*NB+b_col];

			/* Select the node where the block should be computed. */
			int target_rank = (b_row+b_col)%comm_size;

			/* Move the block to its new owner. */
			starpu_mpi_get_data_on_node(MPI_COMM_WORLD, h, target_rank);
			starpu_mpi_data_set_rank(h, target_rank);
		}
	}
}

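/*
 * The diagonal mapping above is only one possible distribution. For
 * illustration only, a 2D block-cyclic mapping over a hypothetical P x Q
 * process grid (P and Q are not defined in this example) could be selected
 * instead with:
 *
 *     int target_rank = (b_row % P) * Q + (b_col % Q);
 *
 * provided P*Q <= comm_size.
 */
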
/* Transfer ownership of the C matrix blocks back to node 0, for display purposes. This is not mandatory. */
static void undistribute_matrix_C(void)
{
	int b_row,b_col;
	for (b_row = 0; b_row < NB; b_row++) {
		for (b_col = 0; b_col < NB; b_col++) {
			starpu_data_handle_t h = C_h[b_row*NB+b_col];
			starpu_mpi_get_data_on_node(MPI_COMM_WORLD, h, 0);
			starpu_mpi_data_set_rank(h, 0);
		}
	}
}

/* Unregister matrices from the StarPU management. */
static void unregister_matrices(void)
{
	int b_row,b_col;
	for (b_row = 0; b_row < NB; b_row++) {
		starpu_data_unregister(A_h[b_row]);
	}
	for (b_col = 0; b_col < NB; b_col++) {
		starpu_data_unregister(B_h[b_col]);
	}
	for (b_row = 0; b_row < NB; b_row++) {
		for (b_col = 0; b_col < NB; b_col++) {
			starpu_data_unregister(C_h[b_row*NB+b_col]);
		}
	}
	free(A_h);
	free(B_h);
	free(C_h);
}

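/*
 * Note: starpu_data_unregister hands the data back to the application. On
 * node 0, whose blocks were registered with STARPU_MAIN_RAM as their home
 * node, the latest block contents are written back into the A, B and C
 * arrays, which is what lets check_result() read C directly afterwards.
 */
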
/* Perform the actual computation. In a real-life case, this would rather call a BLAS 'gemm' routine
 * instead. */
static void cpu_mult(void *handles[], STARPU_ATTRIBUTE_UNUSED void *arg)
{
	double *block_A = (double *)STARPU_MATRIX_GET_PTR(handles[0]);
	double *block_B = (double *)STARPU_MATRIX_GET_PTR(handles[1]);
	double *block_C = (double *)STARPU_MATRIX_GET_PTR(handles[2]);

	unsigned n_col_A = STARPU_MATRIX_GET_NX(handles[0]);
	unsigned n_col_B = STARPU_MATRIX_GET_NX(handles[1]);
	unsigned n_col_C = STARPU_MATRIX_GET_NX(handles[2]);

	unsigned n_row_A = STARPU_MATRIX_GET_NY(handles[0]);
	unsigned n_row_B = STARPU_MATRIX_GET_NY(handles[1]);
	unsigned n_row_C = STARPU_MATRIX_GET_NY(handles[2]);

	unsigned ld_A = STARPU_MATRIX_GET_LD(handles[0]);
	unsigned ld_B = STARPU_MATRIX_GET_LD(handles[1]);
	unsigned ld_C = STARPU_MATRIX_GET_LD(handles[2]);

	/* Sanity check, not needed in a real-life case */
	assert(n_col_C == n_col_B);
	assert(n_row_C == n_row_A);
	assert(n_col_A == n_row_B);

	unsigned i,j,k;
	for (k = 0; k < n_row_C; k++) {
		for (j = 0; j < n_col_C; j++) {
			for (i = 0; i < n_col_A; i++) {
				block_C[k*ld_C+j] += block_A[k*ld_A+i] * block_B[i*ld_B+j];
			}
#if VERBOSE
			/* For illustration purposes, show which node computed
			 * the block in the decimal part of the cell */
			block_C[k*ld_C+j] += comm_rank / 100.0;
#endif
		}
	}
}

/* Define a StarPU 'codelet' structure for the matrix multiply kernel above.
 * This structure enables specifying multiple implementations for the kernel (such as CUDA or OpenCL versions).
 */
static struct starpu_codelet gemm_cl =
{
	.cpu_funcs = {cpu_mult}, /* cpu implementation(s) of the routine */
	.nbuffers = 3, /* number of data handles referenced by this routine */
	.modes = {STARPU_R, STARPU_R, STARPU_RW} /* access modes for each data handle */
};

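/*
 * For illustration only (not part of the original example): a GPU variant
 * could be declared in the same codelet by also filling the 'cuda_funcs'
 * field, e.g.
 *
 *     .cpu_funcs  = {cpu_mult},
 *     .cuda_funcs = {cuda_mult},  // 'cuda_mult': hypothetical CUDA wrapper, not provided here
 *
 * StarPU would then pick an implementation depending on where the scheduler
 * places each task.
 */
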
int main(int argc, char *argv[])
{
	/* Initialize the StarPU core */
	int ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	/* Initialize the StarPU-MPI layer */
	ret = starpu_mpi_init(&argc, &argv, 1);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");

	if (starpu_cpu_worker_get_count() == 0)
	{
		FPRINTF(stderr, "We need at least 1 CPU worker.\n");
		starpu_mpi_shutdown();
		starpu_shutdown();
		return STARPU_TEST_SKIPPED;
	}

	/* Parse the optional matrix size and block size arguments */
	if (argc > 1) {
		N = atoi(argv[1]);
		if (N < 1) {
			fprintf(stderr, "invalid matrix size\n");
			exit(1);
		}
		if (argc > 2) {
			BS = atoi(argv[2]);
		}
		if (BS < 1 || N % BS != 0) {
			fprintf(stderr, "invalid block size\n");
			exit(1);
		}
	}

	/* Get the process rank and session size */
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &comm_rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &comm_size);

	if (comm_rank == 0)
	{
#if VERBOSE
		printf("N = %d\n", N);
		printf("BS = %d\n", BS);
		printf("NB = %d\n", NB);
		printf("comm_size = %d\n", comm_size);
#endif
		/* In this example, node rank 0 performs all the memory allocations and initializations,
		 * and the blocks are later distributed on the other nodes.
		 * This is not mandatory however, and blocks could be allocated on other nodes right
		 * from the beginning, depending on the application needs (in particular for the case
		 * where the session-wide data footprint is larger than a single node's available memory). */
		alloc_matrices();
		init_matrices();
	}

	/* Register matrices to StarPU and StarPU-MPI */
	register_matrices();

	/* Distribute C blocks */
	distribute_matrix_C();

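	/* One gemm task is inserted per block of C. Since the C block is the only
	 * data accessed in RW mode, StarPU-MPI runs each task on the node that owns
	 * that block (as set by distribute_matrix_C above) and transfers the needed
	 * A and B blocks automatically. */
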
	int b_row,b_col;
	for (b_row = 0; b_row < NB; b_row++)
	{
		for (b_col = 0; b_col < NB; b_col++)
		{
			starpu_mpi_task_insert(MPI_COMM_WORLD, &gemm_cl,
					STARPU_R,  A_h[b_row],
					STARPU_R,  B_h[b_col],
					STARPU_RW, C_h[b_row*NB+b_col],
					0);
		}
	}

	/* Wait for all the submitted tasks to complete */
	starpu_task_wait_for_all();

	/* Bring the C blocks back to node 0 and release all the handles */
	undistribute_matrix_C();
	unregister_matrices();

	if (comm_rank == 0) {
#if VERBOSE
		disp_matrix(C);
#endif
		check_result();
		free_matrices();
	}

	starpu_mpi_shutdown();
	starpu_shutdown();
	return 0;
}