/* mult.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012-2013 Inria
 * Copyright (C) 2009-2011,2013-2015 Université de Bordeaux
 * Copyright (C) 2010 Mehdi Juhoor
 * Copyright (C) 2010-2013,2015,2017 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * This example shows a simple implementation of a blocked matrix
 * multiplication. Note that this is NOT intended to be an efficient
 * implementation of sgemm! In this example, we show:
 *  - how to declare dense matrices (starpu_matrix_data_register)
 *  - how to manipulate matrices within codelets (e.g. descr[0].blas.ld)
 *  - how to use filters to partition the matrices into blocks
 *    (starpu_data_partition and starpu_data_map_filters)
 *  - how to unpartition data (starpu_data_unpartition) and how to stop
 *    monitoring data (starpu_data_unregister)
 *  - how to manipulate subsets of data (starpu_data_get_sub_data)
 *  - how to construct an autocalibrated performance model (starpu_perfmodel)
 *  - how to submit asynchronous tasks
 */
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#include <starpu.h>
/* The three dense matrices (column-major) and their StarPU data handles. */
static float *A, *B, *C;
static starpu_data_handle_t A_handle, B_handle, C_handle;

/* Number of blocks along x (columns of B/C) and y (rows of A/C). */
static unsigned nslicesx = 4;
static unsigned nslicesy = 4;

/* Problem dimensions: A is (zdim x ydim), B is (xdim x zdim), C is
 * (xdim x ydim).  Reduced sizes when STARPU_QUICK_CHECK is defined so the
 * example runs quickly in test suites. */
#ifdef STARPU_QUICK_CHECK
static unsigned xdim = 512;
static unsigned ydim = 512;
static unsigned zdim = 256;
#else
static unsigned xdim = 1024;
static unsigned ydim = 1024;
static unsigned zdim = 512;
#endif
/*
 * This program computes C = A * B
 *
 *   A of size (z,y)
 *   B of size (x,z)
 *   C of size (x,y)
 *
 *            |---------------|
 *          z |       B       |
 *            |---------------|
 *     z              x
 *   |----|   |---------------|
 *   |    |   |               |
 *   |    |   |               |
 *   | A  | y |       C       |
 *   |    |   |               |
 *   |    |   |               |
 *   |----|   |---------------|
 */
  69. /*
  70. * The codelet is passed 3 matrices, the "descr" union-type field gives a
  71. * description of the layout of those 3 matrices in the local memory (ie. RAM
  72. * in the case of CPU, GPU frame buffer in the case of GPU etc.). Since we have
  73. * registered data with the "matrix" data interface, we use the matrix macros.
  74. */
  75. void cpu_mult(void *descr[], void *arg)
  76. {
  77. (void)arg;
  78. float *subA, *subB, *subC;
  79. uint32_t nxC, nyC, nyA;
  80. uint32_t ldA, ldB, ldC;
  81. /* .blas.ptr gives a pointer to the first element of the local copy */
  82. subA = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
  83. subB = (float *)STARPU_MATRIX_GET_PTR(descr[1]);
  84. subC = (float *)STARPU_MATRIX_GET_PTR(descr[2]);
  85. /* .blas.nx is the number of rows (consecutive elements) and .blas.ny
  86. * is the number of lines that are separated by .blas.ld elements (ld
  87. * stands for leading dimension).
  88. * NB: in case some filters were used, the leading dimension is not
  89. * guaranteed to be the same in main memory (on the original matrix)
  90. * and on the accelerator! */
  91. nxC = STARPU_MATRIX_GET_NX(descr[2]);
  92. nyC = STARPU_MATRIX_GET_NY(descr[2]);
  93. nyA = STARPU_MATRIX_GET_NY(descr[0]);
  94. ldA = STARPU_MATRIX_GET_LD(descr[0]);
  95. ldB = STARPU_MATRIX_GET_LD(descr[1]);
  96. ldC = STARPU_MATRIX_GET_LD(descr[2]);
  97. /* we assume a FORTRAN-ordering! */
  98. unsigned i,j,k;
  99. for (i = 0; i < nyC; i++)
  100. {
  101. for (j = 0; j < nxC; j++)
  102. {
  103. float sum = 0.0;
  104. for (k = 0; k < nyA; k++)
  105. {
  106. sum += subA[j+k*ldA]*subB[k+i*ldB];
  107. }
  108. subC[j + i*ldC] = sum;
  109. }
  110. }
  111. }
  112. static void init_problem_data(void)
  113. {
  114. unsigned i,j;
  115. /* we initialize matrices A, B and C in the usual way */
  116. A = (float *) malloc(zdim*ydim*sizeof(float));
  117. B = (float *) malloc(xdim*zdim*sizeof(float));
  118. C = (float *) malloc(xdim*ydim*sizeof(float));
  119. /* fill the A and B matrices */
  120. starpu_srand48(2009);
  121. for (j=0; j < ydim; j++)
  122. {
  123. for (i=0; i < zdim; i++)
  124. {
  125. A[j+i*ydim] = (float)(starpu_drand48());
  126. }
  127. }
  128. for (j=0; j < zdim; j++)
  129. {
  130. for (i=0; i < xdim; i++)
  131. {
  132. B[j+i*zdim] = (float)(starpu_drand48());
  133. }
  134. }
  135. for (j=0; j < ydim; j++)
  136. {
  137. for (i=0; i < xdim; i++)
  138. {
  139. C[j+i*ydim] = (float)(0);
  140. }
  141. }
  142. }
  143. static void partition_mult_data(void)
  144. {
  145. /* note that we assume a FORTRAN ordering here! */
  146. /* The BLAS data interface is described by 4 parameters:
  147. * - the location of the first element of the matrix to monitor (3rd
  148. * argument)
  149. * - the number of elements between columns, aka leading dimension
  150. * (4th arg)
  151. * - the number of (contiguous) elements per column, ie. contiguous
  152. * elements (5th arg)
  153. * - the number of columns (6th arg)
  154. * The first elements is a pointer to the data_handle that will be
  155. * associated to the matrix, and the second elements gives the memory
  156. * node in which resides the matrix: 0 means that the 3rd argument is
  157. * an adress in main memory.
  158. */
  159. starpu_matrix_data_register(&A_handle, STARPU_MAIN_RAM, (uintptr_t)A,
  160. ydim, ydim, zdim, sizeof(float));
  161. starpu_matrix_data_register(&B_handle, STARPU_MAIN_RAM, (uintptr_t)B,
  162. zdim, zdim, xdim, sizeof(float));
  163. starpu_matrix_data_register(&C_handle, STARPU_MAIN_RAM, (uintptr_t)C,
  164. ydim, ydim, xdim, sizeof(float));
  165. /* A filter is a method to partition a data into disjoint chunks, it is
  166. * described by the means of the "struct starpu_data_filter" structure that
  167. * contains a function that is applied on a data handle to partition it
  168. * into smaller chunks, and an argument that is passed to the function
  169. * (eg. the number of blocks to create here).
  170. */
  171. /* StarPU supplies some basic filters such as the partition of a matrix
  172. * into blocks, note that we are using a FORTRAN ordering so that the
  173. * name of the filters are a bit misleading */
  174. struct starpu_data_filter vert =
  175. {
  176. .filter_func = starpu_matrix_filter_vertical_block,
  177. .nchildren = nslicesx
  178. };
  179. struct starpu_data_filter horiz =
  180. {
  181. .filter_func = starpu_matrix_filter_block,
  182. .nchildren = nslicesy
  183. };
  184. /*
  185. * Illustration with nslicex = 4 and nslicey = 2, it is possible to access
  186. * sub-data by using the "starpu_data_get_sub_data" method, which takes a data handle,
  187. * the number of filters to apply, and the indexes for each filters, for
  188. * instance:
  189. *
  190. * A' handle is starpu_data_get_sub_data(A_handle, 1, 1);
  191. * B' handle is starpu_data_get_sub_data(B_handle, 1, 2);
  192. * C' handle is starpu_data_get_sub_data(C_handle, 2, 2, 1);
  193. *
  194. * Note that here we applied 2 filters recursively onto C.
  195. *
  196. * "starpu_data_get_sub_data(C_handle, 1, 3)" would return a handle to the 4th column
  197. * of blocked matrix C for example.
  198. *
  199. * |---|---|---|---|
  200. * | | | B'| | B
  201. * |---|---|---|---|
  202. * 0 1 2 3
  203. * |----| |---|---|---|---|
  204. * | | | | | | |
  205. * | | 0 | | | | |
  206. * |----| |---|---|---|---|
  207. * | A' | | | | C'| |
  208. * | | | | | | |
  209. * |----| |---|---|---|---|
  210. * A C
  211. *
  212. * IMPORTANT: applying filters is equivalent to partitionning a piece of
  213. * data in a hierarchical manner, so that memory consistency is enforced
  214. * for each of the elements independantly. The tasks should therefore NOT
  215. * access inner nodes (eg. one column of C or the whole C) but only the
  216. * leafs of the tree (ie. blocks here). Manipulating inner nodes is only
  217. * possible by disapplying the filters (using starpu_data_unpartition), to
  218. * enforce memory consistency.
  219. */
  220. starpu_data_partition(B_handle, &vert);
  221. starpu_data_partition(A_handle, &horiz);
  222. /* starpu_data_map_filters is a variable-arity function, the first argument
  223. * is the handle of the data to partition, the second argument is the
  224. * number of filters to apply recursively. Filters are applied in the
  225. * same order as the arguments.
  226. * This would be equivalent to starpu_data_partition(C_handle, &vert) and
  227. * then applying horiz on each sub-data (ie. each column of C)
  228. */
  229. starpu_data_map_filters(C_handle, 2, &vert, &horiz);
  230. }
/* Auto-calibrated, history-based performance model: StarPU records the
 * execution times observed for tasks using this codelet (keyed by the
 * "mult_perf_model" symbol) and uses them to predict future ones. */
static struct starpu_perfmodel mult_perf_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "mult_perf_model"
};

static struct starpu_codelet cl =
{
	/* we can only execute that kernel on a CPU yet */
	/* CPU implementation of the codelet */
	.cpu_funcs = {cpu_mult},
	.cpu_funcs_name = {"cpu_mult"},
	/* the codelet manipulates 3 buffers that are managed by the DSM:
	 * A and B are read-only inputs, C is a write-only output */
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_R, STARPU_W},
	/* in case the scheduling policy may use performance models */
	.model = &mult_perf_model
};
  249. static int launch_tasks(void)
  250. {
  251. int ret;
  252. /* partition the work into slices */
  253. unsigned taskx, tasky;
  254. for (taskx = 0; taskx < nslicesx; taskx++)
  255. {
  256. for (tasky = 0; tasky < nslicesy; tasky++)
  257. {
  258. /* C[taskx, tasky] = A[tasky] B[taskx] */
  259. /* by default, starpu_task_create() returns an
  260. * asynchronous task (ie. task->synchronous = 0) */
  261. struct starpu_task *task = starpu_task_create();
  262. /* this task implements codelet "cl" */
  263. task->cl = &cl;
  264. /*
  265. * |---|---|---|---|
  266. * | | * | | | B
  267. * |---|---|---|---|
  268. * X
  269. * |----| |---|---|---|---|
  270. * |****| Y | |***| | |
  271. * |****| | |***| | |
  272. * |----| |---|---|---|---|
  273. * | | | | | | |
  274. * | | | | | | |
  275. * |----| |---|---|---|---|
  276. * A C
  277. */
  278. /* there was a single filter applied to matrices A
  279. * (respectively B) so we grab the handle to the chunk
  280. * identified by "tasky" (respectively "taskx). The "1"
  281. * tells StarPU that there is a single argument to the
  282. * variable-arity function starpu_data_get_sub_data */
  283. task->handles[0] = starpu_data_get_sub_data(A_handle, 1, tasky);
  284. task->handles[1] = starpu_data_get_sub_data(B_handle, 1, taskx);
  285. /* 2 filters were applied on matrix C, so we give
  286. * starpu_data_get_sub_data 2 arguments. The order of the arguments
  287. * must match the order in which the filters were
  288. * applied.
  289. * NB: starpu_data_get_sub_data(C_handle, 1, k) would have returned
  290. * a handle to the column number k of matrix C.
  291. * NB2: starpu_data_get_sub_data(C_handle, 2, taskx, tasky) is
  292. * equivalent to
  293. * starpu_data_get_sub_data(starpu_data_get_sub_data(C_handle, 1, taskx), 1, tasky)*/
  294. task->handles[2] = starpu_data_get_sub_data(C_handle, 2, taskx, tasky);
  295. /* this is not a blocking call since task->synchronous = 0 */
  296. ret = starpu_task_submit(task);
  297. if (ret == -ENODEV) return ret;
  298. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  299. }
  300. }
  301. return 0;
  302. }
  303. int main(void)
  304. {
  305. int ret;
  306. /* start the runtime */
  307. ret = starpu_init(NULL);
  308. if (ret == -ENODEV)
  309. return 77;
  310. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  311. /* initialize matrices A, B and C and register them to StarPU */
  312. init_problem_data();
  313. /* partition matrices into blocks that can be manipulated by the
  314. * codelets */
  315. partition_mult_data();
  316. /* submit all tasks in an asynchronous fashion */
  317. ret = launch_tasks();
  318. if (ret == -ENODEV) goto enodev;
  319. /* wait for termination */
  320. starpu_task_wait_for_all();
  321. /* remove the filters applied by the means of starpu_data_map_filters; now
  322. * it's not possible to manipulate a subset of C using starpu_data_get_sub_data until
  323. * starpu_data_map_filters is called again on C_handle.
  324. * The second argument is the memory node where the different subsets
  325. * should be reassembled, 0 = main memory (RAM) */
  326. starpu_data_unpartition(A_handle, STARPU_MAIN_RAM);
  327. starpu_data_unpartition(B_handle, STARPU_MAIN_RAM);
  328. starpu_data_unpartition(C_handle, STARPU_MAIN_RAM);
  329. /* stop monitoring matrix C : after this, it is not possible to pass C
  330. * (or any subset of C) as a codelet input/output. This also implements
  331. * a barrier so that the piece of data is put back into main memory in
  332. * case it was only available on a GPU for instance. */
  333. starpu_data_unregister(A_handle);
  334. starpu_data_unregister(B_handle);
  335. starpu_data_unregister(C_handle);
  336. free(A);
  337. free(B);
  338. free(C);
  339. starpu_shutdown();
  340. return 0;
  341. enodev:
  342. starpu_shutdown();
  343. return 77;
  344. }