/* mult.c — StarPU example: blocked single-precision matrix multiplication. */
/*
 * StarPU
 * Copyright (C) Université Bordeaux 1, CNRS 2008-2010 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This example shows a simple implementation of a blocked matrix
 * multiplication.  Note that this is NOT intended to be an efficient
 * implementation of sgemm!  In this example, we show:
 *  - how to declare dense matrices (starpu_matrix_data_register)
 *  - how to manipulate matrices within codelets (e.g. descr[0].blas.ld)
 *  - how to use filters to partition the matrices into blocks
 *    (starpu_data_partition and starpu_data_map_filters)
 *  - how to unpartition data (starpu_data_unpartition) and how to stop
 *    monitoring data (starpu_data_unregister)
 *  - how to manipulate subsets of data (starpu_data_get_sub_data)
 *  - how to construct an autocalibrated performance model (starpu_perfmodel_t)
 *  - how to submit asynchronous tasks and how to use callbacks to handle
 *    task termination
 */
#include <math.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>

#include <starpu.h>
  38. static float *A, *B, *C;
  39. static starpu_data_handle A_handle, B_handle, C_handle;
  40. static pthread_mutex_t mutex;
  41. static pthread_cond_t cond;
  42. static unsigned taskcounter;
  43. static unsigned terminated = 0;
  44. static unsigned nslicesx = 4;
  45. static unsigned nslicesy = 4;
  46. static unsigned nslicesz = 4;
  47. static unsigned xdim = 1024;
  48. static unsigned ydim = 1024;
  49. static unsigned zdim = 512;
  50. /*
  51. * That program should compute C = A * B
  52. *
  53. * A of size (z,y)
  54. * B of size (x,z)
  55. * C of size (x,y)
  56. |---------------|
  57. z | B |
  58. |---------------|
  59. z x
  60. |----| |---------------|
  61. | | | |
  62. | | | |
  63. | A | y | C |
  64. | | | |
  65. | | | |
  66. |----| |---------------|
  67. */
  68. static void callback_func(void *arg)
  69. {
  70. /* the argument is a pointer to a counter of the remaining tasks */
  71. int *counterptr = arg;
  72. /* counterptr points to a variable with the number of remaining tasks,
  73. * when it reaches 0, all tasks are done */
  74. int counter = STARPU_ATOMIC_ADD(counterptr, -1);
  75. if (counter == 0)
  76. {
  77. /* IMPORTANT : note that we CANNOT call blocking operations
  78. * within callbacks as it may lead to a deadlock of StarPU.
  79. * starpu_data_unpartition is for instance called by the main
  80. * thread since it may cause /potentially/ blocking operations
  81. * such as memory transfers from a GPU to a CPU. */
  82. /* wake the application to notify the termination of all the
  83. * tasks */
  84. pthread_mutex_lock(&mutex);
  85. terminated = 1;
  86. pthread_cond_signal(&cond);
  87. pthread_mutex_unlock(&mutex);
  88. }
  89. }
  90. /*
  91. * The codelet is passed 3 matrices, the "descr" union-type field gives a
  92. * description of the layout of those 3 matrices in the local memory (ie. RAM
  93. * in the case of CPU, GPU frame buffer in the case of GPU etc.). Since we have
  94. * registered data with the "blas" data interface, we manipulate the .blas
  95. * field of the descr[x] elements which are union types.
  96. */
  97. static void cpu_mult(void *descr[], __attribute__((unused)) void *arg)
  98. {
  99. float *subA, *subB, *subC;
  100. uint32_t nxC, nyC, nyA;
  101. uint32_t ldA, ldB, ldC;
  102. /* .blas.ptr gives a pointer to the first element of the local copy */
  103. subA = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
  104. subB = (float *)STARPU_MATRIX_GET_PTR(descr[1]);
  105. subC = (float *)STARPU_MATRIX_GET_PTR(descr[2]);
  106. /* .blas.nx is the number of rows (consecutive elements) and .blas.ny
  107. * is the number of lines that are separated by .blas.ld elements (ld
  108. * stands for leading dimension).
  109. * NB: in case some filters were used, the leading dimension is not
  110. * guaranteed to be the same in main memory (on the original matrix)
  111. * and on the accelerator! */
  112. nxC = STARPU_MATRIX_GET_NX(descr[2]);
  113. nyC = STARPU_MATRIX_GET_NY(descr[2]);
  114. nyA = STARPU_MATRIX_GET_NY(descr[0]);
  115. ldA = STARPU_MATRIX_GET_LD(descr[0]);
  116. ldB = STARPU_MATRIX_GET_LD(descr[1]);
  117. ldC = STARPU_MATRIX_GET_LD(descr[2]);
  118. /* we assume a FORTRAN-ordering! */
  119. unsigned i,j,k;
  120. for (i = 0; i < nyC; i++)
  121. {
  122. for (j = 0; j < nxC; j++)
  123. {
  124. float sum = 0.0;
  125. for (k = 0; k < nyA; k++)
  126. {
  127. sum += subA[j+k*ldA]*subB[k+i*ldB];
  128. }
  129. subC[j + i*ldC] = sum;
  130. }
  131. }
  132. }
  133. static void init_problem_data(void)
  134. {
  135. unsigned i,j;
  136. /* we initialize matrices A, B and C in the usual way */
  137. A = malloc(zdim*ydim*sizeof(float));
  138. B = malloc(xdim*zdim*sizeof(float));
  139. C = malloc(xdim*ydim*sizeof(float));
  140. /* fill the A and B matrices */
  141. srand(2009);
  142. for (j=0; j < ydim; j++) {
  143. for (i=0; i < zdim; i++) {
  144. A[j+i*ydim] = (float)(starpu_drand48());
  145. }
  146. }
  147. for (j=0; j < zdim; j++) {
  148. for (i=0; i < xdim; i++) {
  149. B[j+i*zdim] = (float)(starpu_drand48());
  150. }
  151. }
  152. for (j=0; j < ydim; j++) {
  153. for (i=0; i < xdim; i++) {
  154. C[j+i*ydim] = (float)(0);
  155. }
  156. }
  157. }
  158. static void partition_mult_data(void)
  159. {
  160. /* note that we assume a FORTRAN ordering here! */
  161. /* The BLAS data interface is described by 4 parameters:
  162. * - the location of the first element of the matrix to monitor (3rd
  163. * argument)
  164. * - the number of elements between columns, aka leading dimension
  165. * (4th arg)
  166. * - the number of (contiguous) elements per column, ie. contiguous
  167. * elements (5th arg)
  168. * - the number of columns (6th arg)
  169. * The first elements is a pointer to the data_handle that will be
  170. * associated to the matrix, and the second elements gives the memory
  171. * node in which resides the matrix: 0 means that the 3rd argument is
  172. * an adress in main memory.
  173. */
  174. starpu_matrix_data_register(&A_handle, 0, (uintptr_t)A,
  175. ydim, ydim, zdim, sizeof(float));
  176. starpu_matrix_data_register(&B_handle, 0, (uintptr_t)B,
  177. zdim, zdim, xdim, sizeof(float));
  178. starpu_matrix_data_register(&C_handle, 0, (uintptr_t)C,
  179. ydim, ydim, xdim, sizeof(float));
  180. /* A filter is a method to partition a data into disjoint chunks, it is
  181. * described by the means of the "struct starpu_data_filter" structure that
  182. * contains a function that is applied on a data handle to partition it
  183. * into smaller chunks, and an argument that is passed to the function
  184. * (eg. the number of blocks to create here).
  185. */
  186. /* StarPU supplies some basic filters such as the partition of a matrix
  187. * into blocks, note that we are using a FORTRAN ordering so that the
  188. * name of the filters are a bit misleading */
  189. struct starpu_data_filter f = {
  190. .filter_func = starpu_vertical_block_filter_func,
  191. .nchildren = nslicesx,
  192. .get_nchildren = NULL,
  193. .get_child_ops = NULL
  194. };
  195. struct starpu_data_filter f2 = {
  196. .filter_func = starpu_block_filter_func,
  197. .nchildren = nslicesy,
  198. .get_nchildren = NULL,
  199. .get_child_ops = NULL
  200. };
  201. /*
  202. * Illustration with nslicex = 4 and nslicey = 2, it is possible to access
  203. * sub-data by using the "starpu_data_get_sub_data" method, which takes a data handle,
  204. * the number of filters to apply, and the indexes for each filters, for
  205. * instance:
  206. *
  207. * A' handle is starpu_data_get_sub_data(A_handle, 1, 1);
  208. * B' handle is starpu_data_get_sub_data(B_handle, 1, 2);
  209. * C' handle is starpu_data_get_sub_data(C_handle, 2, 2, 1);
  210. *
  211. * Note that here we applied 2 filters recursively onto C.
  212. *
  213. * "starpu_data_get_sub_data(C_handle, 1, 3)" would return a handle to the 4th column
  214. * of blocked matrix C for example.
  215. *
  216. * |---|---|---|---|
  217. * | | | B'| | B
  218. * |---|---|---|---|
  219. * 0 1 2 3
  220. * |----| |---|---|---|---|
  221. * | | | | | | |
  222. * | | 0 | | | | |
  223. * |----| |---|---|---|---|
  224. * | A' | | | | C'| |
  225. * | | | | | | |
  226. * |----| |---|---|---|---|
  227. * A C
  228. *
  229. * IMPORTANT: applying filters is equivalent to partitionning a piece of
  230. * data in a hierarchical manner, so that memory consistency is enforced
  231. * for each of the elements independantly. The tasks should therefore NOT
  232. * access inner nodes (eg. one column of C or the whole C) but only the
  233. * leafs of the tree (ie. blocks here). Manipulating inner nodes is only
  234. * possible by disapplying the filters (using starpu_data_unpartition), to
  235. * enforce memory consistency.
  236. */
  237. starpu_data_partition(B_handle, &f);
  238. starpu_data_partition(A_handle, &f2);
  239. /* starpu_data_map_filters is a variable-arity function, the first argument
  240. * is the handle of the data to partition, the second argument is the
  241. * number of filters to apply recursively. Filters are applied in the
  242. * same order as the arguments.
  243. * This would be equivalent to starpu_data_partition(C_handle, &f) and
  244. * then applying f2 on each sub-data (ie. each column of C)
  245. */
  246. starpu_data_map_filters(C_handle, 2, &f, &f2);
  247. }
  248. static struct starpu_perfmodel_t mult_perf_model = {
  249. .type = STARPU_HISTORY_BASED,
  250. .symbol = "mult_perf_model"
  251. };
  252. static void launch_tasks(void)
  253. {
  254. /* partition the work into slices */
  255. unsigned taskx, tasky;
  256. /* the callback decrements this value every time a task is terminated
  257. * and notify the termination of the computation to the application
  258. * when the counter reaches 0 */
  259. taskcounter = nslicesx * nslicesy;
  260. starpu_codelet cl = {
  261. /* we can only execute that kernel on a CPU yet */
  262. .where = STARPU_CPU,
  263. /* CPU implementation of the codelet */
  264. .cpu_func = cpu_mult,
  265. /* the codelet manipulates 3 buffers that are managed by the
  266. * DSM */
  267. .nbuffers = 3,
  268. /* in case the scheduling policy may use performance models */
  269. .model = &mult_perf_model
  270. };
  271. for (taskx = 0; taskx < nslicesx; taskx++)
  272. {
  273. for (tasky = 0; tasky < nslicesy; tasky++)
  274. {
  275. /* C[taskx, tasky] = A[tasky] B[taskx] */
  276. /* by default, starpu_task_create() returns an
  277. * asynchronous task (ie. task->synchronous = 0) */
  278. struct starpu_task *task = starpu_task_create();
  279. /* this task implements codelet "cl" */
  280. task->cl = &cl;
  281. task->callback_func = callback_func;
  282. task->callback_arg = &taskcounter;
  283. /*
  284. * |---|---|---|---|
  285. * | | * | | | B
  286. * |---|---|---|---|
  287. * X
  288. * |----| |---|---|---|---|
  289. * |****| Y | |***| | |
  290. * |****| | |***| | |
  291. * |----| |---|---|---|---|
  292. * | | | | | | |
  293. * | | | | | | |
  294. * |----| |---|---|---|---|
  295. * A C
  296. */
  297. /* there was a single filter applied to matrices A
  298. * (respectively B) so we grab the handle to the chunk
  299. * identified by "tasky" (respectively "taskx). The "1"
  300. * tells StarPU that there is a single argument to the
  301. * variable-arity function starpu_data_get_sub_data */
  302. task->buffers[0].handle = starpu_data_get_sub_data(A_handle, 1, tasky);
  303. task->buffers[0].mode = STARPU_R;
  304. task->buffers[1].handle = starpu_data_get_sub_data(B_handle, 1, taskx);
  305. task->buffers[1].mode = STARPU_R;
  306. /* 2 filters were applied on matrix C, so we give
  307. * starpu_data_get_sub_data 2 arguments. The order of the arguments
  308. * must match the order in which the filters were
  309. * applied.
  310. * NB: starpu_data_get_sub_data(C_handle, 1, k) would have returned
  311. * a handle to the column number k of matrix C.
  312. * NB2: starpu_data_get_sub_data(C_handle, 2, taskx, tasky) is
  313. * equivalent to
  314. * starpu_data_get_sub_data(starpu_data_get_sub_data(C_handle, 1, taskx), 1, tasky)*/
  315. task->buffers[2].handle = starpu_data_get_sub_data(C_handle, 2, taskx, tasky);
  316. task->buffers[2].mode = STARPU_W;
  317. /* this is not a blocking call since task->synchronous = 0 */
  318. starpu_task_submit(task);
  319. }
  320. }
  321. }
  322. int main(__attribute__ ((unused)) int argc,
  323. __attribute__ ((unused)) char **argv)
  324. {
  325. pthread_mutex_init(&mutex, NULL);
  326. pthread_cond_init(&cond, NULL);
  327. /* start the runtime */
  328. starpu_init(NULL);
  329. /* initialize matrices A, B and C and register them to StarPU */
  330. init_problem_data();
  331. /* partition matrices into blocks that can be manipulated by the
  332. * codelets */
  333. partition_mult_data();
  334. /* submit all tasks in an asynchronous fashion */
  335. launch_tasks();
  336. /* the different tasks are asynchronous so we use a callback to get
  337. * notified of the termination of the computation */
  338. pthread_mutex_lock(&mutex);
  339. if (!terminated)
  340. pthread_cond_wait(&cond, &mutex);
  341. pthread_mutex_unlock(&mutex);
  342. /* remove the filters applied by the means of starpu_data_map_filters; now
  343. * it's not possible to manipulate a subset of C using starpu_data_get_sub_data until
  344. * starpu_data_map_filters is called again on C_handle.
  345. * The second argument is the memory node where the different subsets
  346. * should be reassembled, 0 = main memory (RAM) */
  347. starpu_data_unpartition(C_handle, 0);
  348. /* stop monitoring matrix C : after this, it is not possible to pass C
  349. * (or any subset of C) as a codelet input/output. This also implements
  350. * a barrier so that the piece of data is put back into main memory in
  351. * case it was only available on a GPU for instance. */
  352. starpu_data_unregister(C_handle);
  353. starpu_shutdown();
  354. return 0;
  355. }