mult_impl.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009, 2010, 2011  Université de Bordeaux 1
 * Copyright (C) 2010, 2011  Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
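
/*
 * This example computes C = A * B with single-precision matrices stored in
 * column-major (FORTRAN) order.  A, B and C are split into blocks with
 * StarPU data filters, and one task is submitted per block of C.  The
 * codelet declares two CPU implementations of the block product so that
 * the scheduler can choose between them.
 *
 * A typical build line (assuming the pkg-config package is named
 * "libstarpu"; the name may differ between StarPU releases):
 *   gcc mult_impl.c $(pkg-config --cflags --libs libstarpu) -o mult_impl
 */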

#include <string.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/time.h>
#include <pthread.h>
#include <signal.h>
#include <starpu.h>

static float *A, *B, *C;
static starpu_data_handle A_handle, B_handle, C_handle;

static unsigned nslicesx = 4;
static unsigned nslicesy = 4;
static unsigned xdim = 1024;
static unsigned ydim = 1024;
static unsigned zdim = 512;
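
/* Rough analytical cost estimate for one block product C = A * B, attached
 * to the performance model declared further below.  The constants appear to
 * be hand-tuned; since the model is history-based, measured execution times
 * are recorded as well. */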
double mult_gemm_cost(starpu_buffer_descr *descr)
{
    /* C = A * B */
    uint32_t nxC, nyC, nxA;

    nxC = starpu_matrix_get_nx(descr[2].handle);
    nyC = starpu_matrix_get_ny(descr[2].handle);
    nxA = starpu_matrix_get_nx(descr[0].handle);

    /* printf("nxC %d nyC %d nxA %d\n", nxC, nyC, nxA); */
    double cost = ((double)nxC)*((double)nyC)*((double)nxA/1000.0f/4.11f);
    printf("cost %e\n", cost);

    return cost;
}
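
/* CPU kernel: naive triple-loop product of one block of C, using the
 * column-major layout (outer loop over the columns of C). */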
static void cpu_mult(void *descr[], __attribute__((unused)) void *arg)
{
    float *subA, *subB, *subC;
    uint32_t nxC, nyC, nyA;
    uint32_t ldA, ldB, ldC;

    printf("On application: Hello, this is kernel cpu_mult\n\n");

    /* STARPU_MATRIX_GET_PTR gives a pointer to the first element of the
     * local copy */
    subA = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
    subB = (float *)STARPU_MATRIX_GET_PTR(descr[1]);
    subC = (float *)STARPU_MATRIX_GET_PTR(descr[2]);

    /* STARPU_MATRIX_GET_NX is the number of rows (consecutive elements) and
     * STARPU_MATRIX_GET_NY is the number of columns, which are separated by
     * STARPU_MATRIX_GET_LD elements (ld stands for leading dimension).
     * NB: in case some filters were used, the leading dimension is not
     * guaranteed to be the same in main memory (on the original matrix)
     * and on the accelerator! */
    nxC = STARPU_MATRIX_GET_NX(descr[2]);
    nyC = STARPU_MATRIX_GET_NY(descr[2]);
    nyA = STARPU_MATRIX_GET_NY(descr[0]);

    ldA = STARPU_MATRIX_GET_LD(descr[0]);
    ldB = STARPU_MATRIX_GET_LD(descr[1]);
    ldC = STARPU_MATRIX_GET_LD(descr[2]);

    /* we assume a FORTRAN ordering! */
    unsigned i, j, k;
    for (i = 0; i < nyC; i++)
    {
        for (j = 0; j < nxC; j++)
        {
            float sum = 0.0;
            for (k = 0; k < nyA; k++)
            {
                sum += subA[j+k*ldA]*subB[k+i*ldB];
            }
            subC[j + i*ldC] = sum;
        }
    }
}
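
/* Second CPU implementation of the same block product: identical to
 * cpu_mult except that the two outer loops are swapped (outer loop over
 * the rows of C here).  Declaring several implementations in the codelet
 * lets StarPU calibrate them and pick one, depending on the scheduler and
 * the performance model. */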
static void cpu_mult_2(void *descr[], __attribute__((unused)) void *arg)
{
    float *subA, *subB, *subC;
    uint32_t nxC, nyC, nyA;
    uint32_t ldA, ldB, ldC;

    printf("On application: this is kernel cpu_mult_2\n\n");

    /* pointers to the local copies of the three blocks */
    subA = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
    subB = (float *)STARPU_MATRIX_GET_PTR(descr[1]);
    subC = (float *)STARPU_MATRIX_GET_PTR(descr[2]);

    nxC = STARPU_MATRIX_GET_NX(descr[2]);
    nyC = STARPU_MATRIX_GET_NY(descr[2]);
    nyA = STARPU_MATRIX_GET_NY(descr[0]);

    ldA = STARPU_MATRIX_GET_LD(descr[0]);
    ldB = STARPU_MATRIX_GET_LD(descr[1]);
    ldC = STARPU_MATRIX_GET_LD(descr[2]);

    /* we assume a FORTRAN ordering! */
    unsigned i, j, k;
    for (j = 0; j < nxC; j++)
    {
        for (i = 0; i < nyC; i++)
        {
            float sum = 0.0;
            for (k = 0; k < nyA; k++)
            {
                sum += subA[j+k*ldA]*subB[k+i*ldB];
            }
            subC[j + i*ldC] = sum;
        }
    }
}
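
/* Matrix shapes (column-major storage):
 *   A is ydim x zdim with leading dimension ydim,
 *   B is zdim x xdim with leading dimension zdim,
 *   C is ydim x xdim with leading dimension ydim,
 * so that C = A * B is well defined. */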
static void init_problem_data(void)
{
    unsigned i, j;

    /* we initialize matrices A, B and C in the usual way */
    A = malloc(zdim*ydim*sizeof(float));
    B = malloc(xdim*zdim*sizeof(float));
    C = malloc(xdim*ydim*sizeof(float));

    /* fill the A and B matrices with random values, and clear C */
    starpu_srand48(2009);
    for (j = 0; j < ydim; j++) {
        for (i = 0; i < zdim; i++) {
            A[j+i*ydim] = (float)(starpu_drand48());
        }
    }

    for (j = 0; j < zdim; j++) {
        for (i = 0; i < xdim; i++) {
            B[j+i*zdim] = (float)(starpu_drand48());
        }
    }

    for (j = 0; j < ydim; j++) {
        for (i = 0; i < xdim; i++) {
            C[j+i*ydim] = 0.0f;
        }
    }
}

static void partition_mult_data(void)
{
    /* note that we assume a FORTRAN ordering here! */
    starpu_matrix_data_register(&A_handle, 0, (uintptr_t)A,
        ydim, ydim, zdim, sizeof(float));
    starpu_matrix_data_register(&B_handle, 0, (uintptr_t)B,
        zdim, zdim, xdim, sizeof(float));
    starpu_matrix_data_register(&C_handle, 0, (uintptr_t)C,
        ydim, ydim, xdim, sizeof(float));

    /* A filter is a method to partition a piece of data into disjoint
     * chunks.  It is described by means of the "struct starpu_data_filter"
     * structure, which contains a function that is applied to a data handle
     * to partition it into smaller chunks, and an argument that is passed to
     * that function (e.g. the number of blocks to create here).
     */
    struct starpu_data_filter vert = {
        .filter_func = starpu_vertical_block_filter_func,
        .nchildren = nslicesx,
        .get_nchildren = NULL,
        .get_child_ops = NULL
    };

    struct starpu_data_filter horiz = {
        .filter_func = starpu_block_filter_func,
        .nchildren = nslicesy,
        .get_nchildren = NULL,
        .get_child_ops = NULL
    };

    /*
     * Illustration with nslicesx = 4 and nslicesy = 2: it is possible to
     * access sub-data by using the "starpu_data_get_sub_data" method, which
     * takes a data handle, the number of filters to apply, and the indices
     * for each filter, for instance:
     *
     *   A' handle is starpu_data_get_sub_data(A_handle, 1, 1);
     *   B' handle is starpu_data_get_sub_data(B_handle, 1, 2);
     *   C' handle is starpu_data_get_sub_data(C_handle, 2, 2, 1);
     *
     * Note that here we applied 2 filters recursively onto C.
     *
     * "starpu_data_get_sub_data(C_handle, 1, 3)" would return a handle to
     * the 4th column of the blocked matrix C, for example.
     *
     *           |---|---|---|---|
     *           |   |   | B'|   |  B
     *           |---|---|---|---|
     *             0   1   2   3
     *
     *  |----|   |---|---|---|---|
     *  |    |   |   |   |   |   |
     *  |    | 0 |   |   |   |   |
     *  |----|   |---|---|---|---|
     *  | A' |   |   |   | C'|   |
     *  |    |   |   |   |   |   |
     *  |----|   |---|---|---|---|
     *     A               C
     *
     * IMPORTANT: applying filters is equivalent to partitioning a piece of
     * data in a hierarchical manner, so that memory consistency is enforced
     * for each of the elements independently.  The tasks should therefore
     * NOT access inner nodes (e.g. one column of C or the whole C) but only
     * the leaves of the tree (i.e. the blocks here).  Manipulating inner
     * nodes is only possible by unapplying the filters (using
     * starpu_data_unpartition), to enforce memory consistency.
     */
    starpu_data_partition(B_handle, &vert);
    starpu_data_partition(A_handle, &horiz);

    /* starpu_data_map_filters is a variable-arity function: the first
     * argument is the handle of the data to partition, the second argument
     * is the number of filters to apply recursively.  Filters are applied
     * in the same order as the arguments.
     * This is equivalent to starpu_data_partition(C_handle, &vert) and
     * then applying horiz on each sub-data (i.e. each column of C).
     */
    starpu_data_map_filters(C_handle, 2, &vert, &horiz);
}
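
/* Performance model associated with the codelet: a history-based model
 * identified by the symbol "mult_perf_model", with the analytical cost
 * function defined above also filled in. */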
static struct starpu_perfmodel_t starpu_dgemm_model_common = {
    .cost_model = mult_gemm_cost,
    .type = STARPU_HISTORY_BASED, /* could also be STARPU_COMMON or STARPU_PER_ARCH */
    .symbol = "mult_perf_model"
};

/*
static struct starpu_perfmodel_t mult_perf_model = {
    .type = STARPU_HISTORY_BASED,
    .symbol = "mult_perf_model"
};
*/
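
/* Runtime configuration passed to starpu_init(): use the HEFT scheduling
 * policy, enable performance-model calibration, and restrict StarPU to
 * 4 CPU workers. */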
struct starpu_conf conf = {
    .sched_policy_name = "heft",
    .calibrate = 1,
    .ncpus = 4
};

static starpu_codelet cl = {
    /* this kernel only has CPU implementations so far */
    .where = STARPU_CPU,
    /* declare that several CPU implementations of the codelet are provided */
    .cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS,
    .cpu_funcs = {cpu_mult, cpu_mult_2},
    /* the codelet manipulates 3 buffers that are managed by the DSM */
    .nbuffers = 3,
    /* performance model, in case the scheduling policy uses one */
    .model = &starpu_dgemm_model_common
};
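
/* Submit one task per block of C: nslicesx * nslicesy tasks in total, each
 * reading one block-row of A and one block-column of B and writing one
 * block of C. */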
static void launch_tasks(void)
{
    /* partition the work into slices */
    unsigned taskx, tasky;

    for (taskx = 0; taskx < nslicesx; taskx++)
    {
        for (tasky = 0; tasky < nslicesy; tasky++)
        {
            /* C[taskx, tasky] = A[tasky] B[taskx] */

            /* by default, starpu_task_create() returns an
             * asynchronous task (i.e. task->synchronous = 0) */
            struct starpu_task *task = starpu_task_create();

            /* this task implements codelet "cl" */
            task->cl = &cl;

            /*
             *           |---|---|---|---|
             *           |   | * |   |   | B
             *           |---|---|---|---|
             *                 X
             *  |----|   |---|---|---|---|
             *  |****| Y |   |***|   |   |
             *  |****|   |   |***|   |   |
             *  |----|   |---|---|---|---|
             *  |    |   |   |   |   |   |
             *  |    |   |   |   |   |   |
             *  |----|   |---|---|---|---|
             *     A               C
             */

            /* a single filter was applied to matrix A (respectively B),
             * so we grab the handle to the chunk identified by "tasky"
             * (respectively "taskx"). The "1" tells StarPU that there is
             * a single argument to the variable-arity function
             * starpu_data_get_sub_data */
            task->buffers[0].handle = starpu_data_get_sub_data(A_handle, 1, tasky);
            task->buffers[0].mode = STARPU_R;
            task->buffers[1].handle = starpu_data_get_sub_data(B_handle, 1, taskx);
            task->buffers[1].mode = STARPU_R;

            /* 2 filters were applied on matrix C, so we give
             * starpu_data_get_sub_data 2 arguments. The order of the
             * arguments must match the order in which the filters were
             * applied.
             * NB: starpu_data_get_sub_data(C_handle, 1, k) would have
             * returned a handle to the column number k of matrix C.
             * NB2: starpu_data_get_sub_data(C_handle, 2, taskx, tasky) is
             * equivalent to
             * starpu_data_get_sub_data(starpu_data_get_sub_data(C_handle, 1, taskx), 1, tasky) */
            task->buffers[2].handle = starpu_data_get_sub_data(C_handle, 2, taskx, tasky);
            task->buffers[2].mode = STARPU_W;

            /* this is not a blocking call since task->synchronous = 0 */
            int submit_ret = starpu_task_submit(task);
            printf("task submitted (return code %d)\n", submit_ret);
        }
    }
}

int main(void)
{
    /* start the runtime */
    starpu_init(&conf);

    /* initialize matrices A, B and C and register them to StarPU */
    init_problem_data();

    /* partition matrices into blocks that can be manipulated by the
     * codelets */
    partition_mult_data();

    /* submit all tasks in an asynchronous fashion */
    launch_tasks();

    /* wait for termination */
    starpu_task_wait_for_all();

    /* remove the filters applied by means of starpu_data_map_filters; it is
     * then no longer possible to manipulate a subset of C with
     * starpu_data_get_sub_data until starpu_data_map_filters is called again
     * on C_handle.
     * The second argument is the memory node where the different subsets
     * should be reassembled, 0 = main memory (RAM) */
    starpu_data_unpartition(C_handle, 0);

    /* stop monitoring matrix C: after this, it is not possible to pass C
     * (or any subset of C) as a codelet input/output. This also implements
     * a barrier so that the piece of data is put back into main memory in
     * case it was only available on a GPU, for instance. */
    starpu_data_unregister(C_handle);

    starpu_shutdown();

    return 0;
}