xgemm.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009, 2010, 2011 Université de Bordeaux 1
 * Copyright (C) 2010 Mehdi Juhoor <mjuhoor@gmail.com>
 * Copyright (C) 2010 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <limits.h>
#include <string.h>
#include <math.h>
#include <sys/types.h>
#include <sys/time.h>

#include <starpu.h>
#include <common/blas.h>

#ifdef STARPU_USE_CUDA
#include <cuda.h>
#include <cublas.h>
#include <starpu_cuda.h>
#endif
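
/* Default problem dimensions and blocking factors; they can all be
 * overridden on the command line (see parse_args() below). Note that TYPE,
 * the CPU_ BLAS wrappers and CUBLAS_GEMM are assumed to be defined by the
 * file that includes this one (e.g. a single- or double-precision front end). */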
static unsigned niter = 100;
static unsigned nslicesx = 4;
static unsigned nslicesy = 4;
static unsigned xdim = 256;
static unsigned ydim = 256;
static unsigned zdim = 64;
static unsigned check = 0;

static TYPE *A, *B, *C;
static starpu_data_handle A_handle, B_handle, C_handle;
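
/* Verify the result: compute C - A*B with a reference CPU GEMM and check
 * that the accumulated absolute error stays below a small threshold. */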
static void check_output(void)
{
	/* compute C = C - AB */
	CPU_GEMM("N", "N", ydim, xdim, zdim, (TYPE)-1.0f, A, ydim, B, zdim, (TYPE)1.0f, C, ydim);

	/* make sure C = 0 */
	TYPE err;
	err = CPU_ASUM(xdim*ydim, C, 1);

	if (err < xdim*ydim*0.001) {
		fprintf(stderr, "Results are OK\n");
	}
	else {
		int max;
		max = CPU_IAMAX(xdim*ydim, C, 1);

		fprintf(stderr, "There were errors ... err = %f\n", err);
		fprintf(stderr, "Max error : %e\n", C[max]);
	}
}
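
/* Allocate the A, B and C buffers in pinned memory when possible (this speeds
 * up transfers to and from CUDA devices), fill A and B with random values and
 * clear C. Data is laid out column by column. */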
static void init_problem_data(void)
{
	unsigned i,j;

	starpu_data_malloc_pinned_if_possible((void **)&A, zdim*ydim*sizeof(TYPE));
	starpu_data_malloc_pinned_if_possible((void **)&B, xdim*zdim*sizeof(TYPE));
	starpu_data_malloc_pinned_if_possible((void **)&C, xdim*ydim*sizeof(TYPE));

	/* fill the A and B matrices */
	for (j=0; j < ydim; j++) {
		for (i=0; i < zdim; i++) {
			A[j+i*ydim] = (TYPE)(starpu_drand48());
		}
	}

	for (j=0; j < zdim; j++) {
		for (i=0; i < xdim; i++) {
			B[j+i*zdim] = (TYPE)(starpu_drand48());
		}
	}

	for (j=0; j < ydim; j++) {
		for (i=0; i < xdim; i++) {
			C[j+i*ydim] = (TYPE)(0);
		}
	}
}
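
/* Register the three matrices with StarPU and partition them: B is split into
 * nslicesx slices, A into nslicesy slices, and both filters are mapped on C so
 * that it becomes an nslicesx x nslicesy grid of blocks, one block per task. */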
static void partition_mult_data(void)
{
	starpu_matrix_data_register(&A_handle, 0, (uintptr_t)A,
		ydim, ydim, zdim, sizeof(TYPE));
	starpu_matrix_data_register(&B_handle, 0, (uintptr_t)B,
		zdim, zdim, xdim, sizeof(TYPE));
	starpu_matrix_data_register(&C_handle, 0, (uintptr_t)C,
		ydim, ydim, xdim, sizeof(TYPE));

	struct starpu_data_filter f;
	memset(&f, 0, sizeof(f));
	f.filter_func = starpu_vertical_block_filter_func;
	f.nchildren = nslicesx;

	struct starpu_data_filter f2;
	memset(&f2, 0, sizeof(f2));
	f2.filter_func = starpu_block_filter_func;
	f2.nchildren = nslicesy;

	starpu_data_partition(B_handle, &f);
	starpu_data_partition(A_handle, &f2);

	starpu_data_map_filters(C_handle, 2, &f, &f2);
}
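
/* Kernel shared by the CPU and CUDA implementations: it multiplies one slice
 * of A by one slice of B into the matching block of C. On CPU it either runs
 * sequentially or, in SPMD mode, splits the work among the members of a
 * combined worker; on CUDA it calls CUBLAS and synchronizes the local stream. */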
static void mult_kernel_common(void *descr[], int type)
{
	TYPE *subA = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);
	TYPE *subB = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);
	TYPE *subC = (TYPE *)STARPU_MATRIX_GET_PTR(descr[2]);

	unsigned nxC = STARPU_MATRIX_GET_NX(descr[2]);
	unsigned nyC = STARPU_MATRIX_GET_NY(descr[2]);
	unsigned nyA = STARPU_MATRIX_GET_NY(descr[0]);

	unsigned ldA = STARPU_MATRIX_GET_LD(descr[0]);
	unsigned ldB = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned ldC = STARPU_MATRIX_GET_LD(descr[2]);

	if (type == STARPU_CPU) {
		int worker_size = starpu_combined_worker_get_size();

		if (worker_size == 1)
		{
			/* Sequential CPU task */
			CPU_GEMM("N", "N", nxC, nyC, nyA, (TYPE)1.0, subA, ldA, subB, ldB, (TYPE)0.0, subC, ldC);
		}
		else {
			/* Parallel CPU task */
			int rank = starpu_combined_worker_get_rank();

			int block_size = (nyC + worker_size - 1)/worker_size;
			int new_nyC = STARPU_MIN(nyC, block_size*(rank+1)) - block_size*rank;

			TYPE *new_subA = &subA[block_size*rank];
			TYPE *new_subC = &subC[block_size*rank];

			CPU_GEMM("N", "N", nxC, new_nyC, nyA, (TYPE)1.0, new_subA, ldA, subB, ldB, (TYPE)0.0, new_subC, ldC);
		}
	}
#ifdef STARPU_USE_CUDA
	else {
		CUBLAS_GEMM('n', 'n', nxC, nyC, nyA, (TYPE)1.0, subA, ldA, subB, ldB,
			    (TYPE)0.0, subC, ldC);
		cudaStreamSynchronize(starpu_cuda_get_local_stream());
	}
#endif
}
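
/* Codelet entry points: thin wrappers that dispatch to the common kernel. */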
#ifdef STARPU_USE_CUDA
static void cublas_mult(void *descr[], __attribute__((unused)) void *arg)
{
	mult_kernel_common(descr, STARPU_CUDA);
}
#endif

static void cpu_mult(void *descr[], __attribute__((unused)) void *arg)
{
	mult_kernel_common(descr, STARPU_CPU);
}
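
/* History-based performance model and the codelet itself: it takes three data
 * buffers (a slice of A, a slice of B, a block of C) and can run on CPU or CUDA. */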
static struct starpu_perfmodel_t starpu_gemm_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = STARPU_GEMM_STR(gemm)
};

static starpu_codelet cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.type = STARPU_SEQ, /* changed to STARPU_SPMD if -spmd is passed */
	.max_parallelism = INT_MAX,
	.cpu_func = cpu_mult,
#ifdef STARPU_USE_CUDA
	.cuda_func = cublas_mult,
#endif
	.nbuffers = 3,
	.model = &starpu_gemm_model
};
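
/* Supported options: -nblocks, -nblocksx, -nblocksy, -x, -y, -z, -iter,
 * -check, and -spmd (switch the codelet to parallel SPMD tasks). */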
static void parse_args(int argc, char **argv)
{
	int i;
	for (i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-nblocks") == 0) {
			char *argptr;
			nslicesx = strtol(argv[++i], &argptr, 10);
			nslicesy = nslicesx;
		}

		if (strcmp(argv[i], "-nblocksx") == 0) {
			char *argptr;
			nslicesx = strtol(argv[++i], &argptr, 10);
		}

		if (strcmp(argv[i], "-nblocksy") == 0) {
			char *argptr;
			nslicesy = strtol(argv[++i], &argptr, 10);
		}

		if (strcmp(argv[i], "-x") == 0) {
			char *argptr;
			xdim = strtol(argv[++i], &argptr, 10);
		}

		if (strcmp(argv[i], "-y") == 0) {
			char *argptr;
			ydim = strtol(argv[++i], &argptr, 10);
		}

		if (strcmp(argv[i], "-z") == 0) {
			char *argptr;
			zdim = strtol(argv[++i], &argptr, 10);
		}

		if (strcmp(argv[i], "-iter") == 0) {
			char *argptr;
			niter = strtol(argv[++i], &argptr, 10);
		}

		if (strcmp(argv[i], "-check") == 0) {
			check = 1;
		}

		if (strcmp(argv[i], "-spmd") == 0) {
			cl.type = STARPU_SPMD;
		}
	}
}
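
/* Driver: initialize StarPU and CUBLAS, set up and partition the data, submit
 * niter rounds of block products, then report the elapsed time and GFlop/s. */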
int main(int argc, char **argv)
{
	struct timeval start;
	struct timeval end;

	parse_args(argc, argv);

	starpu_init(NULL);
	starpu_helper_cublas_init();

	init_problem_data();
	partition_mult_data();

	gettimeofday(&start, NULL);

	unsigned x, y, iter;
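	/* Submit one task per block of C and per iteration: each task reads
	 * slice y of A and slice x of B and updates block (x,y) of C. A
	 * barrier at the end of every iteration waits for all its tasks. */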
	for (iter = 0; iter < niter; iter++)
	{
		for (x = 0; x < nslicesx; x++)
		for (y = 0; y < nslicesy; y++)
		{
			struct starpu_task *task = starpu_task_create();

			task->cl = &cl;

			task->buffers[0].handle = starpu_data_get_sub_data(A_handle, 1, y);
			task->buffers[0].mode = STARPU_R;
			task->buffers[1].handle = starpu_data_get_sub_data(B_handle, 1, x);
			task->buffers[1].mode = STARPU_R;
			task->buffers[2].handle = starpu_data_get_sub_data(C_handle, 2, x, y);
			task->buffers[2].mode = STARPU_RW;

			int ret = starpu_task_submit(task);
			STARPU_ASSERT(!ret);
		}

		starpu_task_wait_for_all();
	}

	gettimeofday(&end, NULL);
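
	/* Elapsed time is measured in microseconds; each iteration performs
	 * 2*xdim*ydim*zdim floating-point operations. */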
	double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	fprintf(stderr, "Time: %2.2f ms\n", timing/1000.0);

	double flops = 2.0*((unsigned long)niter)*((unsigned long)xdim)
			*((unsigned long)ydim)*((unsigned long)zdim);
	fprintf(stderr, "GFlop/s: %.2f\n", flops/timing/1000.0);

	starpu_data_unpartition(C_handle, 0);
	starpu_data_unregister(C_handle);

	if (check)
		check_output();

	starpu_helper_cublas_shutdown();
	starpu_shutdown();

	return 0;
}