cholesky_compil.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2013,2015 Inria
 * Copyright (C) 2009-2017,2019-2020 Université de Bordeaux
 * Copyright (C) 2010-2013,2015-2017 CNRS
 * Copyright (C) 2013 Thibaut Lambert
 * Copyright (C) 2010 Mehdi Juhoor
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This version of the Cholesky factorization can include an
 * externally-compiler-generated loop nest, which makes it possible to
 * experiment with compiler-side optimizations.
 */
/* Note: this uses Fortran ordering, i.e. column-major ordering: elements
 * with consecutive row numbers are consecutive in memory. */
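/* Concretely, with leading dimension ld, element (row i, column j) lives at
 * mat[i + j*ld]; the initialization and check loops below index the buffer
 * this way, with ld == size. */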
#include "cholesky.h"
#include "../sched_ctx_utils/sched_ctx_utils.h"
#include <math.h>

#if defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_MAGMA)
#include "magma.h"
#endif

/*
 * code to bootstrap the factorization
 * and construct the DAG
 */
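/* Callback switching the update codelet (cl22) to STARPU_SPMD, StarPU's
 * parallel-task mode. It is not referenced in this file; presumably it is
 * available to the included generated code or kept from the other cholesky
 * variants. */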
static void callback_turn_spmd_on(void *arg)
{
	(void)arg;
	cl22.type = STARPU_SPMD;
}
static int _cholesky(starpu_data_handle_t dataA, unsigned nblocks)
{
	double start;
	double end;

	unsigned long nelems = starpu_matrix_get_nx(dataA);
	unsigned long nn = nelems/nblocks;

	/* M, N and the lambda_* tunables are intended for the included,
	 * compiler-generated loop nest (see the macros and #include below). */
	int M = nblocks;
	int N = nblocks;
	int lambda_b = starpu_get_env_float_default("CHOLESKY_LAMBDA_B", nblocks);
	int lambda_o_u = starpu_get_env_float_default("CHOLESKY_LAMBDA_O_U", 0);
	int lambda_o_d = starpu_get_env_float_default("CHOLESKY_LAMBDA_O_D", 0);

	unsigned unbound_prio = STARPU_MAX_PRIO == INT_MAX && STARPU_MIN_PRIO == INT_MIN;
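	/* When the scheduler accepts the full int range of priorities
	 * (STARPU_MAX_PRIO == INT_MAX), the task-insertion macros below pass
	 * the raw computed priority through; otherwise they fall back to
	 * STARPU_MAX_PRIO / STARPU_DEFAULT_PRIO. */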
	if (bound_p || bound_lp_p || bound_mps_p)
		starpu_bound_start(bound_deps_p, 0);
	starpu_fxt_start_profiling();

	start = starpu_timing_now();
#define min(x,y) ((x)<(y)?(x):(y))
#define max(x,y) ((x)<(y)?(y):(x))
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
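/* min/max/ceild/floord are the helper macros conventionally emitted by
 * polyhedral loop-nest generators (PLUTO/CLooG-style tools), which is
 * presumably what the included cholesky_compiled.c relies on. */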
#define A(i,j) starpu_data_get_sub_data(dataA, 2, i, j)

#define _POTRF(cl, A, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_MAX_PRIO, \
		STARPU_RW, A, \
		STARPU_FLOPS, (double) FLOPS_SPOTRF(nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)

#define _TRSM(cl, A, B, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_RW, B, \
		STARPU_FLOPS, (double) FLOPS_STRSM(nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)

/* TODO: use real SYRK */
#define _SYRK(cl, A, C, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_R, A, \
		STARPU_RW, C, \
		STARPU_FLOPS, (double) FLOPS_SGEMM(nn,nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)

#define _GEMM(cl, A, B, C, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_R, B, \
		STARPU_RW, C, \
		STARPU_FLOPS, (double) FLOPS_SGEMM(nn,nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)
#define POTRF(A, prio) _POTRF(&cl11, A, prio, "potrf")
#define TRSM(A, B, prio) _TRSM(&cl21, A, B, prio, "trsm")
#define SYRK(A, B, prio) _SYRK(&cl22, A, B, prio, "syrk")
#define GEMM(A, B, C, prio) _GEMM(&cl22, A, B, C, prio, "gemm")

#define POTRF_GPU(A, prio) _POTRF(&cl11_gpu, A, prio, "potrf_gpu")
#define TRSM_GPU(A, B, prio) _TRSM(&cl21_gpu, A, B, prio, "trsm_gpu")
#define SYRK_GPU(A, B, prio) _SYRK(&cl22_gpu, A, B, prio, "syrk_gpu")
#define GEMM_GPU(A, B, C, prio) _GEMM(&cl22_gpu, A, B, C, prio, "gemm_gpu")

#define POTRF_CPU(A, prio) _POTRF(&cl11_cpu, A, prio, "potrf_cpu")
#define TRSM_CPU(A, B, prio) _TRSM(&cl21_cpu, A, B, prio, "trsm_cpu")
#define SYRK_CPU(A, B, prio) _SYRK(&cl22_cpu, A, B, prio, "syrk_cpu")
#define GEMM_CPU(A, B, C, prio) _GEMM(&cl22_cpu, A, B, C, prio, "gemm_cpu")
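/* Three codelet variants per kernel: the plain cl11/cl21/cl22 codelets let
 * the scheduler pick a worker, while the _gpu and _cpu variants are
 * presumably restricted to GPU resp. CPU workers, so the generated loop nest
 * can pin regions of the DAG to a device class. The French names below
 * ("oreille" = ear, "bande" = band) presumably denote such regions of the
 * tiled matrix. */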
#define potrf_oreille_up(k) { POTRF_GPU(A(k,k),(2*N - 2*k)); }
#define potrf_oreille_down(k) { POTRF_GPU(A(k,k),(2*N - 2*k)); }
#define potrf_cpu(k) { POTRF_CPU(A(k,k),(2*N - 2*k)); }
#define potrf_bande(k) { POTRF(A(k,k),(2*N - 2*k)); }
#define trsm_oreille_up(k,m) { TRSM_GPU(A(k,k),A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_oreille_down(k,m) { TRSM_GPU(A(k,k),A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_cpu(k,m) { TRSM_CPU(A(k,k),A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_bande(k,m) { TRSM(A(k,k),A(m,k), (2*nblocks - 2*k - m)); }
#define herk_oreille_up(k,n) { SYRK_GPU(A(n,k),A(n,n), (2*nblocks - 2*k - n)); }
#define herk_oreille_down(k,n) { SYRK_GPU(A(n,k),A(n,n), (2*nblocks - 2*k - n)); }
#define herk_cpu(k,n) { SYRK(A(n,k),A(n,n), (2*nblocks - 2*k - n)); }
#define herk_bande(k,n) { SYRK(A(n,k),A(n,n), (2*nblocks - 2*k - n)); }
#define gemm_oreille_up(k,n,m) { GEMM_GPU(A(m,k),A(n,k),A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_oreille_down(k,n,m) { GEMM_GPU(A(m,k),A(n,k),A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_cpu(k,n,m) { GEMM(A(m,k),A(n,k),A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_bande(k,n,m) { GEMM(A(m,k),A(n,k),A(m,n), (2*nblocks - 2*k - n - m)); }
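/* For reference, a minimal sketch of the kind of loop nest the included
 * cholesky_compiled.c is expected to contain (an illustration under that
 * assumption, not the actual generated code): the canonical right-looking
 * tiled Cholesky, expressed with the task macros defined above.
 *
 *	int k, m, n;
 *	for (k = 0; k < N; k++)
 *	{
 *		POTRF(A(k,k), 2*N - 2*k);
 *		for (m = k+1; m < N; m++)
 *			TRSM(A(k,k), A(m,k), 2*nblocks - 2*k - m);
 *		for (n = k+1; n < N; n++)
 *		{
 *			SYRK(A(n,k), A(n,n), 2*nblocks - 2*k - n);
 *			for (m = n+1; m < N; m++)
 *				GEMM(A(m,k), A(n,k), A(m,n), 2*nblocks - 2*k - n - m);
 *		}
 *	}
 */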
#include "cholesky_compiled.c"

	starpu_task_wait_for_all();

	end = starpu_timing_now();
	starpu_fxt_stop_profiling();

	if (bound_p || bound_lp_p || bound_mps_p)
		starpu_bound_stop();
	double timing = end - start;

	double flop = FLOPS_SPOTRF(nelems);

	if (with_ctxs_p || with_noctxs_p || chole1_p || chole2_p)
		update_sched_ctx_timing_results((flop/timing/1000.0f), (timing/1000000.0f));
	else
	{
		PRINTF("# size\tms\tGFlops");
		if (bound_p)
			PRINTF("\tTms\tTGFlops");
		PRINTF("\n");

		PRINTF("%lu\t%.0f\t%.1f", nelems, timing/1000, (flop/timing/1000.0f));
		if (bound_lp_p)
		{
			FILE *f = fopen("cholesky.lp", "w");
			starpu_bound_print_lp(f);
			fclose(f);
		}
		if (bound_mps_p)
		{
			FILE *f = fopen("cholesky.mps", "w");
			starpu_bound_print_mps(f);
			fclose(f);
		}
		if (bound_p)
		{
			double res;
			starpu_bound_compute(&res, NULL, 0);
			PRINTF("\t%.0f\t%.1f", res, (flop/res/1000000.0f));
		}
		PRINTF("\n");
	}

	return 0;
}
static int cholesky(float *matA, unsigned size, unsigned ld, unsigned nblocks)
{
	starpu_data_handle_t dataA;
	unsigned m, n;

	/* monitor and partition the A matrix into blocks:
	 * a block is identified by two unsigned indices (m,n) */
	starpu_matrix_data_register(&dataA, STARPU_MAIN_RAM, (uintptr_t)matA, ld, size, size, sizeof(float));
	/* Split into blocks of complete rows first */
	struct starpu_data_filter f =
	{
		.filter_func = starpu_matrix_filter_block,
		.nchildren = nblocks
	};

	/* Then split rows into tiles */
	struct starpu_data_filter f2 =
	{
		/* Note: "vertical" is meant for row-major storage; we are using
		 * column-major storage here. */
		.filter_func = starpu_matrix_filter_vertical_block,
		.nchildren = nblocks
	};
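	/* Mapping both filters below turns dataA into an nblocks x nblocks grid
	 * of (size/nblocks)-square tiles; the A(i,j) macro above retrieves tile
	 * (i,j) via starpu_data_get_sub_data(). */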
	starpu_data_map_filters(dataA, 2, &f, &f2);

	for (m = 0; m < nblocks; m++)
		for (n = 0; n < nblocks; n++)
		{
			starpu_data_handle_t data = starpu_data_get_sub_data(dataA, 2, m, n);
			starpu_data_set_coordinates(data, 2, m, n);
		}

	int ret = _cholesky(dataA, nblocks);

	starpu_data_unpartition(dataA, STARPU_MAIN_RAM);
	starpu_data_unregister(dataA);

	return ret;
}
static void execute_cholesky(unsigned size, unsigned nblocks)
{
	float *mat = NULL;

#ifndef STARPU_SIMGRID
	unsigned m, n;

	starpu_malloc_flags((void **)&mat, (size_t)size*size*sizeof(float), STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
	for (n = 0; n < size; n++)
	{
		for (m = 0; m < size; m++)
		{
			mat[m + n*size] = (1.0f/(1.0f+m+n)) + ((m == n)?1.0f*size:0.0f);
			/* mat[m + n*size] = ((m == n)?1.0f*size:0.0f); */
		}
	}

/* #define PRINT_OUTPUT */
#ifdef PRINT_OUTPUT
	FPRINTF(stdout, "Input :\n");
	for (m = 0; m < size; m++)
	{
		for (n = 0; n < size; n++)
		{
			if (n <= m)
			{
				FPRINTF(stdout, "%2.2f\t", mat[m + n*size]);
			}
			else
			{
				FPRINTF(stdout, ".\t");
			}
		}
		FPRINTF(stdout, "\n");
	}
#endif
#endif
	cholesky(mat, size, size, nblocks);

#ifndef STARPU_SIMGRID
#ifdef PRINT_OUTPUT
	FPRINTF(stdout, "Results :\n");
	for (m = 0; m < size; m++)
	{
		for (n = 0; n < size; n++)
		{
			if (n <= m)
			{
				FPRINTF(stdout, "%2.2f\t", mat[m + n*size]);
			}
			else
			{
				FPRINTF(stdout, ".\t");
			}
		}
		FPRINTF(stdout, "\n");
	}
#endif
	if (check_p)
	{
		FPRINTF(stderr, "compute explicit LLt ...\n");
		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n > m)
				{
					/* Clear the strict upper triangle so that
					 * mat holds only the L factor */
					mat[m + n*size] = 0.0f;
				}
			}
		}
		float *test_mat = malloc(size*size*sizeof(float));
		STARPU_ASSERT(test_mat);

		STARPU_SSYRK("L", "N", size, size, 1.0f,
			     mat, size, 0.0f, test_mat, size);

		FPRINTF(stderr, "comparing results ...\n");
#ifdef PRINT_OUTPUT
		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n <= m)
				{
					FPRINTF(stdout, "%2.2f\t", test_mat[m + n*size]);
				}
				else
				{
					FPRINTF(stdout, ".\t");
				}
			}
			FPRINTF(stdout, "\n");
		}
#endif

		/* Compare L*Lt against the original input, entry by entry */
		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n <= m)
				{
					float orig = (1.0f/(1.0f+m+n)) + ((m == n)?1.0f*size:0.0f);
					float err = fabsf(test_mat[m + n*size] - orig) / orig;
					if (err > 0.0001)
					{
						FPRINTF(stderr, "Error[%u, %u] --> %2.6f != %2.6f (err %2.6f)\n", m, n, test_mat[m + n*size], orig, err);
						assert(0);
					}
				}
			}
		}
		free(test_mat);
	}
	starpu_free_flags(mat, (size_t)size*size*sizeof(float), STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
#endif
}
int main(int argc, char **argv)
{
	/* create a simple symmetric positive-definite matrix example
	 *
	 * Hilbert matrix: h(i,j) = 1/(i+j+1)
	 */

#ifdef STARPU_HAVE_MAGMA
	magma_init();
#endif

	int ret;
	ret = starpu_init(NULL);
	if (ret == -ENODEV) return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	//starpu_fxt_stop_profiling();

	init_sizes();
	parse_args(argc, argv);

	if (with_ctxs_p || with_noctxs_p || chole1_p || chole2_p)
		parse_args_ctx(argc, argv);

#ifdef STARPU_USE_CUDA
	initialize_chol_model(&chol_model_11, "chol_model_11", cpu_chol_task_11_cost, cuda_chol_task_11_cost);
	initialize_chol_model(&chol_model_21, "chol_model_21", cpu_chol_task_21_cost, cuda_chol_task_21_cost);
	initialize_chol_model(&chol_model_22, "chol_model_22", cpu_chol_task_22_cost, cuda_chol_task_22_cost);
#else
	initialize_chol_model(&chol_model_11, "chol_model_11", cpu_chol_task_11_cost, NULL);
	initialize_chol_model(&chol_model_21, "chol_model_21", cpu_chol_task_21_cost, NULL);
	initialize_chol_model(&chol_model_22, "chol_model_22", cpu_chol_task_22_cost, NULL);
#endif

	starpu_cublas_init();

	if (with_ctxs_p)
	{
		construct_contexts();
		start_2benchs(execute_cholesky);
	}
	else if (with_noctxs_p)
		start_2benchs(execute_cholesky);
	else if (chole1_p)
		start_1stbench(execute_cholesky);
	else if (chole2_p)
		start_2ndbench(execute_cholesky);
	else
		execute_cholesky(size_p, nblocks_p);

	starpu_cublas_shutdown();
	starpu_shutdown();

	return 0;
}