cholesky_compil.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2010 Mehdi Juhoor
 * Copyright (C) 2013 Thibaut Lambert
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This version of the Cholesky factorization can include an externally
 * generated loop nest (produced by a compiler), which makes it possible
 * to experiment with compiler-side optimizations.
 */
/* Note: this uses Fortran ordering, i.e. column-major ordering, i.e.
 * elements with consecutive row numbers are consecutive in memory. */
#include "cholesky.h"
#include "../sched_ctx_utils/sched_ctx_utils.h"
#include <math.h>
#if defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_MAGMA)
#include "magma.h"
#endif
/*
 * Code to bootstrap the factorization
 * and construct the DAG
 */
static void callback_turn_spmd_on(void *arg)
{
	(void)arg;
	cl22.type = STARPU_SPMD;
}
static int _cholesky(starpu_data_handle_t dataA, unsigned nblocks)
{
	double start;
	double end;
	unsigned long nelems = starpu_matrix_get_nx(dataA);
	unsigned long nn = nelems/nblocks;
	int M = nblocks;
	int N = nblocks;
	int lambda_b = starpu_get_env_float_default("CHOLESKY_LAMBDA_B", nblocks);
	int lambda_o_u = starpu_get_env_float_default("CHOLESKY_LAMBDA_O_U", 0);
	int lambda_o_d = starpu_get_env_float_default("CHOLESKY_LAMBDA_O_D", 0);
	unsigned unbound_prio = STARPU_MAX_PRIO == INT_MAX && STARPU_MIN_PRIO == INT_MIN;
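	/* unbound_prio is set when the scheduler accepts arbitrary integer
	 * priorities (min/max are INT_MIN/INT_MAX); the task-insertion macros
	 * below then pass a computed priority instead of the predefined levels.
	 * M, N and the CHOLESKY_LAMBDA_* knobs are not referenced in this file
	 * itself; they are presumably consumed by the generated loop nest
	 * included further down. */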
	if (bound_p || bound_lp_p || bound_mps_p)
		starpu_bound_start(bound_deps_p, 0);
	starpu_fxt_start_profiling();
	start = starpu_timing_now();
#define min(x,y) ((x)<(y)?(x):(y))
#define max(x,y) ((x)<(y)?(y):(x))
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define A(i,j) starpu_data_get_sub_data(dataA, 2, i, j)
#define _POTRF(cl, A, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_MAX_PRIO, \
		STARPU_RW, A, \
		STARPU_FLOPS, (double) FLOPS_SPOTRF(nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)
#define _TRSM(cl, A, B, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_RW, B, \
		STARPU_FLOPS, (double) FLOPS_STRSM(nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)
/* TODO: use a real SYRK kernel; for now the GEMM codelet is reused,
 * with A passed twice as a read-only input. */
#define _SYRK(cl, A, C, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_R, A, \
		STARPU_RW, C, \
		STARPU_FLOPS, (double) FLOPS_SGEMM(nn,nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)
#define _GEMM(cl, A, B, C, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_R, B, \
		STARPU_RW, C, \
		STARPU_FLOPS, (double) FLOPS_SGEMM(nn,nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)
#define POTRF(A, prio) _POTRF(&cl11, A, prio, "potrf")
#define TRSM(A, B, prio) _TRSM(&cl21, A, B, prio, "trsm")
#define SYRK(A, B, prio) _SYRK(&cl22, A, B, prio, "syrk")
#define GEMM(A, B, C, prio) _GEMM(&cl22, A, B, C, prio, "gemm")
#define POTRF_GPU(A, prio) _POTRF(&cl11_gpu, A, prio, "potrf_gpu")
#define TRSM_GPU(A, B, prio) _TRSM(&cl21_gpu, A, B, prio, "trsm_gpu")
#define SYRK_GPU(A, B, prio) _SYRK(&cl22_gpu, A, B, prio, "syrk_gpu")
#define GEMM_GPU(A, B, C, prio) _GEMM(&cl22_gpu, A, B, C, prio, "gemm_gpu")
#define POTRF_CPU(A, prio) _POTRF(&cl11_cpu, A, prio, "potrf_cpu")
#define TRSM_CPU(A, B, prio) _TRSM(&cl21_cpu, A, B, prio, "trsm_cpu")
#define SYRK_CPU(A, B, prio) _SYRK(&cl22_cpu, A, B, prio, "syrk_cpu")
#define GEMM_CPU(A, B, C, prio) _GEMM(&cl22_cpu, A, B, C, prio, "gemm_cpu")
#define potrf_oreille_up(k) { POTRF_GPU(A(k,k),(2*N - 2*k)); }
#define potrf_oreille_down(k) { POTRF_GPU(A(k,k),(2*N - 2*k)); }
#define potrf_cpu(k) { POTRF_CPU(A(k,k),(2*N - 2*k)); }
#define potrf_bande(k) { POTRF(A(k,k),(2*N - 2*k)); }
#define trsm_oreille_up(k,m) { TRSM_GPU(A(k,k),A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_oreille_down(k,m) { TRSM_GPU(A(k,k),A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_cpu(k,m) { TRSM_CPU(A(k,k),A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_bande(k,m) { TRSM(A(k,k),A(m,k), (2*nblocks - 2*k - m)); }
#define herk_oreille_up(k,n) { SYRK_GPU(A(n,k),A(n,n), (2*nblocks - 2*k - n)); }
#define herk_oreille_down(k,n) { SYRK_GPU(A(n,k),A(n,n), (2*nblocks - 2*k - n)); }
#define herk_cpu(k,n) { SYRK(A(n,k),A(n,n), (2*nblocks - 2*k - n)); }
#define herk_bande(k,n) { SYRK(A(n,k),A(n,n), (2*nblocks - 2*k - n)); }
#define gemm_oreille_up(k,n,m) { GEMM_GPU(A(m,k),A(n,k),A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_oreille_down(k,n,m) { GEMM_GPU(A(m,k),A(n,k),A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_cpu(k,n,m) { GEMM(A(m,k),A(n,k),A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_bande(k,n,m) { GEMM(A(m,k),A(n,k),A(m,n), (2*nblocks - 2*k - n - m)); }
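/* The file included below is produced externally and is expected to expand
 * to a loop nest over the macros above (the *_oreille_* variants target the
 * GPU codelets, the *_cpu variants the CPU codelets, and *_bande the generic
 * ones, leaving the choice to the scheduler). As a hypothetical sketch, a
 * plain right-looking tile Cholesky expressed with the generic macros would
 * look like:
 *
 *	for (k = 0; k < N; k++)
 *	{
 *		potrf_bande(k);
 *		for (m = k+1; m < N; m++)
 *			trsm_bande(k, m);
 *		for (n = k+1; n < N; n++)
 *		{
 *			herk_bande(k, n);
 *			for (m = n+1; m < N; m++)
 *				gemm_bande(k, n, m);
 *		}
 *	}
 */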
#include "cholesky_compiled.c"
	starpu_task_wait_for_all();
	end = starpu_timing_now();
	starpu_fxt_stop_profiling();
	if (bound_p || bound_lp_p || bound_mps_p)
		starpu_bound_stop();
	double timing = end - start;
	double flop = FLOPS_SPOTRF(nelems);
	if (with_ctxs_p || with_noctxs_p || chole1_p || chole2_p)
		update_sched_ctx_timing_results((flop/timing/1000.0f), (timing/1000000.0f));
	else
	{
		PRINTF("# size\tms\tGFlops");
		if (bound_p)
			PRINTF("\tTms\tTGFlops");
		PRINTF("\n");
		PRINTF("%lu\t%.0f\t%.1f", nelems, timing/1000, (flop/timing/1000.0f));
		if (bound_lp_p)
		{
			FILE *f = fopen("cholesky.lp", "w");
			starpu_bound_print_lp(f);
			fclose(f);
		}
		if (bound_mps_p)
		{
			FILE *f = fopen("cholesky.mps", "w");
			starpu_bound_print_mps(f);
			fclose(f);
		}
		if (bound_p)
		{
			double res;
			starpu_bound_compute(&res, NULL, 0);
			PRINTF("\t%.0f\t%.1f", res, (flop/res/1000000.0f));
		}
		PRINTF("\n");
	}
	return 0;
}
static int cholesky(float *matA, unsigned size, unsigned ld, unsigned nblocks)
{
	starpu_data_handle_t dataA;
	unsigned m, n;
	/* Register and partition the A matrix into blocks:
	 * a block is identified by two unsigned indices (m,n). */
	starpu_matrix_data_register(&dataA, STARPU_MAIN_RAM, (uintptr_t)matA, ld, size, size, sizeof(float));
	/* Split into blocks of complete rows first */
	struct starpu_data_filter f =
	{
		.filter_func = starpu_matrix_filter_block,
		.nchildren = nblocks
	};
	/* Then split rows into tiles */
	struct starpu_data_filter f2 =
	{
		/* Note: "vertical" refers to row-major layout; we are using column-major here. */
		.filter_func = starpu_matrix_filter_vertical_block,
		.nchildren = nblocks
	};
	starpu_data_map_filters(dataA, 2, &f, &f2);
	for (m = 0; m < nblocks; m++)
		for (n = 0; n < nblocks; n++)
		{
			starpu_data_handle_t data = starpu_data_get_sub_data(dataA, 2, m, n);
			starpu_data_set_coordinates(data, 2, m, n);
		}
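	/* After the two filter levels, starpu_data_get_sub_data(dataA, 2, m, n)
	 * (the A(m,n) macro above) resolves to one nn x nn tile; the coordinates
	 * are a hint used by tracing/visualization tools. */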
	int ret = _cholesky(dataA, nblocks);
	starpu_data_unpartition(dataA, STARPU_MAIN_RAM);
	starpu_data_unregister(dataA);
	return ret;
}
static void execute_cholesky(unsigned size, unsigned nblocks)
{
	float *mat = NULL;
#ifndef STARPU_SIMGRID
	unsigned m, n;
	starpu_malloc_flags((void **)&mat, (size_t)size*size*sizeof(float), STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
	for (n = 0; n < size; n++)
	{
		for (m = 0; m < size; m++)
		{
			mat[m + n*size] = (1.0f/(1.0f+m+n)) + ((m == n)?1.0f*size:0.0f);
			/* mat[m + n*size] = ((m == n)?1.0f*size:0.0f); */
		}
	}
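	/* The matrix filled above is the Hilbert matrix h(m,n) = 1/(m+n+1) with
	 * `size` added on the diagonal: it is symmetric, and the diagonal boost
	 * makes it strongly diagonally dominant, hence comfortably positive
	 * definite for a single-precision factorization. */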
/* #define PRINT_OUTPUT */
#ifdef PRINT_OUTPUT
	FPRINTF(stdout, "Input :\n");
	for (m = 0; m < size; m++)
	{
		for (n = 0; n < size; n++)
		{
			if (n <= m)
			{
				FPRINTF(stdout, "%2.2f\t", mat[m + n*size]);
			}
			else
			{
				FPRINTF(stdout, ".\t");
			}
		}
		FPRINTF(stdout, "\n");
	}
#endif
#endif
	cholesky(mat, size, size, nblocks);
#ifndef STARPU_SIMGRID
#ifdef PRINT_OUTPUT
	FPRINTF(stdout, "Results :\n");
	for (m = 0; m < size; m++)
	{
		for (n = 0; n < size; n++)
		{
			if (n <= m)
			{
				FPRINTF(stdout, "%2.2f\t", mat[m + n*size]);
			}
			else
			{
				FPRINTF(stdout, ".\t");
			}
		}
		FPRINTF(stdout, "\n");
	}
#endif
	if (check_p)
	{
		FPRINTF(stderr, "compute explicit LLt ...\n");
		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n > m)
				{
					/* clear the strict upper triangle, so mat now holds L */
					mat[m + n*size] = 0.0f;
				}
			}
		}
		float *test_mat = malloc(size*size*sizeof(float));
		STARPU_ASSERT(test_mat);
		STARPU_SSYRK("L", "N", size, size, 1.0f,
			     mat, size, 0.0f, test_mat, size);
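		/* test_mat now holds the lower triangle of mat * mat^T; since mat
		 * is L after the zeroing above, a correct factorization must
		 * reproduce the original input matrix. */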
		FPRINTF(stderr, "comparing results ...\n");
#ifdef PRINT_OUTPUT
		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n <= m)
				{
					FPRINTF(stdout, "%2.2f\t", test_mat[m + n*size]);
				}
				else
				{
					FPRINTF(stdout, ".\t");
				}
			}
			FPRINTF(stdout, "\n");
		}
#endif
		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n <= m)
				{
					float orig = (1.0f/(1.0f+m+n)) + ((m == n)?1.0f*size:0.0f);
					float err = fabsf(test_mat[m + n*size] - orig) / orig;
					if (err > 0.0001)
					{
						FPRINTF(stderr, "Error[%u, %u] --> %2.6f != %2.6f (err %2.6f)\n", m, n, test_mat[m + n*size], orig, err);
						assert(0);
					}
				}
			}
		}
		free(test_mat);
	}
	starpu_free_flags(mat, (size_t)size*size*sizeof(float), STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
#endif
}
int main(int argc, char **argv)
{
	/* Create a simple symmetric positive definite matrix example:
	 *
	 * Hilbert matrix: h(i,j) = 1/(i+j+1)
	 */
#ifdef STARPU_HAVE_MAGMA
	magma_init();
#endif
	int ret;
	ret = starpu_init(NULL);
	if (ret == -ENODEV) return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	//starpu_fxt_stop_profiling();
	init_sizes();
	parse_args(argc, argv);
	if (with_ctxs_p || with_noctxs_p || chole1_p || chole2_p)
		parse_args_ctx(argc, argv);
#ifdef STARPU_USE_CUDA
	initialize_chol_model(&chol_model_11, "chol_model_11", cpu_chol_task_11_cost, cuda_chol_task_11_cost);
	initialize_chol_model(&chol_model_21, "chol_model_21", cpu_chol_task_21_cost, cuda_chol_task_21_cost);
	initialize_chol_model(&chol_model_22, "chol_model_22", cpu_chol_task_22_cost, cuda_chol_task_22_cost);
#else
	initialize_chol_model(&chol_model_11, "chol_model_11", cpu_chol_task_11_cost, NULL);
	initialize_chol_model(&chol_model_21, "chol_model_21", cpu_chol_task_21_cost, NULL);
	initialize_chol_model(&chol_model_22, "chol_model_22", cpu_chol_task_22_cost, NULL);
#endif
	starpu_cublas_init();
	if (with_ctxs_p)
	{
		construct_contexts();
		start_2benchs(execute_cholesky);
	}
	else if (with_noctxs_p)
		start_2benchs(execute_cholesky);
	else if (chole1_p)
		start_1stbench(execute_cholesky);
	else if (chole2_p)
		start_2ndbench(execute_cholesky);
	else
		execute_cholesky(size_p, nblocks_p);
	starpu_cublas_shutdown();
	starpu_shutdown();
	return 0;
}