cholesky_compil.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2013,2015       Inria
 * Copyright (C) 2009-2017,2019       Université de Bordeaux
 * Copyright (C) 2010-2013,2015-2017  CNRS
 * Copyright (C) 2013                 Thibaut Lambert
 * Copyright (C) 2010                 Mehdi Juhoor
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * This version of the Cholesky factorization can include an externally
 * compiler-generated loop nest, which makes it possible to experiment
 * with compiler-side optimizations.
 */
#include "cholesky.h"
#include "../sched_ctx_utils/sched_ctx_utils.h"
#include <math.h>

#if defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_MAGMA)
#include "magma.h"
#endif
/*
 * Code to bootstrap the factorization
 * and construct the DAG
 */
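
/* Switch the update codelet to SPMD (parallel-task) mode; presumably
 * attached as a task callback by the generated loop nest once parallel
 * CPU workers become worthwhile. */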
static void callback_turn_spmd_on(void *arg)
{
	(void)arg;
	cl22.type = STARPU_SPMD;
}
static int _cholesky(starpu_data_handle_t dataA, unsigned nblocks)
{
	double start;
	double end;

	unsigned long nelems = starpu_matrix_get_nx(dataA);
	unsigned long nn = nelems/nblocks;

	int N = nblocks;
	int M = nblocks;
	int lambda_b = starpu_get_env_float_default("CHOLESKY_LAMBDA_B", nblocks);
	int lambda_o_u = starpu_get_env_float_default("CHOLESKY_LAMBDA_O_U", 0);
	int lambda_o_d = starpu_get_env_float_default("CHOLESKY_LAMBDA_O_D", 0);

	unsigned unbound_prio = STARPU_MAX_PRIO == INT_MAX && STARPU_MIN_PRIO == INT_MIN;
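	/* When the scheduler exposes an unbounded priority range, the computed
	 * per-task priorities below are passed through as-is; otherwise they
	 * are clamped to STARPU_MAX_PRIO / STARPU_DEFAULT_PRIO. */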

	if (bound_p || bound_lp_p || bound_mps_p)
		starpu_bound_start(bound_deps_p, 0);
	starpu_fxt_start_profiling();

	start = starpu_timing_now();
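
/* Helper macros for the generated loop nest: min/max plus the ceild/floord
 * integer-division helpers commonly emitted by polyhedral code generators,
 * and A(i,j), which maps a tile coordinate to its StarPU sub-data handle. */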
#define min(x,y)    ((x) < (y) ? (x) : (y))
#define max(x,y)    ((x) < (y) ? (y) : (x))
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define A(i,j)      starpu_data_get_sub_data(dataA, 2, j, i)
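
/* Task-insertion wrappers: each macro submits one tile kernel with its data
 * access modes and a flops estimate for the performance models.  A return
 * value of -ENODEV means no worker can execute the codelet, so the program
 * exits with 77, the standard "test skipped" code. */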
#define _POTRF(cl, A, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_MAX_PRIO, \
		STARPU_RW, A, \
		STARPU_FLOPS, (double) FLOPS_SPOTRF(nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)

#define _TRSM(cl, A, B, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_RW, B, \
		STARPU_FLOPS, (double) FLOPS_STRSM(nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)

/* TODO: use real SYRK */
#define _SYRK(cl, A, C, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_R, A, \
		STARPU_RW, C, \
		STARPU_FLOPS, (double) FLOPS_SGEMM(nn,nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)

#define _GEMM(cl, A, B, C, prio, name) do { \
	int ret = starpu_task_insert(cl, \
		STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int) (prio) : (int) STARPU_DEFAULT_PRIO, \
		STARPU_R, A, \
		STARPU_R, B, \
		STARPU_RW, C, \
		STARPU_FLOPS, (double) FLOPS_SGEMM(nn,nn,nn), \
		STARPU_NAME, name, \
		0); \
	if (ret == -ENODEV) return 77; \
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert"); \
} while (0)

#define POTRF(A, prio)          _POTRF(&cl11, A, prio, "potrf")
#define TRSM(A, B, prio)        _TRSM(&cl21, A, B, prio, "trsm")
#define SYRK(A, B, prio)        _SYRK(&cl22, A, B, prio, "syrk")
#define GEMM(A, B, C, prio)     _GEMM(&cl22, A, B, C, prio, "gemm")

#define POTRF_GPU(A, prio)      _POTRF(&cl11_gpu, A, prio, "potrf_gpu")
#define TRSM_GPU(A, B, prio)    _TRSM(&cl21_gpu, A, B, prio, "trsm_gpu")
#define SYRK_GPU(A, B, prio)    _SYRK(&cl22_gpu, A, B, prio, "syrk_gpu")
#define GEMM_GPU(A, B, C, prio) _GEMM(&cl22_gpu, A, B, C, prio, "gemm_gpu")

#define POTRF_CPU(A, prio)      _POTRF(&cl11_cpu, A, prio, "potrf_cpu")
#define TRSM_CPU(A, B, prio)    _TRSM(&cl21_cpu, A, B, prio, "trsm_cpu")
#define SYRK_CPU(A, B, prio)    _SYRK(&cl22_cpu, A, B, prio, "syrk_cpu")
#define GEMM_CPU(A, B, C, prio) _GEMM(&cl22_cpu, A, B, C, prio, "gemm_cpu")
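
/* Placement variants used by the generated loop nest: the "oreille" ("ear")
 * macros pin tasks to the GPU codelets, the "cpu" macros to the CPU
 * codelets, and the "bande" ("band") macros leave the choice to the
 * scheduler.  Note that herk_cpu and gemm_cpu currently expand to the
 * scheduler-chosen SYRK/GEMM rather than the *_CPU variants.  Priorities
 * decrease with DAG depth, favouring the critical path. */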
#define potrf_oreille_up(k)      { POTRF_GPU(A(k,k), (2*N - 2*k)); }
#define potrf_oreille_down(k)    { POTRF_GPU(A(k,k), (2*N - 2*k)); }
#define potrf_cpu(k)             { POTRF_CPU(A(k,k), (2*N - 2*k)); }
#define potrf_bande(k)           { POTRF(A(k,k), (2*N - 2*k)); }

#define trsm_oreille_up(k,m)     { TRSM_GPU(A(k,k), A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_oreille_down(k,m)   { TRSM_GPU(A(k,k), A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_cpu(k,m)            { TRSM_CPU(A(k,k), A(m,k), (2*nblocks - 2*k - m)); }
#define trsm_bande(k,m)          { TRSM(A(k,k), A(m,k), (2*nblocks - 2*k - m)); }

#define herk_oreille_up(k,n)     { SYRK_GPU(A(n,k), A(n,n), (2*nblocks - 2*k - n)); }
#define herk_oreille_down(k,n)   { SYRK_GPU(A(n,k), A(n,n), (2*nblocks - 2*k - n)); }
#define herk_cpu(k,n)            { SYRK(A(n,k), A(n,n), (2*nblocks - 2*k - n)); }
#define herk_bande(k,n)          { SYRK(A(n,k), A(n,n), (2*nblocks - 2*k - n)); }

#define gemm_oreille_up(k,n,m)   { GEMM_GPU(A(m,k), A(n,k), A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_oreille_down(k,n,m) { GEMM_GPU(A(m,k), A(n,k), A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_cpu(k,n,m)          { GEMM(A(m,k), A(n,k), A(m,n), (2*nblocks - 2*k - n - m)); }
#define gemm_bande(k,n,m)        { GEMM(A(m,k), A(n,k), A(m,n), (2*nblocks - 2*k - n - m)); }
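
/* The included file below is produced by the external compiler and is
 * expected to expand to a tiled right-looking Cholesky loop nest over the
 * macros above.  As a sketch (not the actual generated code), it is
 * equivalent to:
 *
 *	for (k = 0; k < N; k++)
 *	{
 *		potrf_bande(k);
 *		for (m = k + 1; m < N; m++)
 *			trsm_bande(k, m);
 *		for (n = k + 1; n < N; n++)
 *		{
 *			herk_bande(k, n);
 *			for (m = n + 1; m < N; m++)
 *				gemm_bande(k, n, m);
 *		}
 *	}
 *
 * with iterations presumably split between the "oreille", "bande" and "cpu"
 * variants according to the lambda_b/lambda_o_u/lambda_o_d parameters. */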
#include "cholesky_compiled.c"

	starpu_task_wait_for_all();

	end = starpu_timing_now();

	starpu_fxt_stop_profiling();

	if (bound_p || bound_lp_p || bound_mps_p)
		starpu_bound_stop();

	double timing = end - start;
	double flop = FLOPS_SPOTRF(nelems);

	if (with_ctxs_p || with_noctxs_p || chole1_p || chole2_p)
		update_sched_ctx_timing_results((flop/timing/1000.0f), (timing/1000000.0f));
	else
	{
		PRINTF("# size\tms\tGFlops");
		if (bound_p)
			PRINTF("\tTms\tTGFlops");
		PRINTF("\n");

		PRINTF("%lu\t%.0f\t%.1f", nelems, timing/1000, (flop/timing/1000.0f));

		if (bound_lp_p)
		{
			FILE *f = fopen("cholesky.lp", "w");
			STARPU_ASSERT(f);
			starpu_bound_print_lp(f);
			fclose(f);
		}
		if (bound_mps_p)
		{
			FILE *f = fopen("cholesky.mps", "w");
			STARPU_ASSERT(f);
			starpu_bound_print_mps(f);
			fclose(f);
		}
		if (bound_p)
		{
			double res;
			starpu_bound_compute(&res, NULL, 0);
			PRINTF("\t%.0f\t%.1f", res, (flop/res/1000000.0f));
		}
		PRINTF("\n");
	}

	return 0;
}

static int cholesky(float *matA, unsigned size, unsigned ld, unsigned nblocks)
{
	starpu_data_handle_t dataA;
	unsigned x, y;

	/* monitor and partition the A matrix into blocks:
	 * one block is now determined by 2 unsigneds (i,j) */
	starpu_matrix_data_register(&dataA, STARPU_MAIN_RAM, (uintptr_t)matA, ld, size, size, sizeof(float));

	struct starpu_data_filter f =
	{
		.filter_func = starpu_matrix_filter_vertical_block,
		.nchildren = nblocks
	};
	struct starpu_data_filter f2 =
	{
		.filter_func = starpu_matrix_filter_block,
		.nchildren = nblocks
	};

	starpu_data_map_filters(dataA, 2, &f, &f2);
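	/* Chaining the two filters yields an nblocks x nblocks grid of tiles;
	 * each tile is then retrieved through the A(i,j) macro above. */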
	for (x = 0; x < nblocks; x++)
		for (y = 0; y < nblocks; y++)
		{
			starpu_data_handle_t data = starpu_data_get_sub_data(dataA, 2, x, y);
			starpu_data_set_coordinates(data, 2, x, y);
		}

	int ret = _cholesky(dataA, nblocks);

	starpu_data_unpartition(dataA, STARPU_MAIN_RAM);
	starpu_data_unregister(dataA);

	return ret;
}

static void execute_cholesky(unsigned size, unsigned nblocks)
{
	float *mat = NULL;

#ifndef STARPU_SIMGRID
	unsigned i, j;
	starpu_malloc_flags((void **)&mat, (size_t)size*size*sizeof(float), STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
	for (i = 0; i < size; i++)
	{
		for (j = 0; j < size; j++)
		{
			mat[j + i*size] = (1.0f/(1.0f+i+j)) + ((i == j) ? 1.0f*size : 0.0f);
			/* mat[j + i*size] = ((i == j) ? 1.0f*size : 0.0f); */
		}
	}
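	/* Hilbert matrix h(i,j) = 1/(i+j+1) with size added on the diagonal:
	 * symmetric and strictly diagonally dominant, hence positive definite,
	 * so the Cholesky factorization is guaranteed to exist. */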

/* #define PRINT_OUTPUT */
#ifdef PRINT_OUTPUT
	FPRINTF(stdout, "Input :\n");
	for (j = 0; j < size; j++)
	{
		for (i = 0; i < size; i++)
		{
			if (i <= j)
			{
				FPRINTF(stdout, "%2.2f\t", mat[j + i*size]);
			}
			else
			{
				FPRINTF(stdout, ".\t");
			}
		}
		FPRINTF(stdout, "\n");
	}
#endif
#endif
	cholesky(mat, size, size, nblocks);

#ifndef STARPU_SIMGRID
#ifdef PRINT_OUTPUT
	FPRINTF(stdout, "Results :\n");
	for (j = 0; j < size; j++)
	{
		for (i = 0; i < size; i++)
		{
			if (i <= j)
			{
				FPRINTF(stdout, "%2.2f\t", mat[j + i*size]);
			}
			else
			{
				FPRINTF(stdout, ".\t");
				mat[j + i*size] = 0.0f; /* debug */
			}
		}
		FPRINTF(stdout, "\n");
	}
#endif

	if (check_p)
	{
		FPRINTF(stderr, "compute explicit LLt ...\n");
		for (j = 0; j < size; j++)
		{
			for (i = 0; i < size; i++)
			{
				if (i > j)
				{
					mat[j + i*size] = 0.0f; /* debug */
				}
			}
		}
		float *test_mat = malloc(size*size*sizeof(float));
		STARPU_ASSERT(test_mat);

		STARPU_SSYRK("L", "N", size, size, 1.0f,
			     mat, size, 0.0f, test_mat, size);
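		/* test_mat now holds L * L^T, recomputed from the factor left
		 * in mat (upper triangle zeroed above); compare it entrywise
		 * against the original input within a 1e-5 relative tolerance. */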
		FPRINTF(stderr, "comparing results ...\n");
#ifdef PRINT_OUTPUT
		for (j = 0; j < size; j++)
		{
			for (i = 0; i < size; i++)
			{
				if (i <= j)
				{
					FPRINTF(stdout, "%2.2f\t", test_mat[j + i*size]);
				}
				else
				{
					FPRINTF(stdout, ".\t");
				}
			}
			FPRINTF(stdout, "\n");
		}
#endif

		for (j = 0; j < size; j++)
		{
			for (i = 0; i < size; i++)
			{
				if (i <= j)
				{
					float orig = (1.0f/(1.0f+i+j)) + ((i == j) ? 1.0f*size : 0.0f);
					float err = fabsf(test_mat[j + i*size] - orig) / orig;
					if (err > 0.00001)
					{
						FPRINTF(stderr, "Error[%u, %u] --> %2.6f != %2.6f (err %2.6f)\n", i, j, test_mat[j + i*size], orig, err);
						assert(0);
					}
				}
			}
		}
		free(test_mat);
	}
	starpu_free_flags(mat, (size_t)size*size*sizeof(float), STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
#endif
}

int main(int argc, char **argv)
{
	/* create a simple positive definite symmetric matrix example
	 *
	 * Hilbert matrix: h(i,j) = 1/(i+j+1)
	 */
#ifdef STARPU_HAVE_MAGMA
	magma_init();
#endif

	int ret;
	ret = starpu_init(NULL);
	if (ret == -ENODEV) return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	//starpu_fxt_stop_profiling();

	init_sizes();
	parse_args(argc, argv);

	if (with_ctxs_p || with_noctxs_p || chole1_p || chole2_p)
		parse_args_ctx(argc, argv);

#ifdef STARPU_USE_CUDA
	initialize_chol_model(&chol_model_11, "chol_model_11", cpu_chol_task_11_cost, cuda_chol_task_11_cost);
	initialize_chol_model(&chol_model_21, "chol_model_21", cpu_chol_task_21_cost, cuda_chol_task_21_cost);
	initialize_chol_model(&chol_model_22, "chol_model_22", cpu_chol_task_22_cost, cuda_chol_task_22_cost);
#else
	initialize_chol_model(&chol_model_11, "chol_model_11", cpu_chol_task_11_cost, NULL);
	initialize_chol_model(&chol_model_21, "chol_model_21", cpu_chol_task_21_cost, NULL);
	initialize_chol_model(&chol_model_22, "chol_model_22", cpu_chol_task_22_cost, NULL);
#endif

	starpu_cublas_init();

	if (with_ctxs_p)
	{
		construct_contexts();
		start_2benchs(execute_cholesky);
	}
	else if (with_noctxs_p)
		start_2benchs(execute_cholesky);
	else if (chole1_p)
		start_1stbench(execute_cholesky);
	else if (chole2_p)
		start_2ndbench(execute_cholesky);
	else
		execute_cholesky(size_p, nblocks_p);

	starpu_cublas_shutdown();
	starpu_shutdown();

	return 0;
}