cholesky_implicit.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2010 Mehdi Juhoor
 * Copyright (C) 2013 Thibaut Lambert
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This version of the Cholesky factorization uses implicit dependency computation.
 * The whole algorithm thus appears clearly in the task submission loop in _cholesky().
 */

/* Note: this uses Fortran ordering, i.e. column-major ordering, i.e.
 * elements with consecutive row numbers are consecutive in memory. */
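
/*
 * Minimal sketch of the implicit-dependency model used below (not part of the
 * build, hence the #if 0 guard): the codelet `some_cl` and the handle `h` are
 * hypothetical. StarPU derives the task graph from the access modes declared
 * at submission time, so no explicit dependency declarations are needed.
 */
#if 0
static void implicit_deps_sketch(starpu_data_handle_t h)
{
	/* First task writes the tile behind `h`. */
	starpu_task_insert(&some_cl, STARPU_RW, h, 0);
	/* This task only reads the same handle; StarPU automatically makes it
	 * depend on the previous writer (read-after-write ordering). */
	starpu_task_insert(&some_cl, STARPU_R, h, 0);
	starpu_task_wait_for_all();
}
#endif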
#include "cholesky.h"
#include "../sched_ctx_utils/sched_ctx_utils.h"

#if defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_MAGMA)
#include "magma.h"
#endif

/*
 * code to bootstrap the factorization
 * and construct the DAG
 */

static void callback_turn_spmd_on(void *arg)
{
	(void)arg;
	cl22.type = STARPU_SPMD;
}

static int _cholesky(starpu_data_handle_t dataA, unsigned nblocks)
{
	double start;
	double end;
	unsigned k,m,n;
	unsigned long nx = starpu_matrix_get_nx(dataA);
	unsigned long nn = nx/nblocks;
	unsigned unbound_prio = STARPU_MAX_PRIO == INT_MAX && STARPU_MIN_PRIO == INT_MIN;

	if (bound_p || bound_lp_p || bound_mps_p)
		starpu_bound_start(bound_deps_p, 0);
	starpu_fxt_start_profiling();

	start = starpu_timing_now();

	/* create all the DAG nodes */
	for (k = 0; k < nblocks; k++)
	{
		int ret;
		starpu_iteration_push(k);

		starpu_data_handle_t sdatakk = starpu_data_get_sub_data(dataA, 2, k, k);

		ret = starpu_task_insert(&cl11,
					 STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int)(2*nblocks - 2*k) : STARPU_MAX_PRIO,
					 STARPU_RW, sdatakk,
					 STARPU_CALLBACK, (k == 3*nblocks/4)?callback_turn_spmd_on:NULL,
					 STARPU_FLOPS, (double) FLOPS_SPOTRF(nn),
					 STARPU_TAG_ONLY, TAG11(k),
					 0);
		if (ret == -ENODEV) return 77;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");

		for (m = k+1; m<nblocks; m++)
		{
			starpu_data_handle_t sdatamk = starpu_data_get_sub_data(dataA, 2, m, k);

			ret = starpu_task_insert(&cl21,
						 STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int)(2*nblocks - 2*k - m) : (m == k+1)?STARPU_MAX_PRIO:STARPU_DEFAULT_PRIO,
						 STARPU_R, sdatakk,
						 STARPU_RW, sdatamk,
						 STARPU_FLOPS, (double) FLOPS_STRSM(nn, nn),
						 STARPU_TAG_ONLY, TAG21(m,k),
						 0);
			if (ret == -ENODEV) return 77;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
		}
		starpu_data_wont_use(sdatakk);

		for (n = k+1; n<nblocks; n++)
		{
			starpu_data_handle_t sdatank = starpu_data_get_sub_data(dataA, 2, n, k);

			for (m = n; m<nblocks; m++)
			{
				starpu_data_handle_t sdatamk = starpu_data_get_sub_data(dataA, 2, m, k);
				starpu_data_handle_t sdatamn = starpu_data_get_sub_data(dataA, 2, m, n);

				ret = starpu_task_insert(&cl22,
							 STARPU_PRIORITY, noprio_p ? STARPU_DEFAULT_PRIO : unbound_prio ? (int)(2*nblocks - 2*k - m - n) : ((n == k+1) && (m == k+1))?STARPU_MAX_PRIO:STARPU_DEFAULT_PRIO,
							 STARPU_R, sdatamk,
							 STARPU_R, sdatank,
							 cl22.modes[2], sdatamn,
							 STARPU_FLOPS, (double) FLOPS_SGEMM(nn, nn, nn),
							 STARPU_TAG_ONLY, TAG22(k,m,n),
							 0);
				if (ret == -ENODEV) return 77;
				STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
			}
			starpu_data_wont_use(sdatank);
		}
		starpu_iteration_pop();
	}

	starpu_task_wait_for_all();

	end = starpu_timing_now();
	starpu_fxt_stop_profiling();

	if (bound_p || bound_lp_p || bound_mps_p)
		starpu_bound_stop();

	double timing = end - start;

	double flop = FLOPS_SPOTRF(nx);

	if(with_ctxs_p || with_noctxs_p || chole1_p || chole2_p)
		update_sched_ctx_timing_results((flop/timing/1000.0f), (timing/1000000.0f));
	else
	{
		PRINTF("# size\tms\tGFlops");
		if (bound_p)
			PRINTF("\tTms\tTGFlops");
		PRINTF("\n");
		PRINTF("%lu\t%.0f\t%.1f", nx, timing/1000, (flop/timing/1000.0f));
		if (bound_lp_p)
		{
			FILE *f = fopen("cholesky.lp", "w");
			starpu_bound_print_lp(f);
			fclose(f);
		}
		if (bound_mps_p)
		{
			FILE *f = fopen("cholesky.mps", "w");
			starpu_bound_print_mps(f);
			fclose(f);
		}
		if (bound_p)
		{
			double res;
			starpu_bound_compute(&res, NULL, 0);
			PRINTF("\t%.0f\t%.1f", res, (flop/res/1000000.0f));
		}
		PRINTF("\n");
	}
	return 0;
}
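
/*
 * For reference, the tile-level recurrence the submission loop above
 * implements at iteration k (a sketch of the math, consistent with the
 * FLOPS hints passed to starpu_task_insert):
 *
 *   A[k][k] = L[k][k] * L[k][k]^T                   -> cl11 (POTRF on the diagonal tile)
 *   A[m][k] = A[m][k] * L[k][k]^-T,  m > k          -> cl21 (TRSM on the tiles below it)
 *   A[m][n] = A[m][n] - A[m][k] * A[n][k]^T,
 *             k < n <= m < nblocks                  -> cl22 (trailing-submatrix update;
 *                                                       the m == n case is the symmetric,
 *                                                       SYRK-like one, handled here by the
 *                                                       same codelet)
 */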
static int cholesky(float *matA, unsigned size, unsigned ld, unsigned nblocks)
{
	starpu_data_handle_t dataA;
	unsigned m, n;

	/* monitor and partition the A matrix into blocks: one block is now
	 * determined by two unsigned indices (m,n). A standalone sketch of this
	 * two-level partitioning follows the function. */
	starpu_matrix_data_register(&dataA, STARPU_MAIN_RAM, (uintptr_t)matA, ld, size, size, sizeof(float));

	/* Split into blocks of complete rows first */
	struct starpu_data_filter f =
	{
		.filter_func = starpu_matrix_filter_block,
		.nchildren = nblocks
	};

	/* Then split rows into tiles */
	struct starpu_data_filter f2 =
	{
		/* Note: here "vertical" is for row-major; we are using column-major. */
		.filter_func = starpu_matrix_filter_vertical_block,
		.nchildren = nblocks
	};

	starpu_data_map_filters(dataA, 2, &f, &f2);

	for (m = 0; m < nblocks; m++)
		for (n = 0; n < nblocks; n++)
		{
			starpu_data_handle_t data = starpu_data_get_sub_data(dataA, 2, m, n);
			starpu_data_set_coordinates(data, 2, m, n);
		}

	int ret = _cholesky(dataA, nblocks);

	starpu_data_unpartition(dataA, STARPU_MAIN_RAM);
	starpu_data_unregister(dataA);

	return ret;
}
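
/*
 * Standalone sketch of the two-level partitioning above (hypothetical helper,
 * kept out of the build): a block filter followed by a vertical-block filter
 * turns one registered N x N matrix handle into an nblocks x nblocks grid of
 * tile handles, each reachable with starpu_data_get_sub_data(handle, 2, m, n).
 */
#if 0
static void partition_sketch(float *A, unsigned N, unsigned nblocks)
{
	starpu_data_handle_t h;
	starpu_matrix_data_register(&h, STARPU_MAIN_RAM, (uintptr_t)A, N, N, N, sizeof(float));

	/* first level: blocks of complete rows, second level: tiles */
	struct starpu_data_filter f1 = { .filter_func = starpu_matrix_filter_block, .nchildren = nblocks };
	struct starpu_data_filter f2 = { .filter_func = starpu_matrix_filter_vertical_block, .nchildren = nblocks };
	starpu_data_map_filters(h, 2, &f1, &f2);

	/* e.g. the tile at grid coordinates (1, 0) */
	starpu_data_handle_t tile = starpu_data_get_sub_data(h, 2, 1, 0);
	(void)tile;

	starpu_data_unpartition(h, STARPU_MAIN_RAM);
	starpu_data_unregister(h);
}
#endif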
static void execute_cholesky(unsigned size, unsigned nblocks)
{
	float *mat = NULL;

	/*
	 * create a simple symmetric positive definite matrix example:
	 *
	 * Hilbert matrix: h(i,j) = 1/(i+j+1)
	 *
	 * made better conditioned (diagonally dominant) by adding the matrix
	 * size on the diagonal.
	 */
#ifndef STARPU_SIMGRID
	unsigned m,n;
	starpu_malloc_flags((void **)&mat, (size_t)size*size*sizeof(float), STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
	for (n = 0; n < size; n++)
	{
		for (m = 0; m < size; m++)
		{
			mat[m +n*size] = (1.0f/(1.0f+m+n)) + ((m == n)?1.0f*size:0.0f);
			/* mat[m +n*size] = ((m == n)?1.0f*size:0.0f); */
		}
	}

/* #define PRINT_OUTPUT */
#ifdef PRINT_OUTPUT
	FPRINTF(stdout, "Input :\n");
	for (m = 0; m < size; m++)
	{
		for (n = 0; n < size; n++)
		{
			if (n <= m)
			{
				FPRINTF(stdout, "%2.2f\t", mat[m +n*size]);
			}
			else
			{
				FPRINTF(stdout, ".\t");
			}
		}
		FPRINTF(stdout, "\n");
	}
#endif
#endif

	cholesky(mat, size, size, nblocks);

#ifndef STARPU_SIMGRID
#ifdef PRINT_OUTPUT
	FPRINTF(stdout, "Results :\n");
	for (m = 0; m < size; m++)
	{
		for (n = 0; n < size; n++)
		{
			if (n <= m)
			{
				FPRINTF(stdout, "%2.2f\t", mat[m +n*size]);
			}
			else
			{
				FPRINTF(stdout, ".\t");
			}
		}
		FPRINTF(stdout, "\n");
	}
#endif

	if (check_p)
	{
		FPRINTF(stderr, "compute explicit LLt ...\n");
		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n > m)
				{
					mat[m+n*size] = 0.0f; /* debug */
				}
			}
		}
		float *test_mat = malloc(size*size*sizeof(float));
		STARPU_ASSERT(test_mat);

		/* Rebuild L*L^T from the factor left in mat (a naive alternative
		 * check is sketched after this function). */
		STARPU_SSYRK("L", "N", size, size, 1.0f,
			     mat, size, 0.0f, test_mat, size);

		FPRINTF(stderr, "comparing results ...\n");
#ifdef PRINT_OUTPUT
		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n <= m)
				{
					FPRINTF(stdout, "%2.2f\t", test_mat[m +n*size]);
				}
				else
				{
					FPRINTF(stdout, ".\t");
				}
			}
			FPRINTF(stdout, "\n");
		}
#endif

		for (m = 0; m < size; m++)
		{
			for (n = 0; n < size; n++)
			{
				if (n <= m)
				{
					float orig = (1.0f/(1.0f+m+n)) + ((m == n)?1.0f*size:0.0f);
					float err = fabsf(test_mat[m +n*size] - orig) / orig;
					if (err > 0.0001)
					{
						FPRINTF(stderr, "Error[%u, %u] --> %2.6f != %2.6f (err %2.6f)\n", m, n, test_mat[m +n*size], orig, err);
						assert(0);
					}
				}
			}
		}
		free(test_mat);
	}
	starpu_free_flags(mat, (size_t)size*size*sizeof(float), STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
#endif
}
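
/*
 * Hedged, naive alternative to the SSYRK-based check in execute_cholesky()
 * (illustrative only, hence not built): rebuild L*L^T entry by entry from the
 * lower-triangular factor left in `mat` (column-major) and compare it against
 * the original Hilbert-plus-diagonal matrix with the same relative tolerance.
 */
#if 0
static int check_llt_naive(const float *mat, unsigned size)
{
	unsigned m, n, k;
	for (n = 0; n < size; n++)
		for (m = n; m < size; m++)
		{
			/* (L * L^T)(m,n) = sum_k L(m,k) * L(n,k), nonzero only for k <= n */
			float acc = 0.0f;
			for (k = 0; k <= n; k++)
				acc += mat[m + k*size] * mat[n + k*size];

			float orig = (1.0f/(1.0f+m+n)) + ((m == n)?1.0f*size:0.0f);
			if (fabsf(acc - orig) / orig > 0.0001f)
				return 1; /* mismatch */
		}
	return 0; /* lower triangle matches */
}
#endif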
int main(int argc, char **argv)
{
#ifdef STARPU_HAVE_MAGMA
	magma_init();
#endif

	int ret;
	ret = starpu_init(NULL);
	if (ret == -ENODEV) return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	//starpu_fxt_stop_profiling();

	init_sizes();
	parse_args(argc, argv);

	if(with_ctxs_p || with_noctxs_p || chole1_p || chole2_p)
		parse_args_ctx(argc, argv);

#ifdef STARPU_USE_CUDA
	initialize_chol_model(&chol_model_11,"chol_model_11",cpu_chol_task_11_cost,cuda_chol_task_11_cost);
	initialize_chol_model(&chol_model_21,"chol_model_21",cpu_chol_task_21_cost,cuda_chol_task_21_cost);
	initialize_chol_model(&chol_model_22,"chol_model_22",cpu_chol_task_22_cost,cuda_chol_task_22_cost);
#else
	initialize_chol_model(&chol_model_11,"chol_model_11",cpu_chol_task_11_cost,NULL);
	initialize_chol_model(&chol_model_21,"chol_model_21",cpu_chol_task_21_cost,NULL);
	initialize_chol_model(&chol_model_22,"chol_model_22",cpu_chol_task_22_cost,NULL);
#endif

	starpu_cublas_init();

	if(with_ctxs_p)
	{
		construct_contexts();
		start_2benchs(execute_cholesky);
	}
	else if(with_noctxs_p)
		start_2benchs(execute_cholesky);
	else if(chole1_p)
		start_1stbench(execute_cholesky);
	else if(chole2_p)
		start_2ndbench(execute_cholesky);
	else
		execute_cholesky(size_p, nblocks_p);

	starpu_cublas_shutdown();
	starpu_shutdown();

	return 0;
}