dw_sparse_cg.c

/*
 * StarPU
 * Copyright (C) INRIA 2008-2009 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * Conjugate gradients for sparse matrices
 */

#include "dw_sparse_cg.h"

#ifdef USE_CUDA
/* CUDA spmv codelet */
static struct starpu_cuda_module_s cuda_module;
static struct starpu_cuda_function_s cuda_function;
static starpu_cuda_codelet_t cuda_codelet;

void initialize_cuda(void)
{
	char module_path[1024];
	sprintf(module_path,
		"%s/examples/cuda/spmv_cuda.cubin", STARPUDIR);
	char *function_symbol = "spmv_kernel_3";

	starpu_init_cuda_module(&cuda_module, module_path);
	starpu_init_cuda_function(&cuda_function, &cuda_module, function_symbol);

	cuda_codelet.func = &cuda_function;
	cuda_codelet.stack = NULL;
	cuda_codelet.stack_size = 0;

	cuda_codelet.gridx = grids;
	cuda_codelet.gridy = 1;

	cuda_codelet.blockx = blocks;
	cuda_codelet.blocky = 1;

	cuda_codelet.shmemsize = 128;
}
#endif // USE_CUDA
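
/*
 * Helper: each task in this example gets its own freshly allocated codelet
 * and a caller-chosen tag, so that inter-task dependencies can be expressed
 * below with starpu_tag_declare_deps().
 */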
static struct starpu_task *create_task(starpu_tag_t id)
{
	starpu_codelet *cl = malloc(sizeof(starpu_codelet));
	cl->where = ANY;
	cl->model = NULL;

	struct starpu_task *task = starpu_task_create();
	task->cl = cl;
	task->cl_arg = NULL;

	task->use_tag = 1;
	task->tag_id = id;

	return task;
}

static void create_data(float **_nzvalA, float **_vecb, float **_vecx, uint32_t *_nnz, uint32_t *_nrow, uint32_t **_colind, uint32_t **_rowptr)
{
	/* we need a sparse symmetric positive definite matrix and a "dense" vector */
	/* example of 3-band matrix */
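	/*
	 * The matrix built below is tridiagonal: 5 on the diagonal, 1 on the two
	 * adjacent diagonals. It is symmetric and strictly diagonally dominant
	 * with a positive diagonal, hence positive definite, so CG applies.
	 * It is stored in CSR form: nzval holds the non-zero values, colind
	 * their column indices, and rowptr the offset in nzval where each row
	 * starts.
	 */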

	float *nzval;
	uint32_t nnz;
	uint32_t *colind;
	uint32_t *rowptr;

	nnz = 3*size-2;

	nzval = malloc(nnz*sizeof(float));
	colind = malloc(nnz*sizeof(uint32_t));
	rowptr = malloc(size*sizeof(uint32_t));

	assert(nzval);
	assert(colind);
	assert(rowptr);

	/* fill the matrix */
	unsigned row;
	unsigned pos = 0;
	for (row = 0; row < size; row++)
	{
		rowptr[row] = pos;

		if (row > 0) {
			nzval[pos] = 1.0f;
			colind[pos] = row-1;
			pos++;
		}

		nzval[pos] = 5.0f;
		colind[pos] = row;
		pos++;

		if (row < size - 1) {
			nzval[pos] = 1.0f;
			colind[pos] = row+1;
			pos++;
		}
	}

	*_nnz = nnz;
	*_nrow = size;
	*_nzvalA = nzval;
	*_colind = colind;
	*_rowptr = rowptr;

	STARPU_ASSERT(pos == nnz);

	/* initialize the 2 vectors */
	float *invec, *outvec;
	invec = malloc(size*sizeof(float));
	assert(invec);

	outvec = malloc(size*sizeof(float));
	assert(outvec);

	/* fill those */
	unsigned ind;
	for (ind = 0; ind < size; ind++)
	{
		invec[ind] = 2.0f;
		outvec[ind] = 0.0f;
	}

	*_vecb = invec;
	*_vecx = outvec;
}

void init_problem(void)
{
	/* create the sparse input matrix */
	float *nzval;
	float *vecb;
	float *vecx;
	uint32_t nnz;
	uint32_t nrow;
	uint32_t *colind;
	uint32_t *rowptr;

	create_data(&nzval, &vecb, &vecx, &nnz, &nrow, &colind, &rowptr);

	conjugate_gradient(nzval, vecb, vecx, nnz, nrow, colind, rowptr);
}

/*
 * cg initialization phase
 */
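/*
 * The three tasks below compute the usual CG starting values:
 *    r = b - A x
 *    d = r
 *    delta_new = trans(r) r
 * Task 3 then triggers iteration_cg() through its callback, which launches
 * the first real iteration.
 */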
void init_cg(struct cg_problem *problem)
{
	problem->i = 0;

	/* r = b - A x */
	struct starpu_task *task1 = create_task(1UL);
	task1->cl->where = CORE;
	task1->cl->core_func = core_codelet_func_1;
	task1->cl->nbuffers = 4;
	task1->buffers[0].state = problem->ds_matrixA;
	task1->buffers[0].mode = R;
	task1->buffers[1].state = problem->ds_vecx;
	task1->buffers[1].mode = R;
	task1->buffers[2].state = problem->ds_vecr;
	task1->buffers[2].mode = W;
	task1->buffers[3].state = problem->ds_vecb;
	task1->buffers[3].mode = R;

	/* d = r */
	struct starpu_task *task2 = create_task(2UL);
	task2->cl->where = CORE;
	task2->cl->core_func = core_codelet_func_2;
	task2->cl->nbuffers = 2;
	task2->buffers[0].state = problem->ds_vecd;
	task2->buffers[0].mode = W;
	task2->buffers[1].state = problem->ds_vecr;
	task2->buffers[1].mode = R;

	starpu_tag_declare_deps(2UL, 1, 1UL);

	/* delta_new = trans(r) r */
	struct starpu_task *task3 = create_task(3UL);
	task3->cl->where = CUBLAS|CORE;
#ifdef USE_CUDA
	task3->cl->cublas_func = cublas_codelet_func_3;
#endif
	task3->cl->core_func = core_codelet_func_3;
	task3->cl_arg = problem;
	task3->cl->nbuffers = 1;
	task3->buffers[0].state = problem->ds_vecr;
	task3->buffers[0].mode = R;

	task3->callback_func = iteration_cg;
	task3->callback_arg = problem;

	/* XXX 3 should only depend on 1 ... */
	starpu_tag_declare_deps(3UL, 1, 2UL);

	/* launch the computation now */
	starpu_submit_task(task1);
	starpu_submit_task(task2);
	starpu_submit_task(task3);
}

/*
 * the inner iteration of the cg algorithm:
 * the codelet launcher is (indirectly) its own callback!
 */
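/*
 * One CG iteration, expressed as a chain of six tasks (tags 4 to 9):
 *    q = A d
 *    alpha = delta_new / (trans(d) q)
 *    x = x + alpha d
 *    r = r - alpha q
 *    delta_old = delta_new ; delta_new = trans(r) r ; beta = delta_new / delta_old
 *    d = r + beta d
 * (the standard CG update). The last task calls iteration_cg() again as its
 * callback, which either launches the next iteration or signals termination.
 */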
void launch_new_cg_iteration(struct cg_problem *problem)
{
	unsigned iter = problem->i;

	unsigned long long maskiter = (iter*1024);
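
	/*
	 * Each iteration re-uses the tag numbers 4 to 9, so they are offset by
	 * iter*1024 to keep every tag unique across iterations; the dependency
	 * declarations below use the same offset.
	 */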

	/* q = A d */
	struct starpu_task *task4 = create_task(maskiter | 4UL);
	task4->cl->where = CORE;
	task4->cl->core_func = core_codelet_func_4;
	task4->cl->nbuffers = 3;
	task4->buffers[0].state = problem->ds_matrixA;
	task4->buffers[0].mode = R;
	task4->buffers[1].state = problem->ds_vecd;
	task4->buffers[1].mode = R;
	task4->buffers[2].state = problem->ds_vecq;
	task4->buffers[2].mode = W;

	/* alpha = delta_new / ( trans(d) q ) */
	struct starpu_task *task5 = create_task(maskiter | 5UL);
	task5->cl->where = CUBLAS|CORE;
#ifdef USE_CUDA
	task5->cl->cublas_func = cublas_codelet_func_5;
#endif
	task5->cl->core_func = core_codelet_func_5;
	task5->cl_arg = problem;
	task5->cl->nbuffers = 2;
	task5->buffers[0].state = problem->ds_vecd;
	task5->buffers[0].mode = R;
	task5->buffers[1].state = problem->ds_vecq;
	task5->buffers[1].mode = R;

	starpu_tag_declare_deps(maskiter | 5UL, 1, maskiter | 4UL);

	/* x = x + alpha d */
	struct starpu_task *task6 = create_task(maskiter | 6UL);
	task6->cl->where = CUBLAS|CORE;
#ifdef USE_CUDA
	task6->cl->cublas_func = cublas_codelet_func_6;
#endif
	task6->cl->core_func = core_codelet_func_6;
	task6->cl_arg = problem;
	task6->cl->nbuffers = 2;
	task6->buffers[0].state = problem->ds_vecx;
	task6->buffers[0].mode = RW;
	task6->buffers[1].state = problem->ds_vecd;
	task6->buffers[1].mode = R;

	starpu_tag_declare_deps(maskiter | 6UL, 1, maskiter | 5UL);

	/* r = r - alpha q */
	struct starpu_task *task7 = create_task(maskiter | 7UL);
	task7->cl->where = CUBLAS|CORE;
#ifdef USE_CUDA
	task7->cl->cublas_func = cublas_codelet_func_7;
#endif
	task7->cl->core_func = core_codelet_func_7;
	task7->cl_arg = problem;
	task7->cl->nbuffers = 2;
	task7->buffers[0].state = problem->ds_vecr;
	task7->buffers[0].mode = RW;
	task7->buffers[1].state = problem->ds_vecq;
	task7->buffers[1].mode = R;

	starpu_tag_declare_deps(maskiter | 7UL, 1, maskiter | 6UL);

	/* update delta_* and compute beta */
	struct starpu_task *task8 = create_task(maskiter | 8UL);
	task8->cl->where = CUBLAS|CORE;
#ifdef USE_CUDA
	task8->cl->cublas_func = cublas_codelet_func_8;
#endif
	task8->cl->core_func = core_codelet_func_8;
	task8->cl_arg = problem;
	task8->cl->nbuffers = 1;
	task8->buffers[0].state = problem->ds_vecr;
	task8->buffers[0].mode = R;

	starpu_tag_declare_deps(maskiter | 8UL, 1, maskiter | 7UL);

	/* d = r + beta d */
	struct starpu_task *task9 = create_task(maskiter | 9UL);
	task9->cl->where = CUBLAS|CORE;
#ifdef USE_CUDA
	task9->cl->cublas_func = cublas_codelet_func_9;
#endif
	task9->cl->core_func = core_codelet_func_9;
	task9->cl_arg = problem;
	task9->cl->nbuffers = 2;
	task9->buffers[0].state = problem->ds_vecd;
	task9->buffers[0].mode = RW;
	task9->buffers[1].state = problem->ds_vecr;
	task9->buffers[1].mode = R;

	starpu_tag_declare_deps(maskiter | 9UL, 1, maskiter | 8UL);

	task9->callback_func = iteration_cg;
	task9->callback_arg = problem;

	/* launch the computation now */
	starpu_submit_task(task4);
	starpu_submit_task(task5);
	starpu_submit_task(task6);
	starpu_submit_task(task7);
	starpu_submit_task(task8);
	starpu_submit_task(task9);
}
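
/*
 * Callback shared by task 3 and task 9: as long as delta_new (the squared
 * norm of the residual) is above epsilon and fewer than MAXITER iterations
 * have run, launch another iteration; otherwise release the semaphore so
 * conjugate_gradient() can return.
 */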
void iteration_cg(void *problem)
{
	struct cg_problem *pb = problem;

	printf("i : %d (MAX %d)\n\tdelta_new %f (%f)\n", pb->i, MAXITER, pb->delta_new, sqrt(pb->delta_new / pb->size));

	if ((pb->i < MAXITER) &&
		(pb->delta_new > pb->epsilon) )
	{
		if (pb->i % 1000 == 0)
			printf("i : %d\n\tdelta_new %f (%f)\n", pb->i, pb->delta_new, sqrt(pb->delta_new / pb->size));

		pb->i++;

		/* we did not reach the stop condition yet */
		launch_new_cg_iteration(problem);
	}
	else {
		/* we may stop */
		printf("We are done ... after %d iterations \n", pb->i - 1);
		printf("i : %d\n\tdelta_new %2.5f\n", pb->i, pb->delta_new);

		sem_post(pb->sem);
	}
}

/*
 * initializing the problem
 */
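/*
 * Note: the 0 passed right after each handle in the registration calls below
 * designates the home memory node, here the main memory where these
 * user-allocated buffers live; the runtime then handles any transfers to the
 * devices that execute the tasks.
 */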
void conjugate_gradient(float *nzvalA, float *vecb, float *vecx, uint32_t nnz,
			unsigned nrow, uint32_t *colind, uint32_t *rowptr)
{
	/* first declare all the data structures to the runtime */
	starpu_data_handle ds_matrixA;
	starpu_data_handle ds_vecx, ds_vecb;
	starpu_data_handle ds_vecr, ds_vecd, ds_vecq;

	/* first the user-allocated data */
	starpu_monitor_csr_data(&ds_matrixA, 0, nnz, nrow,
			(uintptr_t)nzvalA, colind, rowptr, 0, sizeof(float));
	starpu_monitor_vector_data(&ds_vecx, 0, (uintptr_t)vecx, nrow, sizeof(float));
	starpu_monitor_vector_data(&ds_vecb, 0, (uintptr_t)vecb, nrow, sizeof(float));

	/* then allocate the algorithm's internal data */
	float *ptr_vecr, *ptr_vecd, *ptr_vecq;
	unsigned i;

	ptr_vecr = malloc(nrow*sizeof(float));
	ptr_vecd = malloc(nrow*sizeof(float));
	ptr_vecq = malloc(nrow*sizeof(float));

	for (i = 0; i < nrow; i++)
	{
		ptr_vecr[i] = 0.0f;
		ptr_vecd[i] = 0.0f;
		ptr_vecq[i] = 0.0f;
	}

	printf("nrow = %d \n", nrow);

	/* and declare them as well */
	starpu_monitor_vector_data(&ds_vecr, 0, (uintptr_t)ptr_vecr, nrow, sizeof(float));
	starpu_monitor_vector_data(&ds_vecd, 0, (uintptr_t)ptr_vecd, nrow, sizeof(float));
	starpu_monitor_vector_data(&ds_vecq, 0, (uintptr_t)ptr_vecq, nrow, sizeof(float));

	/* we now have the complete problem */
	struct cg_problem problem;

	problem.ds_matrixA = ds_matrixA;
	problem.ds_vecx = ds_vecx;
	problem.ds_vecb = ds_vecb;
	problem.ds_vecr = ds_vecr;
	problem.ds_vecd = ds_vecd;
	problem.ds_vecq = ds_vecq;
	problem.epsilon = EPSILON;
	problem.size = nrow;
	problem.delta_old = 1.0;
	problem.delta_new = 1.0; /* just to make sure we do at least one iteration */

	/* we need a semaphore to synchronize with callbacks */
	sem_t sem;
	sem_init(&sem, 0, 0U);
	problem.sem = &sem;

	init_cg(&problem);

	sem_wait(&sem);
	sem_destroy(&sem);

	print_results(vecx, nrow);
}

void do_conjugate_gradient(float *nzvalA, float *vecb, float *vecx, uint32_t nnz,
			unsigned nrow, uint32_t *colind, uint32_t *rowptr)
{
	/* start the runtime */
	starpu_init(NULL);

#ifdef USE_CUDA
	initialize_cuda();
#endif

	conjugate_gradient(nzvalA, vecb, vecx, nnz, nrow, colind, rowptr);
}

#if 0
int main(__attribute__ ((unused)) int argc,
	__attribute__ ((unused)) char **argv)
{
	parse_args(argc, argv);

	timing_init();

	/* start the runtime */
	starpu_init(NULL);

#ifdef USE_CUDA
	initialize_cuda();
#endif

	init_problem();

	double timing = timing_delay(&start, &end);
	fprintf(stderr, "Computation took (in ms)\n");
	printf("%2.2f\n", timing/1000);

	return 0;
}
#endif