/* cg_kernels.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * Standard BLAS kernels used by CG
 */
#include "cg.h"
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef STARPU_USE_CUDA
#include <cuda.h>
#include <starpu_cublas_v2.h>
/* Constant +1/-1 coefficients for cuBLAS calls taking host pointers. */
static const TYPE gp1 = 1.0;
static const TYPE gm1 = -1.0;
#endif
/* fprintf wrapper that stays silent when STARPU_SSILENT is set. */
#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)
/* Number of blocks per dimension used to partition the matrix and vectors. */
static int nblocks = 8;
/* Problem size n and iteration cap i_max, scaled by the check mode. */
#ifdef STARPU_QUICK_CHECK
static int i_max = 5;
static int long long n = 2048;
#elif !defined(STARPU_LONG_CHECK)
static int long long n = 4096;
static int i_max = 100;
#else
static int long long n = 4096;
static int i_max = 1000;
#endif
/* Convergence threshold: iterate while delta_new > eps^2 * delta_0.
 * NOTE(review): 10e-14 is 1e-13, not 1e-14 -- confirm the intended value. */
static double eps = (10e-14);
/* Use StarPU REDUX-mode reductions (1) or plain RW accumulation (0). */
int use_reduction = 1;
int display_result = 0;
/* Registered data handles for the system and the CG work vectors. */
HANDLE_TYPE_MATRIX A_handle;
HANDLE_TYPE_VECTOR b_handle;
HANDLE_TYPE_VECTOR x_handle;
HANDLE_TYPE_VECTOR r_handle;	/* residual r = b - A x */
HANDLE_TYPE_VECTOR d_handle;	/* search direction */
HANDLE_TYPE_VECTOR q_handle;	/* q = A d */
starpu_data_handle_t dtq_handle;	/* scalar dot(d, q) */
starpu_data_handle_t rtr_handle;	/* scalar dot(r, r) */
/* Host-side storage backing dtq_handle and rtr_handle. */
TYPE dtq, rtr;
#if 0
/* Debug helper (disabled): dump an nx-element vector to stderr. */
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}
/* Debug helper (disabled): dump an nx x ny matrix with leading dimension ld. */
static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif
/* Codelet dispatch predicate: returns 1 if worker 'workerid' may run our
 * kernels.  Non-CUDA workers always qualify; CUDA devices must support
 * double precision (compute capability >= 1.3). */
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	(void)task;
	(void)nimpl;
	enum starpu_worker_archtype type = starpu_worker_get_type(workerid);
	if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER || type == STARPU_MIC_WORKER)
		return 1;
#ifdef STARPU_USE_CUDA
#ifdef STARPU_SIMGRID
	/* We don't know, let's assume it can */
	return 1;
#else
	/* Cuda device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	if (props->major >= 2 || props->minor >= 3)
		/* At least compute capability 1.3, supports doubles */
		return 1;
#endif
#endif
	/* Old card, does not support doubles */
	return 0;
}
/*
 * Reduction accumulation methods
 */
#ifdef STARPU_USE_CUDA
/* CUDA reduction operator: scalar descr[0] += scalar descr[1], done with a
 * 1-element axpy (alpha = +1) on the worker's cuBLAS handle. */
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);
	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(), 1, &gp1, v_src, 1, v_dst, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  112. void accumulate_variable_cpu(void *descr[], void *cl_arg)
  113. {
  114. (void)cl_arg;
  115. TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  116. TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);
  117. *v_dst = *v_dst + *v_src;
  118. }
/* History-based performance model + codelet for the scalar reduction
 * operator used by the REDUX-mode dot-product accumulators. */
static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};
struct starpu_codelet accumulate_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_variable_cpu},
	.cpu_funcs_name = {"accumulate_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	/* destination is commutative read-write, source is read-only */
	.modes = {STARPU_RW|STARPU_COMMUTE, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_variable_model,
	.name = "accumulate_variable"
};
#ifdef STARPU_USE_CUDA
/* CUDA reduction operator: vector descr[0] += vector descr[1], as an axpy
 * with alpha = +1 on the worker's cuBLAS handle. */
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(), n, &gp1, v_src, 1, v_dst, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  150. void accumulate_vector_cpu(void *descr[], void *cl_arg)
  151. {
  152. (void)cl_arg;
  153. TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  154. TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  155. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  156. AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
  157. }
/* History-based performance model + codelet for the vector reduction
 * operator used by REDUX-mode vector handles. */
static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};
struct starpu_codelet accumulate_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_vector_cpu},
	.cpu_funcs_name = {"accumulate_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	/* destination is commutative read-write, source is read-only */
	.modes = {STARPU_RW|STARPU_COMMUTE, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_vector_model,
	.name = "accumulate_vector"
};
/*
 * Reduction initialization methods
 */
#ifdef STARPU_USE_CUDA
/* NOTE(review): zero_vector is declared but never referenced in this file;
 * presumably a leftover from a custom CUDA kernel -- confirm before removing. */
extern void zero_vector(TYPE *x, unsigned nelems);
/* CUDA initializer for REDUX scalars: clear the variable on the local stream. */
static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	size_t size = STARPU_VARIABLE_GET_ELEMSIZE(descr[0]);
	cudaMemsetAsync(v, 0, size, starpu_cuda_get_local_stream());
}
#endif
  190. void bzero_variable_cpu(void *descr[], void *cl_arg)
  191. {
  192. (void)cl_arg;
  193. TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  194. *v = (TYPE)0.0;
  195. }
/* History-based performance model + codelet zeroing a scalar (write-only). */
static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};
struct starpu_codelet bzero_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_variable_cpu},
	.cpu_funcs_name = {"bzero_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_variable_model,
	.name = "bzero_variable"
};
#ifdef STARPU_USE_CUDA
/* CUDA initializer for REDUX vectors: clear the buffer on the local stream. */
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);
	cudaMemsetAsync(v, 0, n * elemsize, starpu_cuda_get_local_stream());
}
#endif
  225. void bzero_vector_cpu(void *descr[], void *cl_arg)
  226. {
  227. (void)cl_arg;
  228. TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  229. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  230. memset(v, 0, n*sizeof(TYPE));
  231. }
/* History-based performance model + codelet zeroing a vector (write-only). */
static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};
struct starpu_codelet bzero_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_vector_cpu},
	.cpu_funcs_name = {"bzero_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_vector_model,
	.name = "bzero_vector"
};
/*
 * DOT kernel : s = dot(v1, v2)
 */
#ifdef STARPU_USE_CUDA
/* CUDA dot kernel: write dot(v1, v2) into the device-resident scalar.
 * The pointer mode is temporarily switched to DEVICE so cublasdot stores
 * the result in GPU memory rather than transferring it to the host.
 * NOTE(review): this overwrites the scalar while the CPU variant
 * accumulates into it -- confirm each per-worker REDUX copy receives at
 * most one block, otherwise CUDA workers would drop contributions. */
static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
	cublasHandle_t handle = starpu_cublas_get_local_handle();
	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
	cublasStatus_t status = cublasdot(handle,
		n, v1, 1, v2, 1, dot);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
}
#endif
  271. void dot_kernel_cpu(void *descr[], void *cl_arg)
  272. {
  273. (void)cl_arg;
  274. TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  275. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  276. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
  277. unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
  278. TYPE local_dot;
  279. /* Note that we explicitely cast the result of the DOT kernel because
  280. * some BLAS library will return a double for sdot for instance. */
  281. local_dot = (TYPE)DOT(n, v1, 1, v2, 1);
  282. *dot = *dot + local_dot;
  283. }
/* History-based performance model + codelet for the partial dot product.
 * No static .modes: the scalar's access mode (REDUX or RW) is chosen per
 * task in dot_kernel(). */
static struct starpu_perfmodel dot_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};
static struct starpu_codelet dot_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {dot_kernel_cpu},
	.cpu_funcs_name = {"dot_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {dot_kernel_cuda},
#endif
	/* NOTE(review): unlike the other codelets, cuda_flags sits outside the
	 * #ifdef here; harmless but inconsistent -- confirm intent. */
	.cuda_flags = {STARPU_CUDA_ASYNC},
	.nbuffers = 3,
	.model = &dot_kernel_model,
	.name = "dot_kernel"
};
  302. int dot_kernel(HANDLE_TYPE_VECTOR v1,
  303. HANDLE_TYPE_VECTOR v2,
  304. starpu_data_handle_t s,
  305. unsigned nblocks,
  306. int use_reduction)
  307. {
  308. int ret;
  309. /* Blank the accumulation variable */
  310. if (use_reduction)
  311. starpu_data_invalidate_submit(s);
  312. else
  313. {
  314. ret = TASK_INSERT(&bzero_variable_cl, STARPU_W, s, 0);
  315. if (ret == -ENODEV) return ret;
  316. STARPU_CHECK_RETURN_VALUE(ret, "TASK_INSERT");
  317. }
  318. unsigned b;
  319. for (b = 0; b < nblocks; b++)
  320. {
  321. ret = TASK_INSERT(&dot_kernel_cl,
  322. use_reduction?STARPU_REDUX:STARPU_RW, s,
  323. STARPU_R, GET_VECTOR_BLOCK(v1, b),
  324. STARPU_R, GET_VECTOR_BLOCK(v2, b),
  325. STARPU_TAG_ONLY, (starpu_tag_t) b,
  326. 0);
  327. STARPU_CHECK_RETURN_VALUE(ret, "TASK_INSERT");
  328. }
  329. return 0;
  330. }
/*
 * SCAL kernel : v1 = p1 v1
 */
#ifdef STARPU_USE_CUDA
/* CUDA kernel: scale the vector in place with cublasscal. */
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasStatus_t status = cublasscal(starpu_cublas_get_local_handle(), n, &alpha, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  348. void scal_kernel_cpu(void *descr[], void *cl_arg)
  349. {
  350. TYPE alpha;
  351. starpu_codelet_unpack_args(cl_arg, &alpha);
  352. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  353. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  354. /* v1 = alpha v1 */
  355. SCAL(n, alpha, v1, 1);
  356. }
/* History-based performance model + codelet for the in-place vector scale.
 * Access mode for the single buffer is supplied at task insertion time. */
static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};
static struct starpu_codelet scal_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_kernel_cpu},
	.cpu_funcs_name = {"scal_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model,
	.name = "scal_kernel"
};
/*
 * GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */
#ifdef STARPU_USE_CUDA
/* CUDA GEMV kernel: v1 = alpha * M * v2 + beta * v1 (column-major).
 * gemv_kernel() packs the arguments in the order (beta, alpha). */
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);
	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);
	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);
	/* Compute v1 = alpha M v2 + beta v1 */
	cublasStatus_t status = cublasgemv(starpu_cublas_get_local_handle(),
			CUBLAS_OP_N, nx, ny, &alpha, M, ld, v2, 1, &beta, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
/* CPU GEMV kernel: v1 = alpha * M * v2 + beta * v1 (column-major, leading
 * dimension ld).  May run as an SPMD parallel task, in which case each
 * worker of the combined worker handles a contiguous block of rows. */
void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);
	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);
	TYPE alpha, beta;
	/* Arguments are packed in the order (beta, alpha) by gemv_kernel(). */
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);
	int worker_size = starpu_combined_worker_get_size();
	if (worker_size > 1)
	{
		/* Parallel CPU task: slice the rows of M (and v1) among the
		 * members of the combined worker. */
		unsigned rank = starpu_combined_worker_get_rank();
		/* NOTE(review): the block size is derived from ny but used to
		 * partition the nx rows -- this assumes square blocks
		 * (nx == ny); confirm against the matrix partitioning. */
		unsigned block_size = (ny + worker_size - 1)/worker_size;
		unsigned new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;
		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}
	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
/* History-based performance model + codelet for the block GEMV.  The CPU
 * implementation is SPMD-parallel (see gemv_kernel_cpu), hence the
 * STARPU_SPMD type and unbounded max_parallelism. */
static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};
static struct starpu_codelet gemv_kernel_cl =
{
	.can_execute = can_execute,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu},
	.cpu_funcs_name = {"gemv_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model,
	.name = "gemv_kernel"
};
  440. int gemv_kernel(HANDLE_TYPE_VECTOR v1,
  441. HANDLE_TYPE_MATRIX matrix,
  442. HANDLE_TYPE_VECTOR v2,
  443. TYPE p1, TYPE p2,
  444. unsigned nblocks,
  445. int use_reduction)
  446. {
  447. unsigned b1, b2;
  448. int ret;
  449. for (b2 = 0; b2 < nblocks; b2++)
  450. {
  451. ret = TASK_INSERT(&scal_kernel_cl,
  452. STARPU_RW, GET_VECTOR_BLOCK(v1, b2),
  453. STARPU_VALUE, &p1, sizeof(p1),
  454. STARPU_TAG_ONLY, (starpu_tag_t) b2,
  455. 0);
  456. if (ret == -ENODEV) return ret;
  457. STARPU_CHECK_RETURN_VALUE(ret, "TASK_INSERT");
  458. }
  459. for (b2 = 0; b2 < nblocks; b2++)
  460. {
  461. for (b1 = 0; b1 < nblocks; b1++)
  462. {
  463. TYPE one = 1.0;
  464. ret = TASK_INSERT(&gemv_kernel_cl,
  465. use_reduction?STARPU_REDUX:STARPU_RW, GET_VECTOR_BLOCK(v1, b2),
  466. STARPU_R, GET_MATRIX_BLOCK(matrix, b2, b1),
  467. STARPU_R, GET_VECTOR_BLOCK(v2, b1),
  468. STARPU_VALUE, &one, sizeof(one),
  469. STARPU_VALUE, &p2, sizeof(p2),
  470. STARPU_TAG_ONLY, ((starpu_tag_t)b2) * nblocks + b1,
  471. 0);
  472. STARPU_CHECK_RETURN_VALUE(ret, "TASK_INSERT");
  473. }
  474. }
  475. return 0;
  476. }
/*
 * AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */
#ifdef STARPU_USE_CUDA
/* CUDA kernel: v1 = p1 * v1 + p2 * v2, done as a scal followed by an axpy. */
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	/* Compute v1 = p1 * v1 + p2 * v2.
	 * v1 = p1 v1
	 * v1 = v1 + p2 v2
	 */
	cublasStatus_t status;
	status = cublasscal(starpu_cublas_get_local_handle(), n, &p1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
	status = cublasaxpy(starpu_cublas_get_local_handle(), n, &p2, v2, 1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  501. void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
  502. {
  503. TYPE p1, p2;
  504. starpu_codelet_unpack_args(cl_arg, &p1, &p2);
  505. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  506. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  507. unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
  508. /* Compute v1 = p1 * v1 + p2 * v2.
  509. * v1 = p1 v1
  510. * v1 = v1 + p2 v2
  511. */
  512. SCAL(nx, p1, v1, 1);
  513. AXPY(nx, p2, v2, 1, v1, 1);
  514. }
/* History-based performance model + codelet for the fused scale-and-axpy
 * update (d <- beta d + r in the CG loop). */
static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};
static struct starpu_codelet scal_axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_axpy_kernel_cpu},
	.cpu_funcs_name = {"scal_axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model,
	.name = "scal_axpy_kernel"
};
  533. int scal_axpy_kernel(HANDLE_TYPE_VECTOR v1, TYPE p1,
  534. HANDLE_TYPE_VECTOR v2, TYPE p2,
  535. unsigned nblocks)
  536. {
  537. unsigned b;
  538. for (b = 0; b < nblocks; b++)
  539. {
  540. int ret;
  541. ret = TASK_INSERT(&scal_axpy_kernel_cl,
  542. STARPU_RW, GET_VECTOR_BLOCK(v1, b),
  543. STARPU_R, GET_VECTOR_BLOCK(v2, b),
  544. STARPU_VALUE, &p1, sizeof(p1),
  545. STARPU_VALUE, &p2, sizeof(p2),
  546. STARPU_TAG_ONLY, (starpu_tag_t) b,
  547. 0);
  548. if (ret == -ENODEV) return ret;
  549. STARPU_CHECK_RETURN_VALUE(ret, "TASK_INSERT");
  550. }
  551. return 0;
  552. }
/*
 * AXPY kernel : v1 = v1 + p1 * v2
 */
#ifdef STARPU_USE_CUDA
/* CUDA kernel: v1 = v1 + p1 * v2 (single cublasaxpy). */
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	/* Compute v1 = v1 + p1 * v2. */
	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(),
			n, &p1, v2, 1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  572. void axpy_kernel_cpu(void *descr[], void *cl_arg)
  573. {
  574. TYPE p1;
  575. starpu_codelet_unpack_args(cl_arg, &p1);
  576. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  577. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  578. unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
  579. /* Compute v1 = p1 * v1 + p2 * v2.
  580. */
  581. AXPY(nx, p1, v2, 1, v1, 1);
  582. }
/* History-based performance model + codelet for the plain axpy update
 * (x <- x + alpha d and r <- r - alpha q in the CG loop). */
static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};
static struct starpu_codelet axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {axpy_kernel_cpu},
	.cpu_funcs_name = {"axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model,
	.name = "axpy_kernel"
};
  601. int axpy_kernel(HANDLE_TYPE_VECTOR v1,
  602. HANDLE_TYPE_VECTOR v2, TYPE p1,
  603. unsigned nblocks)
  604. {
  605. unsigned b;
  606. for (b = 0; b < nblocks; b++)
  607. {
  608. int ret;
  609. ret = TASK_INSERT(&axpy_kernel_cl,
  610. STARPU_RW, GET_VECTOR_BLOCK(v1, b),
  611. STARPU_R, GET_VECTOR_BLOCK(v2, b),
  612. STARPU_VALUE, &p1, sizeof(p1),
  613. STARPU_TAG_ONLY, (starpu_tag_t) b,
  614. 0);
  615. if (ret == -ENODEV) return ret;
  616. STARPU_CHECK_RETURN_VALUE(ret, "TASK_INSERT");
  617. }
  618. return 0;
  619. }
  620. /*
  621. * Main loop
  622. */
  623. int cg(void)
  624. {
  625. TYPE delta_new, delta_0, error, delta_old, alpha, beta;
  626. double start, end, timing;
  627. int i = 0, ret;
  628. /* r <- b */
  629. ret = copy_handle(r_handle, b_handle, nblocks);
  630. if (ret == -ENODEV) return ret;
  631. /* r <- r - A x */
  632. ret = gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0, nblocks, use_reduction);
  633. if (ret == -ENODEV) return ret;
  634. /* d <- r */
  635. ret = copy_handle(d_handle, r_handle, nblocks);
  636. if (ret == -ENODEV) return ret;
  637. /* delta_new = dot(r,r) */
  638. ret = dot_kernel(r_handle, r_handle, rtr_handle, nblocks, use_reduction);
  639. if (ret == -ENODEV) return ret;
  640. GET_DATA_HANDLE(rtr_handle);
  641. starpu_data_acquire(rtr_handle, STARPU_R);
  642. delta_new = rtr;
  643. delta_0 = delta_new;
  644. starpu_data_release(rtr_handle);
  645. FPRINTF_SERVER(stderr, "Delta limit: %e\n", (double) (eps*eps*delta_0));
  646. FPRINTF_SERVER(stderr, "**************** INITIAL ****************\n");
  647. FPRINTF_SERVER(stderr, "Delta 0: %e\n", delta_new);
  648. BARRIER();
  649. start = starpu_timing_now();
  650. while ((i < i_max) && ((double)delta_new > (double)(eps*eps*delta_0)))
  651. {
  652. starpu_iteration_push(i);
  653. /* q <- A d */
  654. gemv_kernel(q_handle, A_handle, d_handle, 0.0, 1.0, nblocks, use_reduction);
  655. /* dtq <- dot(d,q) */
  656. dot_kernel(d_handle, q_handle, dtq_handle, nblocks, use_reduction);
  657. /* alpha = delta_new / dtq */
  658. GET_DATA_HANDLE(dtq_handle);
  659. starpu_data_acquire(dtq_handle, STARPU_R);
  660. alpha = delta_new / dtq;
  661. starpu_data_release(dtq_handle);
  662. /* x <- x + alpha d */
  663. axpy_kernel(x_handle, d_handle, alpha, nblocks);
  664. if ((i % 50) == 0)
  665. {
  666. /* r <- b */
  667. copy_handle(r_handle, b_handle, nblocks);
  668. /* r <- r - A x */
  669. gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0, nblocks, use_reduction);
  670. }
  671. else
  672. {
  673. /* r <- r - alpha q */
  674. axpy_kernel(r_handle, q_handle, -alpha, nblocks);
  675. }
  676. /* delta_new = dot(r,r) */
  677. dot_kernel(r_handle, r_handle, rtr_handle, nblocks, use_reduction);
  678. GET_DATA_HANDLE(rtr_handle);
  679. starpu_data_acquire(rtr_handle, STARPU_R);
  680. delta_old = delta_new;
  681. delta_new = rtr;
  682. beta = delta_new / delta_old;
  683. starpu_data_release(rtr_handle);
  684. /* d <- beta d + r */
  685. scal_axpy_kernel(d_handle, beta, r_handle, 1.0, nblocks);
  686. if ((i % 10) == 0)
  687. {
  688. /* We here take the error as ||r||_2 / (n||b||_2) */
  689. error = sqrt(delta_new/delta_0)/(1.0*n);
  690. FPRINTF_SERVER(stderr, "*****************************************\n");
  691. FPRINTF_SERVER(stderr, "iter %d DELTA %e - %e\n", i, delta_new, error);
  692. }
  693. starpu_iteration_pop();
  694. i++;
  695. }
  696. BARRIER();
  697. end = starpu_timing_now();
  698. timing = end - start;
  699. error = sqrt(delta_new/delta_0)/(1.0*n);
  700. FPRINTF_SERVER(stderr, "*****************************************\n");
  701. FPRINTF_SERVER(stderr, "iter %d DELTA %e - %e\n", i, delta_new, error);
  702. FPRINTF_SERVER(stderr, "Total timing : %2.2f seconds\n", timing/10e6);
  703. FPRINTF_SERVER(stderr, "Seconds per iteration : %2.2e seconds\n", timing/10e6/i);
  704. FPRINTF_SERVER(stderr, "Number of iterations per second : %2.2e it/s\n", i/(timing/10e6));
  705. return 0;
  706. }
  707. void parse_common_args(int argc, char **argv)
  708. {
  709. int i;
  710. for (i = 1; i < argc; i++)
  711. {
  712. if (strcmp(argv[i], "-n") == 0)
  713. {
  714. n = (int long long)atoi(argv[++i]);
  715. continue;
  716. }
  717. if (strcmp(argv[i], "-display-result") == 0)
  718. {
  719. display_result = 1;
  720. continue;
  721. }
  722. if (strcmp(argv[i], "-maxiter") == 0)
  723. {
  724. i_max = atoi(argv[++i]);
  725. if (i_max <= 0)
  726. {
  727. FPRINTF_SERVER(stderr, "the number of iterations must be positive, not %d\n", i_max);
  728. exit(EXIT_FAILURE);
  729. }
  730. continue;
  731. }
  732. if (strcmp(argv[i], "-nblocks") == 0)
  733. {
  734. nblocks = atoi(argv[++i]);
  735. continue;
  736. }
  737. if (strcmp(argv[i], "-no-reduction") == 0)
  738. {
  739. use_reduction = 0;
  740. continue;
  741. }
  742. }
  743. }