cg_kernels.c
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2017 Université de Bordeaux
  4. * Copyright (C) 2011-2013,2015-2017,2019 CNRS
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. /*
  18. * Standard BLAS kernels used by CG
  19. */
  20. #include "cg.h"
  21. #include <math.h>
  22. #include <limits.h>
  23. #ifdef STARPU_USE_CUDA
  24. #include <starpu_cublas_v2.h>
  25. static const TYPE gp1 = 1.0;
  26. static const TYPE gm1 = -1.0;
  27. #endif
  28. #if 0
  29. static void print_vector_from_descr(unsigned nx, TYPE *v)
  30. {
  31. unsigned i;
  32. for (i = 0; i < nx; i++)
  33. {
  34. fprintf(stderr, "%2.2e ", v[i]);
  35. }
  36. fprintf(stderr, "\n");
  37. }
  38. static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
  39. {
  40. unsigned i, j;
  41. for (j = 0; j < nx; j++)
  42. {
  43. for (i = 0; i < ny; i++)
  44. {
  45. fprintf(stderr, "%2.2e ", mat[j+i*ld]);
  46. }
  47. fprintf(stderr, "\n");
  48. }
  49. }
  50. #endif
/* Worker filter shared by every codelet in this file: tell StarPU which
 * workers may run a task.  CPU, OpenCL and MIC workers always qualify;
 * a CUDA worker qualifies only if its device can handle double precision
 * (compute capability >= 1.3).  Under SimGrid simulation the device
 * properties are unavailable, so we optimistically accept. */
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	(void)task;
	(void)nimpl;
	enum starpu_worker_archtype type = starpu_worker_get_type(workerid);
	if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER || type == STARPU_MIC_WORKER)
		return 1;

#ifdef STARPU_USE_CUDA
#ifdef STARPU_SIMGRID
	/* We don't know, let's assume it can */
	return 1;
#else
	/* Cuda device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	/* major >= 2 always supports doubles; for major == 1 we need
	 * minor >= 3 (compute capability 1.3). */
	if (props->major >= 2 || props->minor >= 3)
		/* At least compute capability 1.3, supports doubles */
		return 1;
#endif
#endif
	/* Old card, does not support doubles */
	return 0;
}
  74. /*
  75. * Reduction accumulation methods
  76. */
#ifdef STARPU_USE_CUDA
/* REDUX accumulator (CUDA): dst += src for one TYPE scalar, done as an
 * AXPY of length 1 with alpha = gp1 (+1.0).  Submitted on the worker's
 * local cuBLAS handle; the codelet is flagged STARPU_CUDA_ASYNC. */
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(), 1, &gp1, v_src, 1, v_dst, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  88. void accumulate_variable_cpu(void *descr[], void *cl_arg)
  89. {
  90. (void)cl_arg;
  91. TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  92. TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);
  93. *v_dst = *v_dst + *v_src;
  94. }
/* History-based performance model for the scalar REDUX accumulator. */
static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

/* Codelet used by StarPU to combine per-worker REDUX contributions of a
 * scalar: buffer 0 (RW) accumulates buffer 1 (R). */
struct starpu_codelet accumulate_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_variable_cpu},
	.cpu_funcs_name = {"accumulate_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_variable_model
};
#ifdef STARPU_USE_CUDA
/* REDUX accumulator (CUDA): v_dst += v_src element-wise, as an AXPY with
 * alpha = gp1 (+1.0) over the n elements of buffer 0. */
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(), n, &gp1, v_src, 1, v_dst, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  125. void accumulate_vector_cpu(void *descr[], void *cl_arg)
  126. {
  127. (void)cl_arg;
  128. TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  129. TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  130. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  131. AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
  132. }
/* History-based performance model for the vector REDUX accumulator. */
static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

/* Codelet used by StarPU to combine per-worker REDUX contributions of a
 * vector: buffer 0 (RW) accumulates buffer 1 (R). */
struct starpu_codelet accumulate_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_vector_cpu},
	.cpu_funcs_name = {"accumulate_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_vector_model
};
  151. /*
  152. * Reduction initialization methods
  153. */
#ifdef STARPU_USE_CUDA
/* Declared here but not referenced in this translation unit; presumably
 * defined in an accompanying .cu file -- TODO confirm. */
extern void zero_vector(TYPE *x, unsigned nelems);

/* REDUX initializer (CUDA): zero the scalar accumulator on the worker's
 * local stream.  NOTE(review): the cudaMemsetAsync return code is not
 * checked. */
static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	size_t size = STARPU_VARIABLE_GET_ELEMSIZE(descr[0]);

	cudaMemsetAsync(v, 0, size, starpu_cuda_get_local_stream());
}
#endif
  164. void bzero_variable_cpu(void *descr[], void *cl_arg)
  165. {
  166. (void)cl_arg;
  167. TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  168. *v = (TYPE)0.0;
  169. }
/* History-based performance model for the scalar zero-initializer. */
static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

/* Codelet zeroing a single TYPE variable (buffer 0, write-only); also used
 * as the REDUX init codelet for scalar reductions. */
struct starpu_codelet bzero_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_variable_cpu},
	.cpu_funcs_name = {"bzero_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_variable_model
};
  188. #ifdef STARPU_USE_CUDA
  189. static void bzero_vector_cuda(void *descr[], void *cl_arg)
  190. {
  191. (void)cl_arg;
  192. TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  193. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  194. size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);
  195. cudaMemsetAsync(v, 0, n * elemsize, starpu_cuda_get_local_stream());
  196. }
  197. #endif
  198. void bzero_vector_cpu(void *descr[], void *cl_arg)
  199. {
  200. (void)cl_arg;
  201. TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  202. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  203. memset(v, 0, n*sizeof(TYPE));
  204. }
/* History-based performance model for the vector zero-initializer. */
static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

/* Codelet zeroing a TYPE vector (buffer 0, write-only); also used as the
 * REDUX init codelet for vector reductions. */
struct starpu_codelet bzero_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_vector_cpu},
	.cpu_funcs_name = {"bzero_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_vector_model
};
  223. /*
  224. * DOT kernel : s = dot(v1, v2)
  225. */
#ifdef STARPU_USE_CUDA
/* Partial dot product (CUDA): write dot(v1, v2) into the device-side
 * variable `dot`.  Pointer mode is temporarily switched to DEVICE so
 * cublasdot stores its result straight into device memory (no host
 * round-trip), then restored to HOST for subsequent cuBLAS calls.
 * NOTE(review): unlike dot_kernel_cpu, which accumulates (*dot += ...),
 * this OVERWRITES *dot; if several tasks feed the same (redux copy of the)
 * variable, the CPU and CUDA paths would disagree -- verify intended
 * semantics.  The cublasSetPointerMode return values are also unchecked. */
static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	cublasHandle_t handle = starpu_cublas_get_local_handle();
	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
	cublasStatus_t status = cublasdot(handle,
		n, v1, 1, v2, 1, dot);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
}
#endif
  243. void dot_kernel_cpu(void *descr[], void *cl_arg)
  244. {
  245. (void)cl_arg;
  246. TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  247. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  248. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
  249. unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
  250. TYPE local_dot;
  251. /* Note that we explicitely cast the result of the DOT kernel because
  252. * some BLAS library will return a double for sdot for instance. */
  253. local_dot = (TYPE)DOT(n, v1, 1, v2, 1);
  254. *dot = *dot + local_dot;
  255. }
  256. static struct starpu_perfmodel dot_kernel_model =
  257. {
  258. .type = STARPU_HISTORY_BASED,
  259. .symbol = "dot_kernel"
  260. };
  261. static struct starpu_codelet dot_kernel_cl =
  262. {
  263. .can_execute = can_execute,
  264. .cpu_funcs = {dot_kernel_cpu},
  265. .cpu_funcs_name = {"dot_kernel_cpu"},
  266. #ifdef STARPU_USE_CUDA
  267. .cuda_funcs = {dot_kernel_cuda},
  268. #endif
  269. .cuda_flags = {STARPU_CUDA_ASYNC},
  270. .nbuffers = 3,
  271. .model = &dot_kernel_model
  272. };
  273. int dot_kernel(starpu_data_handle_t v1,
  274. starpu_data_handle_t v2,
  275. starpu_data_handle_t s,
  276. unsigned nblocks,
  277. int use_reduction)
  278. {
  279. int ret;
  280. /* Blank the accumulation variable */
  281. if (use_reduction)
  282. starpu_data_invalidate_submit(s);
  283. else
  284. {
  285. ret = starpu_task_insert(&bzero_variable_cl, STARPU_W, s, 0);
  286. if (ret == -ENODEV) return ret;
  287. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  288. }
  289. unsigned b;
  290. for (b = 0; b < nblocks; b++)
  291. {
  292. ret = starpu_task_insert(&dot_kernel_cl,
  293. use_reduction?STARPU_REDUX:STARPU_RW, s,
  294. STARPU_R, starpu_data_get_sub_data(v1, 1, b),
  295. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  296. STARPU_TAG_ONLY, (starpu_tag_t) b,
  297. 0);
  298. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  299. }
  300. return 0;
  301. }
  302. /*
  303. * SCAL kernel : v1 = p1 v1
  304. */
#ifdef STARPU_USE_CUDA
/* In-place scaling (CUDA): v1 = p1 * v1, where p1 is unpacked from cl_arg. */
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasStatus_t status = cublasscal(starpu_cublas_get_local_handle(), n, &alpha, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  319. void scal_kernel_cpu(void *descr[], void *cl_arg)
  320. {
  321. TYPE alpha;
  322. starpu_codelet_unpack_args(cl_arg, &alpha);
  323. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  324. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  325. /* v1 = alpha v1 */
  326. SCAL(n, alpha, v1, 1);
  327. }
/* History-based performance model for the scaling kernel. */
static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

/* Codelet scaling one vector block in place; the access mode (STARPU_RW)
 * is supplied at insertion time in gemv_kernel(). */
static struct starpu_codelet scal_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_kernel_cpu},
	.cpu_funcs_name = {"scal_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};
  345. /*
  346. * GEMV kernel : v1 = p1 * v1 + p2 * M v2
  347. */
#ifdef STARPU_USE_CUDA
/* GEMV (CUDA): v1 = alpha * M * v2 + beta * v1.  cl_arg packs (beta, alpha)
 * in that order -- gemv_kernel() passes beta = 1.0 and alpha = p2. */
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasStatus_t status = cublasgemv(starpu_cublas_get_local_handle(),
			CUBLAS_OP_N, nx, ny, &alpha, M, ld, v2, 1, &beta, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
/* GEMV (CPU): v1 = alpha * M * v2 + beta * v1.  cl_arg packs (beta, alpha)
 * in that order -- gemv_kernel() passes beta = 1.0 and alpha = p2.  May run
 * as an SPMD parallel task, in which case each member of the combined
 * worker processes a contiguous slice of the rows of M and v1. */
void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();

	if (worker_size > 1)
	{
		/* Parallel CPU task: each rank takes a block_size-wide slice
		 * of the rows.
		 * NOTE(review): block_size is derived from ny but used to
		 * slice the nx rows -- presumably blocks are square
		 * (nx == ny); confirm.  Also, the STARPU_MIN-based expression
		 * would underflow (unsigned) if block_size*rank > nx. */
		unsigned rank = starpu_combined_worker_get_rank();

		unsigned block_size = (ny + worker_size - 1)/worker_size;
		unsigned new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
/* History-based performance model for the GEMV kernel. */
static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

/* Codelet for one GEMV block.  STARPU_SPMD with unbounded parallelism lets
 * StarPU run the CPU implementation on a combined worker; the access mode
 * of buffer 0 (REDUX or RW) is chosen at insertion time in gemv_kernel(). */
static struct starpu_codelet gemv_kernel_cl =
{
	.can_execute = can_execute,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu},
	.cpu_funcs_name = {"gemv_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};
  409. int gemv_kernel(starpu_data_handle_t v1,
  410. starpu_data_handle_t matrix,
  411. starpu_data_handle_t v2,
  412. TYPE p1, TYPE p2,
  413. unsigned nblocks,
  414. int use_reduction)
  415. {
  416. unsigned b1, b2;
  417. int ret;
  418. for (b2 = 0; b2 < nblocks; b2++)
  419. {
  420. ret = starpu_task_insert(&scal_kernel_cl,
  421. STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
  422. STARPU_VALUE, &p1, sizeof(p1),
  423. STARPU_TAG_ONLY, (starpu_tag_t) b2,
  424. 0);
  425. if (ret == -ENODEV) return ret;
  426. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  427. }
  428. for (b2 = 0; b2 < nblocks; b2++)
  429. {
  430. for (b1 = 0; b1 < nblocks; b1++)
  431. {
  432. TYPE one = 1.0;
  433. ret = starpu_task_insert(&gemv_kernel_cl,
  434. use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
  435. STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
  436. STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
  437. STARPU_VALUE, &one, sizeof(one),
  438. STARPU_VALUE, &p2, sizeof(p2),
  439. STARPU_TAG_ONLY, ((starpu_tag_t)b2) * nblocks + b1,
  440. 0);
  441. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  442. }
  443. }
  444. return 0;
  445. }
  446. /*
  447. * AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
  448. */
#ifdef STARPU_USE_CUDA
/* Fused SCAL+AXPY (CUDA): v1 = p1 * v1 + p2 * v2, done in two cuBLAS
 * calls on the worker's local handle. */
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasStatus_t status;
	status = cublasscal(starpu_cublas_get_local_handle(), n, &p1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
	status = cublasaxpy(starpu_cublas_get_local_handle(), n, &p2, v2, 1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  470. void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
  471. {
  472. TYPE p1, p2;
  473. starpu_codelet_unpack_args(cl_arg, &p1, &p2);
  474. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  475. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  476. unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
  477. /* Compute v1 = p1 * v1 + p2 * v2.
  478. * v1 = p1 v1
  479. * v1 = v1 + p2 v2
  480. */
  481. SCAL(nx, p1, v1, 1);
  482. AXPY(nx, p2, v2, 1, v1, 1);
  483. }
/* History-based performance model for the fused SCAL+AXPY kernel. */
static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

/* Codelet for the fused SCAL+AXPY; access modes (RW, R) are supplied at
 * insertion time in scal_axpy_kernel(). */
static struct starpu_codelet scal_axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_axpy_kernel_cpu},
	.cpu_funcs_name = {"scal_axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};
  501. int scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
  502. starpu_data_handle_t v2, TYPE p2,
  503. unsigned nblocks)
  504. {
  505. unsigned b;
  506. for (b = 0; b < nblocks; b++)
  507. {
  508. int ret;
  509. ret = starpu_task_insert(&scal_axpy_kernel_cl,
  510. STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
  511. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  512. STARPU_VALUE, &p1, sizeof(p1),
  513. STARPU_VALUE, &p2, sizeof(p2),
  514. STARPU_TAG_ONLY, (starpu_tag_t) b,
  515. 0);
  516. if (ret == -ENODEV) return ret;
  517. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  518. }
  519. return 0;
  520. }
  521. /*
  522. * AXPY kernel : v1 = v1 + p1 * v2
  523. */
#ifdef STARPU_USE_CUDA
/* AXPY (CUDA): v1 = v1 + p1 * v2, with p1 unpacked from cl_arg. */
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2.
	 */
	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(),
			n, &p1, v2, 1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif
  540. void axpy_kernel_cpu(void *descr[], void *cl_arg)
  541. {
  542. TYPE p1;
  543. starpu_codelet_unpack_args(cl_arg, &p1);
  544. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  545. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  546. unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
  547. /* Compute v1 = p1 * v1 + p2 * v2.
  548. */
  549. AXPY(nx, p1, v2, 1, v1, 1);
  550. }
/* History-based performance model for the AXPY kernel. */
static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

/* Codelet for one AXPY block; access modes (RW, R) are supplied at
 * insertion time in axpy_kernel(). */
static struct starpu_codelet axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {axpy_kernel_cpu},
	.cpu_funcs_name = {"axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};
  568. int axpy_kernel(starpu_data_handle_t v1,
  569. starpu_data_handle_t v2, TYPE p1,
  570. unsigned nblocks)
  571. {
  572. unsigned b;
  573. for (b = 0; b < nblocks; b++)
  574. {
  575. int ret;
  576. ret = starpu_task_insert(&axpy_kernel_cl,
  577. STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
  578. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  579. STARPU_VALUE, &p1, sizeof(p1),
  580. STARPU_TAG_ONLY, (starpu_tag_t) b,
  581. 0);
  582. if (ret == -ENODEV) return ret;
  583. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  584. }
  585. return 0;
  586. }
  587. int copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
  588. {
  589. unsigned b;
  590. for (b = 0; b < nblocks; b++)
  591. starpu_data_cpy(starpu_data_get_sub_data(dst, 1, b), starpu_data_get_sub_data(src, 1, b), 1, NULL, NULL);
  592. return 0;
  593. }