cg_kernels.c
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * Standard BLAS kernels used by CG
 */

#include "cg.h"
#include <math.h>
#include <limits.h>

#ifdef STARPU_USE_CUDA
#include <starpu_cublas_v2.h>
static const TYPE gp1 = 1.0;
static const TYPE gm1 = -1.0;
#endif
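
/* TYPE and the DOT/AXPY/SCAL/GEMV and cublas* macros used throughout this
 * file come from cg.h.  As a rough sketch only (an assumption, not the
 * actual header), the double-precision mapping presumably looks like the
 * following, with the STARPU_D* names standing in for the examples' common
 * BLAS wrappers: */
#if 0
#define TYPE		double
#define DOT		STARPU_DDOT	/* assumed BLAS wrapper names */
#define AXPY		STARPU_DAXPY
#define SCAL		STARPU_DSCAL
#define GEMV		STARPU_DGEMV
#define cublasdot	cublasDdot	/* cuBLAS v2 entry points */
#define cublasaxpy	cublasDaxpy
#define cublasscal	cublasDscal
#define cublasgemv	cublasDgemv
#endif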

#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif

static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	(void)task;
	(void)nimpl;
	enum starpu_worker_archtype type = starpu_worker_get_type(workerid);
	if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER || type == STARPU_MIC_WORKER)
		return 1;

#ifdef STARPU_USE_CUDA
#ifdef STARPU_SIMGRID
	/* We don't know, let's assume it can */
	return 1;
#else
	/* CUDA device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	if (props->major >= 2 || props->minor >= 3)
		/* At least compute capability 1.3, supports doubles */
		return 1;
#endif
#endif

	/* Old card, does not support doubles */
	return 0;
}

/*
 * Reduction accumulation methods
 */

#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(), 1, &gp1, v_src, 1, v_dst, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	*v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

struct starpu_codelet accumulate_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_variable_cpu},
	.cpu_funcs_name = {"accumulate_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_variable_model
};

#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(), n, &gp1, v_src, 1, v_dst, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

struct starpu_codelet accumulate_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_vector_cpu},
	.cpu_funcs_name = {"accumulate_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_vector_model
};

/*
 * Reduction initialization methods
 */

#ifdef STARPU_USE_CUDA
extern void zero_vector(TYPE *x, unsigned nelems);

static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	size_t size = STARPU_VARIABLE_GET_ELEMSIZE(descr[0]);

	cudaMemsetAsync(v, 0, size, starpu_cuda_get_local_stream());
}
#endif

void bzero_variable_cpu(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*v = (TYPE)0.0;
}

static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

struct starpu_codelet bzero_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_variable_cpu},
	.cpu_funcs_name = {"bzero_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_variable_model
};

#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

	cudaMemsetAsync(v, 0, n * elemsize, starpu_cuda_get_local_stream());
}
#endif

void bzero_vector_cpu(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

struct starpu_codelet bzero_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_vector_cpu},
	.cpu_funcs_name = {"bzero_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_vector_model
};
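
/* These accumulate_*/bzero_* codelets become useful once registered as the
 * reduction methods of a handle: under STARPU_REDUX access mode, each worker
 * gets a private copy initialized by the bzero_* codelet, and the copies are
 * folded back into the handle by the accumulate_* codelet.  A minimal sketch
 * under that assumption; dtq_handle and q_handle are hypothetical names for
 * a scalar and a vector accumulator registered by the caller: */
#if 0
starpu_data_set_reduction_methods(dtq_handle, &accumulate_variable_cl, &bzero_variable_cl);
starpu_data_set_reduction_methods(q_handle, &accumulate_vector_cl, &bzero_vector_cl);
#endif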

/*
 * DOT kernel : s = dot(v1, v2)
 */

#ifdef STARPU_USE_CUDA
static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	cublasHandle_t handle = starpu_cublas_get_local_handle();
	/* Have cuBLAS write its result through a device pointer, directly
	 * into the device-resident variable */
	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
	cublasStatus_t status = cublasdot(handle, n, v1, 1, v2, 1, dot);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
}
#endif

void dot_kernel_cpu(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	TYPE local_dot;
	/* Note that we explicitly cast the result of the DOT kernel because
	 * some BLAS libraries return a double for sdot, for instance. */
	local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

	*dot = *dot + local_dot;
}

static struct starpu_perfmodel dot_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

static struct starpu_codelet dot_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {dot_kernel_cpu},
	.cpu_funcs_name = {"dot_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {dot_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};

int dot_kernel(starpu_data_handle_t v1,
	       starpu_data_handle_t v2,
	       starpu_data_handle_t s,
	       unsigned nblocks,
	       int use_reduction)
{
	int ret;

	/* Blank the accumulation variable */
	if (use_reduction)
		starpu_data_invalidate_submit(s);
	else
	{
		ret = starpu_task_insert(&bzero_variable_cl, STARPU_W, s, 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}

	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_task_insert(&dot_kernel_cl,
					 use_reduction ? STARPU_REDUX : STARPU_RW, s,
					 STARPU_R, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_TAG_ONLY, (starpu_tag_t) b,
					 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}
	return 0;
}
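
/* A minimal driver sketch for dot_kernel; v1_handle, v2_handle, nblocks and
 * use_reduction are hypothetical, assumed to be set up by the caller.  The
 * scalar accumulator is a registered variable handle, and acquiring it in
 * read mode waits for the inserted tasks before the result is read back: */
#if 0
TYPE s = 0.0;
starpu_data_handle_t s_handle;
starpu_variable_data_register(&s_handle, STARPU_MAIN_RAM, (uintptr_t)&s, sizeof(TYPE));

int ret = dot_kernel(v1_handle, v2_handle, s_handle, nblocks, use_reduction);
if (ret != -ENODEV)
{
	starpu_data_acquire(s_handle, STARPU_R);	/* waits for the tasks above */
	fprintf(stderr, "dot = %e\n", s);
	starpu_data_release(s_handle);
}
starpu_data_unregister(s_handle);
#endif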

/*
 * SCAL kernel : v1 = p1 v1
 */

#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasStatus_t status = cublasscal(starpu_cublas_get_local_handle(), n, &alpha, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void scal_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE alpha;
	starpu_codelet_unpack_args(cl_arg, &alpha);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = alpha v1 */
	SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

static struct starpu_codelet scal_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_kernel_cpu},
	.cpu_funcs_name = {"scal_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};

/*
 * GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */

#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasStatus_t status = cublasgemv(starpu_cublas_get_local_handle(),
			CUBLAS_OP_N, nx, ny, &alpha, M, ld, v2, 1, &beta, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();
	if (worker_size > 1)
	{
		/* Parallel CPU task: each worker of the combined worker
		 * handles a contiguous slice of the nx rows of M, and the
		 * matching slice of v1.  The slice size is therefore derived
		 * from nx, the dimension being split. */
		unsigned rank = starpu_combined_worker_get_rank();
		unsigned block_size = (nx + worker_size - 1)/worker_size;
		unsigned new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}

static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

static struct starpu_codelet gemv_kernel_cl =
{
	.can_execute = can_execute,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu},
	.cpu_funcs_name = {"gemv_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};

int gemv_kernel(starpu_data_handle_t v1,
		starpu_data_handle_t matrix,
		starpu_data_handle_t v2,
		TYPE p1, TYPE p2,
		unsigned nblocks,
		int use_reduction)
{
	unsigned b1, b2;
	int ret;

	/* v1 = p1 v1 */
	for (b2 = 0; b2 < nblocks; b2++)
	{
		ret = starpu_task_insert(&scal_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
					 STARPU_VALUE, &p1, sizeof(p1),
					 STARPU_TAG_ONLY, (starpu_tag_t) b2,
					 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}

	/* v1 = v1 + p2 M v2, one task per (b2, b1) block of M */
	for (b2 = 0; b2 < nblocks; b2++)
	{
		for (b1 = 0; b1 < nblocks; b1++)
		{
			TYPE one = 1.0;
			ret = starpu_task_insert(&gemv_kernel_cl,
						 use_reduction ? STARPU_REDUX : STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
						 STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
						 STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
						 STARPU_VALUE, &one, sizeof(one),
						 STARPU_VALUE, &p2, sizeof(p2),
						 STARPU_TAG_ONLY, ((starpu_tag_t)b2) * nblocks + b1,
						 0);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
		}
	}
	return 0;
}
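
/* gemv_kernel addresses the matrix through starpu_data_get_sub_data(matrix,
 * 2, b2, b1), i.e. it expects an nblocks x nblocks two-dimensional
 * partitioning.  A sketch of how the caller presumably maps the filters;
 * matrix_handle and nblocks are hypothetical names: */
#if 0
struct starpu_data_filter f_vert =
{
	.filter_func = starpu_matrix_filter_vertical_block,
	.nchildren = nblocks
};
struct starpu_data_filter f_horiz =
{
	.filter_func = starpu_matrix_filter_block,
	.nchildren = nblocks
};
starpu_data_map_filters(matrix_handle, 2, &f_vert, &f_horiz);
#endif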

/*
 * AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */

#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasStatus_t status;
	status = cublasscal(starpu_cublas_get_local_handle(), n, &p1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
	status = cublasaxpy(starpu_cublas_get_local_handle(), n, &p2, v2, 1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	SCAL(nx, p1, v1, 1);
	AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

static struct starpu_codelet scal_axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_axpy_kernel_cpu},
	.cpu_funcs_name = {"scal_axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};

int scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
		     starpu_data_handle_t v2, TYPE p2,
		     unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		int ret;
		ret = starpu_task_insert(&scal_axpy_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_VALUE, &p1, sizeof(p1),
					 STARPU_VALUE, &p2, sizeof(p2),
					 STARPU_TAG_ONLY, (starpu_tag_t) b,
					 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}
	return 0;
}

/*
 * AXPY kernel : v1 = v1 + p1 * v2
 */

#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(),
			n, &p1, v2, 1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	AXPY(nx, p1, v2, 1, v1, 1);
}

static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

static struct starpu_codelet axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {axpy_kernel_cpu},
	.cpu_funcs_name = {"axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};

int axpy_kernel(starpu_data_handle_t v1,
		starpu_data_handle_t v2, TYPE p1,
		unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		int ret;
		ret = starpu_task_insert(&axpy_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_VALUE, &p1, sizeof(p1),
					 STARPU_TAG_ONLY, (starpu_tag_t) b,
					 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}
	return 0;
}

/* Block-wise asynchronous copy: dst[b] = src[b] for each sub-vector */
int copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
		starpu_data_cpy(starpu_data_get_sub_data(dst, 1, b), starpu_data_get_sub_data(src, 1, b), 1, NULL, NULL);
	return 0;
}
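
/* Taken together, these kernels cover the vector updates of one conjugate
 * gradient iteration.  Under the usual notation (x: iterate, r: residual,
 * d: search direction, q = A d), an iteration presumably chains them as
 * follows; the handle and scalar names are hypothetical:
 *
 *	gemv_kernel(q, A, d, 0.0, 1.0, nblocks, use_reduction);	// q = A d
 *	dot_kernel(d, q, dtq, nblocks, use_reduction);		// dtq = d . q
 *	// alpha = rtr / dtq, computed on the host
 *	axpy_kernel(x, d, alpha, nblocks);			// x = x + alpha d
 *	axpy_kernel(r, q, -alpha, nblocks);			// r = r - alpha q
 *	dot_kernel(r, r, rtr_new, nblocks, use_reduction);	// beta = rtr_new / rtr_old
 *	scal_axpy_kernel(d, beta, r, 1.0, nblocks);		// d = beta d + r
 */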