cg_kernels.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2012-2014 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "cg.h"
#include <math.h>
#include <string.h> /* for memset() in bzero_vector_cpu() */
#include <limits.h>

#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
    unsigned i;
    for (i = 0; i < nx; i++)
    {
        fprintf(stderr, "%2.2e ", v[i]);
    }
    fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
    unsigned i, j;
    for (j = 0; j < nx; j++)
    {
        for (i = 0; i < ny; i++)
        {
            fprintf(stderr, "%2.2e ", mat[j+i*ld]);
        }
        fprintf(stderr, "\n");
    }
}
#endif

static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
    enum starpu_worker_archtype type = starpu_worker_get_type(workerid);
    if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER)
        return 1;

#ifdef STARPU_USE_CUDA
#ifdef STARPU_SIMGRID
    /* We don't know, let's assume it can */
    return 1;
#else
    /* CUDA device: double precision requires compute capability 1.3 or
     * above, i.e. any 2.x-or-later device, or a 1.x device with minor >= 3. */
    const struct cudaDeviceProp *props;
    props = starpu_cuda_get_device_properties(workerid);
    if (props->major >= 2 || props->minor >= 3)
        return 1;
#endif
#endif

    /* Old card, does not support doubles */
    return 0;
}

/*
 * Reduction accumulation methods
 */
#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
    TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
    TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

    cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
}
#endif

static void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
    TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
    TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

    *v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel accumulate_variable_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "accumulate_variable"
};

struct starpu_codelet accumulate_variable_cl =
{
    .can_execute = can_execute,
    .cpu_funcs = {accumulate_variable_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {accumulate_variable_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
    .modes = {STARPU_RW, STARPU_R},
    .nbuffers = 2,
    .model = &accumulate_variable_model
};

#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
    TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

    cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}
#endif

static void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
    TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

    AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel accumulate_vector_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "accumulate_vector"
};

struct starpu_codelet accumulate_vector_cl =
{
    .can_execute = can_execute,
    .cpu_funcs = {accumulate_vector_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {accumulate_vector_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
    .modes = {STARPU_RW, STARPU_R},
    .nbuffers = 2,
    .model = &accumulate_vector_model
};

/*
 * Reduction initialization methods
 */
#ifdef STARPU_USE_CUDA
extern void zero_vector(TYPE *x, unsigned nelems);

static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
    TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
    zero_vector(v, 1);
}
#endif

static void bzero_variable_cpu(void *descr[], void *cl_arg)
{
    TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
    *v = (TYPE)0.0;
}

static struct starpu_perfmodel bzero_variable_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "bzero_variable"
};

struct starpu_codelet bzero_variable_cl =
{
    .can_execute = can_execute,
    .cpu_funcs = {bzero_variable_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {bzero_variable_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
    .modes = {STARPU_W},
    .nbuffers = 1,
    .model = &bzero_variable_model
};

#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
    TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
    zero_vector(v, n);
}
#endif

static void bzero_vector_cpu(void *descr[], void *cl_arg)
{
    TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
    memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel bzero_vector_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "bzero_vector"
};

struct starpu_codelet bzero_vector_cl =
{
    .can_execute = can_execute,
    .cpu_funcs = {bzero_vector_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {bzero_vector_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
    .modes = {STARPU_W},
    .nbuffers = 1,
    .model = &bzero_vector_model
};

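/* Illustrative sketch (not from this file): these accumulation and
 * initialization codelets are meant to be attached to a data handle so that
 * STARPU_REDUX accesses know how to initialize per-worker copies and how to
 * combine them afterwards. Assuming a variable handle `dtq_handle` (a
 * hypothetical name) holding a dot-product result, the registration would
 * look like:
 *
 *   starpu_data_set_reduction_methods(dtq_handle,
 *                                     &accumulate_variable_cl,
 *                                     &bzero_variable_cl);
 *
 * and similarly with accumulate_vector_cl/bzero_vector_cl for vector handles
 * accessed in STARPU_REDUX mode. */
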
/*
 * DOT kernel : s = dot(v1, v2)
 */
#ifdef STARPU_USE_CUDA
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
    TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
    TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

    /* Contrary to cublasSdot, this function puts its result directly in
     * device memory, so that we don't have to transfer that value back and
     * forth. */
    dot_host(v1, v2, n, dot);
}
#endif

static void dot_kernel_cpu(void *descr[], void *cl_arg)
{
    TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
    TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

    TYPE local_dot = 0.0;
    /* Note that we explicitly cast the result of the DOT kernel because
     * some BLAS libraries return a double for sdot, for instance. */
    local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

    *dot = *dot + local_dot;
}

static struct starpu_perfmodel dot_kernel_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "dot_kernel"
};

static struct starpu_codelet dot_kernel_cl =
{
    .can_execute = can_execute,
    .cpu_funcs = {dot_kernel_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {dot_kernel_cuda},
#endif
    .nbuffers = 3,
    .model = &dot_kernel_model
};

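/* dot_kernel_cl deliberately leaves .modes unset: the access mode of the
 * accumulator depends on the run, so it is supplied per task in
 * starpu_task_insert() below, as STARPU_REDUX when reductions are enabled
 * and STARPU_RW otherwise. */
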
int dot_kernel(starpu_data_handle_t v1,
               starpu_data_handle_t v2,
               starpu_data_handle_t s,
               unsigned nblocks,
               int use_reduction)
{
    int ret;

    /* Blank the accumulation variable */
    if (use_reduction)
        starpu_data_invalidate_submit(s);
    else
    {
        ret = starpu_task_insert(&bzero_variable_cl, STARPU_W, s, 0);
        if (ret == -ENODEV) return ret;
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
    }

    unsigned b;
    for (b = 0; b < nblocks; b++)
    {
        ret = starpu_task_insert(&dot_kernel_cl,
                                 use_reduction?STARPU_REDUX:STARPU_RW, s,
                                 STARPU_R, starpu_data_get_sub_data(v1, 1, b),
                                 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
                                 STARPU_TAG_ONLY, (starpu_tag_t) b,
                                 0);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
    }
    return 0;
}

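/* A minimal caller-side sketch (assumed names, not from this file): the
 * handles passed to dot_kernel() are a plain variable handle for the result
 * and vector handles partitioned into nblocks contiguous pieces:
 *
 *   TYPE dot_result;
 *   starpu_data_handle_t s_handle, v1_handle;
 *   starpu_variable_data_register(&s_handle, STARPU_MAIN_RAM,
 *                                 (uintptr_t)&dot_result, sizeof(dot_result));
 *   starpu_vector_data_register(&v1_handle, STARPU_MAIN_RAM,
 *                               (uintptr_t)v1_array, n, sizeof(TYPE));
 *   struct starpu_data_filter f =
 *   {
 *       .filter_func = starpu_vector_filter_block,
 *       .nchildren = nblocks
 *   };
 *   starpu_data_partition(v1_handle, &f);
 *
 * starpu_data_get_sub_data(v1_handle, 1, b) then addresses block b, as in
 * the loop above. */
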
/*
 * SCAL kernel : v1 = p1 v1
 */
#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
    TYPE p1;
    starpu_codelet_unpack_args(cl_arg, &p1);

    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

    /* v1 = p1 v1 */
    TYPE alpha = p1;
    cublasscal(n, alpha, v1, 1);
}
#endif

static void scal_kernel_cpu(void *descr[], void *cl_arg)
{
    TYPE alpha;
    starpu_codelet_unpack_args(cl_arg, &alpha);

    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

    /* v1 = alpha v1 */
    SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel scal_kernel_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "scal_kernel"
};

static struct starpu_codelet scal_kernel_cl =
{
    .can_execute = can_execute,
    .cpu_funcs = {scal_kernel_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {scal_kernel_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
    .nbuffers = 1,
    .model = &scal_kernel_model
};

/*
 * GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */
#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
    TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

    unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
    unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
    unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

    TYPE alpha, beta;
    starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

    /* Compute v1 = alpha M v2 + beta v1 */
    cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
#endif

static void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
    TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

    unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
    unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
    unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

    TYPE alpha, beta;
    starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

    int worker_size = starpu_combined_worker_get_size();
    if (worker_size > 1)
    {
        /* Parallel CPU task: each worker of the combined worker handles a
         * contiguous chunk of the rows (the blocks are square in this
         * example, so nx == ny). */
        unsigned rank = starpu_combined_worker_get_rank();
        unsigned block_size = (ny + worker_size - 1)/worker_size;
        unsigned new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

        nx = new_nx;
        v1 = &v1[block_size*rank];
        M = &M[block_size*rank];
    }

    /* Compute v1 = alpha M v2 + beta v1 */
    GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}

static struct starpu_perfmodel gemv_kernel_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "gemv_kernel"
};

static struct starpu_codelet gemv_kernel_cl =
{
    .can_execute = can_execute,
    .type = STARPU_SPMD,
    .max_parallelism = INT_MAX,
    .cpu_funcs = {gemv_kernel_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {gemv_kernel_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
    .nbuffers = 3,
    .model = &gemv_kernel_model
};

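/* gemv_kernel_cl is an SPMD codelet: when StarPU runs with a scheduler that
 * builds combined CPU workers (e.g. pheft or peager), the same CPU function
 * is executed by every worker of the combined worker, and gemv_kernel_cpu
 * uses starpu_combined_worker_get_rank()/_size() to split the rows among
 * them. With other schedulers the task simply runs on a single CPU. */
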
int gemv_kernel(starpu_data_handle_t v1,
                starpu_data_handle_t matrix,
                starpu_data_handle_t v2,
                TYPE p1, TYPE p2,
                unsigned nblocks,
                int use_reduction)
{
    unsigned b1, b2;
    int ret;

    for (b2 = 0; b2 < nblocks; b2++)
    {
        ret = starpu_task_insert(&scal_kernel_cl,
                                 STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
                                 STARPU_VALUE, &p1, sizeof(p1),
                                 STARPU_TAG_ONLY, (starpu_tag_t) b2,
                                 0);
        if (ret == -ENODEV) return ret;
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
    }

    for (b2 = 0; b2 < nblocks; b2++)
    {
        for (b1 = 0; b1 < nblocks; b1++)
        {
            TYPE one = 1.0;
            ret = starpu_task_insert(&gemv_kernel_cl,
                                     use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
                                     STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
                                     STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
                                     STARPU_VALUE, &one, sizeof(one),
                                     STARPU_VALUE, &p2, sizeof(p2),
                                     STARPU_TAG_ONLY, (starpu_tag_t) (b2 * nblocks + b1),
                                     0);
            STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
        }
    }
    return 0;
}

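/* The matrix handle is expected to be partitioned along both dimensions,
 * which is why starpu_data_get_sub_data() takes two indices above. A
 * caller-side sketch (assumed names): apply a vertical and a horizontal
 * block filter in one call:
 *
 *   struct starpu_data_filter f_vert =
 *   {
 *       .filter_func = starpu_matrix_filter_vertical_block,
 *       .nchildren = nblocks
 *   };
 *   struct starpu_data_filter f_horiz =
 *   {
 *       .filter_func = starpu_matrix_filter_block,
 *       .nchildren = nblocks
 *   };
 *   starpu_data_map_filters(matrix_handle, 2, &f_vert, &f_horiz);
 */
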
/*
 * AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */
#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
    TYPE p1, p2;
    starpu_codelet_unpack_args(cl_arg, &p1, &p2);

    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

    /* Compute v1 = p1 * v1 + p2 * v2.
     *    v1 = p1 v1
     *    v1 = v1 + p2 v2
     */
    cublasscal(n, p1, v1, 1);
    cublasaxpy(n, p2, v2, 1, v1, 1);
}
#endif

static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
    TYPE p1, p2;
    starpu_codelet_unpack_args(cl_arg, &p1, &p2);

    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
    unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

    /* Compute v1 = p1 * v1 + p2 * v2.
     *    v1 = p1 v1
     *    v1 = v1 + p2 v2
     */
    SCAL(nx, p1, v1, 1);
    AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel scal_axpy_kernel_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "scal_axpy_kernel"
};

static struct starpu_codelet scal_axpy_kernel_cl =
{
    .can_execute = can_execute,
    .cpu_funcs = {scal_axpy_kernel_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {scal_axpy_kernel_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
    .nbuffers = 2,
    .model = &scal_axpy_kernel_model
};

int scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
                     starpu_data_handle_t v2, TYPE p2,
                     unsigned nblocks)
{
    int ret;
    unsigned b;
    for (b = 0; b < nblocks; b++)
    {
        ret = starpu_task_insert(&scal_axpy_kernel_cl,
                                 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
                                 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
                                 STARPU_VALUE, &p1, sizeof(p1),
                                 STARPU_VALUE, &p2, sizeof(p2),
                                 STARPU_TAG_ONLY, (starpu_tag_t) b,
                                 0);
        if (ret == -ENODEV) return ret;
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
    }
    return 0;
}

/*
 * AXPY kernel : v1 = v1 + p1 * v2
 */
#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
    TYPE p1;
    starpu_codelet_unpack_args(cl_arg, &p1);

    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

    /* Compute v1 = v1 + p1 * v2. */
    cublasaxpy(n, p1, v2, 1, v1, 1);
}
#endif

static void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
    TYPE p1;
    starpu_codelet_unpack_args(cl_arg, &p1);

    TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
    TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
    unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

    /* Compute v1 = v1 + p1 * v2. */
    AXPY(nx, p1, v2, 1, v1, 1);
}

static struct starpu_perfmodel axpy_kernel_model =
{
    .type = STARPU_HISTORY_BASED,
    .symbol = "axpy_kernel"
};

static struct starpu_codelet axpy_kernel_cl =
{
    .can_execute = can_execute,
    .cpu_funcs = {axpy_kernel_cpu},
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {axpy_kernel_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
    .nbuffers = 2,
    .model = &axpy_kernel_model
};

int axpy_kernel(starpu_data_handle_t v1,
                starpu_data_handle_t v2, TYPE p1,
                unsigned nblocks)
{
    int ret;
    unsigned b;
    for (b = 0; b < nblocks; b++)
    {
        ret = starpu_task_insert(&axpy_kernel_cl,
                                 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
                                 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
                                 STARPU_VALUE, &p1, sizeof(p1),
                                 STARPU_TAG_ONLY, (starpu_tag_t) b,
                                 0);
        if (ret == -ENODEV) return ret;
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
    }
    return 0;
}

int copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
{
    unsigned b;
    for (b = 0; b < nblocks; b++)
        starpu_data_cpy(starpu_data_get_sub_data(dst, 1, b), starpu_data_get_sub_data(src, 1, b), 1, NULL, NULL);
    return 0;
}
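
/* How these kernels compose (illustrative sketch, not part of the original
 * file): one conjugate-gradient iteration, given vector handles r, d, q, x,
 * a matrix handle A, and a variable handle dtq_handle for the dot product.
 * The names and the host-side scalar logic (alpha = rtr / dtq, computed
 * after acquiring the dot-product handle) are assumptions.
 *
 *   // q = A d
 *   gemv_kernel(q, A, d, 0.0, 1.0, nblocks, use_reduction);
 *   // dtq = dot(d, q); alpha = rtr / dtq is computed on the host
 *   dot_kernel(d, q, dtq_handle, nblocks, use_reduction);
 *   // x = x + alpha d ; r = r - alpha q
 *   axpy_kernel(x, d, alpha, nblocks);
 *   axpy_kernel(r, q, -alpha, nblocks);
 *   // d = beta d + r
 *   scal_axpy_kernel(d, beta, r, 1.0, nblocks);
 *   starpu_task_wait_for_all();
 */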