cg_kernels.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010, 2012-2015 Université de Bordeaux
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. /*
  17. * Standard BLAS kernels used by CG
  18. */
#include "cg.h"

#include <errno.h>
#include <limits.h>
#include <math.h>
#include <string.h>
#if 0
/* Debug helper (disabled): dump the nx elements of vector v to stderr. */
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

/* Debug helper (disabled): dump an nx x ny matrix with leading dimension ld
 * to stderr, one row per line (elements accessed as mat[j+i*ld]). */
static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif
/* Tell StarPU whether worker `workerid` may execute the kernels of this file.
 * Non-CUDA workers (CPU, OpenCL, MIC, SCC) are always accepted; a CUDA device
 * is accepted only if it supports double precision (compute capability >= 1.3).
 * `task` and `nimpl` are required by the can_execute prototype but unused. */
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	enum starpu_worker_archtype type = starpu_worker_get_type(workerid);
	if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER || type == STARPU_MIC_WORKER || type == STARPU_SCC_WORKER)
		return 1;

#ifdef STARPU_USE_CUDA
#ifdef STARPU_SIMGRID
	/* We don't know, let's assume it can */
	return 1;
#else
	/* Cuda device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	/* NOTE(review): this test relies on props->major being at least 1, so
	 * "major >= 2 || minor >= 3" is equivalent to "capability >= 1.3". */
	if (props->major >= 2 || props->minor >= 3)
		/* At least compute capability 1.3, supports doubles */
		return 1;
#endif
#endif

	/* Old card, does not support doubles */
	return 0;
}
  66. /*
  67. * Reduction accumulation methods
  68. */
#ifdef STARPU_USE_CUDA
/* Reduction accumulator (CUDA): *v_dst += *v_src, expressed as a
 * one-element AXPY so the work stays on the device. */
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
}
#endif
  77. void accumulate_variable_cpu(void *descr[], void *cl_arg)
  78. {
  79. TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  80. TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);
  81. *v_dst = *v_dst + *v_src;
  82. }
/* History-based performance model for the scalar accumulator. */
static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

/* Codelet used as the REDUX accumulator for scalar handles:
 * buffer 0 (RW) += buffer 1 (R). */
struct starpu_codelet accumulate_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_variable_cpu},
	.cpu_funcs_name = {"accumulate_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_variable_model
};
#ifdef STARPU_USE_CUDA
/* Reduction accumulator (CUDA): v_dst += v_src element-wise over the
 * n entries of vector buffer 0. */
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}
#endif
  110. void accumulate_vector_cpu(void *descr[], void *cl_arg)
  111. {
  112. TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  113. TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  114. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  115. AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
  116. }
/* History-based performance model for the vector accumulator. */
static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

/* Codelet used as the REDUX accumulator for vector handles:
 * buffer 0 (RW) += buffer 1 (R), element-wise. */
struct starpu_codelet accumulate_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_vector_cpu},
	.cpu_funcs_name = {"accumulate_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_vector_model
};
  135. /*
  136. * Reduction initialization methods
  137. */
#ifdef STARPU_USE_CUDA
/* Device-side zeroing kernel, defined in a separate CUDA source file:
 * sets the first nelems entries of x to zero. */
extern void zero_vector(TYPE *x, unsigned nelems);

/* Reduction initializer (CUDA): zero the scalar accumulator on the device. */
static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	zero_vector(v, 1);
}
#endif
  146. void bzero_variable_cpu(void *descr[], void *cl_arg)
  147. {
  148. TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  149. *v = (TYPE)0.0;
  150. }
/* History-based performance model for the scalar initializer. */
static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

/* Codelet used as the REDUX initializer for scalar handles:
 * write zero to buffer 0 (W). */
struct starpu_codelet bzero_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_variable_cpu},
	.cpu_funcs_name = {"bzero_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_variable_model
};
#ifdef STARPU_USE_CUDA
/* Reduction initializer (CUDA): zero the n entries of the vector in
 * buffer 0 on the device. */
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	zero_vector(v, n);
}
#endif
  177. void bzero_vector_cpu(void *descr[], void *cl_arg)
  178. {
  179. TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  180. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  181. memset(v, 0, n*sizeof(TYPE));
  182. }
/* History-based performance model for the vector initializer. */
static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

/* Codelet used as the REDUX initializer for vector handles:
 * zero-fill buffer 0 (W). */
struct starpu_codelet bzero_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_vector_cpu},
	.cpu_funcs_name = {"bzero_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_vector_model
};
  201. /*
  202. * DOT kernel : s = dot(v1, v2)
  203. */
#ifdef STARPU_USE_CUDA
/* Device-side dot product, defined in a separate CUDA source file:
 * accumulates dot(x, y) over nelems entries into *dot (device memory). */
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

/* DOT kernel (CUDA): *dot += v1 . v2 over the n entries of the blocks. */
static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	/* Contrary to cublasSdot, this function puts its result directly in
	 * device memory, so that we don't have to transfer that value back and
	 * forth. */
	dot_host(v1, v2, n, dot);
}
#endif
  218. void dot_kernel_cpu(void *descr[], void *cl_arg)
  219. {
  220. TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  221. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  222. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
  223. unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
  224. TYPE local_dot = 0.0;
  225. /* Note that we explicitely cast the result of the DOT kernel because
  226. * some BLAS library will return a double for sdot for instance. */
  227. local_dot = (TYPE)DOT(n, v1, 1, v2, 1);
  228. *dot = *dot + local_dot;
  229. }
/* History-based performance model for the dot-product kernel. */
static struct starpu_perfmodel dot_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

/* DOT codelet: buffer 0 = scalar accumulator, buffers 1-2 = vector blocks.
 * Access modes are supplied at insertion time by dot_kernel() (RW or REDUX
 * for the accumulator), hence no .modes field here. */
static struct starpu_codelet dot_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {dot_kernel_cpu},
	.cpu_funcs_name = {"dot_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {dot_kernel_cuda},
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};
  246. int dot_kernel(starpu_data_handle_t v1,
  247. starpu_data_handle_t v2,
  248. starpu_data_handle_t s,
  249. unsigned nblocks,
  250. int use_reduction)
  251. {
  252. int ret;
  253. /* Blank the accumulation variable */
  254. if (use_reduction)
  255. starpu_data_invalidate_submit(s);
  256. else
  257. {
  258. ret = starpu_task_insert(&bzero_variable_cl, STARPU_W, s, 0);
  259. if (ret == -ENODEV) return ret;
  260. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  261. }
  262. unsigned b;
  263. for (b = 0; b < nblocks; b++)
  264. {
  265. ret = starpu_task_insert(&dot_kernel_cl,
  266. use_reduction?STARPU_REDUX:STARPU_RW, s,
  267. STARPU_R, starpu_data_get_sub_data(v1, 1, b),
  268. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  269. STARPU_TAG_ONLY, (starpu_tag_t) b,
  270. 0);
  271. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  272. }
  273. return 0;
  274. }
  275. /*
  276. * SCAL kernel : v1 = p1 v1
  277. */
#ifdef STARPU_USE_CUDA
/* SCAL kernel (CUDA): v1 = p1 * v1 over the n entries of buffer 0.
 * The scaling factor p1 is packed in cl_arg by the caller. */
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasscal(n, alpha, v1, 1);
}
#endif
  290. void scal_kernel_cpu(void *descr[], void *cl_arg)
  291. {
  292. TYPE alpha;
  293. starpu_codelet_unpack_args(cl_arg, &alpha);
  294. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  295. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  296. /* v1 = alpha v1 */
  297. SCAL(n, alpha, v1, 1);
  298. }
/* History-based performance model for the scaling kernel. */
static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

/* SCAL codelet: in-place scaling of the vector in buffer 0.
 * The access mode (RW) is supplied at insertion time. */
static struct starpu_codelet scal_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_kernel_cpu},
	.cpu_funcs_name = {"scal_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};
  316. /*
  317. * GEMV kernel : v1 = p1 * v1 + p2 * M v2
  318. */
#ifdef STARPU_USE_CUDA
/* GEMV kernel (CUDA): v1 = alpha * M * v2 + beta * v1.
 * Buffers: 0 = v1 (vector), 1 = M (matrix), 2 = v2 (vector).
 * cl_arg packs (beta, alpha) in that order — see gemv_kernel() below. */
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
#endif
/* GEMV kernel (CPU): v1 = alpha * M * v2 + beta * v1.
 * Buffers: 0 = v1 (vector), 1 = M (matrix), 2 = v2 (vector).
 * May execute as an SPMD parallel task, in which case each worker of the
 * combined worker processes a contiguous slice of the rows. */
void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	/* cl_arg packs (beta, alpha) in that order — see gemv_kernel() below. */
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();

	if (worker_size > 1)
	{
		/* Parallel CPU task: split the rows among the workers.
		 * NOTE(review): block_size is derived from ny but applied to
		 * nx/v1/M, which is only consistent when blocks are square
		 * (nx == ny) — confirm against the partitioning set up by the
		 * caller. new_nx would also underflow if block_size*rank
		 * exceeded nx; presumably ranks are dense enough that it
		 * cannot — verify. */
		unsigned rank = starpu_combined_worker_get_rank();

		unsigned block_size = (ny + worker_size - 1)/worker_size;
		unsigned new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
/* History-based performance model for the GEMV kernel. */
static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

/* GEMV codelet: SPMD-parallel on CPU (each worker takes a slice of rows).
 * Buffer modes are supplied at insertion time (RW or REDUX for v1). */
static struct starpu_codelet gemv_kernel_cl =
{
	.can_execute = can_execute,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu},
	.cpu_funcs_name = {"gemv_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};
  377. int gemv_kernel(starpu_data_handle_t v1,
  378. starpu_data_handle_t matrix,
  379. starpu_data_handle_t v2,
  380. TYPE p1, TYPE p2,
  381. unsigned nblocks,
  382. int use_reduction)
  383. {
  384. unsigned b1, b2;
  385. int ret;
  386. for (b2 = 0; b2 < nblocks; b2++)
  387. {
  388. ret = starpu_task_insert(&scal_kernel_cl,
  389. STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
  390. STARPU_VALUE, &p1, sizeof(p1),
  391. STARPU_TAG_ONLY, (starpu_tag_t) b2,
  392. 0);
  393. if (ret == -ENODEV) return ret;
  394. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  395. }
  396. for (b2 = 0; b2 < nblocks; b2++)
  397. {
  398. for (b1 = 0; b1 < nblocks; b1++)
  399. {
  400. TYPE one = 1.0;
  401. ret = starpu_task_insert(&gemv_kernel_cl,
  402. use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
  403. STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
  404. STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
  405. STARPU_VALUE, &one, sizeof(one),
  406. STARPU_VALUE, &p2, sizeof(p2),
  407. STARPU_TAG_ONLY, ((starpu_tag_t)b2) * nblocks + b1,
  408. 0);
  409. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  410. }
  411. }
  412. return 0;
  413. }
  414. /*
  415. * AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
  416. */
#ifdef STARPU_USE_CUDA
/* SCAL+AXPY kernel (CUDA): v1 = p1 * v1 + p2 * v2, as two BLAS calls.
 * cl_arg packs (p1, p2) in that order. */
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasscal(n, p1, v1, 1);
	cublasaxpy(n, p2, v2, 1, v1, 1);
}
#endif
  433. void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
  434. {
  435. TYPE p1, p2;
  436. starpu_codelet_unpack_args(cl_arg, &p1, &p2);
  437. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  438. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  439. unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
  440. /* Compute v1 = p1 * v1 + p2 * v2.
  441. * v1 = p1 v1
  442. * v1 = v1 + p2 v2
  443. */
  444. SCAL(nx, p1, v1, 1);
  445. AXPY(nx, p2, v2, 1, v1, 1);
  446. }
/* History-based performance model for the fused SCAL+AXPY kernel. */
static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

/* SCAL+AXPY codelet: buffer 0 = v1 (updated in place), buffer 1 = v2.
 * Access modes are supplied at insertion time. */
static struct starpu_codelet scal_axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_axpy_kernel_cpu},
	.cpu_funcs_name = {"scal_axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};
  464. int scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
  465. starpu_data_handle_t v2, TYPE p2,
  466. unsigned nblocks)
  467. {
  468. int ret;
  469. unsigned b;
  470. for (b = 0; b < nblocks; b++)
  471. {
  472. ret = starpu_task_insert(&scal_axpy_kernel_cl,
  473. STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
  474. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  475. STARPU_VALUE, &p1, sizeof(p1),
  476. STARPU_VALUE, &p2, sizeof(p2),
  477. STARPU_TAG_ONLY, (starpu_tag_t) b,
  478. 0);
  479. if (ret == -ENODEV) return ret;
  480. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  481. }
  482. return 0;
  483. }
  484. /*
  485. * AXPY kernel : v1 = v1 + p1 * v2
  486. */
#ifdef STARPU_USE_CUDA
/* AXPY kernel (CUDA): v1 = v1 + p1 * v2 over the n entries of the blocks.
 * cl_arg packs the coefficient p1. */
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2.
	 */
	cublasaxpy(n, p1, v2, 1, v1, 1);
}
#endif
  499. #endif
/* AXPY kernel (CPU): v1 = v1 + p1 * v2, where v1 is the block in
 * buffer 0 (RW) and v2 the block in buffer 1 (R). cl_arg packs p1. */
void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2.
	 * (The previous comment claimed "v1 = p1 * v1 + p2 * v2", which did
	 * not match this single AXPY call.)
	 */
	AXPY(nx, p1, v2, 1, v1, 1);
}
/* History-based performance model for the AXPY kernel. */
static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

/* AXPY codelet: buffer 0 = v1 (updated in place), buffer 1 = v2.
 * Access modes are supplied at insertion time. */
static struct starpu_codelet axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {axpy_kernel_cpu},
	.cpu_funcs_name = {"axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};
  528. int axpy_kernel(starpu_data_handle_t v1,
  529. starpu_data_handle_t v2, TYPE p1,
  530. unsigned nblocks)
  531. {
  532. int ret;
  533. unsigned b;
  534. for (b = 0; b < nblocks; b++)
  535. {
  536. ret = starpu_task_insert(&axpy_kernel_cl,
  537. STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
  538. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  539. STARPU_VALUE, &p1, sizeof(p1),
  540. STARPU_TAG_ONLY, (starpu_tag_t) b,
  541. 0);
  542. if (ret == -ENODEV) return ret;
  543. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  544. }
  545. return 0;
  546. }
  547. int copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
  548. {
  549. unsigned b;
  550. for (b = 0; b < nblocks; b++)
  551. starpu_data_cpy(starpu_data_get_sub_data(dst, 1, b), starpu_data_get_sub_data(src, 1, b), 1, NULL, NULL);
  552. return 0;
  553. }