/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2012-2013  Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include "cg.h"
#include <math.h>
#include <limits.h>

#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif
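
/* Tell StarPU on which workers a task may run: any CPU or OpenCL worker
 * qualifies, but CUDA devices are filtered on their compute capability,
 * since these kernels operate on TYPE, which may be double precision. */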
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	enum starpu_archtype type = starpu_worker_get_type(workerid);
	if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER)
		return 1;

#ifdef STARPU_USE_CUDA
	/* CUDA device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	if (props->major >= 2 || props->minor >= 3)
		/* At least compute capability 1.3, supports doubles */
		return 1;
#endif

	/* Old card, does not support doubles */
	return 0;
}

/*
 *	Reduction accumulation methods
 */

#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	*v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

struct starpu_codelet accumulate_variable_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {accumulate_variable_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &accumulate_variable_model
};

#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

struct starpu_codelet accumulate_vector_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {accumulate_vector_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &accumulate_vector_model
};

/*
 *	Reduction initialization methods
 */

#ifdef STARPU_USE_CUDA
extern void zero_vector(TYPE *x, unsigned nelems);

static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);

	zero_vector(v, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*v = (TYPE)0.0;
}

static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

struct starpu_codelet bzero_variable_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {bzero_variable_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &bzero_variable_model
};

#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	zero_vector(v, n);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

struct starpu_codelet bzero_vector_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {bzero_vector_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &bzero_vector_model
};
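
/* These accumulator/initializer pairs are meant to be registered as the
 * reduction methods of the handles later accessed in STARPU_REDUX mode.
 * A minimal sketch (handle name hypothetical):
 *
 *	starpu_data_set_reduction_methods(dtq_handle,
 *					  &accumulate_variable_cl,
 *					  &bzero_variable_cl);
 */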

/*
 *	DOT kernel : s = dot(v1, v2)
 */

#ifdef STARPU_USE_CUDA
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	/* Contrary to cublasSdot, this function puts its result directly in
	 * device memory, so that we don't have to transfer that value back and
	 * forth. */
	dot_host(v1, v2, n, dot);
}
#endif

static void dot_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	TYPE local_dot = 0.0;
	/* Note that we explicitly cast the result of the DOT kernel because
	 * some BLAS libraries return a double for sdot, for instance. */
	local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

	*dot = *dot + local_dot;
}

static struct starpu_perfmodel dot_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

static struct starpu_codelet dot_kernel_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {dot_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {dot_kernel_cuda, NULL},
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};

int dot_kernel(starpu_data_handle_t v1,
	       starpu_data_handle_t v2,
	       starpu_data_handle_t s,
	       unsigned nblocks,
	       int use_reduction)
{
	int ret;

	/* Blank the accumulation variable */
	if (use_reduction)
		starpu_data_invalidate_submit(s);
	else
	{
		ret = starpu_insert_task(&bzero_variable_cl, STARPU_W, s, 0);
		if (ret == -ENODEV)
			return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}

	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&dot_kernel_cl,
					 use_reduction?STARPU_REDUX:STARPU_RW, s,
					 STARPU_R, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}

	return 0;
}
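
/* Usage sketch (handle names hypothetical): in the CG loop, the scalar
 * delta = r'r would be computed with
 *
 *	ret = dot_kernel(r_handle, r_handle, rtr_handle, nblocks, use_reduction);
 *	if (ret == -ENODEV) return ret;
 */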

/*
 *	SCAL kernel : v1 = p1 v1
 */

#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasscal(n, alpha, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE alpha;
	starpu_codelet_unpack_args(cl_arg, &alpha);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = alpha v1 */
	SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

static struct starpu_codelet scal_kernel_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {scal_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};

/*
 *	GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */

#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	/* The values were packed in this order by gemv_kernel() below: beta
	 * first, then alpha. */
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	/* Same packing order as in gemv_kernel(): beta first, then alpha. */
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();
	if (worker_size > 1)
	{
		/* Parallel CPU task: each member of the combined worker
		 * handles a contiguous slice of the rows.  Note that the
		 * block size is derived from ny, which assumes square blocks
		 * (nx == ny), as is the case in this CG example. */
		int rank = starpu_combined_worker_get_rank();
		int block_size = (ny + worker_size - 1)/worker_size;
		int new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}

static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

static struct starpu_codelet gemv_kernel_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda, NULL},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};

int gemv_kernel(starpu_data_handle_t v1,
		starpu_data_handle_t matrix,
		starpu_data_handle_t v2,
		TYPE p1, TYPE p2,
		unsigned nblocks,
		int use_reduction)
{
	unsigned b1, b2;
	int ret;

	/* First scale each block of v1: v1 = p1 v1 */
	for (b2 = 0; b2 < nblocks; b2++)
	{
		ret = starpu_insert_task(&scal_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
					 STARPU_VALUE, &p1, sizeof(p1),
					 0);
		if (ret == -ENODEV)
			return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}

	/* Then accumulate the contribution of every matrix block:
	 * v1 = v1 + p2 M v2 (beta = 1, alpha = p2) */
	for (b2 = 0; b2 < nblocks; b2++)
	{
		for (b1 = 0; b1 < nblocks; b1++)
		{
			TYPE one = 1.0;
			ret = starpu_insert_task(&gemv_kernel_cl,
						 use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
						 STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
						 STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
						 STARPU_VALUE, &one, sizeof(one),
						 STARPU_VALUE, &p2, sizeof(p2),
						 0);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
		}
	}

	return 0;
}
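
/* Usage sketch (handle names hypothetical): the matrix-vector product
 * q = A d of the CG iteration maps to
 *
 *	ret = gemv_kernel(q_handle, A_handle, d_handle,
 *			  (TYPE)0.0, (TYPE)1.0, nblocks, use_reduction);
 */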

/*
 *	AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */

#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasscal(n, p1, v1, 1);
	cublasaxpy(n, p2, v2, 1, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	SCAL(nx, p1, v1, 1);
	AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

static struct starpu_codelet scal_axpy_kernel_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {scal_axpy_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};

int scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
		     starpu_data_handle_t v2, TYPE p2,
		     unsigned nblocks)
{
	int ret;
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&scal_axpy_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_VALUE, &p1, sizeof(p1),
					 STARPU_VALUE, &p2, sizeof(p2),
					 0);
		if (ret == -ENODEV)
			return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}

	return 0;
}
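
/* Usage sketch (handle names hypothetical): the CG update d = r + beta d
 * maps to
 *
 *	ret = scal_axpy_kernel(d_handle, beta, r_handle, (TYPE)1.0, nblocks);
 */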

/*
 *	AXPY kernel : v1 = v1 + p1 * v2
 */

#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	cublasaxpy(n, p1, v2, 1, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	AXPY(nx, p1, v2, 1, v1, 1);
}

static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

static struct starpu_codelet axpy_kernel_cl =
{
	.can_execute = can_execute,
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {axpy_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};

int axpy_kernel(starpu_data_handle_t v1,
		starpu_data_handle_t v2, TYPE p1,
		unsigned nblocks)
{
	int ret;
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&axpy_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_VALUE, &p1, sizeof(p1),
					 0);
		if (ret == -ENODEV)
			return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}

	return 0;
}
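
/* Usage sketch (handle names hypothetical): the CG update x = x + alpha d
 * maps to
 *
 *	ret = axpy_kernel(x_handle, d_handle, alpha, nblocks);
 */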

/*
 *	COPY kernel : dst = src
 */
int copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
		starpu_data_cpy(starpu_data_get_sub_data(dst, 1, b), starpu_data_get_sub_data(src, 1, b), 1, NULL, NULL);
	return 0;
}