/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2012-2013  Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include "cg.h"
#include <math.h>
#include <string.h> /* for memset() in bzero_vector_cpu() */
#include <limits.h>

#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif

static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	enum starpu_archtype type = starpu_worker_get_type(workerid);
	if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER)
		return 1;

#ifdef STARPU_USE_CUDA
	/* CUDA device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	if (props->major >= 2 || props->minor >= 3)
		/* At least compute capability 1.3, supports doubles */
		return 1;
#endif

	/* Old card, does not support doubles */
	return 0;
}

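/* Every codelet below points its .can_execute field at the hook above, so
 * the scheduler never dispatches one of these double-precision kernels to a
 * CUDA device older than compute capability 1.3 (a 1.x device needs
 * minor >= 3; any device with major >= 2 qualifies). */
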
/*
 * Reduction accumulation methods
 */

#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	*v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

struct starpu_codelet accumulate_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_variable_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &accumulate_variable_model
};

#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

struct starpu_codelet accumulate_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_vector_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &accumulate_vector_model
};

/*
 * Reduction initialization methods
 */
#ifdef STARPU_USE_CUDA
extern void zero_vector(TYPE *x, unsigned nelems);

static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);

	zero_vector(v, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*v = (TYPE)0.0;
}

static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

struct starpu_codelet bzero_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_variable_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &bzero_variable_model
};

#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	zero_vector(v, n);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

struct starpu_codelet bzero_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_vector_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &bzero_vector_model
};

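/* The accumulator/initializer codelets above are meant to be registered as
 * the reduction methods of a handle accessed in STARPU_REDUX mode. A minimal
 * sketch, assuming a hypothetical scalar handle dot_handle
 * (starpu_data_set_reduction_methods() is the standard StarPU call):
 */
#if 0
static void setup_reduction(starpu_data_handle_t dot_handle)
{
	/* dot_handle is (re-)initialized with bzero_variable_cl, and
	 * per-worker contributions are merged with accumulate_variable_cl. */
	starpu_data_set_reduction_methods(dot_handle,
		&accumulate_variable_cl, &bzero_variable_cl);
}
#endif
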
/*
 * DOT kernel : s = dot(v1, v2)
 */
#ifdef STARPU_USE_CUDA
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	/* Contrary to cublasSdot, this function puts its result directly in
	 * device memory, so that we don't have to transfer that value back and
	 * forth. */
	dot_host(v1, v2, n, dot);
}
#endif

static void dot_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	TYPE local_dot = 0.0;
	/* Note that we explicitly cast the result of the DOT kernel because
	 * some BLAS libraries return a double for sdot, for instance. */
	local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

	*dot = *dot + local_dot;
}

static struct starpu_perfmodel dot_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

static struct starpu_codelet dot_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {dot_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {dot_kernel_cuda, NULL},
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};

int dot_kernel(starpu_data_handle_t v1,
	starpu_data_handle_t v2,
	starpu_data_handle_t s,
	unsigned nblocks,
	int use_reduction)
{
	int ret;

	/* Blank the accumulation variable */
	if (use_reduction)
		starpu_data_invalidate_submit(s);
	else
	{
		ret = starpu_insert_task(&bzero_variable_cl, STARPU_W, s, 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}

	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&dot_kernel_cl,
			use_reduction?STARPU_REDUX:STARPU_RW, s,
			STARPU_R, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}
	return 0;
}

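/* A minimal usage sketch, with hypothetical handles r_handle (a vector
 * partitioned into nblocks pieces) and rtr_handle (a scalar): computing
 * r . r for the CG residual norm.
 */
#if 0
	ret = dot_kernel(r_handle, r_handle, rtr_handle, nblocks, use_reduction);
	/* rtr_handle holds the scalar once the submitted tasks have executed,
	 * e.g. after starpu_task_wait_for_all(). */
#endif
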
/*
 * SCAL kernel : v1 = p1 v1
 */
#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasscal(n, alpha, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE alpha;
	starpu_codelet_unpack_args(cl_arg, &alpha);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = alpha v1 */
	SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

static struct starpu_codelet scal_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};

/*
 * GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */
#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

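/* Note: starpu_codelet_unpack_args() must read the scalars back in the
 * order in which gemv_kernel() below packs them: first the constant 1.0
 * (unpacked as beta), then p2 (unpacked as alpha). */
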
static void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();
	if (worker_size > 1)
	{
		/* Parallel CPU task: split the nx rows of the block among the
		 * members of the combined worker. */
		int rank = starpu_combined_worker_get_rank();
		int block_size = (nx + worker_size - 1)/worker_size;
		int new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}

static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

static struct starpu_codelet gemv_kernel_cl =
{
	.can_execute = can_execute,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda, NULL},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};

int gemv_kernel(starpu_data_handle_t v1,
	starpu_data_handle_t matrix,
	starpu_data_handle_t v2,
	TYPE p1, TYPE p2,
	unsigned nblocks,
	int use_reduction)
{
	unsigned b1, b2;
	int ret;

	for (b2 = 0; b2 < nblocks; b2++)
	{
		ret = starpu_insert_task(&scal_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
			STARPU_VALUE, &p1, sizeof(p1),
			0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}

	for (b2 = 0; b2 < nblocks; b2++)
	{
		for (b1 = 0; b1 < nblocks; b1++)
		{
			TYPE one = 1.0;
			ret = starpu_insert_task(&gemv_kernel_cl,
				use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
				STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
				STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
				STARPU_VALUE, &one, sizeof(one),
				STARPU_VALUE, &p2, sizeof(p2),
				0);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
		}
	}
	return 0;
}

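/* A minimal usage sketch, with hypothetical handles q_handle, A_handle and
 * p_handle: the CG step q = A p corresponds to p1 = 0.0 and p2 = 1.0.
 */
#if 0
	ret = gemv_kernel(q_handle, A_handle, p_handle,
		0.0, 1.0, nblocks, use_reduction);
#endif
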
/*
 * AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */
#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasscal(n, p1, v1, 1);
	cublasaxpy(n, p2, v2, 1, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	SCAL(nx, p1, v1, 1);
	AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

static struct starpu_codelet scal_axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_axpy_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};

int scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
	starpu_data_handle_t v2, TYPE p2,
	unsigned nblocks)
{
	int ret;
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&scal_axpy_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			STARPU_VALUE, &p1, sizeof(p1),
			STARPU_VALUE, &p2, sizeof(p2),
			0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}
	return 0;
}

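/* A minimal usage sketch, with hypothetical handles p_handle and r_handle:
 * the CG update p = r + beta p corresponds to p1 = beta and p2 = 1.0.
 */
#if 0
	ret = scal_axpy_kernel(p_handle, beta, r_handle, 1.0, nblocks);
#endif
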
/*
 * AXPY kernel : v1 = v1 + p1 * v2
 */
#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2.
	 */
	cublasaxpy(n, p1, v2, 1, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2.
	 */
	AXPY(nx, p1, v2, 1, v1, 1);
}

static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

static struct starpu_codelet axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {axpy_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};

int axpy_kernel(starpu_data_handle_t v1,
	starpu_data_handle_t v2, TYPE p1,
	unsigned nblocks)
{
	int ret;
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&axpy_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			STARPU_VALUE, &p1, sizeof(p1),
			0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}
	return 0;
}

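/* A minimal usage sketch, with hypothetical handles x_handle and p_handle:
 * the CG update x = x + alpha p.
 */
#if 0
	ret = axpy_kernel(x_handle, p_handle, alpha, nblocks);
#endif
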
int copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
		/* Block-wise copy; the third argument requests an asynchronous
		 * transfer, with no termination callback. */
		starpu_data_cpy(starpu_data_get_sub_data(dst, 1, b), starpu_data_get_sub_data(src, 1, b), 1, NULL, NULL);
	return 0;
}