/* cg_kernels.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010 Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "cg.h"
#include <math.h>
#include <limits.h>
#include <string.h>	/* memset()/memcpy() */
#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif
/*
 *	Reduction accumulation methods
 */

#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	*v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

struct starpu_codelet accumulate_variable_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {accumulate_variable_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &accumulate_variable_model
};
#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

struct starpu_codelet accumulate_vector_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {accumulate_vector_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &accumulate_vector_model
};
/*
 *	Reduction initialization methods
 */

#ifdef STARPU_USE_CUDA
extern void zero_vector(TYPE *x, unsigned nelems);

static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);

	zero_vector(v, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*v = (TYPE)0.0;
}

static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

struct starpu_codelet bzero_variable_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {bzero_variable_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &bzero_variable_model
};
#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	zero_vector(v, n);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

struct starpu_codelet bzero_vector_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {bzero_vector_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &bzero_vector_model
};
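
/* The four codelets above are meant to be installed as StarPU reduction
 * methods: when a handle is accessed in STARPU_REDUX mode, StarPU uses the
 * "init" codelet to blank each per-worker copy and the "redux" codelet to
 * merge two contributions. A minimal sketch of that registration, disabled
 * and for illustration only; in this example the actual calls live in the
 * driver, and the handle names below are hypothetical. */
#if 0
static void register_reduction_methods(starpu_data_handle_t scalar_handle,
				       starpu_data_handle_t vector_handle)
{
	/* Scalar accumulator, e.g. a dot-product result */
	starpu_data_set_reduction_methods(scalar_handle,
					  &accumulate_variable_cl,
					  &bzero_variable_cl);

	/* Vector accumulator, merged element-wise */
	starpu_data_set_reduction_methods(vector_handle,
					  &accumulate_vector_cl,
					  &bzero_vector_cl);
}
#endif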
/*
 *	DOT kernel : s = dot(v1, v2)
 */

#ifdef STARPU_USE_CUDA
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	/* Contrary to cublasSdot, this function puts its result directly in
	 * device memory, so that we don't have to transfer that value back
	 * and forth. */
	dot_host(v1, v2, n, dot);
}
#endif

static void dot_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	TYPE local_dot = 0.0;

	/* Note that we explicitly cast the result of the DOT kernel because
	 * some BLAS libraries return a double for sdot, for instance. */
	local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

	*dot = *dot + local_dot;
}

static struct starpu_perfmodel dot_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

static struct starpu_codelet dot_kernel_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {dot_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {dot_kernel_cuda, NULL},
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};
void dot_kernel(starpu_data_handle_t v1,
		starpu_data_handle_t v2,
		starpu_data_handle_t s,
		unsigned nblocks,
		int use_reduction)
{
	int ret;

	/* Blank the accumulation variable */
	ret = starpu_insert_task(&bzero_variable_cl, STARPU_W, s, 0);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");

	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&dot_kernel_cl,
					 use_reduction?STARPU_REDUX:STARPU_RW, s,
					 STARPU_R, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}
}
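
/* dot_kernel() assumes that v1 and v2 have already been partitioned into
 * nblocks sub-vectors, which starpu_data_get_sub_data(v, 1, b) then fetches.
 * A minimal sketch of that partitioning, disabled and for illustration only;
 * the real setup is expected to be done by the driver. */
#if 0
static void partition_vector(starpu_data_handle_t v, unsigned nblocks)
{
	struct starpu_data_filter f =
	{
		.filter_func = starpu_vector_filter_block,
		.nchildren = nblocks
	};

	/* Split v into nblocks contiguous sub-vectors */
	starpu_data_partition(v, &f);
}
#endif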
/*
 *	SCAL kernel : v1 = p1 v1
 */

#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasscal(n, alpha, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE alpha;
	starpu_unpack_cl_args(cl_arg, &alpha);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = alpha v1 */
	SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

static struct starpu_codelet scal_kernel_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {scal_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda, NULL},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};
/*
 *	GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */

#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_unpack_cl_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif
static void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_unpack_cl_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();

	if (worker_size > 1)
	{
		/* Parallel CPU task: split the rows (the nx dimension) of
		 * the block between the workers of the combined worker. */
		int rank = starpu_combined_worker_get_rank();

		int block_size = (nx + worker_size - 1)/worker_size;
		int new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

static struct starpu_codelet gemv_kernel_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda, NULL},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};
void gemv_kernel(starpu_data_handle_t v1,
		 starpu_data_handle_t matrix,
		 starpu_data_handle_t v2,
		 TYPE p1, TYPE p2,
		 unsigned nblocks,
		 int use_reduction)
{
	unsigned b1, b2;
	int ret;

	for (b2 = 0; b2 < nblocks; b2++)
	{
		ret = starpu_insert_task(&scal_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
					 STARPU_VALUE, &p1, sizeof(p1),
					 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}

	for (b2 = 0; b2 < nblocks; b2++)
	{
		for (b1 = 0; b1 < nblocks; b1++)
		{
			TYPE one = 1.0;
			ret = starpu_insert_task(&gemv_kernel_cl,
						 use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
						 STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
						 STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
						 STARPU_VALUE, &one, sizeof(one),
						 STARPU_VALUE, &p2, sizeof(p2),
						 0);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
		}
	}
}
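
/* gemv_kernel() walks a matrix partitioned in both dimensions, fetching tile
 * (b2, b1) with starpu_data_get_sub_data(matrix, 2, b2, b1). A minimal sketch
 * of such a 2D partitioning with the standard StarPU matrix filters, disabled
 * and for illustration only; the exact filter order must match the indexing
 * convention used above, and the real setup is expected to be done by the
 * driver. */
#if 0
static void partition_matrix(starpu_data_handle_t matrix, unsigned nblocks)
{
	struct starpu_data_filter f_y =
	{
		.filter_func = starpu_matrix_filter_vertical_block,
		.nchildren = nblocks
	};

	struct starpu_data_filter f_x =
	{
		.filter_func = starpu_matrix_filter_block,
		.nchildren = nblocks
	};

	/* Apply both filters recursively: nblocks x nblocks tiles */
	starpu_data_map_filters(matrix, 2, &f_y, &f_x);
}
#endif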
/*
 *	AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */

#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_unpack_cl_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasscal(n, p1, v1, 1);
	cublasaxpy(n, p2, v2, 1, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_unpack_cl_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	SCAL(nx, p1, v1, 1);
	AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

static struct starpu_codelet scal_axpy_kernel_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {scal_axpy_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};
void scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
		      starpu_data_handle_t v2, TYPE p2,
		      unsigned nblocks)
{
	int ret;
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&scal_axpy_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_VALUE, &p1, sizeof(p1),
					 STARPU_VALUE, &p2, sizeof(p2),
					 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}
}
/*
 *	AXPY kernel : v1 = v1 + p1 * v2
 */

#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	cublasaxpy(n, p1, v2, 1, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	AXPY(nx, p1, v2, 1, v1, 1);
}

static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

static struct starpu_codelet axpy_kernel_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {axpy_kernel_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};
void axpy_kernel(starpu_data_handle_t v1,
		 starpu_data_handle_t v2, TYPE p1,
		 unsigned nblocks)
{
	int ret;
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&axpy_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_VALUE, &p1, sizeof(p1),
					 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}
}
/*
 *	COPY kernel : vector_dst <- vector_src
 */

static void copy_handle_cpu(void *descr[], void *cl_arg)
{
	TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

	memcpy(dst, src, nx*elemsize);
}

#ifdef STARPU_USE_CUDA
static void copy_handle_cuda(void *descr[], void *cl_arg)
{
	TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

	cudaMemcpyAsync(dst, src, nx*elemsize, cudaMemcpyDeviceToDevice, starpu_cuda_get_local_stream());
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static struct starpu_perfmodel copy_handle_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "copy_handle"
};

static struct starpu_codelet copy_handle_cl =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {copy_handle_cpu, NULL},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {copy_handle_cuda, NULL},
#endif
	.nbuffers = 2,
	.model = &copy_handle_model
};
void copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
{
	int ret;
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_insert_task(&copy_handle_cl,
					 STARPU_W, starpu_data_get_sub_data(dst, 1, b),
					 STARPU_R, starpu_data_get_sub_data(src, 1, b),
					 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task");
	}
}
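
/* Taken together, the kernels above are the building blocks of a conjugate
 * gradient iteration. A rough sketch of one iteration, disabled and for
 * illustration only: the handle names and the way the scalars (alpha, beta)
 * are computed on the host from the dot-product handles are hypothetical;
 * in this example the real loop lives in cg.c. */
#if 0
static void cg_iteration_sketch(starpu_data_handle_t x, starpu_data_handle_t r,
				starpu_data_handle_t d, starpu_data_handle_t q,
				starpu_data_handle_t A, starpu_data_handle_t dtq,
				TYPE alpha, TYPE beta,
				unsigned nblocks, int use_reduction)
{
	/* q <- A d */
	gemv_kernel(q, A, d, (TYPE)0.0, (TYPE)1.0, nblocks, use_reduction);

	/* dtq <- dot(d, q); the host then reads dtq and derives
	 * alpha = delta_new / dtq */
	dot_kernel(d, q, dtq, nblocks, use_reduction);

	/* x <- x + alpha d */
	axpy_kernel(x, d, alpha, nblocks);

	/* r <- r - alpha q */
	axpy_kernel(r, q, -alpha, nblocks);

	/* d <- beta d + r, with beta = delta_new / delta_old */
	scal_axpy_kernel(d, beta, r, (TYPE)1.0, nblocks);
}
#endif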