cg_kernels.c
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010 Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "cg.h"
#include <math.h>
#include <limits.h>
#include <string.h> /* memset()/memcpy() used by the CPU kernels below */
#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
        unsigned i;
        for (i = 0; i < nx; i++)
        {
                fprintf(stderr, "%2.2e ", v[i]);
        }
        fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
        unsigned i, j;
        for (j = 0; j < nx; j++)
        {
                for (i = 0; i < ny; i++)
                {
                        fprintf(stderr, "%2.2e ", mat[j+i*ld]);
                }
                fprintf(stderr, "\n");
        }
}
#endif
/*
 *      Reduction accumulation methods
 */
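/* accumulate_variable_cl and accumulate_vector_cl below implement the REDUX
 * accumulation operator (dst += src).  In this example they are intended to be
 * registered on the reduced data handles with starpu_data_set_reduction_methods()
 * (done in cg.c), together with the bzero_* initialization codelets further down. */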
#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
        TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
        TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

        cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
        TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
        TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

        *v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel_t accumulate_variable_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "accumulate_variable"
};

starpu_codelet accumulate_variable_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = accumulate_variable_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = accumulate_variable_cuda,
#endif
        .nbuffers = 2,
        .model = &accumulate_variable_model
};

#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
        TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
        TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel_t accumulate_vector_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "accumulate_vector"
};

starpu_codelet accumulate_vector_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = accumulate_vector_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = accumulate_vector_cuda,
#endif
        .nbuffers = 2,
        .model = &accumulate_vector_model
};
/*
 *      Reduction initialization methods
 */

#ifdef STARPU_USE_CUDA
extern void zero_vector(TYPE *x, unsigned nelems);

static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
        TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);

        zero_vector(v, 1);
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_variable_cpu(void *descr[], void *cl_arg)
{
        TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
        *v = (TYPE)0.0;
}

static struct starpu_perfmodel_t bzero_variable_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "bzero_variable"
};

starpu_codelet bzero_variable_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = bzero_variable_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = bzero_variable_cuda,
#endif
        .nbuffers = 1,
        .model = &bzero_variable_model
};

#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
        TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        zero_vector(v, n);
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_vector_cpu(void *descr[], void *cl_arg)
{
        TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel_t bzero_vector_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "bzero_vector"
};

starpu_codelet bzero_vector_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = bzero_vector_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = bzero_vector_cuda,
#endif
        .nbuffers = 1,
        .model = &bzero_vector_model
};
/*
 *      DOT kernel : s = dot(v1, v2)
 */

#ifdef STARPU_USE_CUDA
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
        TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
        TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

        unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

        /* Contrary to cublasSdot, this function puts its result directly in
         * device memory, so that we don't have to transfer that value back and
         * forth. */
        dot_host(v1, v2, n, dot);
}
#endif
static void dot_kernel_cpu(void *descr[], void *cl_arg)
{
        TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
        TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

        unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

        TYPE local_dot = 0.0;
        /* Note that we explicitly cast the result of the DOT kernel because
         * some BLAS libraries return a double for sdot, for instance. */
        local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

        *dot = *dot + local_dot;
}
static struct starpu_perfmodel_t dot_kernel_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "dot_kernel"
};

static starpu_codelet dot_kernel_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = dot_kernel_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = dot_kernel_cuda,
#endif
        .nbuffers = 3,
        .model = &dot_kernel_model
};
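/* Blocked dot product: s is first zeroed, then one dot_kernel_cl task is
 * submitted per block of v1/v2.  With use_reduction each task accesses s in
 * STARPU_REDUX mode and StarPU combines the per-worker contributions with the
 * accumulate_variable_cl codelet; otherwise the tasks are serialized on s
 * through STARPU_RW. */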
void dot_kernel(starpu_data_handle v1,
                starpu_data_handle v2,
                starpu_data_handle s,
                unsigned nblocks,
                int use_reduction)
{
        /* Blank the accumulation variable */
        starpu_insert_task(&bzero_variable_cl, STARPU_W, s, 0);

        unsigned b;
        for (b = 0; b < nblocks; b++)
        {
                starpu_insert_task(&dot_kernel_cl,
                        use_reduction?STARPU_REDUX:STARPU_RW, s,
                        STARPU_R, starpu_data_get_sub_data(v1, 1, b),
                        STARPU_R, starpu_data_get_sub_data(v2, 1, b),
                        0);
        }
}
/*
 *      SCAL kernel : v1 = p1 v1
 */

#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
        TYPE p1;
        starpu_unpack_cl_args(cl_arg, &p1);

        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        /* v1 = p1 v1 */
        TYPE alpha = p1;
        cublasscal(n, alpha, v1, 1);
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_kernel_cpu(void *descr[], void *cl_arg)
{
        TYPE alpha;
        starpu_unpack_cl_args(cl_arg, &alpha);

        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        /* v1 = alpha v1 */
        SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel_t scal_kernel_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "scal_kernel"
};

static starpu_codelet scal_kernel_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = scal_kernel_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = scal_kernel_cuda,
#endif
        .nbuffers = 1,
        .model = &scal_kernel_model
};
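/* scal_kernel_cl has no standalone wrapper in this file: it is only submitted
 * by gemv_kernel() below to pre-scale each block of v1 by p1 before the GEMV
 * accumulation. */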
/*
 *      GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */

#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
        TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

        unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
        unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
        unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

        TYPE alpha, beta;
        starpu_unpack_cl_args(cl_arg, &beta, &alpha);

        /* Compute v1 = alpha M v2 + beta v1 */
        cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
        TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

        unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
        unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
        unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

        TYPE alpha, beta;
        starpu_unpack_cl_args(cl_arg, &beta, &alpha);

        int worker_size = starpu_combined_worker_get_size();

        if (worker_size > 1)
        {
                /* Parallel CPU task: the codelet is STARPU_SPMD, so every
                 * worker of the combined worker executes this function and
                 * handles a contiguous band of rows of M (and the matching
                 * slice of v1). */
                int rank = starpu_combined_worker_get_rank();

                int block_size = (ny + worker_size - 1)/worker_size;
                int new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

                nx = new_nx;
                v1 = &v1[block_size*rank];
                M = &M[block_size*rank];
        }

        /* Compute v1 = alpha M v2 + beta v1 */
        GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}

static struct starpu_perfmodel_t gemv_kernel_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "gemv_kernel"
};

static starpu_codelet gemv_kernel_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .type = STARPU_SPMD,
        .max_parallelism = INT_MAX,
        .cpu_func = gemv_kernel_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = gemv_kernel_cuda,
#endif
        .nbuffers = 3,
        .model = &gemv_kernel_model
};
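/* Blocked v1 = p1 * v1 + p2 * M * v2: every block of v1 is first scaled by p1
 * with scal_kernel_cl, then one gemv_kernel_cl task per (b2, b1) matrix block
 * accumulates p2 * M(b2,b1) * v2(b1) into v1(b2); beta is passed as 1.0 so the
 * previously scaled contents are kept.  With use_reduction the accumulation on
 * v1(b2) goes through STARPU_REDUX and the accumulate_vector_cl codelet. */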
void gemv_kernel(starpu_data_handle v1,
                starpu_data_handle matrix,
                starpu_data_handle v2,
                TYPE p1, TYPE p2,
                unsigned nblocks,
                int use_reduction)
{
        unsigned b1, b2;

        for (b2 = 0; b2 < nblocks; b2++)
        {
                starpu_insert_task(&scal_kernel_cl,
                        STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
                        STARPU_VALUE, &p1, sizeof(p1),
                        0);
        }

        for (b2 = 0; b2 < nblocks; b2++)
        {
                for (b1 = 0; b1 < nblocks; b1++)
                {
                        TYPE one = 1.0;
                        starpu_insert_task(&gemv_kernel_cl,
                                use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
                                STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
                                STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
                                STARPU_VALUE, &one, sizeof(one),
                                STARPU_VALUE, &p2, sizeof(p2),
                                0);
                }
        }
}
/*
 *      AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */

#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
        TYPE p1, p2;
        starpu_unpack_cl_args(cl_arg, &p1, &p2);

        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        /* Compute v1 = p1 * v1 + p2 * v2.
         *      v1 = p1 v1
         *      v1 = v1 + p2 v2
         */
        cublasscal(n, p1, v1, 1);
        cublasaxpy(n, p2, v2, 1, v1, 1);
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
        TYPE p1, p2;
        starpu_unpack_cl_args(cl_arg, &p1, &p2);

        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

        unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

        /* Compute v1 = p1 * v1 + p2 * v2.
         *      v1 = p1 v1
         *      v1 = v1 + p2 v2
         */
        SCAL(nx, p1, v1, 1);
        AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel_t scal_axpy_kernel_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "scal_axpy_kernel"
};

static starpu_codelet scal_axpy_kernel_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = scal_axpy_kernel_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = scal_axpy_kernel_cuda,
#endif
        .nbuffers = 2,
        .model = &scal_axpy_kernel_model
};
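/* Blocked v1 = p1 * v1 + p2 * v2: one scal_axpy_kernel_cl task per block.
 * In a CG iteration this is typically the direction update, d = r + beta * d. */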
void scal_axpy_kernel(starpu_data_handle v1, TYPE p1,
                        starpu_data_handle v2, TYPE p2,
                        unsigned nblocks)
{
        unsigned b;
        for (b = 0; b < nblocks; b++)
        {
                starpu_insert_task(&scal_axpy_kernel_cl,
                        STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
                        STARPU_R, starpu_data_get_sub_data(v2, 1, b),
                        STARPU_VALUE, &p1, sizeof(p1),
                        STARPU_VALUE, &p2, sizeof(p2),
                        0);
        }
}
/*
 *      AXPY kernel : v1 = v1 + p1 * v2
 */

#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
        TYPE p1;
        starpu_unpack_cl_args(cl_arg, &p1);

        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        /* Compute v1 = v1 + p1 * v2.
         */
        cublasaxpy(n, p1, v2, 1, v1, 1);
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif
static void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
        TYPE p1;
        starpu_unpack_cl_args(cl_arg, &p1);

        TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

        unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

        /* Compute v1 = v1 + p1 * v2.
         */
        AXPY(nx, p1, v2, 1, v1, 1);
}
static struct starpu_perfmodel_t axpy_kernel_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "axpy_kernel"
};

static starpu_codelet axpy_kernel_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = axpy_kernel_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = axpy_kernel_cuda,
#endif
        .nbuffers = 2,
        .model = &axpy_kernel_model
};
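/* Blocked v1 = v1 + p1 * v2: one axpy_kernel_cl task per block.  In a CG
 * iteration this is typically used for x = x + alpha * d and r = r - alpha * q. */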
void axpy_kernel(starpu_data_handle v1,
                starpu_data_handle v2, TYPE p1,
                unsigned nblocks)
{
        unsigned b;
        for (b = 0; b < nblocks; b++)
        {
                starpu_insert_task(&axpy_kernel_cl,
                        STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
                        STARPU_R, starpu_data_get_sub_data(v2, 1, b),
                        STARPU_VALUE, &p1, sizeof(p1),
                        0);
        }
}
/*
 *      COPY kernel : vector_dst <- vector_src
 */

static void copy_handle_cpu(void *descr[], void *cl_arg)
{
        TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

        unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
        size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

        memcpy(dst, src, nx*elemsize);
}

#ifdef STARPU_USE_CUDA
static void copy_handle_cuda(void *descr[], void *cl_arg)
{
        TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
        TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

        unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
        size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

        cudaMemcpyAsync(dst, src, nx*elemsize, cudaMemcpyDeviceToDevice, starpu_cuda_get_local_stream());
        cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static struct starpu_perfmodel_t copy_handle_model = {
        .type = STARPU_HISTORY_BASED,
        .symbol = "copy_handle"
};

static starpu_codelet copy_handle_cl = {
        .where = STARPU_CPU|STARPU_CUDA,
        .cpu_func = copy_handle_cpu,
#ifdef STARPU_USE_CUDA
        .cuda_func = copy_handle_cuda,
#endif
        .nbuffers = 2,
        .model = &copy_handle_model
};
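/* Blocked dst <- src copy: one copy_handle_cl task per block of the two
 * partitioned vectors. */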
void copy_handle(starpu_data_handle dst, starpu_data_handle src, unsigned nblocks)
{
        unsigned b;
        for (b = 0; b < nblocks; b++)
        {
                starpu_insert_task(&copy_handle_cl,
                        STARPU_W, starpu_data_get_sub_data(dst, 1, b),
                        STARPU_R, starpu_data_get_sub_data(src, 1, b),
                        0);
        }
}
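/* For reference, a rough sketch of how the kernels above can be chained for
 * one CG iteration.  The actual driver lives in cg.c; the variable names below
 * (q, d, r, x, dtq, alpha, beta, ...) are illustrative, not necessarily the
 * ones used there:
 *
 *      gemv_kernel(q, A, d, 0.0, 1.0, nblocks, use_reduction);   // q = A d
 *      dot_kernel(d, q, dtq, nblocks, use_reduction);            // dtq = d . q
 *      // alpha = delta_new / dtq, computed on the host once dtq is acquired
 *      axpy_kernel(x, d, alpha, nblocks);                        // x = x + alpha d
 *      axpy_kernel(r, q, -alpha, nblocks);                       // r = r - alpha q
 *      dot_kernel(r, r, rtr, nblocks, use_reduction);            // delta_new = r . r
 *      // beta = delta_new / delta_old
 *      scal_axpy_kernel(d, beta, r, 1.0, nblocks);               // d = r + beta d
 */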