/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010  Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include "cg.h"
#include <math.h>

#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif
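
/* TYPE, DOT, AXPY, SCAL, GEMV and the lowercase cublasaxpy/cublasscal/
 * cublasgemv wrappers used below are presumably defined in cg.h as macros
 * selecting the single- or double-precision BLAS and CUBLAS entry points
 * (e.g. AXPY -> saxpy/daxpy), so the same kernels compile for either
 * precision. */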
/*
 *	Reduction accumulation methods
 */

#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	*v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel_t accumulate_variable_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

starpu_codelet accumulate_variable_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = accumulate_variable_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = accumulate_variable_cuda,
#endif
	.nbuffers = 2,
	.model = &accumulate_variable_model
};

#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel_t accumulate_vector_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

starpu_codelet accumulate_vector_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = accumulate_vector_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = accumulate_vector_cuda,
#endif
	.nbuffers = 2,
	.model = &accumulate_vector_model
};

/*
 *	Reduction initialization methods
 */

#ifdef STARPU_USE_CUDA
static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);

	cublasscal(1, (TYPE)0.0, v, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*v = (TYPE)0.0;
}

static struct starpu_perfmodel_t bzero_variable_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

starpu_codelet bzero_variable_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = bzero_variable_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = bzero_variable_cuda,
#endif
	.nbuffers = 1,
	.model = &bzero_variable_model
};

#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasscal(n, (TYPE)0.0, v, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void bzero_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel_t bzero_vector_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

starpu_codelet bzero_vector_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = bzero_vector_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = bzero_vector_cuda,
#endif
	.nbuffers = 1,
	.model = &bzero_vector_model
};
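
/* Usage sketch (illustrative, not part of the original file): the
 * accumulate_* and bzero_* codelets above are meant to be registered as
 * reduction methods on a data handle, so that tasks may access it in
 * STARPU_REDUX mode. Assuming a variable handle s (hypothetical name):
 *
 *	starpu_data_set_reduction_methods(s, &accumulate_variable_cl,
 *					  &bzero_variable_cl);
 *
 * StarPU then gives each contributing worker a private copy initialized
 * with the bzero codelet and folds the per-worker copies back together
 * with the accumulate codelet. */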
/*
 *	DOT kernel : s = dot(v1, v2)
 */

#ifdef STARPU_USE_CUDA
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	/* Contrary to cublasSdot, this function puts its result directly in
	 * device memory, so that we don't have to transfer that value back and
	 * forth. */
	dot_host(v1, v2, n, dot);
}
#endif

static void dot_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	TYPE local_dot = 0.0;

	/* Note that we explicitly cast the result of the DOT kernel because
	 * some BLAS libraries return a double for sdot, for instance. */
	local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

	*dot = *dot + local_dot;
}

static struct starpu_perfmodel_t dot_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

static starpu_codelet dot_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = dot_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = dot_kernel_cuda,
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};

void dot_kernel(starpu_data_handle v1,
		starpu_data_handle v2,
		starpu_data_handle s,
		unsigned nblocks,
		int use_reduction)
{
	/* Blank the accumulation variable */
	starpu_insert_task(&bzero_variable_cl, STARPU_W, s, 0);

	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		starpu_insert_task(&dot_kernel_cl,
			use_reduction?STARPU_REDUX:STARPU_RW, s,
			STARPU_R, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			0);
	}
}
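
/* Usage sketch (illustrative): dot_kernel() expects v1 and v2 to have been
 * partitioned into nblocks contiguous pieces beforehand, e.g. with the
 * vector block filter (handle names hypothetical):
 *
 *	struct starpu_data_filter f = {
 *		.filter_func = starpu_block_filter_func_vector,
 *		.nchildren = nblocks
 *	};
 *	starpu_data_partition(v1_handle, &f);
 *	starpu_data_partition(v2_handle, &f);
 *
 *	dot_kernel(v1_handle, v2_handle, s_handle, nblocks, 1);
 *
 * starpu_data_get_sub_data(v1, 1, b) above then retrieves block b of v1. */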
/*
 *	SCAL kernel : v1 = p1 v1
 */

#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasscal(n, alpha, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE alpha;
	starpu_unpack_cl_args(cl_arg, &alpha);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = alpha v1 */
	SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel_t scal_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

static starpu_codelet scal_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = scal_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = scal_kernel_cuda,
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};
/*
 *	GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */

#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_unpack_cl_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_unpack_cl_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();

	if (worker_size > 1)
	{
		/* Parallel CPU task */
		int rank = starpu_combined_worker_get_rank();

		int block_size = (ny + worker_size - 1)/worker_size;

		int new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;
		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
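
/* When gemv_kernel_cpu runs as a parallel (SPMD) CPU task, each of the
 * worker_size workers handles a contiguous band of rows: with nx = ny = 100
 * and worker_size = 3, block_size is 34 and ranks 0, 1 and 2 compute rows
 * [0,34), [34,68) and [68,100) of v1. Offsetting M by block_size*rank
 * selects those rows because the matrix is stored column-major with leading
 * dimension ld. */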
static struct starpu_perfmodel_t gemv_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

static starpu_codelet gemv_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_func = gemv_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = gemv_kernel_cuda,
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};

void gemv_kernel(starpu_data_handle v1,
		starpu_data_handle matrix,
		starpu_data_handle v2,
		TYPE p1, TYPE p2,
		unsigned nblocks,
		int use_reduction)
{
	unsigned b1, b2;

	for (b2 = 0; b2 < nblocks; b2++)
	{
		starpu_insert_task(&scal_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
			STARPU_VALUE, &p1, sizeof(p1),
			0);
	}

	for (b2 = 0; b2 < nblocks; b2++)
	{
		for (b1 = 0; b1 < nblocks; b1++)
		{
			TYPE one = 1.0;
			starpu_insert_task(&gemv_kernel_cl,
				use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
				STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
				STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
				STARPU_VALUE, &one, sizeof(one),
				STARPU_VALUE, &p2, sizeof(p2),
				0);
		}
	}
}
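
/* gemv_kernel() thus submits nblocks SCAL tasks followed by nblocks*nblocks
 * GEMV tasks: block b2 of v1 is scaled by p1 once, then every GEMV on that
 * block accumulates with beta = 1.0 (the "one" value, unpacked into beta in
 * the kernels above since the STARPU_VALUE arguments are read in insertion
 * order), which yields v1 = p1 * v1 + p2 * M * v2 overall. */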
/*
 *	AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */

#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_unpack_cl_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasscal(n, p1, v1, 1);
	cublasaxpy(n, p2, v2, 1, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_unpack_cl_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	SCAL(nx, p1, v1, 1);
	AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel_t scal_axpy_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

static starpu_codelet scal_axpy_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = scal_axpy_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = scal_axpy_kernel_cuda,
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};

void scal_axpy_kernel(starpu_data_handle v1, TYPE p1,
			starpu_data_handle v2, TYPE p2,
			unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		starpu_insert_task(&scal_axpy_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			STARPU_VALUE, &p1, sizeof(p1),
			STARPU_VALUE, &p2, sizeof(p2),
			0);
	}
}
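
/* Usage sketch (hypothetical): in a CG iteration this kernel fits the
 * search-direction update p = beta * p + r, e.g.:
 *
 *	scal_axpy_kernel(p_handle, beta, r_handle, (TYPE)1.0, nblocks);
 */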
/*
 *	AXPY kernel : v1 = v1 + p1 * v2
 */

#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	cublasaxpy(n, p1, v2, 1, v1, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	AXPY(nx, p1, v2, 1, v1, 1);
}

static struct starpu_perfmodel_t axpy_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

static starpu_codelet axpy_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = axpy_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = axpy_kernel_cuda,
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};

void axpy_kernel(starpu_data_handle v1,
		starpu_data_handle v2, TYPE p1,
		unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		starpu_insert_task(&axpy_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			STARPU_VALUE, &p1, sizeof(p1),
			0);
	}
}
/*
 *	COPY kernel : vector_dst <- vector_src
 */

static void copy_handle_cpu(void *descr[], void *cl_arg)
{
	TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

	memcpy(dst, src, nx*elemsize);
}

#ifdef STARPU_USE_CUDA
static void copy_handle_cuda(void *descr[], void *cl_arg)
{
	TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

	cudaMemcpy(dst, src, nx*elemsize, cudaMemcpyDeviceToDevice);
	cudaThreadSynchronize();
}
#endif

static struct starpu_perfmodel_t copy_handle_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "copy_handle"
};

static starpu_codelet copy_handle_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = copy_handle_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = copy_handle_cuda,
#endif
	.nbuffers = 2,
	.model = &copy_handle_model
};

void copy_handle(starpu_data_handle dst, starpu_data_handle src, unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		starpu_insert_task(&copy_handle_cl,
			STARPU_W, starpu_data_get_sub_data(dst, 1, b),
			STARPU_R, starpu_data_get_sub_data(src, 1, b),
			0);
	}
}
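
/* Putting it together (illustrative sketch, not part of the original file):
 * one CG iteration expressed with the kernels above, given block-partitioned
 * handles for A, x, r, p, q and variable handles for the scalar results
 * (all names hypothetical; alpha and beta are computed on the host after
 * acquiring the dot-product handles):
 *
 *	gemv_kernel(q, A, p, (TYPE)0.0, (TYPE)1.0, nblocks, use_reduction);
 *	dot_kernel(p, q, pq, nblocks, use_reduction);      // pq = p . q
 *	// alpha = rtr / pq
 *	axpy_kernel(x, p, alpha, nblocks);                 // x = x + alpha p
 *	axpy_kernel(r, q, -alpha, nblocks);                // r = r - alpha q
 *	dot_kernel(r, r, new_rtr, nblocks, use_reduction); // new residual norm
 *	// beta = new_rtr / rtr
 *	scal_axpy_kernel(p, beta, r, (TYPE)1.0, nblocks);  // p = beta p + r
 */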