/* cg_kernels.c */

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010  Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "cg.h"
#include <math.h>
#include <string.h>	/* memset()/memcpy() */

#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif
/*
 *	Reduction accumulation methods
 */

#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaError_t ret = cudaThreadSynchronize();
	if (ret)
		STARPU_CUDA_REPORT_ERROR(ret);
}
#endif

static void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	*v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel_t accumulate_variable_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

starpu_codelet accumulate_variable_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = accumulate_variable_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = accumulate_variable_cuda,
#endif
	.nbuffers = 2,
	.model = &accumulate_variable_model
};
#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
	cudaError_t ret = cudaThreadSynchronize();
	if (ret)
		STARPU_CUDA_REPORT_ERROR(ret);
}
#endif

static void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel_t accumulate_vector_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

starpu_codelet accumulate_vector_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = accumulate_vector_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = accumulate_vector_cuda,
#endif
	.nbuffers = 2,
	.model = &accumulate_vector_model
};
/*
 *	Reduction initialization methods
 */

#ifdef STARPU_USE_CUDA
static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);

	cublasscal(1, (TYPE)0.0, v, 1);
	cudaThreadSynchronize();
}
#endif

static void bzero_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*v = (TYPE)0.0;
}

static struct starpu_perfmodel_t bzero_variable_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

starpu_codelet bzero_variable_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = bzero_variable_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = bzero_variable_cuda,
#endif
	.nbuffers = 1,
	.model = &bzero_variable_model
};
#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasscal(n, (TYPE)0.0, v, 1);
	cudaError_t ret = cudaThreadSynchronize();
	if (ret)
		STARPU_CUDA_REPORT_ERROR(ret);
}
#endif

static void bzero_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel_t bzero_vector_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

starpu_codelet bzero_vector_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = bzero_vector_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = bzero_vector_cuda,
#endif
	.nbuffers = 1,
	.model = &bzero_vector_model
};
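
/* Together, the codelets above provide the two operations StarPU needs for
 * the STARPU_REDUX access mode: an initializer that resets a per-worker copy
 * to the neutral element (the bzero_* codelets) and an accumulator that folds
 * a per-worker contribution back into the handle (the accumulate_* codelets).
 * They are exported (non-static) so the driver can register them on the
 * handles it reduces; a minimal sketch, assuming a handle `dtq_handle`
 * declared by the caller:
 *
 *	starpu_data_set_reduction_methods(dtq_handle,
 *			&accumulate_variable_cl, &bzero_variable_cl);
 */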
/*
 *	DOT kernel : s = dot(v1, v2)
 */

#ifdef STARPU_USE_CUDA
static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	cudaError_t ret;
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	/* Get the current value of the accumulator on the host */
	TYPE host_dot;
	cudaMemcpy(&host_dot, dot, sizeof(TYPE), cudaMemcpyDeviceToHost);
	ret = cudaThreadSynchronize();
	if (ret)
		STARPU_CUDA_REPORT_ERROR(ret);

	TYPE local_dot = cublasdot(n, v1, 1, v2, 1);
	host_dot += local_dot;
	ret = cudaThreadSynchronize();
	if (ret)
		STARPU_CUDA_REPORT_ERROR(ret);

	/* Write the updated value back to the device */
	cudaMemcpy(dot, &host_dot, sizeof(TYPE), cudaMemcpyHostToDevice);
	ret = cudaThreadSynchronize();
	if (ret)
		STARPU_CUDA_REPORT_ERROR(ret);
}
#endif
static void dot_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	TYPE local_dot = 0.0;
	/* Note that we explicitly cast the result of the DOT kernel because
	 * some BLAS libraries return a double for sdot, for instance. */
	local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

	*dot = *dot + local_dot;
}
static struct starpu_perfmodel_t dot_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

static starpu_codelet dot_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = dot_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = dot_kernel_cuda,
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};

void dot_kernel(starpu_data_handle v1,
		starpu_data_handle v2,
		starpu_data_handle s,
		unsigned nblocks,
		int use_reduction)
{
	/* Blank the accumulation variable */
	starpu_insert_task(&bzero_variable_cl, STARPU_W, s, 0);

	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		starpu_insert_task(&dot_kernel_cl,
			use_reduction?STARPU_REDUX:STARPU_RW, s,
			STARPU_R, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			0);
	}
}
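
/* When use_reduction is set, each dot_kernel task contributes to a per-worker
 * copy of s (initialized with bzero_variable_cl and folded back with
 * accumulate_variable_cl), so the nblocks tasks may run concurrently;
 * otherwise the tasks serialize on s through the STARPU_RW access mode.
 * Either way, the submitted graph computes s = sum over b of dot(v1[b], v2[b]). */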
/*
 *	SCAL kernel : v1 = p1 v1
 */

#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasscal(n, alpha, v1, 1);
	cudaThreadSynchronize();
}
#endif

static void scal_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE alpha;
	starpu_unpack_cl_args(cl_arg, &alpha);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = alpha v1 */
	SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel_t scal_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

static starpu_codelet scal_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = scal_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = scal_kernel_cuda,
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};
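
/* scal_kernel_cl is only used internally, by gemv_kernel() below: it
 * pre-scales each block of v1 by p1 before the GEMV tasks accumulate the
 * p2 * M v2 contributions into it. */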
/*
 *	GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */

#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_unpack_cl_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
	cudaThreadSynchronize();
}
#endif
static void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_unpack_cl_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();

	if (worker_size > 1)
	{
		/* Parallel CPU task: each member of the combined worker
		 * handles a contiguous chunk of rows.  Note that block_size
		 * is derived from ny but applied along nx, which is fine
		 * as long as the matrix blocks are square (nx == ny). */
		int rank = starpu_combined_worker_get_rank();

		int block_size = (ny + worker_size - 1)/worker_size;

		int new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;
		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
static struct starpu_perfmodel_t gemv_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

static starpu_codelet gemv_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_func = gemv_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = gemv_kernel_cuda,
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};
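
/* The GEMV codelet is declared STARPU_SPMD with unbounded max_parallelism, so
 * the scheduler may hand it to a combined CPU worker: every member then runs
 * gemv_kernel_cpu and selects its own slice of rows through
 * starpu_combined_worker_get_rank()/starpu_combined_worker_get_size(), as done
 * in the worker_size > 1 branch above. */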
void gemv_kernel(starpu_data_handle v1,
		starpu_data_handle matrix,
		starpu_data_handle v2,
		TYPE p1, TYPE p2,
		unsigned nblocks,
		int use_reduction)
{
	unsigned b1, b2;

	for (b2 = 0; b2 < nblocks; b2++)
	{
		starpu_insert_task(&scal_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
			STARPU_VALUE, &p1, sizeof(p1),
			0);
	}

	for (b2 = 0; b2 < nblocks; b2++)
	{
		for (b1 = 0; b1 < nblocks; b1++)
		{
			TYPE one = 1.0;
			starpu_insert_task(&gemv_kernel_cl,
				use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
				STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
				STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
				STARPU_VALUE, &one, sizeof(one),
				STARPU_VALUE, &p2, sizeof(p2),
				0);
		}
	}
}
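
/* For each row block b2, the tasks submitted above compute
 *	v1[b2] = p1 * v1[b2] + p2 * sum over b1 of M[b2,b1] * v2[b1]
 * the scal tasks apply the p1 factor, then each GEMV task adds one
 * p2 * M[b2,b1] * v2[b1] term (through a reduction on v1[b2] when
 * use_reduction is set, since beta is passed as 1.0). */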
/*
 *	AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */

#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_unpack_cl_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasscal(n, p1, v1, 1);
	cublasaxpy(n, p2, v2, 1, v1, 1);
	cudaThreadSynchronize();
}
#endif

static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_unpack_cl_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	SCAL(nx, p1, v1, 1);
	AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel_t scal_axpy_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

static starpu_codelet scal_axpy_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = scal_axpy_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = scal_axpy_kernel_cuda,
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};
void scal_axpy_kernel(starpu_data_handle v1, TYPE p1,
		starpu_data_handle v2, TYPE p2,
		unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		starpu_insert_task(&scal_axpy_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			STARPU_VALUE, &p1, sizeof(p1),
			STARPU_VALUE, &p2, sizeof(p2),
			0);
	}
}
/*
 *	AXPY kernel : v1 = v1 + p1 * v2
 */

#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	cublasaxpy(n, p1, v2, 1, v1, 1);
	cudaThreadSynchronize();
}
#endif
static void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_unpack_cl_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	AXPY(nx, p1, v2, 1, v1, 1);
}
static struct starpu_perfmodel_t axpy_kernel_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

static starpu_codelet axpy_kernel_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = axpy_kernel_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = axpy_kernel_cuda,
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};

void axpy_kernel(starpu_data_handle v1,
		starpu_data_handle v2, TYPE p1,
		unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		starpu_insert_task(&axpy_kernel_cl,
			STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
			STARPU_R, starpu_data_get_sub_data(v2, 1, b),
			STARPU_VALUE, &p1, sizeof(p1),
			0);
	}
}
/*
 *	COPY kernel : vector_dst <- vector_src
 */

static void copy_handle_cpu(void *descr[], void *cl_arg)
{
	TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

	memcpy(dst, src, nx*elemsize);
}

#ifdef STARPU_USE_CUDA
static void copy_handle_cuda(void *descr[], void *cl_arg)
{
	TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);

	cudaMemcpy(dst, src, nx*elemsize, cudaMemcpyDeviceToDevice);
	cudaThreadSynchronize();
}
#endif

static struct starpu_perfmodel_t copy_handle_model = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "copy_handle"
};

static starpu_codelet copy_handle_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = copy_handle_cpu,
#ifdef STARPU_USE_CUDA
	.cuda_func = copy_handle_cuda,
#endif
	.nbuffers = 2,
	.model = &copy_handle_model
};

void copy_handle(starpu_data_handle dst, starpu_data_handle src, unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		starpu_insert_task(&copy_handle_cl,
			STARPU_W, starpu_data_get_sub_data(dst, 1, b),
			STARPU_R, starpu_data_get_sub_data(src, 1, b),
			0);
	}
}