cg_kernels.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010, 2012-2016 Université de Bordeaux
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. /*
  17. * Standard BLAS kernels used by CG
  18. */
  19. #include "cg.h"
  20. #include <math.h>
  21. #include <limits.h>
#if 0
/* Debug helper (disabled): dump an nx-element vector to stderr in
 * scientific notation, all on one line. */
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

/* Debug helper (disabled): dump an nx x ny matrix with leading dimension ld
 * to stderr, one line per value of j; element (j,i) is read at mat[j+i*ld]. */
static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif
/* Scheduling hook: tell StarPU whether worker 'workerid' may execute the
 * kernels of this file.  CPU, OpenCL, MIC and SCC workers are always
 * accepted.  A CUDA worker is accepted only if the device supports double
 * precision (compute capability at least 1.3); under SimGrid the device
 * properties are not available, so we optimistically accept. */
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	enum starpu_worker_archtype type = starpu_worker_get_type(workerid);
	if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER || type == STARPU_MIC_WORKER || type == STARPU_SCC_WORKER)
		return 1;

#ifdef STARPU_USE_CUDA
#ifdef STARPU_SIMGRID
	/* We don't know, let's assume it can */
	return 1;
#else
	/* Cuda device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	if (props->major >= 2 || props->minor >= 3)
		/* At least compute capability 1.3, supports doubles */
		return 1;
#endif
#endif
	/* Old card, does not support doubles */
	return 0;
}
  66. /*
  67. * Reduction accumulation methods
  68. */
#ifdef STARPU_USE_CUDA
/* Reduction accumulator (CUDA): dst += src on two scalars, implemented as a
 * one-element cublas AXPY submitted on the task's CUDA stream. */
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	starpu_cublas_set_stream();
	cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
}
#endif
  78. void accumulate_variable_cpu(void *descr[], void *cl_arg)
  79. {
  80. TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  81. TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);
  82. *v_dst = *v_dst + *v_src;
  83. }
/* History-based performance model for the scalar accumulator. */
static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

/* Codelet combining per-worker scalar contributions during a
 * STARPU_REDUX reduction: buffer 0 (RW) += buffer 1 (R). */
struct starpu_codelet accumulate_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_variable_cpu},
	.cpu_funcs_name = {"accumulate_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_variable_model
};
#ifdef STARPU_USE_CUDA
/* Reduction accumulator (CUDA): dst += src element-wise on two vectors,
 * via cublas AXPY on the task's CUDA stream. */
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	starpu_cublas_set_stream();
	cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}
#endif
  112. void accumulate_vector_cpu(void *descr[], void *cl_arg)
  113. {
  114. TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  115. TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  116. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  117. AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
  118. }
/* History-based performance model for the vector accumulator. */
static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

/* Codelet combining per-worker vector contributions during a
 * STARPU_REDUX reduction: buffer 0 (RW) += buffer 1 (R). */
struct starpu_codelet accumulate_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_vector_cpu},
	.cpu_funcs_name = {"accumulate_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_vector_model
};
  137. /*
  138. * Reduction initialization methods
  139. */
#ifdef STARPU_USE_CUDA
/* CUDA kernel (implemented in a .cu file) zeroing nelems elements of x. */
extern void zero_vector(TYPE *x, unsigned nelems);

/* Reduction initializer (CUDA): set a scalar handle to zero. */
static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	zero_vector(v, 1);
}
#endif
  148. void bzero_variable_cpu(void *descr[], void *cl_arg)
  149. {
  150. TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  151. *v = (TYPE)0.0;
  152. }
/* History-based performance model for the scalar initializer. */
static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

/* Codelet initializing the per-worker scalar of a STARPU_REDUX
 * reduction (neutral element of the addition: zero). */
struct starpu_codelet bzero_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_variable_cpu},
	.cpu_funcs_name = {"bzero_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_variable_model
};
#ifdef STARPU_USE_CUDA
/* Reduction initializer (CUDA): zero every element of a vector handle,
 * using the zero_vector CUDA kernel declared above. */
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	zero_vector(v, n);
}
#endif
  179. void bzero_vector_cpu(void *descr[], void *cl_arg)
  180. {
  181. TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  182. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  183. memset(v, 0, n*sizeof(TYPE));
  184. }
/* History-based performance model for the vector initializer. */
static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

/* Codelet initializing the per-worker vector of a STARPU_REDUX
 * reduction (neutral element of the addition: zero). */
struct starpu_codelet bzero_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_vector_cpu},
	.cpu_funcs_name = {"bzero_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_vector_model
};
  203. /*
  204. * DOT kernel : s = dot(v1, v2)
  205. */
#ifdef STARPU_USE_CUDA
/* CUDA dot product (implemented in a .cu file): *dot += v1 . v2,
 * leaving the result in device memory. */
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

/* DOT task (CUDA): accumulate the dot product of buffers 1 and 2 into the
 * scalar in buffer 0. */
static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	/* Contrary to cublasSdot, this function puts its result directly in
	 * device memory, so that we don't have to transfer that value back and
	 * forth. */
	dot_host(v1, v2, n, dot);
}
#endif
  220. void dot_kernel_cpu(void *descr[], void *cl_arg)
  221. {
  222. TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
  223. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  224. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
  225. unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
  226. TYPE local_dot;
  227. /* Note that we explicitely cast the result of the DOT kernel because
  228. * some BLAS library will return a double for sdot for instance. */
  229. local_dot = (TYPE)DOT(n, v1, 1, v2, 1);
  230. *dot = *dot + local_dot;
  231. }
/* History-based performance model for the dot-product task. */
static struct starpu_perfmodel dot_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

/* Codelet for one block of the dot product.  Access modes for the three
 * buffers are passed at insertion time (REDUX or RW for the accumulator),
 * hence no .modes field here. */
static struct starpu_codelet dot_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {dot_kernel_cpu},
	.cpu_funcs_name = {"dot_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {dot_kernel_cuda},
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};
  248. int dot_kernel(starpu_data_handle_t v1,
  249. starpu_data_handle_t v2,
  250. starpu_data_handle_t s,
  251. unsigned nblocks,
  252. int use_reduction)
  253. {
  254. int ret;
  255. /* Blank the accumulation variable */
  256. if (use_reduction)
  257. starpu_data_invalidate_submit(s);
  258. else
  259. {
  260. ret = starpu_task_insert(&bzero_variable_cl, STARPU_W, s, 0);
  261. if (ret == -ENODEV) return ret;
  262. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  263. }
  264. unsigned b;
  265. for (b = 0; b < nblocks; b++)
  266. {
  267. ret = starpu_task_insert(&dot_kernel_cl,
  268. use_reduction?STARPU_REDUX:STARPU_RW, s,
  269. STARPU_R, starpu_data_get_sub_data(v1, 1, b),
  270. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  271. STARPU_TAG_ONLY, (starpu_tag_t) b,
  272. 0);
  273. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  274. }
  275. return 0;
  276. }
  277. /*
  278. * SCAL kernel : v1 = p1 v1
  279. */
#ifdef STARPU_USE_CUDA
/* SCAL task (CUDA): v1 = p1 * v1, where p1 is unpacked from cl_arg. */
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	starpu_cublas_set_stream();
	cublasscal(n, alpha, v1, 1);
}
#endif
  293. void scal_kernel_cpu(void *descr[], void *cl_arg)
  294. {
  295. TYPE alpha;
  296. starpu_codelet_unpack_args(cl_arg, &alpha);
  297. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  298. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  299. /* v1 = alpha v1 */
  300. SCAL(n, alpha, v1, 1);
  301. }
/* History-based performance model for the scaling task. */
static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

/* Codelet scaling one vector block in place (mode passed at insertion). */
static struct starpu_codelet scal_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_kernel_cpu},
	.cpu_funcs_name = {"scal_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};
  319. /*
  320. * GEMV kernel : v1 = p1 * v1 + p2 * M v2
  321. */
#ifdef STARPU_USE_CUDA
/* GEMV task (CUDA): v1 = alpha * M * v2 + beta * v1; beta then alpha are
 * unpacked from cl_arg, matching the packing order in gemv_kernel(). */
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	starpu_cublas_set_stream();
	cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}
#endif
  338. void gemv_kernel_cpu(void *descr[], void *cl_arg)
  339. {
  340. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  341. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
  342. TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);
  343. unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
  344. unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
  345. unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);
  346. TYPE alpha, beta;
  347. starpu_codelet_unpack_args(cl_arg, &beta, &alpha);
  348. int worker_size = starpu_combined_worker_get_size();
  349. if (worker_size > 1)
  350. {
  351. /* Parallel CPU task */
  352. unsigned rank = starpu_combined_worker_get_rank();
  353. unsigned block_size = (ny + worker_size - 1)/worker_size;
  354. unsigned new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;
  355. nx = new_nx;
  356. v1 = &v1[block_size*rank];
  357. M = &M[block_size*rank];
  358. }
  359. /* Compute v1 = alpha M v2 + beta v1 */
  360. GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
  361. }
/* History-based performance model for the GEMV task. */
static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

/* Codelet for one GEMV block.  STARPU_SPMD lets StarPU run the CPU
 * implementation on a combined worker of arbitrary size; buffer access
 * modes are passed at insertion time. */
static struct starpu_codelet gemv_kernel_cl =
{
	.can_execute = can_execute,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu},
	.cpu_funcs_name = {"gemv_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};
  381. int gemv_kernel(starpu_data_handle_t v1,
  382. starpu_data_handle_t matrix,
  383. starpu_data_handle_t v2,
  384. TYPE p1, TYPE p2,
  385. unsigned nblocks,
  386. int use_reduction)
  387. {
  388. unsigned b1, b2;
  389. int ret;
  390. for (b2 = 0; b2 < nblocks; b2++)
  391. {
  392. ret = starpu_task_insert(&scal_kernel_cl,
  393. STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
  394. STARPU_VALUE, &p1, sizeof(p1),
  395. STARPU_TAG_ONLY, (starpu_tag_t) b2,
  396. 0);
  397. if (ret == -ENODEV) return ret;
  398. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  399. }
  400. for (b2 = 0; b2 < nblocks; b2++)
  401. {
  402. for (b1 = 0; b1 < nblocks; b1++)
  403. {
  404. TYPE one = 1.0;
  405. ret = starpu_task_insert(&gemv_kernel_cl,
  406. use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
  407. STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
  408. STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
  409. STARPU_VALUE, &one, sizeof(one),
  410. STARPU_VALUE, &p2, sizeof(p2),
  411. STARPU_TAG_ONLY, ((starpu_tag_t)b2) * nblocks + b1,
  412. 0);
  413. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  414. }
  415. }
  416. return 0;
  417. }
  418. /*
  419. * AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
  420. */
#ifdef STARPU_USE_CUDA
/* SCAL+AXPY task (CUDA): v1 = p1 * v1 + p2 * v2, as a cublas SCAL
 * followed by a cublas AXPY on the task's stream. */
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	starpu_cublas_set_stream();
	cublasscal(n, p1, v1, 1);
	cublasaxpy(n, p2, v2, 1, v1, 1);
}
#endif
  438. void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
  439. {
  440. TYPE p1, p2;
  441. starpu_codelet_unpack_args(cl_arg, &p1, &p2);
  442. TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
  443. TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
  444. unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
  445. /* Compute v1 = p1 * v1 + p2 * v2.
  446. * v1 = p1 v1
  447. * v1 = v1 + p2 v2
  448. */
  449. SCAL(nx, p1, v1, 1);
  450. AXPY(nx, p2, v2, 1, v1, 1);
  451. }
/* History-based performance model for the SCAL+AXPY task. */
static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

/* Codelet computing one block of v1 = p1 * v1 + p2 * v2
 * (modes passed at insertion time). */
static struct starpu_codelet scal_axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_axpy_kernel_cpu},
	.cpu_funcs_name = {"scal_axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};
  469. int scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
  470. starpu_data_handle_t v2, TYPE p2,
  471. unsigned nblocks)
  472. {
  473. unsigned b;
  474. for (b = 0; b < nblocks; b++)
  475. {
  476. int ret;
  477. ret = starpu_task_insert(&scal_axpy_kernel_cl,
  478. STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
  479. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  480. STARPU_VALUE, &p1, sizeof(p1),
  481. STARPU_VALUE, &p2, sizeof(p2),
  482. STARPU_TAG_ONLY, (starpu_tag_t) b,
  483. 0);
  484. if (ret == -ENODEV) return ret;
  485. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  486. }
  487. return 0;
  488. }
  489. /*
  490. * AXPY kernel : v1 = v1 + p1 * v2
  491. */
#ifdef STARPU_USE_CUDA
/* AXPY task (CUDA): v1 = v1 + p1 * v2 via cublas AXPY on the task's
 * stream; p1 is unpacked from cl_arg. */
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2.
	 */
	starpu_cublas_set_stream();
	cublasaxpy(n, p1, v2, 1, v1, 1);
}
#endif
/* AXPY task (CPU): v1 = v1 + p1 * v2; p1 is unpacked from cl_arg. */
void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);

	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2. */
	AXPY(nx, p1, v2, 1, v1, 1);
}
/* History-based performance model for the AXPY task. */
static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

/* Codelet computing one block of v1 = v1 + p1 * v2
 * (modes passed at insertion time). */
static struct starpu_codelet axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {axpy_kernel_cpu},
	.cpu_funcs_name = {"axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};
  534. int axpy_kernel(starpu_data_handle_t v1,
  535. starpu_data_handle_t v2, TYPE p1,
  536. unsigned nblocks)
  537. {
  538. unsigned b;
  539. for (b = 0; b < nblocks; b++)
  540. {
  541. int ret;
  542. ret = starpu_task_insert(&axpy_kernel_cl,
  543. STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
  544. STARPU_R, starpu_data_get_sub_data(v2, 1, b),
  545. STARPU_VALUE, &p1, sizeof(p1),
  546. STARPU_TAG_ONLY, (starpu_tag_t) b,
  547. 0);
  548. if (ret == -ENODEV) return ret;
  549. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
  550. }
  551. return 0;
  552. }
  553. int copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
  554. {
  555. unsigned b;
  556. for (b = 0; b < nblocks; b++)
  557. starpu_data_cpy(starpu_data_get_sub_data(dst, 1, b), starpu_data_get_sub_data(src, 1, b), 1, NULL, NULL);
  558. return 0;
  559. }