/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2012-2016 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 *	Standard BLAS kernels used by CG
 */
#include "cg.h"
#include <math.h>
#include <string.h>	/* for memset() in bzero_vector_cpu() */
#include <limits.h>

#ifdef STARPU_USE_CUDA
#include <starpu_cublas_v2.h>
static const TYPE p1 = 1.0;
static const TYPE m1 = -1.0;
#endif
#if 0
static void print_vector_from_descr(unsigned nx, TYPE *v)
{
	unsigned i;
	for (i = 0; i < nx; i++)
	{
		fprintf(stderr, "%2.2e ", v[i]);
	}
	fprintf(stderr, "\n");
}

static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
{
	unsigned i, j;
	for (j = 0; j < nx; j++)
	{
		for (i = 0; i < ny; i++)
		{
			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
		}
		fprintf(stderr, "\n");
	}
}
#endif
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	enum starpu_worker_archtype type = starpu_worker_get_type(workerid);
	if (type == STARPU_CPU_WORKER || type == STARPU_OPENCL_WORKER || type == STARPU_MIC_WORKER || type == STARPU_SCC_WORKER)
		return 1;

#ifdef STARPU_USE_CUDA
#ifdef STARPU_SIMGRID
	/* We don't know, let's assume it can */
	return 1;
#else
	/* CUDA device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	if (props->major >= 2 || props->minor >= 3)
		/* At least compute capability 1.3 (major is always >= 1),
		 * so the device supports doubles */
		return 1;
#endif
#endif
	/* Old card, does not support doubles */
	return 0;
}
/*
 *	Reduction accumulation methods
 */

#ifdef STARPU_USE_CUDA
static void accumulate_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(), 1, &p1, v_src, 1, v_dst, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void accumulate_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);

	*v_dst = *v_dst + *v_src;
}

static struct starpu_perfmodel accumulate_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_variable"
};

struct starpu_codelet accumulate_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_variable_cpu},
	.cpu_funcs_name = {"accumulate_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_variable_model
};
#ifdef STARPU_USE_CUDA
static void accumulate_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(), n, &p1, v_src, 1, v_dst, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void accumulate_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
}

static struct starpu_perfmodel accumulate_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "accumulate_vector"
};

struct starpu_codelet accumulate_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {accumulate_vector_cpu},
	.cpu_funcs_name = {"accumulate_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {accumulate_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2,
	.model = &accumulate_vector_model
};
/*
 *	Reduction initialization methods
 */

#ifdef STARPU_USE_CUDA
extern void zero_vector(TYPE *x, unsigned nelems);

static void bzero_variable_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	zero_vector(v, 1);
}
#endif

void bzero_variable_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*v = (TYPE)0.0;
}

static struct starpu_perfmodel bzero_variable_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_variable"
};

struct starpu_codelet bzero_variable_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_variable_cpu},
	.cpu_funcs_name = {"bzero_variable_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_variable_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_variable_model
};
#ifdef STARPU_USE_CUDA
static void bzero_vector_cuda(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	zero_vector(v, n);
}
#endif

void bzero_vector_cpu(void *descr[], void *cl_arg)
{
	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	memset(v, 0, n*sizeof(TYPE));
}

static struct starpu_perfmodel bzero_vector_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "bzero_vector"
};

struct starpu_codelet bzero_vector_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {bzero_vector_cpu},
	.cpu_funcs_name = {"bzero_vector_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_vector_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.modes = {STARPU_W},
	.nbuffers = 1,
	.model = &bzero_vector_model
};
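
/*
 * How the reduction codelets above are meant to be plugged in: when a
 * handle is accessed in STARPU_REDUX mode, StarPU gives each worker a
 * private copy, initializes it with the "init" codelet and merges the
 * copies back with the "redux" codelet. A minimal sketch of the
 * registration (the handle names are hypothetical; the call itself is
 * the standard StarPU API):
 */
#if 0
	starpu_data_set_reduction_methods(dtq_handle,
					  &accumulate_variable_cl, &bzero_variable_cl);
	starpu_data_set_reduction_methods(v_handle,
					  &accumulate_vector_cl, &bzero_vector_cl);
#endif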
/*
 *	DOT kernel : s = dot(v1, v2)
 */

#ifdef STARPU_USE_CUDA
extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);

static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	int version;
	cublasGetVersion(starpu_cublas_get_local_handle(), &version);
	/* FIXME: check in Nvidia bug #1882017 when this gets fixed */
	if (version < 99999)
	{
		/* This function puts its result directly in device memory, so
		 * that we don't have to transfer that value back and forth. */
		dot_host(v1, v2, n, dot);
	}
	else
	{
		/* Should be able to put the result in GPU memory, but does
		 * not yet; see Nvidia bug #1882017 */
		cublasStatus_t status = cublasdot(starpu_cublas_get_local_handle(),
			n, v1, 1, v2, 1, dot);
		if (status != CUBLAS_STATUS_SUCCESS)
			STARPU_CUBLAS_REPORT_ERROR(status);
		cudaStreamSynchronize(starpu_cuda_get_local_stream());
	}
}
#endif
void dot_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	TYPE local_dot;
	/* Note that we explicitly cast the result of the DOT kernel because
	 * some BLAS libraries return a double for sdot, for instance. */
	local_dot = (TYPE)DOT(n, v1, 1, v2, 1);

	*dot = *dot + local_dot;
}

static struct starpu_perfmodel dot_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "dot_kernel"
};

static struct starpu_codelet dot_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {dot_kernel_cpu},
	.cpu_funcs_name = {"dot_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {dot_kernel_cuda},
#endif
	.nbuffers = 3,
	.model = &dot_kernel_model
};
int dot_kernel(starpu_data_handle_t v1,
	       starpu_data_handle_t v2,
	       starpu_data_handle_t s,
	       unsigned nblocks,
	       int use_reduction)
{
	int ret;

	/* Blank the accumulation variable */
	if (use_reduction)
		starpu_data_invalidate_submit(s);
	else
	{
		ret = starpu_task_insert(&bzero_variable_cl, STARPU_W, s, 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}

	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		ret = starpu_task_insert(&dot_kernel_cl,
					 use_reduction?STARPU_REDUX:STARPU_RW, s,
					 STARPU_R, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_TAG_ONLY, (starpu_tag_t) b,
					 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}
	return 0;
}
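
/*
 * Usage sketch for dot_kernel(): once the tasks are submitted, the scalar
 * can be read back on the host with an acquire/release pair. The handle
 * names below are hypothetical.
 */
#if 0
	TYPE rtr;
	dot_kernel(r_handle, r_handle, rtr_handle, nblocks, use_reduction);
	starpu_data_acquire(rtr_handle, STARPU_R);	/* waits for the tasks */
	rtr = *(TYPE *) starpu_data_get_local_ptr(rtr_handle);
	starpu_data_release(rtr_handle);
#endif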
/*
 *	SCAL kernel : v1 = p1 v1
 */

#ifdef STARPU_USE_CUDA
static void scal_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = p1 v1 */
	TYPE alpha = p1;
	cublasStatus_t status = cublasscal(starpu_cublas_get_local_handle(), n, &alpha, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void scal_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE alpha;
	starpu_codelet_unpack_args(cl_arg, &alpha);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* v1 = alpha v1 */
	SCAL(n, alpha, v1, 1);
}

static struct starpu_perfmodel scal_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_kernel"
};

static struct starpu_codelet scal_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_kernel_cpu},
	.cpu_funcs_name = {"scal_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 1,
	.model = &scal_kernel_model
};
/*
 *	GEMV kernel : v1 = p1 * v1 + p2 * M v2
 */

#ifdef STARPU_USE_CUDA
static void gemv_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	/* Compute v1 = alpha M v2 + beta v1 */
	cublasStatus_t status = cublasgemv(starpu_cublas_get_local_handle(),
			CUBLAS_OP_N, nx, ny, &alpha, M, ld, v2, 1, &beta, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void gemv_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);

	TYPE alpha, beta;
	starpu_codelet_unpack_args(cl_arg, &beta, &alpha);

	int worker_size = starpu_combined_worker_get_size();
	if (worker_size > 1)
	{
		/* Parallel CPU task: each member of the combined worker
		 * handles its own contiguous slice of the rows */
		unsigned rank = starpu_combined_worker_get_rank();

		unsigned block_size = (ny + worker_size - 1)/worker_size;
		unsigned new_nx = STARPU_MIN(nx, block_size*(rank+1)) - block_size*rank;

		nx = new_nx;
		v1 = &v1[block_size*rank];
		M = &M[block_size*rank];
	}

	/* Compute v1 = alpha M v2 + beta v1 */
	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
}

static struct starpu_perfmodel gemv_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "gemv_kernel"
};

static struct starpu_codelet gemv_kernel_cl =
{
	.can_execute = can_execute,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,
	.cpu_funcs = {gemv_kernel_cpu},
	.cpu_funcs_name = {"gemv_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {gemv_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 3,
	.model = &gemv_kernel_model
};
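
/*
 * Since gemv_kernel_cl is of type STARPU_SPMD, the CPU implementation may
 * be run by a combined CPU worker, in which case every member executes
 * gemv_kernel_cpu() on its own slice of the rows. This only kicks in under
 * a parallel-task-aware scheduler; assuming a standard StarPU build, one
 * would be selected at run time with e.g. STARPU_SCHED=pheft in the
 * environment.
 */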
int gemv_kernel(starpu_data_handle_t v1,
		starpu_data_handle_t matrix,
		starpu_data_handle_t v2,
		TYPE p1, TYPE p2,
		unsigned nblocks,
		int use_reduction)
{
	unsigned b1, b2;
	int ret;

	for (b2 = 0; b2 < nblocks; b2++)
	{
		ret = starpu_task_insert(&scal_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
					 STARPU_VALUE, &p1, sizeof(p1),
					 STARPU_TAG_ONLY, (starpu_tag_t) b2,
					 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}

	for (b2 = 0; b2 < nblocks; b2++)
	{
		for (b1 = 0; b1 < nblocks; b1++)
		{
			TYPE one = 1.0;
			ret = starpu_task_insert(&gemv_kernel_cl,
						 use_reduction?STARPU_REDUX:STARPU_RW, starpu_data_get_sub_data(v1, 1, b2),
						 STARPU_R, starpu_data_get_sub_data(matrix, 2, b2, b1),
						 STARPU_R, starpu_data_get_sub_data(v2, 1, b1),
						 STARPU_VALUE, &one, sizeof(one),
						 STARPU_VALUE, &p2, sizeof(p2),
						 STARPU_TAG_ONLY, ((starpu_tag_t)b2) * nblocks + b1,
						 0);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
		}
	}
	return 0;
}
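
/*
 * gemv_kernel() assumes that `matrix` has been partitioned along both
 * dimensions (hence the "2" passed to starpu_data_get_sub_data) and that
 * the vectors have been split into nblocks pieces. A minimal sketch of
 * such a partitioning, with hypothetical handle names; the filters are
 * the standard StarPU ones:
 */
#if 0
	struct starpu_data_filter vector_f =
	{
		.filter_func = starpu_vector_filter_block,
		.nchildren = nblocks
	};
	struct starpu_data_filter matrix_f_1 =
	{
		.filter_func = starpu_matrix_filter_vertical_block,
		.nchildren = nblocks
	};
	struct starpu_data_filter matrix_f_2 =
	{
		.filter_func = starpu_matrix_filter_block,
		.nchildren = nblocks
	};

	starpu_data_partition(v_handle, &vector_f);
	starpu_data_map_filters(A_handle, 2, &matrix_f_1, &matrix_f_2);
#endif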
/*
 *	AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2
 */

#ifdef STARPU_USE_CUDA
static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	cublasStatus_t status;
	status = cublasscal(starpu_cublas_get_local_handle(), n, &p1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
	status = cublasaxpy(starpu_cublas_get_local_handle(), n, &p2, v2, 1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1, p2;
	starpu_codelet_unpack_args(cl_arg, &p1, &p2);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = p1 * v1 + p2 * v2.
	 *	v1 = p1 v1
	 *	v1 = v1 + p2 v2
	 */
	SCAL(nx, p1, v1, 1);
	AXPY(nx, p2, v2, 1, v1, 1);
}

static struct starpu_perfmodel scal_axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "scal_axpy_kernel"
};

static struct starpu_codelet scal_axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {scal_axpy_kernel_cpu},
	.cpu_funcs_name = {"scal_axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {scal_axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &scal_axpy_kernel_model
};
int scal_axpy_kernel(starpu_data_handle_t v1, TYPE p1,
		     starpu_data_handle_t v2, TYPE p2,
		     unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		int ret;
		ret = starpu_task_insert(&scal_axpy_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_VALUE, &p1, sizeof(p1),
					 STARPU_VALUE, &p2, sizeof(p2),
					 STARPU_TAG_ONLY, (starpu_tag_t) b,
					 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}
	return 0;
}
/*
 *	AXPY kernel : v1 = v1 + p1 * v2
 */

#ifdef STARPU_USE_CUDA
static void axpy_kernel_cuda(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	cublasStatus_t status = cublasaxpy(starpu_cublas_get_local_handle(),
			n, &p1, v2, 1, v1, 1);
	if (status != CUBLAS_STATUS_SUCCESS)
		STARPU_CUBLAS_REPORT_ERROR(status);
}
#endif

void axpy_kernel_cpu(void *descr[], void *cl_arg)
{
	TYPE p1;
	starpu_codelet_unpack_args(cl_arg, &p1);

	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);

	/* Compute v1 = v1 + p1 * v2 */
	AXPY(nx, p1, v2, 1, v1, 1);
}
static struct starpu_perfmodel axpy_kernel_model =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "axpy_kernel"
};

static struct starpu_codelet axpy_kernel_cl =
{
	.can_execute = can_execute,
	.cpu_funcs = {axpy_kernel_cpu},
	.cpu_funcs_name = {"axpy_kernel_cpu"},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {axpy_kernel_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 2,
	.model = &axpy_kernel_model
};

int axpy_kernel(starpu_data_handle_t v1,
		starpu_data_handle_t v2, TYPE p1,
		unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
	{
		int ret;
		ret = starpu_task_insert(&axpy_kernel_cl,
					 STARPU_RW, starpu_data_get_sub_data(v1, 1, b),
					 STARPU_R, starpu_data_get_sub_data(v2, 1, b),
					 STARPU_VALUE, &p1, sizeof(p1),
					 STARPU_TAG_ONLY, (starpu_tag_t) b,
					 0);
		if (ret == -ENODEV) return ret;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}
	return 0;
}
int copy_handle(starpu_data_handle_t dst, starpu_data_handle_t src, unsigned nblocks)
{
	unsigned b;
	for (b = 0; b < nblocks; b++)
		starpu_data_cpy(starpu_data_get_sub_data(dst, 1, b), starpu_data_get_sub_data(src, 1, b), 1, NULL, NULL);
	return 0;
}
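
/*
 * Putting the kernels together: a hedged sketch of one conjugate gradient
 * iteration built from the wrappers above (handle and scalar names such as
 * q_handle, alpha and beta are illustrative; -ENODEV handling is omitted):
 */
#if 0
	/* q <- A p ; dp <- p . q ; alpha = rtr / dp */
	gemv_kernel(q_handle, A_handle, p_handle, 0.0, 1.0, nblocks, use_reduction);
	dot_kernel(p_handle, q_handle, dp_handle, nblocks, use_reduction);

	/* x <- x + alpha p ; r <- r - alpha q */
	axpy_kernel(x_handle, p_handle, alpha, nblocks);
	axpy_kernel(r_handle, q_handle, -alpha, nblocks);

	/* p <- r + beta p */
	scal_axpy_kernel(p_handle, beta, r_handle, (TYPE)1.0, nblocks);
#endif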