xlu_kernels.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009, 2010-2012, 2014-2015 Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2015 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/* LU Kernels */

#include "xlu.h"
#include <math.h>

#define str(s) #s
#define xstr(s) str(s)
#define STARPU_LU_STR(name) xstr(STARPU_LU(name))

#ifdef STARPU_USE_CUDA
static const TYPE p1 = 1.0f;
static const TYPE m1 = -1.0f;
#endif
/*
 * U22
 */
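/* Trailing-submatrix update of the blocked LU factorization: the read-write
 * block (descr[2]) receives a GEMM update, center <- center - right * left,
 * with alpha = -1 and beta = +1.  The CPU variant calls the BLAS wrapper
 * directly; the CUDA variant issues the same update through the legacy
 * cuBLAS API and checks cublasGetError(). */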
static inline void STARPU_LU(common_u22)(void *descr[],
					 int s, STARPU_ATTRIBUTE_UNUSED void *_args)
{
	TYPE *right = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);
	TYPE *left = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);
	TYPE *center = (TYPE *)STARPU_MATRIX_GET_PTR(descr[2]);

	unsigned dx = STARPU_MATRIX_GET_NX(descr[2]);
	unsigned dy = STARPU_MATRIX_GET_NY(descr[2]);
	unsigned dz = STARPU_MATRIX_GET_NY(descr[0]);

	unsigned ld12 = STARPU_MATRIX_GET_LD(descr[0]);
	unsigned ld21 = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned ld22 = STARPU_MATRIX_GET_LD(descr[2]);

#ifdef STARPU_USE_CUDA
	cublasStatus status;
	cudaError_t cures;
#endif

	switch (s)
	{
		case 0:
			CPU_GEMM("N", "N", dy, dx, dz,
				(TYPE)-1.0, right, ld21, left, ld12,
				(TYPE)1.0, center, ld22);
			break;

#ifdef STARPU_USE_CUDA
		case 1:
		{
			CUBLAS_GEMM('n', 'n', dx, dy, dz,
				*(CUBLAS_TYPE*)&m1, (CUBLAS_TYPE *)right, ld21, (CUBLAS_TYPE *)left, ld12,
				*(CUBLAS_TYPE*)&p1, (CUBLAS_TYPE *)center, ld22);

			status = cublasGetError();
			if (STARPU_UNLIKELY(status != CUBLAS_STATUS_SUCCESS))
				STARPU_CUBLAS_REPORT_ERROR(status);

			break;
		}
#endif

		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u22)(void *descr[], void *_args)
{
	STARPU_LU(common_u22)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u22)(void *descr[], void *_args)
{
	STARPU_LU(common_u22)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel STARPU_LU(model_22) =
{
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_22_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_22_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_22)
#endif
};

#ifdef STARPU_USE_CUDA
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	if (starpu_worker_get_type(workerid) == STARPU_CPU_WORKER)
		return 1;

#ifdef STARPU_SIMGRID
	/* We don't know, let's assume it can */
	return 1;
#else
	/* Cuda device */
	const struct cudaDeviceProp *props;
	props = starpu_cuda_get_device_properties(workerid);
	if (props->major >= 2 || props->minor >= 3)
	{
		/* At least compute capability 1.3, supports doubles */
		return 1;
	}
	else
	{
		/* Old card does not support doubles */
		return 0;
	}
#endif
}
#endif

struct starpu_codelet cl22 =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {STARPU_LU(cpu_u22)},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {STARPU_LU(cublas_u22)},
	CAN_EXECUTE
#elif defined(STARPU_SIMGRID)
	.cuda_funcs = {(void*)1},
#endif
	.cuda_flags = {STARPU_CUDA_ASYNC},
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_R, STARPU_RW},
	.model = &STARPU_LU(model_22)
};
/*
 * U12
 */
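/* Panel update: solves L11 U12 = A12 for U12 (see the comment inside the
 * kernel).  sub12 (descr[1]) is overwritten in place by a triangular solve
 * against the factored diagonal block sub11 (descr[0]): a TRSM with side
 * Left, lower triangular, non-transposed, non-unit diagonal. */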
static inline void STARPU_LU(common_u12)(void *descr[],
					 int s, STARPU_ATTRIBUTE_UNUSED void *_args)
{
	TYPE *sub11;
	TYPE *sub12;

	sub11 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);
	sub12 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld11 = STARPU_MATRIX_GET_LD(descr[0]);
	unsigned ld12 = STARPU_MATRIX_GET_LD(descr[1]);

	unsigned nx12 = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny12 = STARPU_MATRIX_GET_NY(descr[1]);

#ifdef STARPU_USE_CUDA
	cublasStatus status;
	cudaError_t cures;
#endif

	/* solve L11 U12 = A12 (find U12) */
	switch (s)
	{
		case 0:
			CPU_TRSM("L", "L", "N", "N", nx12, ny12,
				(TYPE)1.0, sub11, ld11, sub12, ld12);
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			CUBLAS_TRSM('L', 'L', 'N', 'N', ny12, nx12,
				*(CUBLAS_TYPE*)&p1, (CUBLAS_TYPE*)sub11, ld11, (CUBLAS_TYPE*)sub12, ld12);

			status = cublasGetError();
			if (STARPU_UNLIKELY(status != CUBLAS_STATUS_SUCCESS))
				STARPU_CUBLAS_REPORT_ERROR(status);

			break;
#endif

		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u12)(void *descr[], void *_args)
{
	STARPU_LU(common_u12)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u12)(void *descr[], void *_args)
{
	STARPU_LU(common_u12)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel STARPU_LU(model_12) =
{
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_12_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_12_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_12)
#endif
};

struct starpu_codelet cl12 =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {STARPU_LU(cpu_u12)},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {STARPU_LU(cublas_u12)},
	CAN_EXECUTE
#elif defined(STARPU_SIMGRID)
	.cuda_funcs = {(void*)1},
#endif
	.cuda_flags = {STARPU_CUDA_ASYNC},
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_RW},
	.model = &STARPU_LU(model_12)
};
/*
 * U21
 */
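/* Symmetric panel update for the other off-diagonal block: sub21 (descr[1])
 * is overwritten in place by a triangular solve against the factored diagonal
 * block sub11 (descr[0]), this time with side Right, upper triangular,
 * unit diagonal. */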
static inline void STARPU_LU(common_u21)(void *descr[],
					 int s, STARPU_ATTRIBUTE_UNUSED void *_args)
{
	TYPE *sub11;
	TYPE *sub21;

	sub11 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);
	sub21 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld11 = STARPU_MATRIX_GET_LD(descr[0]);
	unsigned ld21 = STARPU_MATRIX_GET_LD(descr[1]);

	unsigned nx21 = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny21 = STARPU_MATRIX_GET_NY(descr[1]);

#ifdef STARPU_USE_CUDA
	cublasStatus status;
#endif

	switch (s)
	{
		case 0:
			CPU_TRSM("R", "U", "N", "U", nx21, ny21,
				(TYPE)1.0, sub11, ld11, sub21, ld21);
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			CUBLAS_TRSM('R', 'U', 'N', 'U', ny21, nx21,
				*(CUBLAS_TYPE*)&p1, (CUBLAS_TYPE*)sub11, ld11, (CUBLAS_TYPE*)sub21, ld21);

			status = cublasGetError();
			if (status != CUBLAS_STATUS_SUCCESS)
				STARPU_CUBLAS_REPORT_ERROR(status);

			break;
#endif

		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u21)(void *descr[], void *_args)
{
	STARPU_LU(common_u21)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u21)(void *descr[], void *_args)
{
	STARPU_LU(common_u21)(descr, 1, _args);
}
#endif

static struct starpu_perfmodel STARPU_LU(model_21) =
{
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_21_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_21_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_21)
#endif
};

struct starpu_codelet cl21 =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {STARPU_LU(cpu_u21)},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {STARPU_LU(cublas_u21)},
	CAN_EXECUTE
#elif defined(STARPU_SIMGRID)
	.cuda_funcs = {(void*)1},
#endif
	.cuda_flags = {STARPU_CUDA_ASYNC},
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_RW},
	.model = &STARPU_LU(model_21)
};
/*
 * U11
 */
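/* Unblocked factorization of the diagonal block, without pivoting: at each
 * step z the pivot is asserted to be non-zero, the entries past the pivot are
 * scaled by 1/pivot (SCAL), and the trailing (nx-z-1) x (nx-z-1) block
 * receives a rank-1 update (GER).  The CUDA variant copies each pivot back to
 * the host to test it, so it synchronizes with the local stream; accordingly,
 * cl11 below does not set the STARPU_CUDA_ASYNC flag. */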
static inline void STARPU_LU(common_u11)(void *descr[],
					 int s, STARPU_ATTRIBUTE_UNUSED void *_args)
{
	TYPE *sub11;

	sub11 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);

	unsigned long nx = STARPU_MATRIX_GET_NX(descr[0]);
	unsigned long ld = STARPU_MATRIX_GET_LD(descr[0]);

	unsigned long z;

	switch (s)
	{
		case 0:
			for (z = 0; z < nx; z++)
			{
				TYPE pivot;
				pivot = sub11[z+z*ld];
				STARPU_ASSERT(fpclassify(pivot) != FP_ZERO);

				CPU_SCAL(nx - z - 1, (1.0/pivot), &sub11[z+(z+1)*ld], ld);

				CPU_GER(nx - z - 1, nx - z - 1, -1.0,
						&sub11[(z+1)+z*ld], 1,
						&sub11[z+(z+1)*ld], ld,
						&sub11[(z+1) + (z+1)*ld], ld);
			}
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			for (z = 0; z < nx; z++)
			{
				TYPE pivot;
				TYPE inv_pivot;
				cudaMemcpyAsync(&pivot, &sub11[z+z*ld], sizeof(TYPE), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
				cudaStreamSynchronize(starpu_cuda_get_local_stream());

				STARPU_ASSERT(fpclassify(pivot) != FP_ZERO);

				inv_pivot = 1.0/pivot;
				CUBLAS_SCAL(nx - z - 1, *(CUBLAS_TYPE*)&inv_pivot, (CUBLAS_TYPE*)&sub11[z+(z+1)*ld], ld);

				CUBLAS_GER(nx - z - 1, nx - z - 1, *(CUBLAS_TYPE*)&m1,
						(CUBLAS_TYPE*)&sub11[(z+1)+z*ld], 1,
						(CUBLAS_TYPE*)&sub11[z+(z+1)*ld], ld,
						(CUBLAS_TYPE*)&sub11[(z+1) + (z+1)*ld], ld);
			}

			cudaStreamSynchronize(starpu_cuda_get_local_stream());

			break;
#endif

		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u11)(void *descr[], void *_args)
{
	STARPU_LU(common_u11)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u11)(void *descr[], void *_args)
{
	STARPU_LU(common_u11)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel STARPU_LU(model_11) =
{
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_11_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_11_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_11)
#endif
};

struct starpu_codelet cl11 =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {STARPU_LU(cpu_u11)},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {STARPU_LU(cublas_u11)},
	CAN_EXECUTE
#elif defined(STARPU_SIMGRID)
	.cuda_funcs = {(void*)1},
#endif
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.model = &STARPU_LU(model_11)
};
/*
 * U11 with pivoting
 */
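/* Same unblocked factorization of the diagonal block, but with partial
 * pivoting: when the current pivot is smaller in magnitude than
 * PIVOT_THRESHHOLD, the largest remaining candidate is located with IAMAX,
 * the swap is recorded in piv->piv (shifted by piv->first so the index is
 * global), and the corresponding rows are exchanged with SWAP before the
 * scaling and rank-1 update proceed as in common_u11. */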
static inline void STARPU_LU(common_u11_pivot)(void *descr[],
					       int s, void *_args)
{
	TYPE *sub11;
	sub11 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);

	unsigned long nx = STARPU_MATRIX_GET_NX(descr[0]);
	unsigned long ld = STARPU_MATRIX_GET_LD(descr[0]);

	unsigned long z;

	struct piv_s *piv = _args;
	unsigned *ipiv = piv->piv;
	unsigned first = piv->first;

	switch (s)
	{
		case 0:
			for (z = 0; z < nx; z++)
			{
				TYPE pivot;
				pivot = sub11[z+z*ld];

				if (fabs((double)(pivot)) < PIVOT_THRESHHOLD)
				{
					/* find the pivot */
					int piv_ind = CPU_IAMAX(nx - z, &sub11[z*(ld+1)], ld);

					ipiv[z + first] = piv_ind + z + first;

					/* swap if needed */
					if (piv_ind != 0)
					{
						CPU_SWAP(nx, &sub11[z*ld], 1, &sub11[(z+piv_ind)*ld], 1);
					}

					pivot = sub11[z+z*ld];
				}

				STARPU_ASSERT(pivot != 0.0);

				CPU_SCAL(nx - z - 1, (1.0/pivot), &sub11[z+(z+1)*ld], ld);

				CPU_GER(nx - z - 1, nx - z - 1, -1.0,
						&sub11[(z+1)+z*ld], 1,
						&sub11[z+(z+1)*ld], ld,
						&sub11[(z+1) + (z+1)*ld], ld);
			}

			break;

#ifdef STARPU_USE_CUDA
		case 1:
			for (z = 0; z < nx; z++)
			{
				TYPE pivot;
				TYPE inv_pivot;
				cudaMemcpyAsync(&pivot, &sub11[z+z*ld], sizeof(TYPE), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
				cudaStreamSynchronize(starpu_cuda_get_local_stream());

				if (fabs((double)(pivot)) < PIVOT_THRESHHOLD)
				{
					/* find the pivot */
					int piv_ind = CUBLAS_IAMAX(nx - z, (CUBLAS_TYPE*)&sub11[z*(ld+1)], ld) - 1;

					ipiv[z + first] = piv_ind + z + first;

					/* swap if needed */
					if (piv_ind != 0)
					{
						CUBLAS_SWAP(nx, (CUBLAS_TYPE*)&sub11[z*ld], 1, (CUBLAS_TYPE*)&sub11[(z+piv_ind)*ld], 1);
					}

					cudaMemcpyAsync(&pivot, &sub11[z+z*ld], sizeof(TYPE), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
					cudaStreamSynchronize(starpu_cuda_get_local_stream());
				}

				STARPU_ASSERT(pivot != 0.0);

				inv_pivot = 1.0/pivot;
				CUBLAS_SCAL(nx - z - 1, *(CUBLAS_TYPE*)&inv_pivot, (CUBLAS_TYPE*)&sub11[z+(z+1)*ld], ld);

				CUBLAS_GER(nx - z - 1, nx - z - 1, *(CUBLAS_TYPE*)&m1,
						(CUBLAS_TYPE*)&sub11[(z+1)+z*ld], 1,
						(CUBLAS_TYPE*)&sub11[z+(z+1)*ld], ld,
						(CUBLAS_TYPE*)&sub11[(z+1) + (z+1)*ld], ld);
			}

			cudaStreamSynchronize(starpu_cuda_get_local_stream());

			break;
#endif

		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u11_pivot)(void *descr[], void *_args)
{
	STARPU_LU(common_u11_pivot)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u11_pivot)(void *descr[], void *_args)
{
	STARPU_LU(common_u11_pivot)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel STARPU_LU(model_11_pivot) =
{
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_11_pivot_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_11_pivot_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_11_pivot)
#endif
};

struct starpu_codelet cl11_pivot =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {STARPU_LU(cpu_u11_pivot)},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {STARPU_LU(cublas_u11_pivot)},
	CAN_EXECUTE
#elif defined(STARPU_SIMGRID)
	.cuda_funcs = {(void*)1},
#endif
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.model = &STARPU_LU(model_11_pivot)
};
/*
 * Pivoting
 */
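/* Applies the row swaps recorded by the pivoting factorization to another
 * block of the matrix: for each local row whose recorded pivot index (ipiv,
 * shifted back by piv->first) differs from the row itself, the two rows are
 * exchanged with a BLAS SWAP. */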
static inline void STARPU_LU(common_pivot)(void *descr[],
					   int s, void *_args)
{
	TYPE *matrix;
	matrix = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);

	unsigned long nx = STARPU_MATRIX_GET_NX(descr[0]);
	unsigned long ld = STARPU_MATRIX_GET_LD(descr[0]);

	unsigned row;

	struct piv_s *piv = _args;
	unsigned *ipiv = piv->piv;
	unsigned first = piv->first;

	switch (s)
	{
		case 0:
			for (row = 0; row < nx; row++)
			{
				unsigned rowpiv = ipiv[row+first] - first;
				if (rowpiv != row)
				{
					CPU_SWAP(nx, &matrix[row*ld], 1, &matrix[rowpiv*ld], 1);
				}
			}
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			for (row = 0; row < nx; row++)
			{
				unsigned rowpiv = ipiv[row+first] - first;
				if (rowpiv != row)
				{
					CUBLAS_SWAP(nx, (CUBLAS_TYPE*)&matrix[row*ld], 1, (CUBLAS_TYPE*)&matrix[rowpiv*ld], 1);
				}
			}

			break;
#endif

		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_pivot)(void *descr[], void *_args)
{
	STARPU_LU(common_pivot)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_pivot)(void *descr[], void *_args)
{
	STARPU_LU(common_pivot)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel STARPU_LU(model_pivot) =
{
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_pivot_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_pivot_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_pivot)
#endif
};

struct starpu_codelet cl_pivot =
{
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_funcs = {STARPU_LU(cpu_pivot)},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {STARPU_LU(cublas_pivot)},
	CAN_EXECUTE
#elif defined(STARPU_SIMGRID)
	.cuda_funcs = {(void*)1},
#endif
	.cuda_flags = {STARPU_CUDA_ASYNC},
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.model = &STARPU_LU(model_pivot)
};
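/*
 * Illustrative sketch only, not part of the original kernel file: one way the
 * codelets above (cl11_pivot, cl_pivot, cl12, cl21, cl22) could be submitted
 * with starpu_task_insert() for a single elimination step of the blocked LU.
 * The helper name, the block[i][j] handle array and the buffer ordering for
 * cl22 are hypothetical; the real driver (xlu*.c) builds its task graph its
 * own way.  Kept in #if 0 so it is never compiled.
 */
#if 0
static void submit_lu_step(unsigned k, unsigned nblocks,
			   starpu_data_handle_t **block /* block[i][j], hypothetical */,
			   struct piv_s *piv_k)
{
	unsigned i, j;

	/* Factor the diagonal block, recording pivots through piv_k
	 * (copied into the task's cl_arg by STARPU_VALUE). */
	starpu_task_insert(&cl11_pivot,
			   STARPU_VALUE, piv_k, sizeof(*piv_k),
			   STARPU_RW, block[k][k], 0);

	for (j = k + 1; j < nblocks; j++)
	{
		/* Replay the recorded row swaps on the rest of the panel,
		 * then solve the triangular system against the diagonal block. */
		starpu_task_insert(&cl_pivot,
				   STARPU_VALUE, piv_k, sizeof(*piv_k),
				   STARPU_RW, block[k][j], 0);
		starpu_task_insert(&cl12, STARPU_R, block[k][k], STARPU_RW, block[k][j], 0);
	}

	for (i = k + 1; i < nblocks; i++)
		starpu_task_insert(&cl21, STARPU_R, block[k][k], STARPU_RW, block[i][k], 0);

	/* Trailing-submatrix GEMM updates; the two STARPU_R handles must be
	 * passed in the order the kernel expects for descr[0] and descr[1]. */
	for (i = k + 1; i < nblocks; i++)
		for (j = k + 1; j < nblocks; j++)
			starpu_task_insert(&cl22,
					   STARPU_R, block[i][k], STARPU_R, block[k][j],
					   STARPU_RW, block[i][j], 0);
}
#endif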