xlu_kernels.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009, 2010-2011  Université de Bordeaux 1
 * Copyright (C) 2010  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "xlu.h"
#include <math.h>

#define str(s) #s
#define xstr(s) str(s)
#define STARPU_LU_STR(name) xstr(STARPU_LU(name))
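
/* TYPE, CUBLAS_TYPE, the STARPU_LU() name-mangling macro, PIVOT_THRESHHOLD
 * and the CPU_xxx / CUBLAS_xxx BLAS wrappers used below are expected to be
 * provided by "xlu.h" (or by the precision-specific header that includes it);
 * nothing in this file defines them. */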

#ifdef STARPU_USE_CUDA
static const TYPE p1 =  1.0f;
static const TYPE m1 = -1.0f;
#endif

/*
 *	U22
 */
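/* Trailing update of the blocked LU factorization: one GEMM computing
 * center <- center - right * left (alpha = -1, beta = 1).  As for every
 * kernel in this file, s selects the implementation: 0 runs the CPU BLAS,
 * 1 runs CUBLAS and then waits for the device to finish. */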
static inline void STARPU_LU(common_u22)(void *descr[],
				int s, __attribute__((unused)) void *_args)
{
	TYPE *right	= (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);
	TYPE *left	= (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);
	TYPE *center	= (TYPE *)STARPU_MATRIX_GET_PTR(descr[2]);

	unsigned dx = STARPU_MATRIX_GET_NX(descr[2]);
	unsigned dy = STARPU_MATRIX_GET_NY(descr[2]);
	unsigned dz = STARPU_MATRIX_GET_NY(descr[0]);

	unsigned ld12 = STARPU_MATRIX_GET_LD(descr[0]);
	unsigned ld21 = STARPU_MATRIX_GET_LD(descr[1]);
	unsigned ld22 = STARPU_MATRIX_GET_LD(descr[2]);

#ifdef STARPU_USE_CUDA
	cublasStatus status;
	cudaError_t cures;
#endif

	switch (s) {
		case 0:
			CPU_GEMM("N", "N", dy, dx, dz,
				(TYPE)-1.0, right, ld21, left, ld12,
				(TYPE)1.0, center, ld22);
			break;

#ifdef STARPU_USE_CUDA
		case 1: {
			CUBLAS_GEMM('n', 'n', dx, dy, dz,
				*(CUBLAS_TYPE*)&m1, (CUBLAS_TYPE *)right, ld21, (CUBLAS_TYPE *)left, ld12,
				*(CUBLAS_TYPE*)&p1, (CUBLAS_TYPE *)center, ld22);

			status = cublasGetError();
			if (STARPU_UNLIKELY(status != CUBLAS_STATUS_SUCCESS))
				STARPU_ABORT();

			if (STARPU_UNLIKELY((cures = cudaThreadSynchronize()) != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			break;
		}
#endif
		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u22)(void *descr[], void *_args)
{
	STARPU_LU(common_u22)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u22)(void *descr[], void *_args)
{
	STARPU_LU(common_u22)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel_t STARPU_LU(model_22) = {
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_22_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_22_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_22)
#endif
};

starpu_codelet cl22 = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = STARPU_LU(cpu_u22),
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPU_LU(cublas_u22),
#endif
	.nbuffers = 3,
	.model = &STARPU_LU(model_22)
};
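
/* For reference, a minimal sketch (under the pre-1.0 task API used by this
 * example) of how cl22 would typically be submitted.  The handles right_h,
 * left_h and center_h are hypothetical; the actual driver code of the example
 * builds them from the blocked input matrix.
 *
 *	struct starpu_task *task = starpu_task_create();
 *	task->cl = &cl22;
 *	task->buffers[0].handle = right_h;  task->buffers[0].mode = STARPU_R;
 *	task->buffers[1].handle = left_h;   task->buffers[1].mode = STARPU_R;
 *	task->buffers[2].handle = center_h; task->buffers[2].mode = STARPU_RW;
 *	if (starpu_task_submit(task) != 0)
 *		STARPU_ABORT();
 */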

/*
 *	U12
 */
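/* Triangular solve for a block of the current row: solve L11 * U12 = A12 for
 * U12 (see the comment in the kernel body).  descr[0] holds the already
 * factored diagonal block, descr[1] holds A12 and is overwritten with U12. */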
static inline void STARPU_LU(common_u12)(void *descr[],
				int s, __attribute__((unused)) void *_args)
{
	TYPE *sub11;
	TYPE *sub12;

	sub11 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);
	sub12 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld11 = STARPU_MATRIX_GET_LD(descr[0]);
	unsigned ld12 = STARPU_MATRIX_GET_LD(descr[1]);

	unsigned nx12 = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny12 = STARPU_MATRIX_GET_NY(descr[1]);

#ifdef STARPU_USE_CUDA
	cublasStatus status;
	cudaError_t cures;
#endif

	/* solve L11 U12 = A12 (find U12) */
	switch (s) {
		case 0:
			CPU_TRSM("L", "L", "N", "N", nx12, ny12,
					(TYPE)1.0, sub11, ld11, sub12, ld12);
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			CUBLAS_TRSM('L', 'L', 'N', 'N', ny12, nx12,
					*(CUBLAS_TYPE*)&p1, (CUBLAS_TYPE*)sub11, ld11, (CUBLAS_TYPE*)sub12, ld12);

			status = cublasGetError();
			if (STARPU_UNLIKELY(status != CUBLAS_STATUS_SUCCESS))
				STARPU_ABORT();

			if (STARPU_UNLIKELY((cures = cudaThreadSynchronize()) != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			break;
#endif
		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u12)(void *descr[], void *_args)
{
	STARPU_LU(common_u12)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u12)(void *descr[], void *_args)
{
	STARPU_LU(common_u12)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel_t STARPU_LU(model_12) = {
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_12_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_12_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_12)
#endif
};

starpu_codelet cl12 = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = STARPU_LU(cpu_u12),
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPU_LU(cublas_u12),
#endif
	.nbuffers = 2,
	.model = &STARPU_LU(model_12)
};

/*
 *	U21
 */
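/* Triangular solve for a block of the current column: a right-sided TRSM
 * against the unit-diagonal upper triangle of the factored diagonal block,
 * i.e. sub21 <- sub21 * U11^(-1).  descr[0] is the diagonal block, descr[1]
 * is overwritten in place. */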
static inline void STARPU_LU(common_u21)(void *descr[],
				int s, __attribute__((unused)) void *_args)
{
	TYPE *sub11;
	TYPE *sub21;

	sub11 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);
	sub21 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);

	unsigned ld11 = STARPU_MATRIX_GET_LD(descr[0]);
	unsigned ld21 = STARPU_MATRIX_GET_LD(descr[1]);

	unsigned nx21 = STARPU_MATRIX_GET_NX(descr[1]);
	unsigned ny21 = STARPU_MATRIX_GET_NY(descr[1]);

#ifdef STARPU_USE_CUDA
	cublasStatus status;
#endif

	switch (s) {
		case 0:
			CPU_TRSM("R", "U", "N", "U", nx21, ny21,
					(TYPE)1.0, sub11, ld11, sub21, ld21);
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			CUBLAS_TRSM('R', 'U', 'N', 'U', ny21, nx21,
					*(CUBLAS_TYPE*)&p1, (CUBLAS_TYPE*)sub11, ld11, (CUBLAS_TYPE*)sub21, ld21);

			status = cublasGetError();
			if (status != CUBLAS_STATUS_SUCCESS)
				STARPU_ABORT();

			cudaThreadSynchronize();

			break;
#endif
		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u21)(void *descr[], void *_args)
{
	STARPU_LU(common_u21)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u21)(void *descr[], void *_args)
{
	STARPU_LU(common_u21)(descr, 1, _args);
}
#endif

static struct starpu_perfmodel_t STARPU_LU(model_21) = {
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_21_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_21_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_21)
#endif
};

starpu_codelet cl21 = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = STARPU_LU(cpu_u21),
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPU_LU(cublas_u21),
#endif
	.nbuffers = 2,
	.model = &STARPU_LU(model_21)
};

/*
 *	U11
 */
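/* Unblocked, non-pivoting LU factorization of a diagonal block: for each
 * index z, the segment past the diagonal entry is scaled by 1/pivot (SCAL)
 * and the trailing (nx-z-1) x (nx-z-1) part of the block is updated with a
 * rank-1 GER.  The CUDA variant copies each pivot back to the host before
 * inverting it, so the loop is serialized by small device-to-host transfers. */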
static inline void STARPU_LU(common_u11)(void *descr[],
				int s, __attribute__((unused)) void *_args)
{
	TYPE *sub11;

	sub11 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);

	unsigned long nx = STARPU_MATRIX_GET_NX(descr[0]);
	unsigned long ld = STARPU_MATRIX_GET_LD(descr[0]);

	unsigned long z;

	switch (s) {
		case 0:
			for (z = 0; z < nx; z++)
			{
				TYPE pivot;
				pivot = sub11[z+z*ld];
				STARPU_ASSERT(pivot != 0.0);

				CPU_SCAL(nx - z - 1, (1.0/pivot), &sub11[z+(z+1)*ld], ld);

				CPU_GER(nx - z - 1, nx - z - 1, -1.0,
						&sub11[(z+1)+z*ld], 1,
						&sub11[z+(z+1)*ld], ld,
						&sub11[(z+1) + (z+1)*ld], ld);
			}
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			for (z = 0; z < nx; z++)
			{
				TYPE pivot;
				TYPE inv_pivot;

				cudaMemcpy(&pivot, &sub11[z+z*ld], sizeof(TYPE), cudaMemcpyDeviceToHost);
				cudaStreamSynchronize(0);

				STARPU_ASSERT(pivot != 0.0);

				inv_pivot = 1.0/pivot;
				CUBLAS_SCAL(nx - z - 1, *(CUBLAS_TYPE*)&inv_pivot, (CUBLAS_TYPE*)&sub11[z+(z+1)*ld], ld);

				CUBLAS_GER(nx - z - 1, nx - z - 1, *(CUBLAS_TYPE*)&m1,
						(CUBLAS_TYPE*)&sub11[(z+1)+z*ld], 1,
						(CUBLAS_TYPE*)&sub11[z+(z+1)*ld], ld,
						(CUBLAS_TYPE*)&sub11[(z+1) + (z+1)*ld], ld);
			}

			cudaThreadSynchronize();
			break;
#endif
		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u11)(void *descr[], void *_args)
{
	STARPU_LU(common_u11)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u11)(void *descr[], void *_args)
{
	STARPU_LU(common_u11)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel_t STARPU_LU(model_11) = {
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_11_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_11_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_11)
#endif
};

starpu_codelet cl11 = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = STARPU_LU(cpu_u11),
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPU_LU(cublas_u11),
#endif
	.nbuffers = 1,
	.model = &STARPU_LU(model_11)
};

/*
 *	U11 with pivoting
 */
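/* Same per-block factorization as common_u11 above, but with partial
 * pivoting: when the magnitude of the current diagonal entry is below
 * PIVOT_THRESHHOLD, the largest remaining candidate is located with IAMAX,
 * the corresponding rows are swapped, and the permutation is recorded in
 * piv->piv (offset by piv->first) so that the pivoting codelet below can
 * replay it on the other blocks. */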
static inline void STARPU_LU(common_u11_pivot)(void *descr[],
				int s, void *_args)
{
	TYPE *sub11;

	sub11 = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);

	unsigned long nx = STARPU_MATRIX_GET_NX(descr[0]);
	unsigned long ld = STARPU_MATRIX_GET_LD(descr[0]);

	unsigned long z;

	struct piv_s *piv = _args;
	unsigned *ipiv = piv->piv;
	unsigned first = piv->first;

	switch (s) {
		case 0:
			for (z = 0; z < nx; z++)
			{
				TYPE pivot;
				pivot = sub11[z+z*ld];

				if (fabs((double)(pivot)) < PIVOT_THRESHHOLD)
				{
					/* find the pivot */
					int piv_ind = CPU_IAMAX(nx - z, &sub11[z*(ld+1)], ld);

					ipiv[z + first] = piv_ind + z + first;

					/* swap if needed */
					if (piv_ind != 0)
					{
						CPU_SWAP(nx, &sub11[z*ld], 1, &sub11[(z+piv_ind)*ld], 1);
					}

					pivot = sub11[z+z*ld];
				}

				STARPU_ASSERT(pivot != 0.0);

				CPU_SCAL(nx - z - 1, (1.0/pivot), &sub11[z+(z+1)*ld], ld);

				CPU_GER(nx - z - 1, nx - z - 1, -1.0,
						&sub11[(z+1)+z*ld], 1,
						&sub11[z+(z+1)*ld], ld,
						&sub11[(z+1) + (z+1)*ld], ld);
			}
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			for (z = 0; z < nx; z++)
			{
				TYPE pivot;
				TYPE inv_pivot;

				cudaMemcpy(&pivot, &sub11[z+z*ld], sizeof(TYPE), cudaMemcpyDeviceToHost);
				cudaStreamSynchronize(0);

				if (fabs((double)(pivot)) < PIVOT_THRESHHOLD)
				{
					/* find the pivot */
					int piv_ind = CUBLAS_IAMAX(nx - z, (CUBLAS_TYPE*)&sub11[z*(ld+1)], ld) - 1;

					ipiv[z + first] = piv_ind + z + first;

					/* swap if needed */
					if (piv_ind != 0)
					{
						CUBLAS_SWAP(nx, (CUBLAS_TYPE*)&sub11[z*ld], 1, (CUBLAS_TYPE*)&sub11[(z+piv_ind)*ld], 1);
					}

					cudaMemcpy(&pivot, &sub11[z+z*ld], sizeof(TYPE), cudaMemcpyDeviceToHost);
					cudaStreamSynchronize(0);
				}

				STARPU_ASSERT(pivot != 0.0);

				inv_pivot = 1.0/pivot;
				CUBLAS_SCAL(nx - z - 1, *(CUBLAS_TYPE*)&inv_pivot, (CUBLAS_TYPE*)&sub11[z+(z+1)*ld], ld);

				CUBLAS_GER(nx - z - 1, nx - z - 1, *(CUBLAS_TYPE*)&m1,
						(CUBLAS_TYPE*)&sub11[(z+1)+z*ld], 1,
						(CUBLAS_TYPE*)&sub11[z+(z+1)*ld], ld,
						(CUBLAS_TYPE*)&sub11[(z+1) + (z+1)*ld], ld);
			}

			cudaThreadSynchronize();
			break;
#endif
		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_u11_pivot)(void *descr[], void *_args)
{
	STARPU_LU(common_u11_pivot)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_u11_pivot)(void *descr[], void *_args)
{
	STARPU_LU(common_u11_pivot)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel_t STARPU_LU(model_11_pivot) = {
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_11_pivot_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_11_pivot_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_11_pivot)
#endif
};

starpu_codelet cl11_pivot = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = STARPU_LU(cpu_u11_pivot),
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPU_LU(cublas_u11_pivot),
#endif
	.nbuffers = 1,
	.model = &STARPU_LU(model_11_pivot)
};

/*
 *	Pivoting
 */
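/* Replay the row swaps recorded by cl11_pivot on another block: for each
 * local row, look up its target in piv->piv (shifted back by piv->first) and
 * swap the two rows when they differ.  The struct piv_s descriptor is passed
 * through the codelet's cl_arg argument. */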
static inline void STARPU_LU(common_pivot)(void *descr[],
				int s, void *_args)
{
	TYPE *matrix;

	matrix = (TYPE *)STARPU_MATRIX_GET_PTR(descr[0]);
	unsigned long nx = STARPU_MATRIX_GET_NX(descr[0]);
	unsigned long ld = STARPU_MATRIX_GET_LD(descr[0]);

	unsigned row;

	struct piv_s *piv = _args;
	unsigned *ipiv = piv->piv;
	unsigned first = piv->first;

	switch (s) {
		case 0:
			for (row = 0; row < nx; row++)
			{
				unsigned rowpiv = ipiv[row+first] - first;
				if (rowpiv != row)
				{
					CPU_SWAP(nx, &matrix[row*ld], 1, &matrix[rowpiv*ld], 1);
				}
			}
			break;

#ifdef STARPU_USE_CUDA
		case 1:
			for (row = 0; row < nx; row++)
			{
				unsigned rowpiv = ipiv[row+first] - first;
				if (rowpiv != row)
				{
					CUBLAS_SWAP(nx, (CUBLAS_TYPE*)&matrix[row*ld], 1, (CUBLAS_TYPE*)&matrix[rowpiv*ld], 1);
				}
			}

			cudaThreadSynchronize();
			break;
#endif
		default:
			STARPU_ABORT();
			break;
	}
}

void STARPU_LU(cpu_pivot)(void *descr[], void *_args)
{
	STARPU_LU(common_pivot)(descr, 0, _args);
}

#ifdef STARPU_USE_CUDA
void STARPU_LU(cublas_pivot)(void *descr[], void *_args)
{
	STARPU_LU(common_pivot)(descr, 1, _args);
}
#endif /* STARPU_USE_CUDA */

static struct starpu_perfmodel_t STARPU_LU(model_pivot) = {
	.type = STARPU_HISTORY_BASED,
#ifdef STARPU_ATLAS
	.symbol = STARPU_LU_STR(lu_model_pivot_atlas)
#elif defined(STARPU_GOTO)
	.symbol = STARPU_LU_STR(lu_model_pivot_goto)
#else
	.symbol = STARPU_LU_STR(lu_model_pivot)
#endif
};

starpu_codelet cl_pivot = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = STARPU_LU(cpu_pivot),
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPU_LU(cublas_pivot),
#endif
	.nbuffers = 1,
	.model = &STARPU_LU(model_pivot)
};