/* starpufftx2d.c — 2D FFT plans and kernels for libstarpufft. */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
/* Number of packages the second round of FFTs is split into, per dimension. */
#define DIV_2D_N 8
#define DIV_2D_M 8
/* The per-step tag payload is split evenly between the i and j chunk indices. */
#define I_SHIFT (I_BITS/2)
#define J_BITS I_SHIFT
/* Build the tag of step `step` for the 2D chunk (i,j) of `plan`. */
#define STEP_TAG_2D(plan, step, i, j) _STEP_TAG(plan, step, ((starpu_tag_t) i << I_SHIFT) | (starpu_tag_t) j)
  21. #ifdef __STARPU_USE_CUDA
/* Twist the full vector into a (n2,m2) chunk — CUDA implementation.
 *
 * descr[0]: full input vector (read)
 * descr[1]: twisted1 chunk buffer for chunk (i,j) (written)
 * _args:   struct STARPUFFT(args) carrying the plan and the chunk
 *          coordinates (i,j).
 */
static void
STARPUFFT(twist1_2d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* The actual twist is a CUDA kernel launched on the worker's local stream. */
	STARPUFFT(cuda_twist1_2d_host)(in, twisted1, i, j, n1, n2, m1, m2);

	/* StarPU expects the output buffer to be ready when the codelet returns. */
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  39. /* fft1:
  40. *
  41. * Perform one fft of size n2,m2 */
  42. static void
  43. STARPUFFT(fft1_2d_plan_gpu)(void *args)
  44. {
  45. STARPUFFT(plan) plan = args;
  46. int n2 = plan->n2[0];
  47. int m2 = plan->n2[1];
  48. int workerid = starpu_worker_get_id_check();
  49. cufftResult cures;
  50. cures = cufftPlan2d(&plan->plans[workerid].plan1_cuda, n2, m2, _CUFFT_C2C);
  51. if (cures != CUFFT_SUCCESS)
  52. STARPU_CUFFT_REPORT_ERROR(cures);
  53. cufftSetStream(plan->plans[workerid].plan1_cuda, starpu_cuda_get_local_stream());
  54. if (cures != CUFFT_SUCCESS)
  55. STARPU_CUFFT_REPORT_ERROR(cures);
  56. }
/* Perform one (n2,m2) FFT of chunk (i,j) on CUDA, then multiply the result
 * in place by the precomputed twiddle factors for that chunk.
 *
 * descr[0]: twisted1 input chunk (read)
 * descr[1]: fft1 output chunk (written)
 * descr[2], descr[3]: roots of unity along each dimension (read)
 */
static void
STARPUFFT(fft1_2d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int n2 = plan->n2[0];
	int m2 = plan->n2[1];
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	const _cufftComplex * restrict roots0 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[2]);
	const _cufftComplex * restrict roots1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[3]);
	int workerid = starpu_worker_get_id_check();

	/* Per-worker task counter, used for load statistics. */
	task_per_worker[workerid]++;

	/* The cuFFT plan was bound to the local stream at plan-creation time. */
	cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);

	/* synchronization is done after the twiddling */
	STARPUFFT(cuda_twiddle_2d_host)(out, roots0, roots1, n2, m2, i, j);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  80. /* fft2:
  81. *
  82. * Perform n3*m3 ffts of size n1,m1 */
  83. static void
  84. STARPUFFT(fft2_2d_plan_gpu(void *args))
  85. {
  86. STARPUFFT(plan) plan = args;
  87. int n1 = plan->n1[0];
  88. int m1 = plan->n1[1];
  89. cufftResult cures;
  90. int workerid = starpu_worker_get_id_check();
  91. cures = cufftPlan2d(&plan->plans[workerid].plan2_cuda, n1, m1, _CUFFT_C2C);
  92. if (cures != CUFFT_SUCCESS)
  93. STARPU_CUFFT_REPORT_ERROR(cures);
  94. cufftSetStream(plan->plans[workerid].plan2_cuda, starpu_cuda_get_local_stream());
  95. if (cures != CUFFT_SUCCESS)
  96. STARPU_CUFFT_REPORT_ERROR(cures);
  97. }
  98. static void
  99. STARPUFFT(fft2_2d_kernel_gpu)(void *descr[], void *_args)
  100. {
  101. struct STARPUFFT(args) *args = _args;
  102. STARPUFFT(plan) plan = args->plan;
  103. int n1 = plan->n1[0];
  104. int n2 = plan->n2[0];
  105. int m1 = plan->n1[1];
  106. int m2 = plan->n2[1];
  107. int n3 = n2/DIV_2D_N;
  108. int m3 = m2/DIV_2D_M;
  109. int n;
  110. cufftResult cures;
  111. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  112. _cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  113. int workerid = starpu_worker_get_id_check();
  114. task_per_worker[workerid]++;
  115. for (n = 0; n < n3*m3; n++) {
  116. cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in + n * n1*m1, out + n * n1*m1, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  117. if (cures != CUFFT_SUCCESS)
  118. STARPU_CUFFT_REPORT_ERROR(cures);
  119. }
  120. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  121. }
  122. #endif
  123. /* Twist the full vector into a n2,m2 chunk */
  124. static void
  125. STARPUFFT(twist1_2d_kernel_cpu)(void *descr[], void *_args)
  126. {
  127. struct STARPUFFT(args) *args = _args;
  128. STARPUFFT(plan) plan = args->plan;
  129. int i = args->i;
  130. int j = args->j;
  131. int k, l;
  132. int n1 = plan->n1[0];
  133. int n2 = plan->n2[0];
  134. int m1 = plan->n1[1];
  135. int m2 = plan->n2[1];
  136. int m = plan->n[1];
  137. STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  138. STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  139. /* printf("twist1 %d %d %g\n", i, j, (double) cabs(plan->in[i+j])); */
  140. for (k = 0; k < n2; k++)
  141. for (l = 0; l < m2; l++)
  142. twisted1[k*m2+l] = in[i*m+j+k*m*n1+l*m1];
  143. }
  144. #ifdef STARPU_HAVE_FFTW
/* Perform an (n2,m2) FFT on the CPU with FFTW, then multiply the result in
 * place by the twiddle factors for chunk (i,j).
 *
 * descr[0]: twisted1 input chunk (read)
 * descr[1]: fft1 output chunk (written)
 */
static void
STARPUFFT(fft1_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int k, l;
	int n2 = plan->n2[0];
	int m2 = plan->n2[1];
	int workerid = starpu_worker_get_id_check();

	/* Per-worker task counter, used for load statistics. */
	task_per_worker[workerid]++;

	STARPUFFT(complex) *twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) *fft1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* printf("fft1 %d %d %g\n", i, j, (double) cabs(twisted1[0])); */

	/* plan1_cpu was created at plan time for exactly this (n2,m2) shape. */
	_FFTW(execute_dft)(plan->plans[workerid].plan1_cpu, twisted1, fft1);

	/* Twiddle: element (k,l) is scaled by w_n^(i*k) * w_m^(j*l). */
	for (k = 0; k < n2; k++)
		for (l = 0; l < m2; l++)
			fft1[k*m2 + l] = fft1[k*m2 + l] * plan->roots[0][i*k] * plan->roots[1][j*l];
}
  166. #endif
/* Twist the full vector into a package of (n2/DIV_2D_N)*(m2/DIV_2D_M)
 * (n1,m1) chunks.
 *
 * descr[0]: twisted2 package buffer (written). The input is read directly
 * from plan->fft1 (main RAM), which is why this codelet is CPU-only.
 */
static void
STARPUFFT(twist2_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int kk = args->kk;	/* between 0 and DIV_2D_N */
	int ll = args->ll;	/* between 0 and DIV_2D_M */
	int kkk, lll;		/* between 0,0 and n3,m3 */
	int i, j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);

	/* printf("twist2 %d %d %g\n", kk, ll, (double) cabs(plan->fft1[kk+ll])); */

	/* Gather, for every (k,l) of this package, the (i,j) samples spread
	 * across the first-round fft1 results into a contiguous (n1,m1) chunk. */
	for (kkk = 0; kkk < n3; kkk++) {
		int k = kk * n3 + kkk;
		for (lll = 0; lll < m3; lll++) {
			int l = ll * m3 + lll;
			for (i = 0; i < n1; i++)
				for (j = 0; j < m1; j++)
					twisted2[kkk*m3*n1*m1+lll*n1*m1+i*m1+j] = plan->fft1[i*n1*n2*m2+j*n2*m2+k*m2+l];
		}
	}
}
  195. #ifdef STARPU_HAVE_FFTW
/* Perform (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) FFTs on the CPU with a single
 * FFTW "many" plan (plan2_cpu batches all the sub-transforms).
 *
 * descr[0]: twisted2 package (read), descr[1]: fft2 package (written).
 */
static void
STARPUFFT(fft2_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	/* int kk = args->kk; */
	/* int ll = args->ll; */
	int workerid = starpu_worker_get_id_check();

	/* Per-worker task counter, used for load statistics. */
	task_per_worker[workerid]++;

	STARPUFFT(complex) *twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) *fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* printf("fft2 %d %d %g\n", kk, ll, (double) cabs(twisted2[plan->totsize4-1])); */

	_FFTW(execute_dft)(plan->plans[workerid].plan2_cpu, twisted2, fft2);
}
  211. #endif
/* Spread the package of (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) chunks into the
 * full vector.
 *
 * descr[0]: fft2 package (read). The output is written directly to
 * plan->out (the application buffer), which is why this codelet is CPU-only.
 */
static void
STARPUFFT(twist3_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int kk = args->kk;	/* between 0 and DIV_2D_N */
	int ll = args->ll;	/* between 0 and DIV_2D_M */
	int kkk, lll;		/* between 0,0 and n3,m3 */
	int i, j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	int m = plan->n[1];
	const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);

	/* printf("twist3 %d %d %g\n", kk, ll, (double) cabs(fft2[0])); */

	/* Inverse of twist2: scatter each contiguous (n1,m1) chunk of this
	 * package back to its strided position in the final output. */
	for (kkk = 0; kkk < n3; kkk++) {
		int k = kk * n3 + kkk;
		for (lll = 0; lll < m3; lll++) {
			int l = ll * m3 + lll;
			for (i = 0; i < n1; i++)
				for (j = 0; j < m1; j++)
					plan->out[i*n2*m+j*m2+k*m+l] = fft2[kkk*m3*n1*m1+lll*n1*m1+i*m1+j];
		}
	}
}
/* History-based performance models, one per codelet. The TYPE prefix keeps
 * the float and double variants calibrated separately. */
struct starpu_perfmodel STARPUFFT(twist1_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist1_2d"
};

struct starpu_perfmodel STARPUFFT(fft1_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft1_2d"
};

struct starpu_perfmodel STARPUFFT(twist2_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist2_2d"
};

struct starpu_perfmodel STARPUFFT(fft2_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft2_2d"
};

struct starpu_perfmodel STARPUFFT(twist3_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist3_2d"
};
/* twist1: CPU always, CUDA when available. Reads the full input, writes one
 * twisted1 chunk. */
static struct starpu_codelet STARPUFFT(twist1_2d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
		STARPU_CPU,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(twist1_2d_kernel_gpu)},
#endif
	.cpu_funcs = {STARPUFFT(twist1_2d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist1_2d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "twist1_2d_codelet"
};

/* fft1: needs cuFFT (CUDA) or FFTW (CPU); the `| 0` keeps the initializer
 * valid when neither is available. Buffers: twisted1 in, fft1 out, plus the
 * two read-only roots vectors. */
static struct starpu_codelet STARPUFFT(fft1_2d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft1_2d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft1_2d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft1_2d_model),
	.nbuffers = 4,
	.modes = {STARPU_R, STARPU_W, STARPU_R, STARPU_R},
	.name = "fft1_2d_codelet"
};

/* twist2: CPU-only — it reads plan->fft1 straight from main RAM. */
static struct starpu_codelet STARPUFFT(twist2_2d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist2_2d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist2_2d_model),
	.nbuffers = 1,
	.modes = {STARPU_W},
	.name = "twist2_2d_codelet"
};

/* fft2: second FFT round, cuFFT or FFTW like fft1. */
static struct starpu_codelet STARPUFFT(fft2_2d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft2_2d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft2_2d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft2_2d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "fft2_2d_codelet"
};

/* twist3: CPU-only — it writes plan->out (the application buffer) directly. */
static struct starpu_codelet STARPUFFT(twist3_2d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist3_2d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist3_2d_model),
	.nbuffers = 1,
	.modes = {STARPU_R},
	.name = "twist3_2d_codelet"
};
  337. /*
  338. *
  339. * Sequential version
  340. *
  341. */
  342. #ifdef __STARPU_USE_CUDA
  343. /* Perform one fft of size n,m */
  344. static void
  345. STARPUFFT(fft_2d_plan_gpu)(void *args)
  346. {
  347. STARPUFFT(plan) plan = args;
  348. cufftResult cures;
  349. int n = plan->n[0];
  350. int m = plan->n[1];
  351. int workerid = starpu_worker_get_id_check();
  352. cures = cufftPlan2d(&plan->plans[workerid].plan_cuda, n, m, _CUFFT_C2C);
  353. if (cures != CUFFT_SUCCESS)
  354. STARPU_CUFFT_REPORT_ERROR(cures);
  355. cufftSetStream(plan->plans[workerid].plan_cuda, starpu_cuda_get_local_stream());
  356. if (cures != CUFFT_SUCCESS)
  357. STARPU_CUFFT_REPORT_ERROR(cures);
  358. }
/* Sequential path: run the whole (n,m) FFT in one cuFFT call on CUDA.
 *
 * descr[0]: input vector (read), descr[1]: output vector (written).
 * args is the plan itself (not a struct STARPUFFT(args)).
 */
static void
STARPUFFT(fft_2d_kernel_gpu)(void *descr[], void *args)
{
	STARPUFFT(plan) plan = args;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id_check();

	/* Per-worker task counter, used for load statistics. */
	task_per_worker[workerid]++;

	cures = _cufftExecC2C(plan->plans[workerid].plan_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);

	/* The transform was queued on the local stream; wait for completion. */
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  373. #endif
  374. #ifdef STARPU_HAVE_FFTW
  375. /* Perform one fft of size n,m */
  376. static void
  377. STARPUFFT(fft_2d_kernel_cpu)(void *descr[], void *_args)
  378. {
  379. STARPUFFT(plan) plan = _args;
  380. int workerid = starpu_worker_get_id_check();
  381. task_per_worker[workerid]++;
  382. STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  383. STARPUFFT(complex) * restrict out = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  384. _FFTW(execute_dft)(plan->plans[workerid].plan_cpu, in, out);
  385. }
  386. #endif
/* Performance model and codelet for the sequential (single-task) 2D FFT. */
static struct starpu_perfmodel STARPUFFT(fft_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft_2d"
};

static struct starpu_codelet STARPUFFT(fft_2d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft_2d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft_2d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft_2d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "fft_2d_codelet"
};
/* Create a plan for a 2D complex-to-complex FFT of size (n,m).
 *
 * In the PARALLEL case the transform is decomposed into two rounds of
 * smaller FFTs connected by three data "twists"; this function allocates the
 * intermediate buffers, registers their data handles, and pre-creates the
 * whole task graph (linked by tags), so that starting the FFT later only
 * requires submitting the tasks. In the sequential case only the per-worker
 * full-size FFTW/cuFFT plans are created.
 *
 * n, m:  transform size along each dimension
 * sign:  -1 for forward, anything else for inverse
 * flags: must be 0 (FFTW-style planning flags are not supported yet)
 */
STARPUFFT(plan)
STARPUFFT(plan_dft_2d)(int n, int m, int sign, unsigned flags)
{
	unsigned workerid;
	int n1 = DIV_2D_N;
	int n2 = n / n1;
	int n3;
	int m1 = DIV_2D_M;
	int m2 = m / m1;
	int m3;
	int z;
	struct starpu_task *task;

	if (PARALLEL) {
		/*
		 * Simple strategy:
		 *
		 * - twist1: twist input in n1*m1 (n2,m2) chunks
		 * - fft1: perform n1*m1 (n2,m2) ffts
		 * - twist2: twist into n2*m2 (n1,m1) chunks distributed in
		 *           DIV_2D_N*DIV_2D_M groups
		 * - fft2: perform DIV_2D_N*DIV_2D_M times n3*m3 (n1,m1) ffts
		 * - twist3: twist back into output
		 */

#ifdef __STARPU_USE_CUDA
		/* cufft 2D-3D limited to [2,16384] */
		while (n2 > 16384) {
			n1 *= 2;
			n2 /= 2;
		}
#endif
		STARPU_ASSERT(n == n1*n2);
		/* The chunk index must fit in its half of the tag bit space. */
		STARPU_ASSERT((unsigned long long) n1 < (1ULL << J_BITS));

#ifdef __STARPU_USE_CUDA
		/* cufft 2D-3D limited to [2,16384] */
		while (m2 > 16384) {
			m1 *= 2;
			m2 /= 2;
		}
#endif
		STARPU_ASSERT(m == m1*m2);
		STARPU_ASSERT((unsigned long long) m1 < (1ULL << J_BITS));

		/* distribute the n2*m2 second ffts into DIV_2D_N*DIV_2D_M packages */
		n3 = n2 / DIV_2D_N;
		STARPU_ASSERT(n2 == n3*DIV_2D_N);
		m3 = m2 / DIV_2D_M;
		STARPU_ASSERT(m2 == m3*DIV_2D_M);
	}

	/* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
	STARPU_ASSERT(flags == 0);

	STARPUFFT(plan) plan = malloc(sizeof(*plan));
	memset(plan, 0, sizeof(*plan));

	if (PARALLEL) {
		/* Plan number is used as the high bits of every tag of this plan. */
		plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;

		/* 4bit limitation in the tag space */
		STARPU_ASSERT((unsigned long long) plan->number < (1ULL << NUMBER_BITS));
	}

	plan->dim = 2;
	plan->n = malloc(plan->dim * sizeof(*plan->n));
	plan->n[0] = n;
	plan->n[1] = m;

	if (PARALLEL) {
		check_dims(plan);

		plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
		plan->n1[0] = n1;
		plan->n1[1] = m1;

		plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
		plan->n2[0] = n2;
		plan->n2[1] = m2;
	}

	plan->totsize = n * m;

	if (PARALLEL) {
		plan->totsize1 = n1 * m1;		/* number of first-round chunks */
		plan->totsize2 = n2 * m2;		/* size of one first-round chunk */
		plan->totsize3 = DIV_2D_N * DIV_2D_M;	/* number of second-round packages */
		plan->totsize4 = plan->totsize / plan->totsize3; /* size of one package */
	}
	plan->type = C2C;
	plan->sign = sign;

	if (PARALLEL) {
		/* Compute the w^k just once. */
		compute_roots(plan);
	}

	/* Initialize per-worker working set */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			if (PARALLEL) {
				/* first fft plan: one n2*m2 fft */
				plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_2d)(n2, m2, NULL, (void*) 1, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan1_cpu);

				/* second fft plan: n3*m3 n1*m1 ffts */
				plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
						plan->n1, n3*m3,
						NULL, NULL, 1, plan->totsize1,
						(void*) 1, NULL, 1, plan->totsize1,
						sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
			} else {
				/* fft plan: one fft of size n, m. */
				plan->plans[workerid].plan_cpu = _FFTW(plan_dft_2d)(n, m, NULL, (void*) 1, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan_cpu);
			}
#else
/* #warning libstarpufft can not work correctly if libfftw3 is not installed */
#endif
			break;
		case STARPU_CUDA_WORKER:
			/* cuFFT plans are created below, on the workers themselves. */
			break;
		default:
			/* Do not care, we won't be executing anything there. */
			break;
		}
	}
#ifdef __STARPU_USE_CUDA
	if (PARALLEL) {
		starpu_execute_on_each_worker(STARPUFFT(fft1_2d_plan_gpu), plan, STARPU_CUDA);
		starpu_execute_on_each_worker(STARPUFFT(fft2_2d_plan_gpu), plan, STARPU_CUDA);
	} else {
		starpu_execute_on_each_worker(STARPUFFT(fft_2d_plan_gpu), plan, STARPU_CUDA);
	}
#endif

	if (PARALLEL) {
		/* Allocate buffers. */
		plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
		memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));

		plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
		memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));

		plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
		memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));

		plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
		memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));

		/* Allocate handle arrays */
		plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
		plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
		plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
		plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));

		/* Allocate task arrays */
		plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
		plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
		plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
		plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
		plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));

		/* Allocate codelet argument arrays */
		plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
		plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));

		/* Create first-round tasks: one twist1+fft1 pair per (n2,m2) chunk. */
		for (z = 0; z < plan->totsize1; z++) {
			int i = z / m1, j = z % m1;
#define STEP_TAG(step)	STEP_TAG_2D(plan, step, i, j)

			/* TODO: get rid of tags */

			plan->fft1_args[z].plan = plan;
			plan->fft1_args[z].i = i;
			plan->fft1_args[z].j = j;

			/* Register (n2,m2) chunks */
			starpu_vector_data_register(&plan->twisted1_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));
			starpu_vector_data_register(&plan->fft1_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));

			/* We'll need it on the CPU for the second twist anyway */
			starpu_data_set_wt_mask(plan->fft1_handle[z], 1<<0);

			/* Create twist1 task */
			plan->twist1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist1_2d_codelet);
			/* task->handles[0] = to be filled at execution */
			task->handles[1] = plan->twisted1_handle[z];
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(TWIST1);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that fft1 depends on twisted1 */
			starpu_tag_declare_deps(STEP_TAG(FFT1),
					1, STEP_TAG(TWIST1));

			/* Create FFT1 task */
			plan->fft1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft1_2d_codelet);
			task->handles[0] = plan->twisted1_handle[z];
			task->handles[1] = plan->fft1_handle[z];
			task->handles[2] = plan->roots_handle[0];
			task->handles[3] = plan->roots_handle[1];
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(FFT1);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that to be done with first step we need to have
			 * finished this fft1 */
			starpu_tag_declare_deps(STEP_TAG_2D(plan, JOIN, 0, 0),
					1, STEP_TAG(FFT1));
#undef STEP_TAG
		}

		/* Create join task: empty task acting as the barrier between rounds. */
		plan->join_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_2D(plan, JOIN, 0, 0);
		task->use_tag = 1;
		task->destroy = 0;

		/* Create second-round tasks: one twist2+fft2+twist3 triple per package. */
		for (z = 0; z < plan->totsize3; z++) {
			int kk = z / DIV_2D_M, ll = z % DIV_2D_M;
#define STEP_TAG(step)	STEP_TAG_2D(plan, step, kk, ll)

			plan->fft2_args[z].plan = plan;
			plan->fft2_args[z].kk = kk;
			plan->fft2_args[z].ll = ll;

			/* Register n3*m3 (n1,m1) chunks */
			starpu_vector_data_register(&plan->twisted2_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
			starpu_vector_data_register(&plan->fft2_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));

			/* We'll need it on the CPU for the last twist anyway */
			starpu_data_set_wt_mask(plan->fft2_handle[z], 1<<0);

			/* Tell that twisted2 depends on the whole first step to be
			 * done */
			starpu_tag_declare_deps(STEP_TAG(TWIST2),
					1, STEP_TAG_2D(plan, JOIN, 0, 0));

			/* Create twist2 task */
			plan->twist2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist2_2d_codelet);
			task->handles[0] = plan->twisted2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST2);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that fft2 depends on twisted2 */
			starpu_tag_declare_deps(STEP_TAG(FFT2),
					1, STEP_TAG(TWIST2));

			/* Create FFT2 task */
			plan->fft2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft2_2d_codelet);
			task->handles[0] = plan->twisted2_handle[z];
			task->handles[1] = plan->fft2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(FFT2);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that twist3 depends on fft2 */
			starpu_tag_declare_deps(STEP_TAG(TWIST3),
					1, STEP_TAG(FFT2));

			/* Create twist3 tasks */
			/* These run only on CPUs and thus write directly into the
			 * application output buffer. */
			plan->twist3_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist3_2d_codelet);
			task->handles[0] = plan->fft2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST3);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that to be completely finished we need to have finished this twisted3 */
			starpu_tag_declare_deps(STEP_TAG_2D(plan, END, 0, 0),
					1, STEP_TAG(TWIST3));
#undef STEP_TAG
		}

		/* Create end task: empty, not detached so the caller can wait on it. */
		plan->end_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_2D(plan, END, 0, 0);
		task->use_tag = 1;
		task->destroy = 0;
		task->detach = 0;
	}

	return plan;
}
/* Actually submit all the tasks of a 2D C2C plan.
 *
 * Returns the task to wait on for completion (end_task in PARALLEL mode, the
 * single FFT task otherwise), or NULL if no device can execute the codelets
 * (a submission returned -ENODEV). */
static struct starpu_task *
STARPUFFT(start2dC2C)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	STARPU_ASSERT(plan->type == C2C);
	int z;
	int ret;

	if (PARALLEL) {
		/* Submission order mirrors the tag dependency graph built at
		 * plan time: twist1/fft1 pairs, join, then the second round. */
		for (z=0; z < plan->totsize1; z++) {
			ret = starpu_task_submit(plan->twist1_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->fft1_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_task_submit(plan->join_task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		for (z=0; z < plan->totsize3; z++) {
			ret = starpu_task_submit(plan->twist2_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->fft2_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->twist3_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_task_submit(plan->end_task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		return plan->end_task;
	} else /* !PARALLEL */ {
		struct starpu_task *task;

		/* Create FFT task */
		task = starpu_task_create();
		task->detach = 0;	/* the caller waits on this task */
		task->cl = &STARPUFFT(fft_2d_codelet);
		task->handles[0] = in;
		task->handles[1] = out;
		task->cl_arg = plan;

		ret = starpu_task_submit(task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		return task;
	}
}
  719. /* Free all the tags. The generic code handles freeing the buffers. */
  720. static void
  721. STARPUFFT(free_2d_tags)(STARPUFFT(plan) plan)
  722. {
  723. int i, j;
  724. int n1 = plan->n1[0];
  725. int m1 = plan->n1[1];
  726. if (!PARALLEL)
  727. return;
  728. for (i = 0; i < n1; i++) {
  729. for (j = 0; j < m1; j++) {
  730. starpu_tag_remove(STEP_TAG_2D(plan, TWIST1, i, j));
  731. starpu_tag_remove(STEP_TAG_2D(plan, FFT1, i, j));
  732. }
  733. }
  734. starpu_tag_remove(STEP_TAG_2D(plan, JOIN, 0, 0));
  735. for (i = 0; i < DIV_2D_N; i++) {
  736. for (j = 0; j < DIV_2D_M; j++) {
  737. starpu_tag_remove(STEP_TAG_2D(plan, TWIST2, i, j));
  738. starpu_tag_remove(STEP_TAG_2D(plan, FFT2, i, j));
  739. starpu_tag_remove(STEP_TAG_2D(plan, TWIST3, i, j));
  740. }
  741. }
  742. starpu_tag_remove(STEP_TAG_2D(plan, END, 0, 0));
  743. }