/* starpufftx2d.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2011 Université de Bordeaux 1
  4. * Copyright (C) 2010 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
/* The second round of ffts is distributed into DIV_2D_N * DIV_2D_M packages. */
#define DIV_2D_N 8
#define DIV_2D_M 8
/* The tag index field is split in two halves: the high half holds i, the low half holds j. */
#define I_SHIFT (I_BITS/2)
#define J_BITS I_SHIFT
/* Build a StarPU tag unique per (plan, step, i, j) by packing i and j into the index bits. */
#define STEP_TAG_2D(plan, step, i, j) _STEP_TAG(plan, step, ((starpu_tag_t) i << I_SHIFT) | (starpu_tag_t) j)
  22. #ifdef STARPU_USE_CUDA
  23. /* Twist the full vector into a n2,m2 chunk */
/* Twist the full vector into one (n2,m2) chunk, CUDA implementation.
 * descr[0]: full input vector (read); descr[1]: contiguous twisted1 chunk (written).
 * The actual gather is done on the device by cuda_twist1_2d_host for the
 * chunk selected by (i, j). */
static void
STARPUFFT(twist1_2d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	STARPUFFT(cuda_twist1_2d_host)(in, twisted1, i, j, n1, n2, m1, m2);
	/* The helper launches work on the local stream; wait for it so the
	 * output buffer is complete when this codelet returns. */
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  40. /* fft1:
  41. *
  42. * Perform one fft of size n2,m2 */
  43. static void
  44. STARPUFFT(fft1_2d_plan_gpu)(void *args)
  45. {
  46. STARPUFFT(plan) plan = args;
  47. int n2 = plan->n2[0];
  48. int m2 = plan->n2[1];
  49. int workerid = starpu_worker_get_id();
  50. cufftResult cures;
  51. cures = cufftPlan2d(&plan->plans[workerid].plan1_cuda, n2, m2, _CUFFT_C2C);
  52. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  53. cufftSetStream(plan->plans[workerid].plan1_cuda, starpu_cuda_get_local_stream());
  54. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  55. }
/* First-round fft on CUDA: one (n2,m2) fft of the twisted1 chunk, followed
 * by the twiddle multiplication with the precomputed roots.
 * descr[0]: twisted1 input; descr[1]: fft1 output;
 * descr[2]/descr[3]: roots of unity for each dimension (read-only). */
static void
STARPUFFT(fft1_2d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int n2 = plan->n2[0];
	int m2 = plan->n2[1];
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	const _cufftComplex * restrict roots0 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[2]);
	const _cufftComplex * restrict roots1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[3]);
	int workerid = starpu_worker_get_id();
	/* Per-worker statistics counter. */
	task_per_worker[workerid]++;
	cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	STARPU_ASSERT(cures == CUFFT_SUCCESS);
	/* synchronization is done after the twiddling */
	STARPUFFT(cuda_twiddle_2d_host)(out, roots0, roots1, n2, m2, i, j);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  78. /* fft2:
  79. *
  80. * Perform n3*m3 ffts of size n1,m1 */
  81. static void
  82. STARPUFFT(fft2_2d_plan_gpu(void *args))
  83. {
  84. STARPUFFT(plan) plan = args;
  85. int n1 = plan->n1[0];
  86. int m1 = plan->n1[1];
  87. cufftResult cures;
  88. int workerid = starpu_worker_get_id();
  89. cures = cufftPlan2d(&plan->plans[workerid].plan2_cuda, n1, m1, _CUFFT_C2C);
  90. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  91. cufftSetStream(plan->plans[workerid].plan2_cuda, starpu_cuda_get_local_stream());
  92. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  93. }
/* Second-round fft on CUDA: perform n3*m3 ffts of size (n1,m1) on one
 * twisted2 package, one cufftExecC2C per (n1,m1) sub-block.
 * descr[0]: twisted2 package (read); descr[1]: fft2 output (written). */
static void
STARPUFFT(fft2_2d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	/* Each package holds n3*m3 chunks of n1*m1 elements. */
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	int n;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id();
	/* Per-worker statistics counter. */
	task_per_worker[workerid]++;
	for (n = 0; n < n3*m3; n++) {
		cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in + n * n1*m1, out + n * n1*m1, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
		STARPU_ASSERT(cures == CUFFT_SUCCESS);
	}
	/* All execs were queued on the local stream; one sync covers them all. */
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  117. #endif
  118. /* Twist the full vector into a n2,m2 chunk */
/* Twist the full vector into a n2,m2 chunk, CPU implementation.
 * descr[0]: full input vector (row-major n x m); descr[1]: contiguous
 * twisted1 chunk. Element (k,l) of chunk (i,j) comes from input position
 * row i + k*n1, column j + l*m1 (stride n1/m1 decimation). */
static void
STARPUFFT(twist1_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int k, l;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	/* m is the full row length of the input matrix. */
	int m = plan->n[1];
	STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
	/* printf("twist1 %d %d %g\n", i, j, (double) cabs(plan->in[i+j])); */
	for (k = 0; k < n2; k++)
		for (l = 0; l < m2; l++)
			twisted1[k*m2+l] = in[i*m+j+k*m*n1+l*m1];
}
  139. #ifdef STARPU_HAVE_FFTW
  140. /* Perform an n2,m2 fft */
/* Perform an n2,m2 fft (CPU, FFTW) on one twisted1 chunk, then multiply the
 * result by the twiddle factors roots[0][i*k] * roots[1][j*l].
 * descr[0]: twisted1 input; descr[1]: fft1 output. */
static void
STARPUFFT(fft1_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int k, l;
	int n2 = plan->n2[0];
	int m2 = plan->n2[1];
	int workerid = starpu_worker_get_id();
	/* Per-worker statistics counter. */
	task_per_worker[workerid]++;
	STARPUFFT(complex) *twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) *fft1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
	/* printf("fft1 %d %d %g\n", i, j, (double) cabs(twisted1[0])); */
	_FFTW(execute_dft)(plan->plans[workerid].plan1_cpu, twisted1, fft1);
	/* Twiddle: scale each output element by the roots of unity of both dims. */
	for (k = 0; k < n2; k++)
		for (l = 0; l < m2; l++)
			fft1[k*m2 + l] = fft1[k*m2 + l] * plan->roots[0][i*k] * plan->roots[1][j*l];
}
  161. #endif
  162. /* Twist the full vector into a package of n2/DIV_2D_N,m2/DIV_2D_M (n1,m1) chunks */
/* Twist the full vector into a package of n2/DIV_2D_N,m2/DIV_2D_M (n1,m1) chunks.
 * Gathers from the plan-wide fft1 buffer into the contiguous twisted2
 * package (descr[0]) selected by (kk, ll). */
static void
STARPUFFT(twist2_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int kk = args->kk; /* between 0 and DIV_2D_N */
	int ll = args->ll; /* between 0 and DIV_2D_M */
	int kkk, lll; /* between 0,0 and n3,m3 */
	int i, j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	/* printf("twist2 %d %d %g\n", kk, ll, (double) cabs(plan->fft1[kk+ll])); */
	for (kkk = 0; kkk < n3; kkk++) {
		/* Global second-round index in dimension 0. */
		int k = kk * n3 + kkk;
		for (lll = 0; lll < m3; lll++) {
			/* Global second-round index in dimension 1. */
			int l = ll * m3 + lll;
			for (i = 0; i < n1; i++)
				for (j = 0; j < m1; j++)
					twisted2[kkk*m3*n1*m1+lll*n1*m1+i*m1+j] = plan->fft1[i*n1*n2*m2+j*n2*m2+k*m2+l];
		}
	}
}
  190. #ifdef STARPU_HAVE_FFTW
  191. /* Perform (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) ffts */
  192. static void
  193. STARPUFFT(fft2_2d_kernel_cpu)(void *descr[], void *_args)
  194. {
  195. struct STARPUFFT(args) *args = _args;
  196. STARPUFFT(plan) plan = args->plan;
  197. /* int kk = args->kk; */
  198. /* int ll = args->ll; */
  199. int workerid = starpu_worker_get_id();
  200. task_per_worker[workerid]++;
  201. STARPUFFT(complex) *twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  202. STARPUFFT(complex) *fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  203. /* printf("fft2 %d %d %g\n", kk, ll, (double) cabs(twisted2[plan->totsize4-1])); */
  204. _FFTW(execute_dft)(plan->plans[workerid].plan2_cpu, twisted2, fft2);
  205. }
  206. #endif
  207. /* Spread the package of (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) chunks into the full vector */
/* Spread the package of (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) chunks into the
 * full output vector. Inverse scatter of twist2: reads the fft2 package
 * (descr[0]) selected by (kk, ll) and writes plan->out in place. */
static void
STARPUFFT(twist3_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int kk = args->kk; /* between 0 and DIV_2D_N */
	int ll = args->ll; /* between 0 and DIV_2D_M */
	int kkk, lll; /* between 0,0 and n3,m3 */
	int i, j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	/* m is the full row length of the output matrix. */
	int m = plan->n[1];
	const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	/* printf("twist3 %d %d %g\n", kk, ll, (double) cabs(fft2[0])); */
	for (kkk = 0; kkk < n3; kkk++) {
		int k = kk * n3 + kkk;
		for (lll = 0; lll < m3; lll++) {
			int l = ll * m3 + lll;
			for (i = 0; i < n1; i++)
				for (j = 0; j < m1; j++)
					plan->out[i*n2*m+j*m2+k*m+l] = fft2[kkk*m3*n1*m1+lll*n1*m1+i*m1+j];
		}
	}
}
/* History-based performance models, one per codelet, so the scheduler can
 * calibrate each step independently. TYPE prefixes the symbol with the
 * precision (float/double) variant. */
struct starpu_perfmodel STARPUFFT(twist1_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist1_2d"
};
struct starpu_perfmodel STARPUFFT(fft1_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft1_2d"
};
struct starpu_perfmodel STARPUFFT(twist2_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist2_2d"
};
struct starpu_perfmodel STARPUFFT(fft2_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft2_2d"
};
struct starpu_perfmodel STARPUFFT(twist3_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist3_2d"
};
/* Codelets for the five pipeline steps. twist1 has a CPU implementation and
 * optionally a CUDA one; fft1/fft2 require FFTW for CPU and/or CUDA for GPU;
 * twist2/twist3 are CPU-only scatter/gather steps. */
static struct starpu_codelet STARPUFFT(twist1_2d_codelet) = {
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
		STARPU_CPU,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(twist1_2d_kernel_gpu),
#endif
	.cpu_func = STARPUFFT(twist1_2d_kernel_cpu),
	.model = &STARPUFFT(twist1_2d_model),
	.nbuffers = 2
};
static struct starpu_codelet STARPUFFT(fft1_2d_codelet) = {
	/* The trailing 0 keeps the .where expression valid when neither CUDA
	 * nor FFTW is available. */
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(fft1_2d_kernel_gpu),
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_func = STARPUFFT(fft1_2d_kernel_cpu),
#endif
	.model = &STARPUFFT(fft1_2d_model),
	/* twisted1 in, fft1 out, plus the two roots vectors. */
	.nbuffers = 4
};
static struct starpu_codelet STARPUFFT(twist2_2d_codelet) = {
	.where = STARPU_CPU,
	.cpu_func = STARPUFFT(twist2_2d_kernel_cpu),
	.model = &STARPUFFT(twist2_2d_model),
	.nbuffers = 1
};
static struct starpu_codelet STARPUFFT(fft2_2d_codelet) = {
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(fft2_2d_kernel_gpu),
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_func = STARPUFFT(fft2_2d_kernel_cpu),
#endif
	.model = &STARPUFFT(fft2_2d_model),
	.nbuffers = 2
};
static struct starpu_codelet STARPUFFT(twist3_2d_codelet) = {
	.where = STARPU_CPU,
	.cpu_func = STARPUFFT(twist3_2d_kernel_cpu),
	.model = &STARPUFFT(twist3_2d_model),
	.nbuffers = 1
};
  317. /*
  318. *
  319. * Sequential version
  320. *
  321. */
  322. /* Perform one fft of size n,m */
  323. static void
  324. STARPUFFT(fft_2d_plan_gpu)(void *args)
  325. {
  326. STARPUFFT(plan) plan = args;
  327. cufftResult cures;
  328. int n = plan->n[0];
  329. int m = plan->n[1];
  330. int workerid = starpu_worker_get_id();
  331. cures = cufftPlan2d(&plan->plans[workerid].plan1_cuda, n, m, _CUFFT_C2C);
  332. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  333. cufftSetStream(plan->plans[workerid].plan_cuda, starpu_cuda_get_local_stream());
  334. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  335. }
/* Sequential version, CUDA: one fft of size n,m straight from the input
 * handle (descr[0]) to the output handle (descr[1]). */
static void
STARPUFFT(fft_2d_kernel_gpu)(void *descr[], void *args)
{
	STARPUFFT(plan) plan = args;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id();
	/* Per-worker statistics counter. */
	task_per_worker[workerid]++;
	cures = _cufftExecC2C(plan->plans[workerid].plan_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	STARPU_ASSERT(cures == CUFFT_SUCCESS);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  349. #ifdef STARPU_HAVE_FFTW
  350. /* Perform one fft of size n,m */
/* Sequential version, CPU: one fft of size n,m through the pre-built FFTW
 * plan. descr[0]: input; descr[1]: output. */
static void
STARPUFFT(fft_2d_kernel_cpu)(void *descr[], void *_args)
{
	STARPUFFT(plan) plan = _args;
	int workerid = starpu_worker_get_id();
	/* Per-worker statistics counter. */
	task_per_worker[workerid]++;
	STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict out = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
	_FFTW(execute_dft)(plan->plans[workerid].plan_cpu, in, out);
}
  361. #endif
/* Performance model and codelet for the sequential (single-task) 2D fft. */
static struct starpu_perfmodel STARPUFFT(fft_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft_2d"
};
static struct starpu_codelet STARPUFFT(fft_2d_codelet) = {
	/* The trailing 0 keeps the .where expression valid when neither CUDA
	 * nor FFTW is available. */
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(fft_2d_kernel_gpu),
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_func = STARPUFFT(fft_2d_kernel_cpu),
#endif
	.model = &STARPUFFT(fft_2d_model),
	.nbuffers = 2
};
/* Create a 2D C2C plan for an (n,m) transform with the given sign.
 * In PARALLEL mode this builds the whole task graph (twist1 -> fft1 -> join
 * -> twist2 -> fft2 -> twist3 -> end) with tag-based dependencies; the tasks
 * are created here once and submitted by start2dC2C for each execution.
 * flags must be 0 (FFTW-style planner flags are not supported yet). */
STARPUFFT(plan)
STARPUFFT(plan_dft_2d)(int n, int m, int sign, unsigned flags)
{
	int workerid;
	int n1 = DIV_2D_N;
	int n2 = n / n1;
	int n3;
	int m1 = DIV_2D_M;
	int m2 = m / m1;
	int m3;
	int z;
	struct starpu_task *task;
	if (PARALLEL) {
		/*
		 * Simple strategy:
		 *
		 * - twist1: twist input in n1*m1 (n2,m2) chunks
		 * - fft1: perform n1*m1 (n2,m2) ffts
		 * - twist2: twist into n2*m2 (n1,m1) chunks distributed in
		 *           DIV_2D_N*DIV_2D_M groups
		 * - fft2: perform DIV_2D_N*DIV_2D_M times n3*m3 (n1,m1) ffts
		 * - twist3: twist back into output
		 */
#ifdef STARPU_USE_CUDA
		/* cufft 2D-3D limited to [2,16384]: shrink n2 (and grow n1)
		 * until the chunk fits. */
		while (n2 > 16384) {
			n1 *= 2;
			n2 /= 2;
		}
#endif
		STARPU_ASSERT(n == n1*n2);
		/* i must fit in its half of the tag index field. */
		STARPU_ASSERT(n1 < (1ULL << J_BITS));
#ifdef STARPU_USE_CUDA
		/* cufft 2D-3D limited to [2,16384] */
		while (m2 > 16384) {
			m1 *= 2;
			m2 /= 2;
		}
#endif
		STARPU_ASSERT(m == m1*m2);
		STARPU_ASSERT(m1 < (1ULL << J_BITS));
		/* distribute the n2*m2 second ffts into DIV_2D_N*DIV_2D_M packages */
		n3 = n2 / DIV_2D_N;
		STARPU_ASSERT(n2 == n3*DIV_2D_N);
		m3 = m2 / DIV_2D_M;
		STARPU_ASSERT(m2 == m3*DIV_2D_M);
	}
	/* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
	STARPU_ASSERT(flags == 0);
	STARPUFFT(plan) plan = malloc(sizeof(*plan));
	memset(plan, 0, sizeof(*plan));
	if (PARALLEL) {
		/* Plan number, used to keep the tag spaces of plans disjoint. */
		plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;
		/* 4bit limitation in the tag space */
		STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));
	}
	plan->dim = 2;
	plan->n = malloc(plan->dim * sizeof(*plan->n));
	plan->n[0] = n;
	plan->n[1] = m;
	if (PARALLEL) {
		check_dims(plan);
		plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
		plan->n1[0] = n1;
		plan->n1[1] = m1;
		plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
		plan->n2[0] = n2;
		plan->n2[1] = m2;
		/* Derived sizes: whole transform, chunk counts and chunk sizes. */
		plan->totsize = n * m;
		plan->totsize1 = n1 * m1;
		plan->totsize2 = n2 * m2;
		plan->totsize3 = DIV_2D_N * DIV_2D_M;
		plan->totsize4 = plan->totsize / plan->totsize3;
	}
	plan->type = C2C;
	plan->sign = sign;
	if (PARALLEL) {
		/* Compute the w^k just once. */
		compute_roots(plan);
	}
	/* Initialize per-worker working set */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			if (PARALLEL) {
				/* first fft plan: one n2*m2 fft */
				plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_2d)(n2, m2, NULL, NULL, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan1_cpu);
				/* second fft plan: n3*m3 n1*m1 ffts */
				plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
						plan->n1, n3*m3,
						NULL, NULL, 1, plan->totsize1,
						NULL, NULL, 1, plan->totsize1,
						sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
			} else {
				/* fft plan: one fft of size n, m. */
				plan->plans[workerid].plan_cpu = _FFTW(plan_dft_2d)(n, m, NULL, NULL, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan_cpu);
			}
#else
			/* #warning libstarpufft can not work correctly if libfftw3 is not installed */
#endif
			break;
		case STARPU_CUDA_WORKER:
			/* cufft plans are created below on the workers themselves. */
			break;
		default:
			STARPU_ABORT();
			break;
		}
	}
#ifdef STARPU_USE_CUDA
	/* cufft plans must be created from the worker that will use them. */
	if (PARALLEL) {
		starpu_execute_on_each_worker(STARPUFFT(fft1_2d_plan_gpu), plan, STARPU_CUDA);
		starpu_execute_on_each_worker(STARPUFFT(fft2_2d_plan_gpu), plan, STARPU_CUDA);
	} else {
		starpu_execute_on_each_worker(STARPUFFT(fft_2d_plan_gpu), plan, STARPU_CUDA);
	}
#endif
	if (PARALLEL) {
		/* Intermediate buffers, each covering the whole transform. */
		plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
		memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));
		plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
		memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));
		plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
		memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));
		plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
		memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));
		/* Per-chunk data handles and tasks. */
		plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
		plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
		plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
		plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));
		plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
		plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
		plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
		plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
		plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));
		plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
		plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));
		/* Create first-round tasks */
		for (z = 0; z < plan->totsize1; z++) {
			int i = z / m1, j = z % m1;
#define STEP_TAG(step) STEP_TAG_2D(plan, step, i, j)
			plan->fft1_args[z].plan = plan;
			plan->fft1_args[z].i = i;
			plan->fft1_args[z].j = j;
			/* Register (n2,m2) chunks */
			starpu_vector_data_register(&plan->twisted1_handle[z], 0, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));
			starpu_vector_data_register(&plan->fft1_handle[z], 0, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));
			/* We'll need it on the CPU for the second twist anyway */
			starpu_data_set_wt_mask(plan->fft1_handle[z], 1<<0);
			/* Create twist1 task */
			plan->twist1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist1_2d_codelet);
			/* task->buffers[0].handle = to be filled at execution */
			task->buffers[0].mode = STARPU_R;
			task->buffers[1].handle = plan->twisted1_handle[z];
			task->buffers[1].mode = STARPU_W;
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(TWIST1);
			task->use_tag = 1;
			/* Tasks are detached but not destroyed: they are reused
			 * across executions of the plan. */
			task->detach = 1;
			task->destroy = 0;
			/* Tell that fft1 depends on twisted1 */
			starpu_tag_declare_deps(STEP_TAG(FFT1),
					1, STEP_TAG(TWIST1));
			/* Create FFT1 task */
			plan->fft1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft1_2d_codelet);
			task->buffers[0].handle = plan->twisted1_handle[z];
			task->buffers[0].mode = STARPU_R;
			task->buffers[1].handle = plan->fft1_handle[z];
			task->buffers[1].mode = STARPU_W;
			task->buffers[2].handle = plan->roots_handle[0];
			task->buffers[2].mode = STARPU_R;
			task->buffers[3].handle = plan->roots_handle[1];
			task->buffers[3].mode = STARPU_R;
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(FFT1);
			task->use_tag = 1;
			task->detach = 1;
			task->destroy = 0;
			/* Tell that to be done with first step we need to have
			 * finished this fft1 */
			starpu_tag_declare_deps(STEP_TAG_2D(plan, JOIN, 0, 0),
					1, STEP_TAG(FFT1));
#undef STEP_TAG
		}
		/* Create join task: an empty (cl == NULL) synchronization point
		 * between the two rounds. */
		plan->join_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_2D(plan, JOIN, 0, 0);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;
		/* Create second-round tasks */
		for (z = 0; z < plan->totsize3; z++) {
			int kk = z / DIV_2D_M, ll = z % DIV_2D_M;
#define STEP_TAG(step) STEP_TAG_2D(plan, step, kk, ll)
			plan->fft2_args[z].plan = plan;
			plan->fft2_args[z].kk = kk;
			plan->fft2_args[z].ll = ll;
			/* Register n3*m3 (n1,m1) chunks */
			starpu_vector_data_register(&plan->twisted2_handle[z], 0, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
			starpu_vector_data_register(&plan->fft2_handle[z], 0, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));
			/* We'll need it on the CPU for the last twist anyway */
			starpu_data_set_wt_mask(plan->fft2_handle[z], 1<<0);
			/* Tell that twisted2 depends on the whole first step to be
			 * done */
			starpu_tag_declare_deps(STEP_TAG(TWIST2),
					1, STEP_TAG_2D(plan, JOIN, 0, 0));
			/* Create twist2 task */
			plan->twist2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist2_2d_codelet);
			task->buffers[0].handle = plan->twisted2_handle[z];
			task->buffers[0].mode = STARPU_W;
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST2);
			task->use_tag = 1;
			task->detach = 1;
			task->destroy = 0;
			/* Tell that fft2 depends on twisted2 */
			starpu_tag_declare_deps(STEP_TAG(FFT2),
					1, STEP_TAG(TWIST2));
			/* Create FFT2 task */
			plan->fft2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft2_2d_codelet);
			task->buffers[0].handle = plan->twisted2_handle[z];
			task->buffers[0].mode = STARPU_R;
			task->buffers[1].handle = plan->fft2_handle[z];
			task->buffers[1].mode = STARPU_W;
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(FFT2);
			task->use_tag = 1;
			task->detach = 1;
			task->destroy = 0;
			/* Tell that twist3 depends on fft2 */
			starpu_tag_declare_deps(STEP_TAG(TWIST3),
					1, STEP_TAG(FFT2));
			/* Create twist3 tasks */
			plan->twist3_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist3_2d_codelet);
			task->buffers[0].handle = plan->fft2_handle[z];
			task->buffers[0].mode = STARPU_R;
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST3);
			task->use_tag = 1;
			task->detach = 1;
			task->destroy = 0;
			/* Tell that to be completely finished we need to have finished this twisted3 */
			starpu_tag_declare_deps(STEP_TAG_2D(plan, END, 0, 0),
					1, STEP_TAG(TWIST3));
#undef STEP_TAG
		}
		/* Create end task: empty synchronization point marking plan completion. */
		plan->end_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_2D(plan, END, 0, 0);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;
	}
	return plan;
}
  649. /* Actually submit all the tasks. */
/* Actually submit all the tasks. Returns the tag the caller must wait on:
 * the END tag in PARALLEL mode, or the single fft task's tag otherwise. */
static starpu_tag_t
STARPUFFT(start2dC2C)(STARPUFFT(plan) plan)
{
	STARPU_ASSERT(plan->type == C2C);
	int z;
	if (PARALLEL) {
		/* Submit the pre-built task graph; tag dependencies declared at
		 * plan time enforce the ordering. */
		for (z=0; z < plan->totsize1; z++) {
			starpu_task_submit(plan->twist1_tasks[z]);
			starpu_task_submit(plan->fft1_tasks[z]);
		}
		starpu_task_submit(plan->join_task);
		for (z=0; z < plan->totsize3; z++) {
			starpu_task_submit(plan->twist2_tasks[z]);
			starpu_task_submit(plan->fft2_tasks[z]);
			starpu_task_submit(plan->twist3_tasks[z]);
		}
		starpu_task_submit(plan->end_task);
		return STEP_TAG_2D(plan, END, 0, 0);
	} else /* !PARALLEL */ {
		struct starpu_task *task;
		/* FIXME: rather return the task? */
		/* Create FFT task */
		plan->fft_task = task = starpu_task_create();
		task->cl = &STARPUFFT(fft_2d_codelet);
		task->buffers[0].handle = plan->in_handle;
		task->buffers[0].mode = STARPU_R;
		task->buffers[1].handle = plan->out_handle;
		task->buffers[1].mode = STARPU_W;
		task->cl_arg = plan;
		/* Allocate a fresh tag for each submission. */
		task->tag_id = STARPU_ATOMIC_ADD(&starpufft_last_tag, 1);
		task->use_tag = 1;
		starpu_task_submit(plan->fft_task);
		return task->tag_id;
	}
}
  685. /* Free all the tags. The generic code handles freeing the buffers. */
  686. static void
  687. STARPUFFT(free_2d_tags)(STARPUFFT(plan) plan)
  688. {
  689. unsigned i, j;
  690. int n1 = plan->n1[0];
  691. int m1 = plan->n1[1];
  692. if (!PARALLEL)
  693. return;
  694. for (i = 0; i < n1; i++) {
  695. for (j = 0; j < m1; j++) {
  696. starpu_tag_remove(STEP_TAG_2D(plan, TWIST1, i, j));
  697. starpu_tag_remove(STEP_TAG_2D(plan, FFT1, i, j));
  698. }
  699. }
  700. starpu_tag_remove(STEP_TAG_2D(plan, JOIN, 0, 0));
  701. for (i = 0; i < DIV_2D_N; i++) {
  702. for (j = 0; j < DIV_2D_M; j++) {
  703. starpu_tag_remove(STEP_TAG_2D(plan, TWIST2, i, j));
  704. starpu_tag_remove(STEP_TAG_2D(plan, FFT2, i, j));
  705. starpu_tag_remove(STEP_TAG_2D(plan, TWIST3, i, j));
  706. }
  707. }
  708. starpu_tag_remove(STEP_TAG_2D(plan, END, 0, 0));
  709. }