starpufftx2d.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2011  Université de Bordeaux 1
 * Copyright (C) 2010  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#define PARALLEL
#ifdef PARALLEL

#define DIV_2D_N 8
#define DIV_2D_M 8

#define I_SHIFT (I_BITS/2)
#define J_BITS I_SHIFT

#define STEP_TAG_2D(plan, step, i, j) _STEP_TAG(plan, step, ((starpu_tag_t) i << I_SHIFT) | (starpu_tag_t) j)
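/* The (i,j) chunk coordinates are packed into the single index field of
 * _STEP_TAG: i occupies the upper I_BITS/2 bits and j the lower ones, which
 * is why plan_dft_2d below checks n1 and m1 against 1ULL << J_BITS. */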
#ifdef STARPU_USE_CUDA

/* Twist the full vector into a n2,m2 chunk */
static void
STARPUFFT(twist1_2d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];

	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);

	STARPUFFT(cuda_twist1_2d_host)(in, twisted1, i, j, n1, n2, m1, m2);

	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
/* fft1:
 *
 * Perform one fft of size n2,m2 */
static void
STARPUFFT(fft1_2d_plan_gpu)(void *args)
{
	STARPUFFT(plan) plan = args;
	int n2 = plan->n2[0];
	int m2 = plan->n2[1];
	int workerid = starpu_worker_get_id();
	cufftResult cures;

	cures = cufftPlan2d(&plan->plans[workerid].plan1_cuda, n2, m2, _CUFFT_C2C);
	STARPU_ASSERT(cures == CUFFT_SUCCESS);
	/* check the result of cufftSetStream as well, instead of re-testing the previous one */
	cures = cufftSetStream(plan->plans[workerid].plan1_cuda, starpu_cuda_get_local_stream());
	STARPU_ASSERT(cures == CUFFT_SUCCESS);
}
static void
STARPUFFT(fft1_2d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int n2 = plan->n2[0];
	int m2 = plan->n2[1];
	cufftResult cures;

	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	const _cufftComplex * restrict roots0 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[2]);
	const _cufftComplex * restrict roots1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[3]);

	int workerid = starpu_worker_get_id();

	task_per_worker[workerid]++;

	cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	STARPU_ASSERT(cures == CUFFT_SUCCESS);

	/* synchronization is done after the twiddling */
	STARPUFFT(cuda_twiddle_2d_host)(out, roots0, roots1, n2, m2, i, j);

	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}

/* fft2:
 *
 * Perform n3*m3 ffts of size n1,m1 */
static void
STARPUFFT(fft2_2d_plan_gpu)(void *args)
{
	STARPUFFT(plan) plan = args;
	int n1 = plan->n1[0];
	int m1 = plan->n1[1];
	cufftResult cures;
	int workerid = starpu_worker_get_id();

	cures = cufftPlan2d(&plan->plans[workerid].plan2_cuda, n1, m1, _CUFFT_C2C);
	STARPU_ASSERT(cures == CUFFT_SUCCESS);
	/* as above, actually check the cufftSetStream result */
	cures = cufftSetStream(plan->plans[workerid].plan2_cuda, starpu_cuda_get_local_stream());
	STARPU_ASSERT(cures == CUFFT_SUCCESS);
}
static void
STARPUFFT(fft2_2d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	int n;
	cufftResult cures;

	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);

	int workerid = starpu_worker_get_id();

	task_per_worker[workerid]++;

	for (n = 0; n < n3*m3; n++) {
		cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in + n * n1*m1, out + n * n1*m1, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
		STARPU_ASSERT(cures == CUFFT_SUCCESS);
	}

	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif
/* Twist the full vector into a n2,m2 chunk */
static void
STARPUFFT(twist1_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int k, l;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int m = plan->n[1];

	STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* printf("twist1 %d %d %g\n", i, j, (double) cabs(plan->in[i+j])); */
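	/* Gather element (i + k*n1, j + l*m1) of the row-major n x m input into
	 * slot (k, l) of this (n2,m2) chunk: the index below is just
	 * (i + k*n1)*m + (j + l*m1) written out. */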
	for (k = 0; k < n2; k++)
		for (l = 0; l < m2; l++)
			twisted1[k*m2+l] = in[i*m+j+k*m*n1+l*m1];
}
#ifdef STARPU_HAVE_FFTW
/* Perform an n2,m2 fft */
static void
STARPUFFT(fft1_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j = args->j;
	int k, l;
	int n2 = plan->n2[0];
	int m2 = plan->n2[1];
	int workerid = starpu_worker_get_id();

	task_per_worker[workerid]++;

	const STARPUFFT(complex) *twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) *fft1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	_fftw_complex * restrict worker_in1 = (_fftw_complex *)plan->plans[workerid].in1;
	_fftw_complex * restrict worker_out1 = (_fftw_complex *)plan->plans[workerid].out1;

	/* printf("fft1 %d %d %g\n", i, j, (double) cabs(twisted1[0])); */
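	/* Run the per-worker (n2,m2) FFTW plan on a copy of the twisted chunk,
	 * then apply the Cooley-Tukey twiddle factors: output (k,l) is scaled by
	 * roots[0][i*k] * roots[1][j*l] so the second-round (n1,m1) ffts can
	 * combine the chunks directly. */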
	memcpy(worker_in1, twisted1, plan->totsize2 * sizeof(*worker_in1));
	_FFTW(execute)(plan->plans[workerid].plan1_cpu);

	for (k = 0; k < n2; k++)
		for (l = 0; l < m2; l++)
			fft1[k*m2 + l] = worker_out1[k*m2 + l] * plan->roots[0][i*k] * plan->roots[1][j*l];
}
#endif
/* Twist the full vector into a package of n2/DIV_2D_N,m2/DIV_2D_M (n1,m1) chunks */
static void
STARPUFFT(twist2_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int kk = args->kk; /* between 0 and DIV_2D_N */
	int ll = args->ll; /* between 0 and DIV_2D_M */
	int kkk, lll; /* between 0,0 and n3,m3 */
	int i, j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;

	STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);

	/* printf("twist2 %d %d %g\n", kk, ll, (double) cabs(plan->fft1[kk+ll])); */
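	/* This package gathers, for every (k, l) with k = kk*n3 + kkk and
	 * l = ll*m3 + lll, the corresponding first-round results into n3*m3
	 * consecutive (n1,m1) tiles, ready for one batched second-round fft. */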
	for (kkk = 0; kkk < n3; kkk++) {
		int k = kk * n3 + kkk;
		for (lll = 0; lll < m3; lll++) {
			int l = ll * m3 + lll;
			for (i = 0; i < n1; i++)
				for (j = 0; j < m1; j++)
					twisted2[kkk*m3*n1*m1+lll*n1*m1+i*m1+j] = plan->fft1[i*n1*n2*m2+j*n2*m2+k*m2+l];
		}
	}
}
#ifdef STARPU_HAVE_FFTW
/* Perform (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) ffts */
static void
STARPUFFT(fft2_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	/* int kk = args->kk; */
	/* int ll = args->ll; */
	int workerid = starpu_worker_get_id();

	task_per_worker[workerid]++;

	const STARPUFFT(complex) *twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) *fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* printf("fft2 %d %d %g\n", kk, ll, (double) cabs(twisted2[plan->totsize4-1])); */
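	/* plan2_cpu is an FFTW "many" plan (see plan_dft_2d below): a single
	 * execute runs the whole batch of n3*m3 contiguous (n1,m1) transforms
	 * over the totsize4 elements copied in here. */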
	_fftw_complex * restrict worker_in2 = (_fftw_complex *)plan->plans[workerid].in2;
	_fftw_complex * restrict worker_out2 = (_fftw_complex *)plan->plans[workerid].out2;

	memcpy(worker_in2, twisted2, plan->totsize4 * sizeof(*worker_in2));
	_FFTW(execute)(plan->plans[workerid].plan2_cpu);

	/* no twiddle */
	memcpy(fft2, worker_out2, plan->totsize4 * sizeof(*worker_out2));
}
#endif
/* Spread the package of (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) chunks into the full vector */
static void
STARPUFFT(twist3_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int kk = args->kk; /* between 0 and DIV_2D_N */
	int ll = args->ll; /* between 0 and DIV_2D_M */
	int kkk, lll; /* between 0,0 and n3,m3 */
	int i, j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	int m = plan->n[1];

	const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);

	/* printf("twist3 %d %d %g\n", kk, ll, (double) cabs(fft2[0])); */
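	/* Scatter the results back into the row-major n x m output: element
	 * (i, j) of tile (kkk, lll) lands at row i*n2 + k, column j*m2 + l
	 * (with k = kk*n3 + kkk and l = ll*m3 + lll). */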
	for (kkk = 0; kkk < n3; kkk++) {
		int k = kk * n3 + kkk;
		for (lll = 0; lll < m3; lll++) {
			int l = ll * m3 + lll;
			for (i = 0; i < n1; i++)
				for (j = 0; j < m1; j++)
					plan->out[i*n2*m+j*m2+k*m+l] = fft2[kkk*m3*n1*m1+lll*n1*m1+i*m1+j];
		}
	}
}
struct starpu_perfmodel STARPUFFT(twist1_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist1_2d"
};

struct starpu_perfmodel STARPUFFT(fft1_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft1_2d"
};

struct starpu_perfmodel STARPUFFT(twist2_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist2_2d"
};

struct starpu_perfmodel STARPUFFT(fft2_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft2_2d"
};

struct starpu_perfmodel STARPUFFT(twist3_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist3_2d"
};
static struct starpu_codelet STARPUFFT(twist1_2d_codelet) = {
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
		STARPU_CPU,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(twist1_2d_kernel_gpu),
#endif
	.cpu_func = STARPUFFT(twist1_2d_kernel_cpu),
	.model = &STARPUFFT(twist1_2d_model),
	.nbuffers = 2
};

static struct starpu_codelet STARPUFFT(fft1_2d_codelet) = {
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(fft1_2d_kernel_gpu),
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_func = STARPUFFT(fft1_2d_kernel_cpu),
#endif
	.model = &STARPUFFT(fft1_2d_model),
	.nbuffers = 4
};

static struct starpu_codelet STARPUFFT(twist2_2d_codelet) = {
	.where = STARPU_CPU,
	.cpu_func = STARPUFFT(twist2_2d_kernel_cpu),
	.model = &STARPUFFT(twist2_2d_model),
	.nbuffers = 1
};

static struct starpu_codelet STARPUFFT(fft2_2d_codelet) = {
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(fft2_2d_kernel_gpu),
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_func = STARPUFFT(fft2_2d_kernel_cpu),
#endif
	.model = &STARPUFFT(fft2_2d_model),
	.nbuffers = 2
};

static struct starpu_codelet STARPUFFT(twist3_2d_codelet) = {
	.where = STARPU_CPU,
	.cpu_func = STARPUFFT(twist3_2d_kernel_cpu),
	.model = &STARPUFFT(twist3_2d_model),
	.nbuffers = 1
};
#endif
STARPUFFT(plan)
STARPUFFT(plan_dft_2d)(int n, int m, int sign, unsigned flags)
{
	int workerid;
	int n1 = DIV_2D_N;
	int n2 = n / n1;
	int n3;
	int m1 = DIV_2D_M;
	int m2 = m / m1;
	int m3;
	int z;
	struct starpu_task *task;

	/*
	 * Simple strategy:
	 *
	 * - twist1: twist input in n1*m1 (n2,m2) chunks
	 * - fft1: perform n1*m1 (n2,m2) ffts
	 * - twist2: twist into n2*m2 (n1,m1) chunks distributed in
	 *           DIV_2D_N*DIV_2D_M groups
	 * - fft2: perform DIV_2D_N*DIV_2D_M times n3*m3 (n1,m1) ffts
	 * - twist3: twist back into output
	 */
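	/* For illustration: with n = m = 1024 and the default DIV_2D_N =
	 * DIV_2D_M = 8, this gives n1 = m1 = 8, n2 = m2 = 128 and n3 = m3 = 16,
	 * i.e. 64 first-round (128,128) ffts followed by 64 packages of
	 * 16*16 = 256 second-round (8,8) ffts. */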
#ifdef STARPU_USE_CUDA
	/* cufft 2D-3D limited to [2,16384] */
	while (n2 > 16384) {
		n1 *= 2;
		n2 /= 2;
	}
#endif
	STARPU_ASSERT(n == n1*n2);
	STARPU_ASSERT(n1 < (1ULL << J_BITS));

#ifdef STARPU_USE_CUDA
	/* cufft 2D-3D limited to [2,16384] */
	while (m2 > 16384) {
		m1 *= 2;
		m2 /= 2;
	}
#endif
	STARPU_ASSERT(m == m1*m2);
	STARPU_ASSERT(m1 < (1ULL << J_BITS));

	/* distribute the n2*m2 second ffts into DIV_2D_N*DIV_2D_M packages */
	n3 = n2 / DIV_2D_N;
	STARPU_ASSERT(n2 == n3*DIV_2D_N);
	m3 = m2 / DIV_2D_M;
	STARPU_ASSERT(m2 == m3*DIV_2D_M);

	/* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
	STARPU_ASSERT(flags == 0);

	STARPUFFT(plan) plan = malloc(sizeof(*plan));
	memset(plan, 0, sizeof(*plan));

	plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;

	/* 4bit limitation in the tag space */
	STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));

	plan->dim = 2;
	plan->n = malloc(plan->dim * sizeof(*plan->n));
	plan->n[0] = n;
	plan->n[1] = m;

	check_dims(plan);

	plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
	plan->n1[0] = n1;
	plan->n1[1] = m1;
	plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
	plan->n2[0] = n2;
	plan->n2[1] = m2;

	plan->totsize = n * m;
	plan->totsize1 = n1 * m1;
	plan->totsize2 = n2 * m2;
	plan->totsize3 = DIV_2D_N * DIV_2D_M;
	plan->totsize4 = plan->totsize / plan->totsize3;
	plan->type = C2C;
	plan->sign = sign;

	compute_roots(plan);

	/* Initialize per-worker working set */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			/* first fft plan: one n2*m2 fft */
			plan->plans[workerid].in1 = _FFTW(malloc)(plan->totsize2 * sizeof(_fftw_complex));
			memset(plan->plans[workerid].in1, 0, plan->totsize2 * sizeof(_fftw_complex));
			plan->plans[workerid].out1 = _FFTW(malloc)(plan->totsize2 * sizeof(_fftw_complex));
			memset(plan->plans[workerid].out1, 0, plan->totsize2 * sizeof(_fftw_complex));
			plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_2d)(n2, m2, plan->plans[workerid].in1, plan->plans[workerid].out1, sign, _FFTW_FLAGS);
			STARPU_ASSERT(plan->plans[workerid].plan1_cpu);

			/* second fft plan: n3*m3 n1*m1 ffts */
			plan->plans[workerid].in2 = _FFTW(malloc)(plan->totsize4 * sizeof(_fftw_complex));
			memset(plan->plans[workerid].in2, 0, plan->totsize4 * sizeof(_fftw_complex));
			plan->plans[workerid].out2 = _FFTW(malloc)(plan->totsize4 * sizeof(_fftw_complex));
			memset(plan->plans[workerid].out2, 0, plan->totsize4 * sizeof(_fftw_complex));
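			/* FFTW advanced interface: one plan for a batch of n3*m3
			 * contiguous (n1,m1) transforms, consecutive transforms
			 * totsize1 = n1*m1 elements apart, stride 1. */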
			plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
					plan->n1, n3*m3,
					/* input */ plan->plans[workerid].in2, NULL, 1, plan->totsize1,
					/* output */ plan->plans[workerid].out2, NULL, 1, plan->totsize1,
					sign, _FFTW_FLAGS);
			STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
#else
#warning libstarpufft cannot work correctly if libfftw3 is not installed
#endif
			break;
		case STARPU_CUDA_WORKER:
			break;
		default:
			STARPU_ABORT();
			break;
		}
	}
#ifdef STARPU_USE_CUDA
	starpu_execute_on_each_worker(STARPUFFT(fft1_2d_plan_gpu), plan, STARPU_CUDA);
	starpu_execute_on_each_worker(STARPUFFT(fft2_2d_plan_gpu), plan, STARPU_CUDA);
#endif

	plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
	memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));
	plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
	memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));
	plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
	memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));
	plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
	memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));

	plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
	plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
	plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
	plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));

	plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
	plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
	plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
	plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
	plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));

	plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
	plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));
	/* Create first-round tasks */
	for (z = 0; z < plan->totsize1; z++) {
		int i = z / m1, j = z % m1;
#define STEP_TAG(step) STEP_TAG_2D(plan, step, i, j)

		plan->fft1_args[z].plan = plan;
		plan->fft1_args[z].i = i;
		plan->fft1_args[z].j = j;

		/* Register (n2,m2) chunks */
		starpu_vector_data_register(&plan->twisted1_handle[z], 0, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));
		starpu_vector_data_register(&plan->fft1_handle[z], 0, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));

		/* We'll need it on the CPU for the second twist anyway */
		starpu_data_set_wt_mask(plan->fft1_handle[z], 1<<0);

		/* Create twist1 task */
		plan->twist1_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist1_2d_codelet);
		/* task->buffers[0].handle = to be filled at execution */
		task->buffers[0].mode = STARPU_R;
		task->buffers[1].handle = plan->twisted1_handle[z];
		task->buffers[1].mode = STARPU_W;
		task->cl_arg = &plan->fft1_args[z];
		task->tag_id = STEP_TAG(TWIST1);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;

		/* Tell that fft1 depends on twisted1 */
		starpu_tag_declare_deps(STEP_TAG(FFT1),
				1, STEP_TAG(TWIST1));

		/* Create FFT1 task */
		plan->fft1_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(fft1_2d_codelet);
		task->buffers[0].handle = plan->twisted1_handle[z];
		task->buffers[0].mode = STARPU_R;
		task->buffers[1].handle = plan->fft1_handle[z];
		task->buffers[1].mode = STARPU_W;
		task->buffers[2].handle = plan->roots_handle[0];
		task->buffers[2].mode = STARPU_R;
		task->buffers[3].handle = plan->roots_handle[1];
		task->buffers[3].mode = STARPU_R;
		task->cl_arg = &plan->fft1_args[z];
		task->tag_id = STEP_TAG(FFT1);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;

		/* The first step is complete only once this fft1 has finished */
		starpu_tag_declare_deps(STEP_TAG_2D(plan, JOIN, 0, 0),
				1, STEP_TAG(FFT1));
#undef STEP_TAG
	}

	/* Create join task */
	plan->join_task = task = starpu_task_create();
	task->cl = NULL;
	task->tag_id = STEP_TAG_2D(plan, JOIN, 0, 0);
	task->use_tag = 1;
	task->detach = 1;
	task->destroy = 0;
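	/* The join task has no codelet; its JOIN tag simply collects all the
	 * FFT1 tags above and, through the dependencies declared below, releases
	 * every TWIST2 task of the second round. */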
	/* Create second-round tasks */
	for (z = 0; z < plan->totsize3; z++) {
		int kk = z / DIV_2D_M, ll = z % DIV_2D_M;
#define STEP_TAG(step) STEP_TAG_2D(plan, step, kk, ll)

		plan->fft2_args[z].plan = plan;
		plan->fft2_args[z].kk = kk;
		plan->fft2_args[z].ll = ll;

		/* Register n3*m3 (n1,m1) chunks */
		starpu_vector_data_register(&plan->twisted2_handle[z], 0, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
		starpu_vector_data_register(&plan->fft2_handle[z], 0, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));

		/* We'll need it on the CPU for the last twist anyway */
		starpu_data_set_wt_mask(plan->fft2_handle[z], 1<<0);

		/* Tell that twisted2 depends on the whole first step being done */
		starpu_tag_declare_deps(STEP_TAG(TWIST2),
				1, STEP_TAG_2D(plan, JOIN, 0, 0));

		/* Create twist2 task */
		plan->twist2_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist2_2d_codelet);
		task->buffers[0].handle = plan->twisted2_handle[z];
		task->buffers[0].mode = STARPU_W;
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(TWIST2);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;

		/* Tell that fft2 depends on twisted2 */
		starpu_tag_declare_deps(STEP_TAG(FFT2),
				1, STEP_TAG(TWIST2));

		/* Create FFT2 task */
		plan->fft2_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(fft2_2d_codelet);
		task->buffers[0].handle = plan->twisted2_handle[z];
		task->buffers[0].mode = STARPU_R;
		task->buffers[1].handle = plan->fft2_handle[z];
		task->buffers[1].mode = STARPU_W;
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(FFT2);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;

		/* Tell that twist3 depends on fft2 */
		starpu_tag_declare_deps(STEP_TAG(TWIST3),
				1, STEP_TAG(FFT2));

		/* Create twist3 task */
		plan->twist3_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist3_2d_codelet);
		task->buffers[0].handle = plan->fft2_handle[z];
		task->buffers[0].mode = STARPU_R;
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(TWIST3);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;

		/* The whole transform is finished only once this twist3 has finished */
		starpu_tag_declare_deps(STEP_TAG_2D(plan, END, 0, 0),
				1, STEP_TAG(TWIST3));
#undef STEP_TAG
	}
	/* Create end task */
	plan->end_task = task = starpu_task_create();
	task->cl = NULL;
	task->tag_id = STEP_TAG_2D(plan, END, 0, 0);
	task->use_tag = 1;
	task->detach = 1;
	task->destroy = 0;

	return plan;
}
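/* A sketch of how such a plan is typically consumed; the execute/destroy
 * entry points are declared in starpufft.h and implemented in the generic
 * starpufftx code, not in this file:
 *
 *	STARPUFFT(plan) p = STARPUFFT(plan_dft_2d)(1024, 1024, -1, 0);
 *	STARPUFFT(execute)(p, in, out);	// submits the task graph built above
 *	STARPUFFT(destroy_plan)(p);
 */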
static starpu_tag_t
STARPUFFT(start2dC2C)(STARPUFFT(plan) plan)
{
	STARPU_ASSERT(plan->type == C2C);
	int z;

	for (z = 0; z < plan->totsize1; z++) {
		starpu_task_submit(plan->twist1_tasks[z]);
		starpu_task_submit(plan->fft1_tasks[z]);
	}

	starpu_task_submit(plan->join_task);

	for (z = 0; z < plan->totsize3; z++) {
		starpu_task_submit(plan->twist2_tasks[z]);
		starpu_task_submit(plan->fft2_tasks[z]);
		starpu_task_submit(plan->twist3_tasks[z]);
	}

	starpu_task_submit(plan->end_task);
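	/* The caller can wait on the returned END tag (e.g. with
	 * starpu_tag_wait) to know when the whole 2D transform is done. */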
	return STEP_TAG_2D(plan, END, 0, 0);
}
static void
STARPUFFT(free_2d_tags)(STARPUFFT(plan) plan)
{
	unsigned i, j;
	int n1 = plan->n1[0];
	int m1 = plan->n1[1];

	for (i = 0; i < n1; i++) {
		for (j = 0; j < m1; j++) {
			starpu_tag_remove(STEP_TAG_2D(plan, TWIST1, i, j));
			starpu_tag_remove(STEP_TAG_2D(plan, FFT1, i, j));
		}
	}

	starpu_tag_remove(STEP_TAG_2D(plan, JOIN, 0, 0));

	for (i = 0; i < DIV_2D_N; i++) {
		for (j = 0; j < DIV_2D_M; j++) {
			starpu_tag_remove(STEP_TAG_2D(plan, TWIST2, i, j));
			starpu_tag_remove(STEP_TAG_2D(plan, FFT2, i, j));
			starpu_tag_remove(STEP_TAG_2D(plan, TWIST3, i, j));
		}
	}

	starpu_tag_remove(STEP_TAG_2D(plan, END, 0, 0));
}