starpufftx2d.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2017 CNRS
  4. * Copyright (C) 2013,2014 Université de Bordeaux
  5. * Copyright (C) 2012,2013 Inria
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
/* Number of packages per dimension used to distribute the second round of
 * FFTs (twist2/fft2/twist3 work on DIV_2D_N*DIV_2D_M groups). */
#define DIV_2D_N 8
#define DIV_2D_M 8
/* Tag layout: the per-task (i,j) pair is packed into the tag's index field,
 * i in the high half and j in the low half (I_BITS comes from the generic
 * code; J_BITS is the width available for each of i and j). */
#define I_SHIFT (I_BITS/2)
#define J_BITS I_SHIFT
#define STEP_TAG_2D(plan, step, i, j) _STEP_TAG(plan, step, ((starpu_tag_t) i << I_SHIFT) | (starpu_tag_t) j)
  23. #ifdef __STARPU_USE_CUDA
  24. /* Twist the full vector into a n2,m2 chunk */
  25. static void
  26. STARPUFFT(twist1_2d_kernel_gpu)(void *descr[], void *_args)
  27. {
  28. struct STARPUFFT(args) *args = _args;
  29. STARPUFFT(plan) plan = args->plan;
  30. int i = args->i;
  31. int j = args->j;
  32. int n1 = plan->n1[0];
  33. int n2 = plan->n2[0];
  34. int m1 = plan->n1[1];
  35. int m2 = plan->n2[1];
  36. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  37. _cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  38. STARPUFFT(cuda_twist1_2d_host)(in, twisted1, i, j, n1, n2, m1, m2);
  39. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  40. }
  41. /* fft1:
  42. *
  43. * Perform one fft of size n2,m2 */
  44. static void
  45. STARPUFFT(fft1_2d_plan_gpu)(void *args)
  46. {
  47. STARPUFFT(plan) plan = args;
  48. int n2 = plan->n2[0];
  49. int m2 = plan->n2[1];
  50. int workerid = starpu_worker_get_id_check();
  51. cufftResult cures;
  52. cures = cufftPlan2d(&plan->plans[workerid].plan1_cuda, n2, m2, _CUFFT_C2C);
  53. if (cures != CUFFT_SUCCESS)
  54. STARPU_CUFFT_REPORT_ERROR(cures);
  55. cufftSetStream(plan->plans[workerid].plan1_cuda, starpu_cuda_get_local_stream());
  56. if (cures != CUFFT_SUCCESS)
  57. STARPU_CUFFT_REPORT_ERROR(cures);
  58. }
  59. static void
  60. STARPUFFT(fft1_2d_kernel_gpu)(void *descr[], void *_args)
  61. {
  62. struct STARPUFFT(args) *args = _args;
  63. STARPUFFT(plan) plan = args->plan;
  64. int i = args->i;
  65. int j = args->j;
  66. int n2 = plan->n2[0];
  67. int m2 = plan->n2[1];
  68. cufftResult cures;
  69. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  70. _cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  71. const _cufftComplex * restrict roots0 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[2]);
  72. const _cufftComplex * restrict roots1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[3]);
  73. int workerid = starpu_worker_get_id_check();
  74. task_per_worker[workerid]++;
  75. cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  76. if (cures != CUFFT_SUCCESS)
  77. STARPU_CUFFT_REPORT_ERROR(cures);
  78. /* synchronization is done after the twiddling */
  79. STARPUFFT(cuda_twiddle_2d_host)(out, roots0, roots1, n2, m2, i, j);
  80. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  81. }
  82. /* fft2:
  83. *
  84. * Perform n3*m3 ffts of size n1,m1 */
  85. static void
  86. STARPUFFT(fft2_2d_plan_gpu(void *args))
  87. {
  88. STARPUFFT(plan) plan = args;
  89. int n1 = plan->n1[0];
  90. int m1 = plan->n1[1];
  91. cufftResult cures;
  92. int workerid = starpu_worker_get_id_check();
  93. cures = cufftPlan2d(&plan->plans[workerid].plan2_cuda, n1, m1, _CUFFT_C2C);
  94. if (cures != CUFFT_SUCCESS)
  95. STARPU_CUFFT_REPORT_ERROR(cures);
  96. cufftSetStream(plan->plans[workerid].plan2_cuda, starpu_cuda_get_local_stream());
  97. if (cures != CUFFT_SUCCESS)
  98. STARPU_CUFFT_REPORT_ERROR(cures);
  99. }
  100. static void
  101. STARPUFFT(fft2_2d_kernel_gpu)(void *descr[], void *_args)
  102. {
  103. struct STARPUFFT(args) *args = _args;
  104. STARPUFFT(plan) plan = args->plan;
  105. int n1 = plan->n1[0];
  106. int n2 = plan->n2[0];
  107. int m1 = plan->n1[1];
  108. int m2 = plan->n2[1];
  109. int n3 = n2/DIV_2D_N;
  110. int m3 = m2/DIV_2D_M;
  111. int n;
  112. cufftResult cures;
  113. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  114. _cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  115. int workerid = starpu_worker_get_id_check();
  116. task_per_worker[workerid]++;
  117. for (n = 0; n < n3*m3; n++) {
  118. cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in + n * n1*m1, out + n * n1*m1, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  119. if (cures != CUFFT_SUCCESS)
  120. STARPU_CUFFT_REPORT_ERROR(cures);
  121. }
  122. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  123. }
  124. #endif
/* Twist the full vector into a n2,m2 chunk */
static void
STARPUFFT(twist1_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;	/* chunk row index, 0 <= i < n1 */
	int j = args->j;	/* chunk column index, 0 <= j < m1 */
	int k, l;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int m = plan->n[1];	/* full row length of the input */
	STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
	/* printf("twist1 %d %d %g\n", i, j, (double) cabs(plan->in[i+j])); */
	/* Gather the strided (n2,m2) sub-grid anchored at (i,j): element
	 * (k,l) of the chunk comes from row i + k*n1, column j + l*m1 of the
	 * full (n,m) input. */
	for (k = 0; k < n2; k++)
		for (l = 0; l < m2; l++)
			twisted1[k*m2+l] = in[i*m+j+k*m*n1+l*m1];
}
  146. #ifdef STARPU_HAVE_FFTW
/* Perform an n2,m2 fft */
static void
STARPUFFT(fft1_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;	/* chunk coordinates, used for the twiddle factors */
	int j = args->j;
	int k, l;
	int n2 = plan->n2[0];
	int m2 = plan->n2[1];
	int workerid = starpu_worker_get_id_check();
	task_per_worker[workerid]++;
	STARPUFFT(complex) *twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) *fft1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
	/* printf("fft1 %d %d %g\n", i, j, (double) cabs(twisted1[0])); */
	_FFTW(execute_dft)(plan->plans[workerid].plan1_cpu, twisted1, fft1);
	/* Twiddle step of the Cooley-Tukey decomposition: scale element
	 * (k,l) by roots[0]^(i*k) * roots[1]^(j*l) so the second-round FFTs
	 * recombine the chunks correctly (roots precomputed by compute_roots). */
	for (k = 0; k < n2; k++)
		for (l = 0; l < m2; l++)
			fft1[k*m2 + l] = fft1[k*m2 + l] * plan->roots[0][i*k] * plan->roots[1][j*l];
}
  168. #endif
/* Twist the full vector into a package of n2/DIV_2D_N,m2/DIV_2D_M (n1,m1) chunks */
static void
STARPUFFT(twist2_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int kk = args->kk;	/* between 0 and DIV_2D_N */
	int ll = args->ll;	/* between 0 and DIV_2D_M */
	int kkk, lll;		/* between 0,0 and n3,m3 */
	int i, j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	/* printf("twist2 %d %d %g\n", kk, ll, (double) cabs(plan->fft1[kk+ll])); */
	/* Reads plan->fft1 directly (not through a handle): this codelet only
	 * runs on CPUs and the fft1 handles carry a write-through mask to
	 * STARPU_MAIN_RAM, so the data is guaranteed to be in main memory. */
	for (kkk = 0; kkk < n3; kkk++) {
		int k = kk * n3 + kkk;	/* global second-round row index */
		for (lll = 0; lll < m3; lll++) {
			int l = ll * m3 + lll;	/* global second-round column index */
			/* Collect element (k,l) from each of the n1*m1
			 * first-round results into chunk (kkk,lll). */
			for (i = 0; i < n1; i++)
				for (j = 0; j < m1; j++)
					twisted2[kkk*m3*n1*m1+lll*n1*m1+i*m1+j] = plan->fft1[i*n1*n2*m2+j*n2*m2+k*m2+l];
		}
	}
}
  197. #ifdef STARPU_HAVE_FFTW
  198. /* Perform (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) ffts */
  199. static void
  200. STARPUFFT(fft2_2d_kernel_cpu)(void *descr[], void *_args)
  201. {
  202. struct STARPUFFT(args) *args = _args;
  203. STARPUFFT(plan) plan = args->plan;
  204. /* int kk = args->kk; */
  205. /* int ll = args->ll; */
  206. int workerid = starpu_worker_get_id_check();
  207. task_per_worker[workerid]++;
  208. STARPUFFT(complex) *twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  209. STARPUFFT(complex) *fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  210. /* printf("fft2 %d %d %g\n", kk, ll, (double) cabs(twisted2[plan->totsize4-1])); */
  211. _FFTW(execute_dft)(plan->plans[workerid].plan2_cpu, twisted2, fft2);
  212. }
  213. #endif
/* Spread the package of (n2/DIV_2D_N)*(m2/DIV_2D_M) (n1,m1) chunks into the full vector */
static void
STARPUFFT(twist3_2d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int kk = args->kk;	/* between 0 and DIV_2D_N */
	int ll = args->ll;	/* between 0 and DIV_2D_M */
	int kkk, lll;		/* between 0,0 and n3,m3 */
	int i, j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int m1 = plan->n1[1];
	int m2 = plan->n2[1];
	int n3 = n2/DIV_2D_N;
	int m3 = m2/DIV_2D_M;
	int m = plan->n[1];	/* full row length of the output */
	const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	/* printf("twist3 %d %d %g\n", kk, ll, (double) cabs(fft2[0])); */
	/* Inverse of twist2's gather: writes straight into the application
	 * output buffer (plan->out) — this codelet runs only on CPUs. */
	for (kkk = 0; kkk < n3; kkk++) {
		int k = kk * n3 + kkk;	/* global second-round row index */
		for (lll = 0; lll < m3; lll++) {
			int l = ll * m3 + lll;	/* global second-round column index */
			for (i = 0; i < n1; i++)
				for (j = 0; j < m1; j++)
					plan->out[i*n2*m+j*m2+k*m+l] = fft2[kkk*m3*n1*m1+lll*n1*m1+i*m1+j];
		}
	}
}
/* History-based performance models, one per codelet, so the scheduler can
 * calibrate each step independently.  TYPE prefixes the symbol with the
 * precision variant (e.g. float vs double builds). */
struct starpu_perfmodel STARPUFFT(twist1_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist1_2d"
};
struct starpu_perfmodel STARPUFFT(fft1_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft1_2d"
};
struct starpu_perfmodel STARPUFFT(twist2_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist2_2d"
};
struct starpu_perfmodel STARPUFFT(fft2_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft2_2d"
};
struct starpu_perfmodel STARPUFFT(twist3_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist3_2d"
};
/* Codelet descriptions.  CAN_EXECUTE is assumed to expand to an optional
 * .can_execute initializer defined by the generic code — TODO confirm. */

/* twist1: CPU always, CUDA when built with it; reads the full input,
 * writes one (n2,m2) chunk. */
static struct starpu_codelet STARPUFFT(twist1_2d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
		STARPU_CPU,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(twist1_2d_kernel_gpu)},
#endif
	.cpu_funcs = {STARPUFFT(twist1_2d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist1_2d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "twist1_2d_codelet"
};
/* fft1: CPU only with FFTW, CUDA only with cuFFT; buffers are the twisted
 * chunk, its FFT, and the two read-only root-of-unity vectors. */
static struct starpu_codelet STARPUFFT(fft1_2d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft1_2d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft1_2d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft1_2d_model),
	.nbuffers = 4,
	.modes = {STARPU_R, STARPU_W, STARPU_R, STARPU_R},
	.name = "fft1_2d_codelet"
};
/* twist2: CPU only — it reads plan->fft1 directly from main memory. */
static struct starpu_codelet STARPUFFT(twist2_2d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist2_2d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist2_2d_model),
	.nbuffers = 1,
	.modes = {STARPU_W},
	.name = "twist2_2d_codelet"
};
/* fft2: like fft1 but without the twiddle roots. */
static struct starpu_codelet STARPUFFT(fft2_2d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft2_2d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft2_2d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft2_2d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "fft2_2d_codelet"
};
/* twist3: CPU only — it writes straight into the application output. */
static struct starpu_codelet STARPUFFT(twist3_2d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist3_2d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist3_2d_model),
	.nbuffers = 1,
	.modes = {STARPU_R},
	.name = "twist3_2d_codelet"
};
  339. /*
  340. *
  341. * Sequential version
  342. *
  343. */
  344. #ifdef __STARPU_USE_CUDA
  345. /* Perform one fft of size n,m */
  346. static void
  347. STARPUFFT(fft_2d_plan_gpu)(void *args)
  348. {
  349. STARPUFFT(plan) plan = args;
  350. cufftResult cures;
  351. int n = plan->n[0];
  352. int m = plan->n[1];
  353. int workerid = starpu_worker_get_id_check();
  354. cures = cufftPlan2d(&plan->plans[workerid].plan_cuda, n, m, _CUFFT_C2C);
  355. if (cures != CUFFT_SUCCESS)
  356. STARPU_CUFFT_REPORT_ERROR(cures);
  357. cufftSetStream(plan->plans[workerid].plan_cuda, starpu_cuda_get_local_stream());
  358. if (cures != CUFFT_SUCCESS)
  359. STARPU_CUFFT_REPORT_ERROR(cures);
  360. }
  361. static void
  362. STARPUFFT(fft_2d_kernel_gpu)(void *descr[], void *args)
  363. {
  364. STARPUFFT(plan) plan = args;
  365. cufftResult cures;
  366. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  367. _cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  368. int workerid = starpu_worker_get_id_check();
  369. task_per_worker[workerid]++;
  370. cures = _cufftExecC2C(plan->plans[workerid].plan_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  371. if (cures != CUFFT_SUCCESS)
  372. STARPU_CUFFT_REPORT_ERROR(cures);
  373. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  374. }
  375. #endif
  376. #ifdef STARPU_HAVE_FFTW
  377. /* Perform one fft of size n,m */
  378. static void
  379. STARPUFFT(fft_2d_kernel_cpu)(void *descr[], void *_args)
  380. {
  381. STARPUFFT(plan) plan = _args;
  382. int workerid = starpu_worker_get_id_check();
  383. task_per_worker[workerid]++;
  384. STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  385. STARPUFFT(complex) * restrict out = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  386. _FFTW(execute_dft)(plan->plans[workerid].plan_cpu, in, out);
  387. }
  388. #endif
/* Performance model and codelet for the sequential (single-task) path. */
static struct starpu_perfmodel STARPUFFT(fft_2d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft_2d"
};
/* One task does the whole transform: CUDA with cuFFT, CPU with FFTW. */
static struct starpu_codelet STARPUFFT(fft_2d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft_2d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft_2d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft_2d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "fft_2d_codelet"
};
/* Create a plan for a 2D complex-to-complex DFT of size (n,m).
 *
 * In the PARALLEL case the transform is decomposed Cooley-Tukey style into
 * two rounds of smaller FFTs linked by twist (data-reshuffling) steps; all
 * tasks, data handles and tag dependencies are created here up front and
 * submitted later by start2dC2C.  Otherwise a single fft_2d task is used.
 *
 * sign is the transform direction (-1 forward, +1 inverse); flags must be 0. */
STARPUFFT(plan)
STARPUFFT(plan_dft_2d)(int n, int m, int sign, unsigned flags)
{
	unsigned workerid;
	int n1 = DIV_2D_N;
	int n2 = n / n1;
	int n3;
	int m1 = DIV_2D_M;
	int m2 = m / m1;
	int m3;
	int z;
	struct starpu_task *task;
	if (PARALLEL) {
	/*
	 * Simple strategy:
	 *
	 * - twist1: twist input in n1*m1 (n2,m2) chunks
	 * - fft1: perform n1*m1 (n2,m2) ffts
	 * - twist2: twist into n2*m2 (n1,m1) chunks distributed in
	 * DIV_2D_N*DIV_2D_M groups
	 * - fft2: perform DIV_2D_N*DIV_2D_M times n3*m3 (n1,m1) ffts
	 * - twist3: twist back into output
	 */
#ifdef __STARPU_USE_CUDA
	/* cufft 2D-3D limited to [2,16384]: shrink n2 (growing n1) until the
	 * chunk fits. */
	while (n2 > 16384) {
		n1 *= 2;
		n2 /= 2;
	}
#endif
	/* n must factor exactly, and n1 must fit in its half of the tag. */
	STARPU_ASSERT(n == n1*n2);
	STARPU_ASSERT(n1 < (1ULL << J_BITS));
#ifdef __STARPU_USE_CUDA
	/* cufft 2D-3D limited to [2,16384] */
	while (m2 > 16384) {
		m1 *= 2;
		m2 /= 2;
	}
#endif
	STARPU_ASSERT(m == m1*m2);
	STARPU_ASSERT(m1 < (1ULL << J_BITS));
	/* distribute the n2*m2 second ffts into DIV_2D_N*DIV_2D_M packages */
	n3 = n2 / DIV_2D_N;
	STARPU_ASSERT(n2 == n3*DIV_2D_N);
	m3 = m2 / DIV_2D_M;
	STARPU_ASSERT(m2 == m3*DIV_2D_M);
	}
	/* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
	STARPU_ASSERT(flags == 0);
	/* NOTE(review): malloc results are not checked here or below; an
	 * allocation failure would crash on the memset. */
	STARPUFFT(plan) plan = malloc(sizeof(*plan));
	memset(plan, 0, sizeof(*plan));
	if (PARALLEL) {
	/* Unique plan number, used as part of every tag of this plan. */
	plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;
	/* 4bit limitation in the tag space */
	STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));
	}
	plan->dim = 2;
	plan->n = malloc(plan->dim * sizeof(*plan->n));
	plan->n[0] = n;
	plan->n[1] = m;
	if (PARALLEL) {
	check_dims(plan);
	/* Record the factorization (n = n1*n2, m = m1*m2). */
	plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
	plan->n1[0] = n1;
	plan->n1[1] = m1;
	plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
	plan->n2[0] = n2;
	plan->n2[1] = m2;
	}
	plan->totsize = n * m;
	if (PARALLEL) {
	plan->totsize1 = n1 * m1;	/* number of first-round chunks */
	plan->totsize2 = n2 * m2;	/* elements per first-round chunk */
	plan->totsize3 = DIV_2D_N * DIV_2D_M;	/* number of second-round packages */
	plan->totsize4 = plan->totsize / plan->totsize3;	/* elements per package */
	}
	plan->type = C2C;
	plan->sign = sign;
	if (PARALLEL) {
	/* Compute the w^k just once. */
	compute_roots(plan);
	}
	/* Initialize per-worker working set */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			if (PARALLEL) {
			/* first fft plan: one n2*m2 fft */
			plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_2d)(n2, m2, NULL, (void*) 1, sign, _FFTW_FLAGS);
			STARPU_ASSERT(plan->plans[workerid].plan1_cpu);
			/* second fft plan: n3*m3 n1*m1 ffts */
			plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
					plan->n1, n3*m3,
					NULL, NULL, 1, plan->totsize1,
					(void*) 1, NULL, 1, plan->totsize1,
					sign, _FFTW_FLAGS);
			STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
			} else {
			/* fft plan: one fft of size n, m. */
			plan->plans[workerid].plan_cpu = _FFTW(plan_dft_2d)(n, m, NULL, (void*) 1, sign, _FFTW_FLAGS);
			STARPU_ASSERT(plan->plans[workerid].plan_cpu);
			}
#else
/* #warning libstarpufft can not work correctly if libfftw3 is not installed */
#endif
			break;
		case STARPU_CUDA_WORKER:
			break;
		default:
			/* Do not care, we won't be executing anything there. */
			break;
		}
	}
#ifdef __STARPU_USE_CUDA
	/* cuFFT plans must be created from the worker that will use them. */
	if (PARALLEL) {
	starpu_execute_on_each_worker(STARPUFFT(fft1_2d_plan_gpu), plan, STARPU_CUDA);
	starpu_execute_on_each_worker(STARPUFFT(fft2_2d_plan_gpu), plan, STARPU_CUDA);
	} else {
	starpu_execute_on_each_worker(STARPUFFT(fft_2d_plan_gpu), plan, STARPU_CUDA);
	}
#endif
	if (PARALLEL) {
	/* Allocate buffers. */
	plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
	memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));
	plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
	memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));
	plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
	memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));
	plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
	memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));
	/* Allocate handle arrays */
	plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
	plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
	plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
	plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));
	/* Allocate task arrays */
	plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
	plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
	plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
	plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
	plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));
	/* Allocate codelet argument arrays */
	plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
	plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));
	/* Create first-round tasks: one twist1 + fft1 pair per (i,j) chunk. */
	for (z = 0; z < plan->totsize1; z++) {
		int i = z / m1, j = z % m1;
#define STEP_TAG(step)	STEP_TAG_2D(plan, step, i, j)
		/* TODO: get rid of tags */
		plan->fft1_args[z].plan = plan;
		plan->fft1_args[z].i = i;
		plan->fft1_args[z].j = j;
		/* Register (n2,m2) chunks */
		starpu_vector_data_register(&plan->twisted1_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));
		starpu_vector_data_register(&plan->fft1_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));
		/* We'll need it on the CPU for the second twist anyway */
		starpu_data_set_wt_mask(plan->fft1_handle[z], 1<<0);
		/* Create twist1 task */
		plan->twist1_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist1_2d_codelet);
		/* task->handles[0] = to be filled at execution */
		task->handles[1] = plan->twisted1_handle[z];
		task->cl_arg = &plan->fft1_args[z];
		task->tag_id = STEP_TAG(TWIST1);
		task->use_tag = 1;
		task->destroy = 0;
		/* Tell that fft1 depends on twisted1 */
		starpu_tag_declare_deps(STEP_TAG(FFT1),
				1, STEP_TAG(TWIST1));
		/* Create FFT1 task */
		plan->fft1_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(fft1_2d_codelet);
		task->handles[0] = plan->twisted1_handle[z];
		task->handles[1] = plan->fft1_handle[z];
		task->handles[2] = plan->roots_handle[0];
		task->handles[3] = plan->roots_handle[1];
		task->cl_arg = &plan->fft1_args[z];
		task->tag_id = STEP_TAG(FFT1);
		task->use_tag = 1;
		task->destroy = 0;
		/* Tell that to be done with first step we need to have
		 * finished this fft1 */
		starpu_tag_declare_deps(STEP_TAG_2D(plan, JOIN, 0, 0),
				1, STEP_TAG(FFT1));
#undef STEP_TAG
	}
	/* Create join task: an empty task acting as a barrier between the
	 * two rounds. */
	plan->join_task = task = starpu_task_create();
	task->cl = NULL;
	task->tag_id = STEP_TAG_2D(plan, JOIN, 0, 0);
	task->use_tag = 1;
	task->destroy = 0;
	/* Create second-round tasks: one twist2 + fft2 + twist3 triple per
	 * (kk,ll) package. */
	for (z = 0; z < plan->totsize3; z++) {
		int kk = z / DIV_2D_M, ll = z % DIV_2D_M;
#define STEP_TAG(step)	STEP_TAG_2D(plan, step, kk, ll)
		plan->fft2_args[z].plan = plan;
		plan->fft2_args[z].kk = kk;
		plan->fft2_args[z].ll = ll;
		/* Register n3*m3 (n1,m1) chunks */
		starpu_vector_data_register(&plan->twisted2_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
		starpu_vector_data_register(&plan->fft2_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));
		/* We'll need it on the CPU for the last twist anyway */
		starpu_data_set_wt_mask(plan->fft2_handle[z], 1<<0);
		/* Tell that twisted2 depends on the whole first step to be
		 * done */
		starpu_tag_declare_deps(STEP_TAG(TWIST2),
				1, STEP_TAG_2D(plan, JOIN, 0, 0));
		/* Create twist2 task */
		plan->twist2_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist2_2d_codelet);
		task->handles[0] = plan->twisted2_handle[z];
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(TWIST2);
		task->use_tag = 1;
		task->destroy = 0;
		/* Tell that fft2 depends on twisted2 */
		starpu_tag_declare_deps(STEP_TAG(FFT2),
				1, STEP_TAG(TWIST2));
		/* Create FFT2 task */
		plan->fft2_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(fft2_2d_codelet);
		task->handles[0] = plan->twisted2_handle[z];
		task->handles[1] = plan->fft2_handle[z];
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(FFT2);
		task->use_tag = 1;
		task->destroy = 0;
		/* Tell that twist3 depends on fft2 */
		starpu_tag_declare_deps(STEP_TAG(TWIST3),
				1, STEP_TAG(FFT2));
		/* Create twist3 tasks */
		/* These run only on CPUs and thus write directly into the
		 * application output buffer. */
		plan->twist3_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist3_2d_codelet);
		task->handles[0] = plan->fft2_handle[z];
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(TWIST3);
		task->use_tag = 1;
		task->destroy = 0;
		/* Tell that to be completely finished we need to have finished this twisted3 */
		starpu_tag_declare_deps(STEP_TAG_2D(plan, END, 0, 0),
				1, STEP_TAG(TWIST3));
#undef STEP_TAG
	}
	/* Create end task: empty, depends on every twist3; the caller waits
	 * on it (detach = 0). */
	plan->end_task = task = starpu_task_create();
	task->cl = NULL;
	task->tag_id = STEP_TAG_2D(plan, END, 0, 0);
	task->use_tag = 1;
	task->destroy = 0;
	task->detach = 0;
	}
	return plan;
}
/* Actually submit all the tasks. */
/* Returns the task to wait for (the end task in PARALLEL mode, the single
 * fft task otherwise), or NULL if no device can execute them (-ENODEV).
 * In PARALLEL mode `in` and `out` are unused here: the twist1 input handle
 * is filled in at execution time and twist3 writes to plan->out directly. */
static struct starpu_task *
STARPUFFT(start2dC2C)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	STARPU_ASSERT(plan->type == C2C);
	int z;
	int ret;
	if (PARALLEL) {
	/* First round: twist1 + fft1 per chunk. */
	for (z=0; z < plan->totsize1; z++) {
		ret = starpu_task_submit(plan->twist1_tasks[z]);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		ret = starpu_task_submit(plan->fft1_tasks[z]);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}
	/* Barrier between the two rounds. */
	ret = starpu_task_submit(plan->join_task);
	if (ret == -ENODEV) return NULL;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	/* Second round: twist2 + fft2 + twist3 per package. */
	for (z=0; z < plan->totsize3; z++) {
		ret = starpu_task_submit(plan->twist2_tasks[z]);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		ret = starpu_task_submit(plan->fft2_tasks[z]);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		ret = starpu_task_submit(plan->twist3_tasks[z]);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}
	ret = starpu_task_submit(plan->end_task);
	if (ret == -ENODEV) return NULL;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	return plan->end_task;
	} else /* !PARALLEL */ {
	struct starpu_task *task;
	/* Create FFT task */
	task = starpu_task_create();
	task->detach = 0;
	task->cl = &STARPUFFT(fft_2d_codelet);
	task->handles[0] = in;
	task->handles[1] = out;
	task->cl_arg = plan;
	ret = starpu_task_submit(task);
	if (ret == -ENODEV) return NULL;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	return task;
	}
}
  721. /* Free all the tags. The generic code handles freeing the buffers. */
  722. static void
  723. STARPUFFT(free_2d_tags)(STARPUFFT(plan) plan)
  724. {
  725. int i, j;
  726. int n1 = plan->n1[0];
  727. int m1 = plan->n1[1];
  728. if (!PARALLEL)
  729. return;
  730. for (i = 0; i < n1; i++) {
  731. for (j = 0; j < m1; j++) {
  732. starpu_tag_remove(STEP_TAG_2D(plan, TWIST1, i, j));
  733. starpu_tag_remove(STEP_TAG_2D(plan, FFT1, i, j));
  734. }
  735. }
  736. starpu_tag_remove(STEP_TAG_2D(plan, JOIN, 0, 0));
  737. for (i = 0; i < DIV_2D_N; i++) {
  738. for (j = 0; j < DIV_2D_M; j++) {
  739. starpu_tag_remove(STEP_TAG_2D(plan, TWIST2, i, j));
  740. starpu_tag_remove(STEP_TAG_2D(plan, FFT2, i, j));
  741. starpu_tag_remove(STEP_TAG_2D(plan, TWIST3, i, j));
  742. }
  743. }
  744. starpu_tag_remove(STEP_TAG_2D(plan, END, 0, 0));
  745. }