/* starpufftx1d.c */

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 *
 * Dumb parallel version
 *
 */

#define DIV_1D 64

/*
 * Overall strategy for an fft of size n:
 * - perform n1 ffts of size n2
 * - twiddle
 * - perform n2 ffts of size n1
 *
 * - n1 defaults to DIV_1D, thus n2 defaults to n / DIV_1D.
 *
 * Precise tasks:
 *
 * - twist1: twist the whole n-element input (called "in") into n1 chunks of
 *           size n2, by using n1 tasks taking the whole n-element input as a
 *           R parameter and one n2-element output as a W parameter. The
 *           result is called twisted1.
 * - fft1:   perform n1 ffts of size n2, by using n1 tasks doing one fft each.
 *           Also twiddle the result to prepare for the fft2. The result is
 *           called fft1.
 * - join:   depends on all the fft1s, to gather the n1 results of size n2 in
 *           the fft1 vector.
 * - twist2: twist the fft1 vector into n2 chunks of size n1, called twisted2.
 *           Since n2 is typically very large, this step is divided into
 *           DIV_1D tasks, each of them twisting n2/DIV_1D chunks.
 * - fft2:   perform n2 ffts of size n1. This is divided into DIV_1D tasks of
 *           n2/DIV_1D ffts each, to be performed in batches. The result is
 *           called fft2.
 * - twist3: twist back the result of the fft2s above into the output buffer.
 *           Only implemented on CPUs for simplicity of the gathering.
 *
 * (A sequential sketch of these steps is given below, after STEP_TAG_1D.)
 *
 * The tag space thus uses 3 dimensions:
 * - the number of the plan.
 * - the step (TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END)
 * - an index i between 0 and DIV_1D-1.
 */

#define STEP_TAG_1D(plan, step, i) _STEP_TAG(plan, step, i)
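
/*
 * For intuition, the steps above implement the classical Cooley-Tukey
 * identity for n = n1*n2, writing w_m = exp(sign * 2*pi*I/m), input index
 * n1*j2 + j1 and output index n2*k1 + k2:
 *
 *   out[n2*k1 + k2] = sum_{j1=0}^{n1-1} w_n1^{j1*k1} * w_n^{j1*k2}
 *                       * sum_{j2=0}^{n2-1} in[n1*j2 + j1] * w_n2^{j2*k2}
 *
 * The inner sums are the fft1s (computed on the twisted1 chunks), the
 * w_n^{j1*k2} factors are the twiddle applied right after them, and the
 * outer sums are the fft2s (computed on the twisted2 chunks).
 *
 * Below is a naive, purely illustrative sequential reference of the same
 * five steps (kept out of the build; dft() is a hypothetical helper
 * computing a plain DFT of the given size and sign, and the per-task
 * splitting of the StarPU version is ignored):
 */
#if 0
#include <complex.h>
#include <math.h>
#include <stdlib.h>

/* Hypothetical plain DFT helper, for illustration only. */
extern void dft(const complex double *in, complex double *out, int n, int sign);

static void reference_fft_1d(const complex double *in, complex double *out,
			     int n1, int n2, int sign)
{
	int n = n1 * n2;
	complex double *tmp = malloc(n * sizeof(*tmp));
	for (int i = 0; i < n1; i++) {
		complex double twisted1[n2], fft1[n2];
		for (int j = 0; j < n2; j++)	/* twist1: stride-n1 gather */
			twisted1[j] = in[i + j*n1];
		dft(twisted1, fft1, n2, sign);	/* fft1: one fft of size n2 */
		for (int j = 0; j < n2; j++)	/* twiddle by w_n^{i*j} */
			fft1[j] *= cexp(sign * 2*M_PI*I * i * j / n);
		for (int j = 0; j < n2; j++)
			tmp[i*n2 + j] = fft1[j];
	}
	for (int j = 0; j < n2; j++) {
		complex double twisted2[n1], fft2[n1];
		for (int i = 0; i < n1; i++)	/* twist2: stride-n2 gather */
			twisted2[i] = tmp[i*n2 + j];
		dft(twisted2, fft2, n1, sign);	/* fft2: one fft of size n1 */
		for (int i = 0; i < n1; i++)	/* twist3: scatter to output */
			out[i*n2 + j] = fft2[i];
	}
	free(tmp);
}
#endif
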
#ifdef __STARPU_USE_CUDA

/* twist1:
 *
 * Twist the full input vector (first parameter) into one chunk of size n2
 * (second parameter) */
static void
STARPUFFT(twist1_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);

	STARPUFFT(cuda_twist1_1d_host)(in, twisted1, i, n1, n2);

	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}

/* fft1:
 *
 * Perform one fft of size n2 */
static void
STARPUFFT(fft1_1d_plan_gpu)(void *args)
{
	STARPUFFT(plan) plan = args;
	int n2 = plan->n2[0];
	int workerid = starpu_worker_get_id_check();
	cufftResult cures;

	cures = cufftPlan1d(&plan->plans[workerid].plan1_cuda, n2, _CUFFT_C2C, 1);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	/* Check the result of cufftSetStream itself, not the stale value
	 * left over from cufftPlan1d above. */
	cures = cufftSetStream(plan->plans[workerid].plan1_cuda, starpu_cuda_get_local_stream());
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
}

static void
STARPUFFT(fft1_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int n2 = plan->n2[0];
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	const _cufftComplex * restrict roots = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[2]);
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);

	STARPUFFT(cuda_twiddle_1d_host)(out, roots, n2, i);

	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}

/* fft2:
 *
 * Perform n3 = n2/DIV_1D ffts of size n1 */
static void
STARPUFFT(fft2_1d_plan_gpu)(void *args)
{
	STARPUFFT(plan) plan = args;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int n3 = n2/DIV_1D;
	cufftResult cures;
	int workerid = starpu_worker_get_id_check();

	cures = cufftPlan1d(&plan->plans[workerid].plan2_cuda, n1, _CUFFT_C2C, n3);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	/* As above, check the result of cufftSetStream itself. */
	cures = cufftSetStream(plan->plans[workerid].plan2_cuda, starpu_cuda_get_local_stream());
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
}

static void
STARPUFFT(fft2_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	/* NOTE using batch support */
	cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);

	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

/* twist1:
 *
 * Twist the full input vector (first parameter) into one chunk of size n2
 * (second parameter) */
static void
STARPUFFT(twist1_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* printf("twist1 %d %g\n", i, (double) cabs(plan->in[i])); */

	for (j = 0; j < n2; j++)
		twisted1[j] = in[i+j*n1];
}
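
/* Worked example of the twist1 gather, assuming n1 = 4 and n2 = 3 (so
 * n = 12): the task with i = 1 reads in[1], in[5], in[9] (stride n1) and
 * stores them contiguously as twisted1[0..2]. Taken together, the n1 tasks
 * transpose the input so that each size-n2 fft1 below operates on a
 * contiguous chunk. */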

#ifdef STARPU_HAVE_FFTW
/* fft1:
 *
 * Perform one fft of size n2 */
static void
STARPUFFT(fft1_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j;
	int n2 = plan->n2[0];
	int workerid = starpu_worker_get_id_check();
	STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict fft1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	task_per_worker[workerid]++;

	/* printf("fft1 %d %g\n", i, (double) cabs(twisted1[0])); */

	_FFTW(execute_dft)(plan->plans[workerid].plan1_cpu, twisted1, fft1);

	/* twiddle fft1 buffer */
	for (j = 0; j < n2; j++)
		fft1[j] = fft1[j] * plan->roots[0][i*j];
}
#endif

/* twist2:
 *
 * Twist the full vector (results of the fft1s) into one package of n2/DIV_1D
 * chunks of size n1 */
static void
STARPUFFT(twist2_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int jj = args->jj;	/* between 0 and DIV_1D-1 */
	int jjj;		/* between 0 and n3-1 */
	int i;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int n3 = n2/DIV_1D;
	STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);

	/* printf("twist2 %d %g\n", jj, (double) cabs(plan->fft1[jj])); */

	for (jjj = 0; jjj < n3; jjj++) {
		int j = jj * n3 + jjj;
		for (i = 0; i < n1; i++)
			twisted2[jjj*n1+i] = plan->fft1[i*n2+j];
	}
}

#ifdef STARPU_HAVE_FFTW
/* fft2:
 *
 * Perform n3 = n2/DIV_1D ffts of size n1 */
static void
STARPUFFT(fft2_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	/* int jj = args->jj; */
	int workerid = starpu_worker_get_id_check();
	STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	task_per_worker[workerid]++;

	/* printf("fft2 %d %g\n", jj, (double) cabs(twisted2[plan->totsize4-1])); */

	_FFTW(execute_dft)(plan->plans[workerid].plan2_cpu, twisted2, fft2);
}
#endif

/* twist3:
 *
 * Spread the package of n2/DIV_1D chunks of size n1 into the output vector */
static void
STARPUFFT(twist3_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int jj = args->jj;	/* between 0 and DIV_1D-1 */
	int jjj;		/* between 0 and n3-1 */
	int i;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int n3 = n2/DIV_1D;
	const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);

	/* printf("twist3 %d %g\n", jj, (double) cabs(fft2[0])); */

	for (jjj = 0; jjj < n3; jjj++) {
		int j = jj * n3 + jjj;
		for (i = 0; i < n1; i++)
			plan->out[i*n2+j] = fft2[jjj*n1+i];
	}
}

/* Performance models for the 5 kinds of tasks */
static struct starpu_perfmodel STARPUFFT(twist1_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist1_1d"
};

static struct starpu_perfmodel STARPUFFT(fft1_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft1_1d"
};

static struct starpu_perfmodel STARPUFFT(twist2_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist2_1d"
};

static struct starpu_perfmodel STARPUFFT(fft2_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft2_1d"
};

static struct starpu_perfmodel STARPUFFT(twist3_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist3_1d"
};

/* codelet pointers for the 5 kinds of tasks */
static struct starpu_codelet STARPUFFT(twist1_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
		STARPU_CPU,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(twist1_1d_kernel_gpu)},
#endif
	.cpu_funcs = {STARPUFFT(twist1_1d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist1_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "twist1_1d_codelet"
};

static struct starpu_codelet STARPUFFT(fft1_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft1_1d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft1_1d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft1_1d_model),
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_W, STARPU_R},
	.name = "fft1_1d_codelet"
};

static struct starpu_codelet STARPUFFT(twist2_1d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist2_1d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist2_1d_model),
	.nbuffers = 1,
	.modes = {STARPU_W},
	.name = "twist2_1d_codelet"
};

static struct starpu_codelet STARPUFFT(fft2_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft2_1d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft2_1d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft2_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "fft2_1d_codelet"
};

static struct starpu_codelet STARPUFFT(twist3_1d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist3_1d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist3_1d_model),
	.nbuffers = 1,
	.modes = {STARPU_R},
	.name = "twist3_1d_codelet"
};

/*
 *
 * Sequential version
 *
 */

#ifdef __STARPU_USE_CUDA
/* Perform one fft of size n */
static void
STARPUFFT(fft_1d_plan_gpu)(void *args)
{
	STARPUFFT(plan) plan = args;
	cufftResult cures;
	int n = plan->n[0];
	int workerid = starpu_worker_get_id_check();

	cures = cufftPlan1d(&plan->plans[workerid].plan_cuda, n, _CUFFT_C2C, 1);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	/* As above, check the result of cufftSetStream itself. */
	cures = cufftSetStream(plan->plans[workerid].plan_cuda, starpu_cuda_get_local_stream());
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
}

static void
STARPUFFT(fft_1d_kernel_gpu)(void *descr[], void *args)
{
	STARPUFFT(plan) plan = args;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	cures = _cufftExecC2C(plan->plans[workerid].plan_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);

	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

#ifdef STARPU_HAVE_FFTW
/* Perform one fft of size n */
static void
STARPUFFT(fft_1d_kernel_cpu)(void *descr[], void *_args)
{
	STARPUFFT(plan) plan = _args;
	int workerid = starpu_worker_get_id_check();
	STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict out = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	task_per_worker[workerid]++;

	_FFTW(execute_dft)(plan->plans[workerid].plan_cpu, in, out);
}
#endif

static struct starpu_perfmodel STARPUFFT(fft_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft_1d"
};

static struct starpu_codelet STARPUFFT(fft_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft_1d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft_1d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "fft_1d_codelet"
};

/* Planning:
 *
 * - For each CPU worker, we need to plan the two fftw stages.
 * - For GPU workers, the planning has to be done within the CUDA context, so
 *   it is performed on each worker through starpu_execute_on_each_worker
 *   (see below).
 * - We allocate all the temporary buffers and register them to starpu.
 * - We create all the tasks, but do not submit them yet. It will be possible
 *   to reuse them at will to perform several ffts with the same planning.
 */
STARPUFFT(plan)
STARPUFFT(plan_dft_1d)(int n, int sign, unsigned flags)
{
	unsigned workerid;
	int n1 = DIV_1D;
	int n2 = n / n1;
	int n3;
	int z;
	struct starpu_task *task;

	if (PARALLEL) {
#ifdef __STARPU_USE_CUDA
		/* cufft 1D limited to 8M elements */
		while (n2 > (8 << 20)) {
			n1 *= 2;
			n2 /= 2;
		}
#endif
		STARPU_ASSERT(n == n1*n2);
		STARPU_ASSERT(n1 < (1ULL << I_BITS));

		/* distribute the n2 second ffts into DIV_1D packages */
		n3 = n2 / DIV_1D;
		STARPU_ASSERT(n2 == n3*DIV_1D);
	}

	/* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
	STARPU_ASSERT(flags == 0);

	STARPUFFT(plan) plan = malloc(sizeof(*plan));
	memset(plan, 0, sizeof(*plan));

	if (PARALLEL) {
		plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;

		/* The plan number has a limited size */
		STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));
	}

	/* Just one dimension */
	plan->dim = 1;
	plan->n = malloc(plan->dim * sizeof(*plan->n));
	plan->n[0] = n;

	if (PARALLEL) {
		check_dims(plan);

		plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
		plan->n1[0] = n1;
		plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
		plan->n2[0] = n2;
	}

	/* Note: this is for coherency with the 2D case */
	plan->totsize = n;

	if (PARALLEL) {
		plan->totsize1 = n1;
		plan->totsize2 = n2;
		plan->totsize3 = DIV_1D;
		plan->totsize4 = plan->totsize / plan->totsize3;
	}
	plan->type = C2C;
	plan->sign = sign;

	if (PARALLEL) {
		/* Compute the w^k just once. */
		compute_roots(plan);
	}

	/* Initialize per-worker working set */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			if (PARALLEL) {
				/* first fft plan: one fft of size n2.
				 * FFTW imposes that buffer pointers are known at
				 * planning time. */
				plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_1d)(n2, NULL, (void*) 1, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan1_cpu);

				/* second fft plan: n3 ffts of size n1 */
				plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
						plan->n1, n3,
						NULL, NULL, 1, plan->totsize1,
						(void*) 1, NULL, 1, plan->totsize1,
						sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
			} else {
				/* fft plan: one fft of size n. */
				plan->plans[workerid].plan_cpu = _FFTW(plan_dft_1d)(n, NULL, (void*) 1, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan_cpu);
			}
#else
			/* #warning libstarpufft cannot work correctly if libfftw3 is not installed */
#endif
			break;
		case STARPU_CUDA_WORKER:
			break;
		default:
			/* Do not care, we won't be executing anything there. */
			break;
		}
	}

#ifdef __STARPU_USE_CUDA
	if (PARALLEL) {
		starpu_execute_on_each_worker(STARPUFFT(fft1_1d_plan_gpu), plan, STARPU_CUDA);
		starpu_execute_on_each_worker(STARPUFFT(fft2_1d_plan_gpu), plan, STARPU_CUDA);
	} else {
		starpu_execute_on_each_worker(STARPUFFT(fft_1d_plan_gpu), plan, STARPU_CUDA);
	}
#endif

	if (PARALLEL) {
		/* Allocate buffers. */
		plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
		memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));

		plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
		memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));

		plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
		memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));

		plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
		memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));

		/* Allocate handle arrays */
		plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
		plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
		plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
		plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));

		/* Allocate task arrays */
		plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
		plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
		plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
		plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
		plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));

		/* Allocate codelet argument arrays */
		plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
		plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));

		/* Create first-round tasks: DIV_1D tasks of type twist1 and fft1 */
		for (z = 0; z < plan->totsize1; z++) {
			int i = z;
#define STEP_TAG(step)	STEP_TAG_1D(plan, step, i)

			/* TODO: get rid of tags */

			plan->fft1_args[z].plan = plan;
			plan->fft1_args[z].i = i;

			/* Register the twisted1 buffer of size n2. */
			starpu_vector_data_register(&plan->twisted1_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));

			/* Register the fft1 buffer of size n2. */
			starpu_vector_data_register(&plan->fft1_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));

			/* We'll need the result of fft1 on the CPU for the second
			 * twist anyway, so tell starpu to not keep the fft1 buffer in
			 * the GPU. */
			starpu_data_set_wt_mask(plan->fft1_handle[z], 1<<0);

			/* Create twist1 task */
			plan->twist1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist1_1d_codelet);
			/* task->handles[0] = to be filled at execution to point
			   to the application input. */
			task->handles[1] = plan->twisted1_handle[z];
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(TWIST1);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that fft1 depends on twisted1 */
			starpu_tag_declare_deps(STEP_TAG(FFT1),
					1, STEP_TAG(TWIST1));

			/* Create FFT1 task */
			plan->fft1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft1_1d_codelet);
			task->handles[0] = plan->twisted1_handle[z];
			task->handles[1] = plan->fft1_handle[z];
			task->handles[2] = plan->roots_handle[0];
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(FFT1);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that the join task will depend on the fft1 task. */
			starpu_tag_declare_deps(STEP_TAG_1D(plan, JOIN, 0),
					1, STEP_TAG(FFT1));
#undef STEP_TAG
		}

		/* Create the join task, only serving as a dependency point between
		 * fft1 and twist2 tasks */
		plan->join_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_1D(plan, JOIN, 0);
		task->use_tag = 1;
		task->destroy = 0;

		/* Create second-round tasks: DIV_1D batches of n2/DIV_1D twist2, fft2,
		 * and twist3 */
		for (z = 0; z < plan->totsize3; z++) {
			int jj = z;
#define STEP_TAG(step)	STEP_TAG_1D(plan, step, jj)

			plan->fft2_args[z].plan = plan;
			plan->fft2_args[z].jj = jj;

			/* Register n3 twisted2 buffers of size n1 */
			starpu_vector_data_register(&plan->twisted2_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
			starpu_vector_data_register(&plan->fft2_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));

			/* We'll need the result of fft2 on the CPU for the third
			 * twist anyway, so tell starpu to not keep the fft2 buffer in
			 * the GPU. */
			starpu_data_set_wt_mask(plan->fft2_handle[z], 1<<0);

			/* Tell that twisted2 depends on the join task */
			starpu_tag_declare_deps(STEP_TAG(TWIST2),
					1, STEP_TAG_1D(plan, JOIN, 0));

			/* Create twist2 task */
			plan->twist2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist2_1d_codelet);
			task->handles[0] = plan->twisted2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST2);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that fft2 depends on twisted2 */
			starpu_tag_declare_deps(STEP_TAG(FFT2),
					1, STEP_TAG(TWIST2));

			/* Create FFT2 task */
			plan->fft2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft2_1d_codelet);
			task->handles[0] = plan->twisted2_handle[z];
			task->handles[1] = plan->fft2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(FFT2);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that twist3 depends on fft2 */
			starpu_tag_declare_deps(STEP_TAG(TWIST3),
					1, STEP_TAG(FFT2));

			/* Create twist3 tasks */
			/* These run only on CPUs and thus write directly into the
			 * application output buffer. */
			plan->twist3_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist3_1d_codelet);
			task->handles[0] = plan->fft2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST3);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that to be completely finished we need to have finished
			 * this twisted3 */
			starpu_tag_declare_deps(STEP_TAG_1D(plan, END, 0),
					1, STEP_TAG(TWIST3));
#undef STEP_TAG
		}

		/* Create end task, only serving as a join point. */
		plan->end_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_1D(plan, END, 0);
		task->use_tag = 1;
		task->destroy = 0;
		task->detach = 0;
	}

	return plan;
}
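
/* A minimal usage sketch (illustrative only, kept out of the build),
 * assuming the FFTW-style public wrappers declared in starpufft.h
 * (starpufft_plan_dft_1d, starpufft_execute, starpufft_destroy_plan,
 * starpufft_malloc and starpufft_free), which expand to the STARPUFFT()
 * entry points built from this file. Note that flags must be 0, as asserted
 * in the planning function above, and that sign == -1 is the forward
 * direction (see the CUFFT_FORWARD selection in the kernels). */
#if 0
#include <starpu.h>
#include <starpufft.h>

int main(void)
{
	int n = 1 << 16;
	if (starpu_init(NULL) != 0)
		return 1;
	starpufft_complex *in = starpufft_malloc(n * sizeof(*in));
	starpufft_complex *out = starpufft_malloc(n * sizeof(*out));
	/* ... fill in[] ... */
	starpufft_plan p = starpufft_plan_dft_1d(n, -1 /* forward */, 0);
	starpufft_execute(p, in, out);	/* submits the task graph created above */
	starpufft_destroy_plan(p);
	starpufft_free(in);
	starpufft_free(out);
	starpu_shutdown();
	return 0;
}
#endif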

/* Actually submit all the tasks. */
static struct starpu_task *
STARPUFFT(start1dC2C)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	STARPU_ASSERT(plan->type == C2C);
	int z;
	int ret;

	if (PARALLEL) {
		for (z = 0; z < plan->totsize1; z++) {
			ret = starpu_task_submit(plan->twist1_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->fft1_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_task_submit(plan->join_task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		for (z = 0; z < plan->totsize3; z++) {
			ret = starpu_task_submit(plan->twist2_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->fft2_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->twist3_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_task_submit(plan->end_task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		return plan->end_task;
	} else /* !PARALLEL */ {
		struct starpu_task *task;

		/* Create FFT task */
		task = starpu_task_create();
		task->detach = 0;
		task->cl = &STARPUFFT(fft_1d_codelet);
		task->handles[0] = in;
		task->handles[1] = out;
		task->cl_arg = plan;

		ret = starpu_task_submit(task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		return task;
	}
}

/* Free all the tags. The generic code handles freeing the buffers. */
static void
STARPUFFT(free_1d_tags)(STARPUFFT(plan) plan)
{
	int i;
	int n1 = plan->n1[0];

	if (!PARALLEL)
		return;

	for (i = 0; i < n1; i++) {
		starpu_tag_remove(STEP_TAG_1D(plan, TWIST1, i));
		starpu_tag_remove(STEP_TAG_1D(plan, FFT1, i));
	}
	starpu_tag_remove(STEP_TAG_1D(plan, JOIN, 0));
	for (i = 0; i < DIV_1D; i++) {
		starpu_tag_remove(STEP_TAG_1D(plan, TWIST2, i));
		starpu_tag_remove(STEP_TAG_1D(plan, FFT2, i));
		starpu_tag_remove(STEP_TAG_1D(plan, TWIST3, i));
	}
	starpu_tag_remove(STEP_TAG_1D(plan, END, 0));
}