/* starpufftx1d.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. /*
  18. *
  19. * Dumb parallel version
  20. *
  21. */
  22. #define DIV_1D 64
  23. /*
  24. * Overall strategy for an fft of size n:
  25. * - perform n1 ffts of size n2
  26. * - twiddle
  27. * - perform n2 ffts of size n1
  28. *
  29. * - n1 defaults to DIV_1D, thus n2 defaults to n / DIV_1D.
  30. *
  31. * Precise tasks:
  32. *
  33. * - twist1: twist the whole n-element input (called "in") into n1 chunks of
  34. * size n2, by using n1 tasks taking the whole n-element input as a
  35. * R parameter and one n2 output as a W parameter. The result is
  36. * called twisted1.
  37. * - fft1: perform n1 (n2) ffts, by using n1 tasks doing one fft each. Also
  38. * twiddle the result to prepare for the fft2. The result is called
  39. * fft1.
  40. * - join: depends on all the fft1s, to gather the n1 results of size n2 in
  41. * the fft1 vector.
  42. * - twist2: twist the fft1 vector into n2 chunks of size n1, called twisted2.
  43. * since n2 is typically very large, this step is divided in DIV_1D
  44. * tasks, each of them performing n2/DIV_1D of them
  45. * - fft2: perform n2 ffts of size n1. This is divided in DIV_1D tasks of
  46. * n2/DIV_1D ffts, to be performed in batches. The result is called
  47. * fft2.
  48. * - twist3: twist back the result of the fft2s above into the output buffer.
  49. * Only implemented on CPUs for simplicity of the gathering.
  50. *
  51. * The tag space thus uses 3 dimensions:
  52. * - the number of the plan.
  53. * - the step (TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END)
  54. * - an index i between 0 and DIV_1D-1.
  55. */
  56. #define STEP_TAG_1D(plan, step, i) _STEP_TAG(plan, step, i)
  57. #ifdef __STARPU_USE_CUDA
  58. /* twist1:
  59. *
  60. * Twist the full input vector (first parameter) into one chunk of size n2
  61. * (second parameter) */
  62. static void
  63. STARPUFFT(twist1_1d_kernel_gpu)(void *descr[], void *_args)
  64. {
  65. struct STARPUFFT(args) *args = _args;
  66. STARPUFFT(plan) plan = args->plan;
  67. int i = args->i;
  68. int n1 = plan->n1[0];
  69. int n2 = plan->n2[0];
  70. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  71. _cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  72. STARPUFFT(cuda_twist1_1d_host)(in, twisted1, i, n1, n2);
  73. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  74. }
  75. /* fft1:
  76. *
  77. * Perform one fft of size n2 */
  78. static void
  79. STARPUFFT(fft1_1d_plan_gpu)(void *args)
  80. {
  81. STARPUFFT(plan) plan = args;
  82. int n2 = plan->n2[0];
  83. int workerid = starpu_worker_get_id();
  84. cufftResult cures;
  85. cures = cufftPlan1d(&plan->plans[workerid].plan1_cuda, n2, _CUFFT_C2C, 1);
  86. if (cures != CUFFT_SUCCESS)
  87. STARPU_CUFFT_REPORT_ERROR(cures);
  88. cufftSetStream(plan->plans[workerid].plan1_cuda, starpu_cuda_get_local_stream());
  89. if (cures != CUFFT_SUCCESS)
  90. STARPU_CUFFT_REPORT_ERROR(cures);
  91. }
/* fft1 (CUDA): perform one fft of size n2 on a twisted1 chunk (buffer 0 ->
 * buffer 1), then twiddle the result in place using the precomputed roots
 * (buffer 2) to prepare for the second-round ffts. */
static void
STARPUFFT(fft1_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;		/* chunk index, in [0, n1) */
	int n2 = plan->n2[0];
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	const _cufftComplex * restrict roots = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[2]);
	int workerid = starpu_worker_get_id();
	/* per-worker task counter, used for statistics */
	task_per_worker[workerid]++;
	cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	/* twiddle out[j] by roots[i*j] on the GPU */
	STARPUFFT(cuda_twiddle_1d_host)(out, roots, n2, i);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  111. /* fft2:
  112. *
  113. * Perform n3 = n2/DIV_1D ffts of size n1 */
  114. static void
  115. STARPUFFT(fft2_1d_plan_gpu)(void *args)
  116. {
  117. STARPUFFT(plan) plan = args;
  118. int n1 = plan->n1[0];
  119. int n2 = plan->n2[0];
  120. int n3 = n2/DIV_1D;
  121. cufftResult cures;
  122. int workerid = starpu_worker_get_id();
  123. cures = cufftPlan1d(&plan->plans[workerid].plan2_cuda, n1, _CUFFT_C2C, n3);
  124. if (cures != CUFFT_SUCCESS)
  125. STARPU_CUFFT_REPORT_ERROR(cures);
  126. cufftSetStream(plan->plans[workerid].plan2_cuda, starpu_cuda_get_local_stream());
  127. if (cures != CUFFT_SUCCESS)
  128. STARPU_CUFFT_REPORT_ERROR(cures);
  129. }
/* fft2 (CUDA): perform one batch of n3 = n2/DIV_1D ffts of size n1
 * (buffer 0 -> buffer 1), using cufft's batch support (plan2_cuda was
 * created with a batch count of n3). */
static void
STARPUFFT(fft2_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id();
	/* per-worker task counter, used for statistics */
	task_per_worker[workerid]++;
	/* NOTE using batch support */
	cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  146. #endif
  147. /* twist1:
  148. *
  149. * Twist the full input vector (first parameter) into one chunk of size n2
  150. * (second parameter) */
  151. static void
  152. STARPUFFT(twist1_1d_kernel_cpu)(void *descr[], void *_args)
  153. {
  154. struct STARPUFFT(args) *args = _args;
  155. STARPUFFT(plan) plan = args->plan;
  156. int i = args->i;
  157. int j;
  158. int n1 = plan->n1[0];
  159. int n2 = plan->n2[0];
  160. STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  161. STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  162. /* printf("twist1 %d %g\n", i, (double) cabs(plan->in[i])); */
  163. for (j = 0; j < n2; j++)
  164. twisted1[j] = in[i+j*n1];
  165. }
  166. #ifdef STARPU_HAVE_FFTW
  167. /* fft1:
  168. *
  169. * Perform one fft of size n2 */
  170. static void
  171. STARPUFFT(fft1_1d_kernel_cpu)(void *descr[], void *_args)
  172. {
  173. struct STARPUFFT(args) *args = _args;
  174. STARPUFFT(plan) plan = args->plan;
  175. int i = args->i;
  176. int j;
  177. int n2 = plan->n2[0];
  178. int workerid = starpu_worker_get_id();
  179. task_per_worker[workerid]++;
  180. STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  181. STARPUFFT(complex) * restrict fft1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  182. /* printf("fft1 %d %g\n", i, (double) cabs(twisted1[0])); */
  183. _FFTW(execute_dft)(plan->plans[workerid].plan1_cpu, twisted1, fft1);
  184. /* twiddle fft1 buffer */
  185. for (j = 0; j < n2; j++)
  186. fft1[j] = fft1[j] * plan->roots[0][i*j];
  187. }
  188. #endif
  189. /* twist2:
  190. *
  191. * Twist the full vector (results of the fft1s) into one package of n2/DIV_1D
  192. * chunks of size n1 */
  193. static void
  194. STARPUFFT(twist2_1d_kernel_cpu)(void *descr[], void *_args)
  195. {
  196. struct STARPUFFT(args) *args = _args;
  197. STARPUFFT(plan) plan = args->plan;
  198. int jj = args->jj; /* between 0 and DIV_1D */
  199. int jjj; /* beetween 0 and n3 */
  200. int i;
  201. int n1 = plan->n1[0];
  202. int n2 = plan->n2[0];
  203. int n3 = n2/DIV_1D;
  204. STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  205. /* printf("twist2 %d %g\n", jj, (double) cabs(plan->fft1[jj])); */
  206. for (jjj = 0; jjj < n3; jjj++) {
  207. int j = jj * n3 + jjj;
  208. for (i = 0; i < n1; i++)
  209. twisted2[jjj*n1+i] = plan->fft1[i*n2+j];
  210. }
  211. }
  212. #ifdef STARPU_HAVE_FFTW
  213. /* fft2:
  214. *
  215. * Perform n3 = n2/DIV_1D ffts of size n1 */
  216. static void
  217. STARPUFFT(fft2_1d_kernel_cpu)(void *descr[], void *_args)
  218. {
  219. struct STARPUFFT(args) *args = _args;
  220. STARPUFFT(plan) plan = args->plan;
  221. /* int jj = args->jj; */
  222. int workerid = starpu_worker_get_id();
  223. task_per_worker[workerid]++;
  224. STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  225. STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  226. /* printf("fft2 %d %g\n", jj, (double) cabs(twisted2[plan->totsize4-1])); */
  227. _FFTW(execute_dft)(plan->plans[workerid].plan2_cpu, twisted2, fft2);
  228. }
  229. #endif
  230. /* twist3:
  231. *
  232. * Spread the package of n2/DIV_1D chunks of size n1 into the output vector */
  233. static void
  234. STARPUFFT(twist3_1d_kernel_cpu)(void *descr[], void *_args)
  235. {
  236. struct STARPUFFT(args) *args = _args;
  237. STARPUFFT(plan) plan = args->plan;
  238. int jj = args->jj; /* between 0 and DIV_1D */
  239. int jjj; /* beetween 0 and n3 */
  240. int i;
  241. int n1 = plan->n1[0];
  242. int n2 = plan->n2[0];
  243. int n3 = n2/DIV_1D;
  244. const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  245. /* printf("twist3 %d %g\n", jj, (double) cabs(fft2[0])); */
  246. for (jjj = 0; jjj < n3; jjj++) {
  247. int j = jj * n3 + jjj;
  248. for (i = 0; i < n1; i++)
  249. plan->out[i*n2+j] = fft2[jjj*n1+i];
  250. }
  251. }
/* Performance models for the 5 kinds of tasks.  All history-based: StarPU
 * calibrates them from previous executions, keyed by the TYPE-prefixed
 * symbol (so the float and double variants are calibrated separately). */
static struct starpu_perfmodel STARPUFFT(twist1_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist1_1d"
};

static struct starpu_perfmodel STARPUFFT(fft1_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft1_1d"
};

static struct starpu_perfmodel STARPUFFT(twist2_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist2_1d"
};

static struct starpu_perfmodel STARPUFFT(fft2_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft2_1d"
};

static struct starpu_perfmodel STARPUFFT(twist3_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist3_1d"
};
/* codelet pointers for the 5 kinds of tasks.  The .where masks and the
 * function tables are assembled with #ifdefs so each codelet only claims
 * the architectures whose implementation is actually compiled in. */

/* twist1: CPU always; CUDA when available. */
static struct starpu_codelet STARPUFFT(twist1_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
		STARPU_CPU,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(twist1_1d_kernel_gpu), NULL},
#endif
	.cpu_funcs = {STARPUFFT(twist1_1d_kernel_cpu), NULL},
	CAN_EXECUTE
	.model = &STARPUFFT(twist1_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W}
};

/* fft1: CUDA and/or FFTW-backed CPU; `0` terminates the OR chain when
 * neither is compiled in. */
static struct starpu_codelet STARPUFFT(fft1_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft1_1d_kernel_gpu), NULL},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft1_1d_kernel_cpu), NULL},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft1_1d_model),
	/* input chunk, output chunk, roots */
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_W, STARPU_R}
};

/* twist2: CPU-only (reads plan->fft1 directly). */
static struct starpu_codelet STARPUFFT(twist2_1d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist2_1d_kernel_cpu), NULL},
	CAN_EXECUTE
	.model = &STARPUFFT(twist2_1d_model),
	.nbuffers = 1,
	.modes = {STARPU_W}
};

/* fft2: CUDA and/or FFTW-backed CPU, like fft1 but batched. */
static struct starpu_codelet STARPUFFT(fft2_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft2_1d_kernel_gpu), NULL},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft2_1d_kernel_cpu), NULL},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft2_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W}
};

/* twist3: CPU-only (writes plan->out directly). */
static struct starpu_codelet STARPUFFT(twist3_1d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist3_1d_kernel_cpu), NULL},
	CAN_EXECUTE
	.model = &STARPUFFT(twist3_1d_model),
	.nbuffers = 1,
	.modes = {STARPU_R}
};
  345. /*
  346. *
  347. * Sequential version
  348. *
  349. */
  350. #ifdef __STARPU_USE_CUDA
  351. /* Perform one fft of size n */
  352. static void
  353. STARPUFFT(fft_1d_plan_gpu)(void *args)
  354. {
  355. STARPUFFT(plan) plan = args;
  356. cufftResult cures;
  357. int n = plan->n[0];
  358. int workerid = starpu_worker_get_id();
  359. cures = cufftPlan1d(&plan->plans[workerid].plan_cuda, n, _CUFFT_C2C, 1);
  360. if (cures != CUFFT_SUCCESS)
  361. STARPU_CUFFT_REPORT_ERROR(cures);
  362. cufftSetStream(plan->plans[workerid].plan_cuda, starpu_cuda_get_local_stream());
  363. if (cures != CUFFT_SUCCESS)
  364. STARPU_CUFFT_REPORT_ERROR(cures);
  365. }
/* Sequential version (CUDA): perform one fft of the full size n
 * (buffer 0 -> buffer 1). */
static void
STARPUFFT(fft_1d_kernel_gpu)(void *descr[], void *args)
{
	STARPUFFT(plan) plan = args;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id();
	/* per-worker task counter, used for statistics */
	task_per_worker[workerid]++;
	cures = _cufftExecC2C(plan->plans[workerid].plan_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
  380. #endif
  381. #ifdef STARPU_HAVE_FFTW
  382. /* Perform one fft of size n */
  383. static void
  384. STARPUFFT(fft_1d_kernel_cpu)(void *descr[], void *_args)
  385. {
  386. STARPUFFT(plan) plan = _args;
  387. int workerid = starpu_worker_get_id();
  388. task_per_worker[workerid]++;
  389. STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  390. STARPUFFT(complex) * restrict out = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  391. _FFTW(execute_dft)(plan->plans[workerid].plan_cpu, in, out);
  392. }
  393. #endif
/* Performance model and codelet for the sequential (single-task) version. */
static struct starpu_perfmodel STARPUFFT(fft_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft_1d"
};

/* fft: CUDA and/or FFTW-backed CPU; `0` terminates the OR chain when
 * neither is compiled in. */
static struct starpu_codelet STARPUFFT(fft_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft_1d_kernel_gpu), NULL},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft_1d_kernel_cpu), NULL},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W}
};
  418. /* Planning:
  419. *
  420. * - For each CPU worker, we need to plan the two fftw stages.
  421. * - For GPU workers, we need to do the planning in the CUDA context, so we do
  422. * this lazily through the initialised1 and initialised2 flags ; TODO: use
  423. * starpu_execute_on_each_worker instead (done in the omp branch).
  424. * - We allocate all the temporary buffers and register them to starpu.
  425. * - We create all the tasks, but do not submit them yet. It will be possible
  426. * to reuse them at will to perform several ffts with the same planning.
  427. */
/* Build a 1D C2C plan of size n with the given transform sign.
 *
 * In the PARALLEL case this allocates all the temporary buffers, registers
 * them with StarPU, and creates (but does not submit) the whole task graph:
 * n1 (twist1, fft1) pairs, a join task, DIV_1D (twist2, fft2, twist3)
 * triplets, and an end task, all chained through tags.  The tasks can then
 * be resubmitted at will to perform several ffts with the same planning.
 *
 * flags must be 0 (TODO: flags? Automatically set FFTW_MEASURE on
 * calibration?). */
STARPUFFT(plan)
STARPUFFT(plan_dft_1d)(int n, int sign, unsigned flags)
{
	int workerid;
	int n1 = DIV_1D;
	int n2 = n / n1;
	int n3;
	int z;
	struct starpu_task *task;

	if (PARALLEL) {
#ifdef __STARPU_USE_CUDA
		/* cufft 1D limited to 8M elements: shrink n2 (and grow n1)
		 * until each first-round fft fits. */
		while (n2 > 8 << 20) {
			n1 *= 2;
			n2 /= 2;
		}
#endif
		/* n must factor exactly as n1*n2 */
		STARPU_ASSERT(n == n1*n2);
		/* the chunk index must fit in the tag's i field */
		STARPU_ASSERT(n1 < (1ULL << I_BITS));

		/* distribute the n2 second ffts into DIV_1D packages */
		n3 = n2 / DIV_1D;
		STARPU_ASSERT(n2 == n3*DIV_1D);
	}

	/* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
	STARPU_ASSERT(flags == 0);

	/* NOTE(review): malloc results below are not checked; an OOM here
	 * would crash rather than fail cleanly. */
	STARPUFFT(plan) plan = malloc(sizeof(*plan));
	memset(plan, 0, sizeof(*plan));

	if (PARALLEL) {
		/* globally unique plan number, used in the tag space */
		plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;

		/* The plan number has a limited size in the tag */
		STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));
	}

	/* Just one dimension */
	plan->dim = 1;
	plan->n = malloc(plan->dim * sizeof(*plan->n));
	plan->n[0] = n;

	if (PARALLEL) {
		check_dims(plan);

		/* n1/n2 are only allocated in the PARALLEL case */
		plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
		plan->n1[0] = n1;
		plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
		plan->n2[0] = n2;
	}

	/* Note: this is for coherency with the 2D case */
	plan->totsize = n;

	if (PARALLEL) {
		plan->totsize1 = n1;		/* number of first-round chunks */
		plan->totsize2 = n2;		/* size of each first-round chunk */
		plan->totsize3 = DIV_1D;	/* number of second-round batches */
		plan->totsize4 = plan->totsize / plan->totsize3; /* elements per batch */
	}
	plan->type = C2C;
	plan->sign = sign;

	if (PARALLEL) {
		/* Compute the w^k just once. */
		compute_roots(plan);
	}

	/* Initialize per-worker working set */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			if (PARALLEL) {
				/* first fft plan: one fft of size n2.
				 * FFTW imposes that buffer pointers are known at
				 * planning time. */
				plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_1d)(n2, NULL, (void*) 1, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan1_cpu);

				/* second fft plan: n3 ffts of size n1 */
				plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
						plan->n1, n3,
						NULL, NULL, 1, plan->totsize1,
						(void*) 1, NULL, 1, plan->totsize1,
						sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
			} else {
				/* fft plan: one fft of size n. */
				plan->plans[workerid].plan_cpu = _FFTW(plan_dft_1d)(n, NULL, (void*) 1, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan_cpu);
			}
#else
/* #warning libstarpufft can not work correctly if libfftw3 is not installed */
#endif
			break;
		case STARPU_CUDA_WORKER:
			/* CUDA planning is done below on the workers themselves */
			break;
		default:
			/* Do not care, we won't be executing anything there. */
			break;
		}
	}
#ifdef __STARPU_USE_CUDA
	if (PARALLEL) {
		starpu_execute_on_each_worker(STARPUFFT(fft1_1d_plan_gpu), plan, STARPU_CUDA);
		starpu_execute_on_each_worker(STARPUFFT(fft2_1d_plan_gpu), plan, STARPU_CUDA);
	} else {
		starpu_execute_on_each_worker(STARPUFFT(fft_1d_plan_gpu), plan, STARPU_CUDA);
	}
#endif

	if (PARALLEL) {
		/* Allocate buffers. */
		plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
		memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));
		plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
		memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));
		plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
		memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));
		plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
		memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));

		/* Allocate handle arrays */
		plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
		plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
		plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
		plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));

		/* Allocate task arrays */
		plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
		plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
		plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
		plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
		plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));

		/* Allocate codelet argument arrays */
		plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
		plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));

		/* Create first-round tasks: DIV_1D tasks of type twist1 and fft1 */
		for (z = 0; z < plan->totsize1; z++) {
			int i = z;
#define STEP_TAG(step)	STEP_TAG_1D(plan, step, i)
			/* TODO: get rid of tags */

			plan->fft1_args[z].plan = plan;
			plan->fft1_args[z].i = i;

			/* Register the twisted1 buffer of size n2. */
			starpu_vector_data_register(&plan->twisted1_handle[z], 0, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));
			/* Register the fft1 buffer of size n2. */
			starpu_vector_data_register(&plan->fft1_handle[z], 0, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));

			/* We'll need the result of fft1 on the CPU for the second
			 * twist anyway, so tell starpu to not keep the fft1 buffer in
			 * the GPU. */
			starpu_data_set_wt_mask(plan->fft1_handle[z], 1<<0);

			/* Create twist1 task */
			plan->twist1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist1_1d_codelet);
			/* task->handles[0] = to be filled at execution to point
			   to the application input. */
			task->handles[1] = plan->twisted1_handle[z];
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(TWIST1);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that fft1 depends on twisted1 */
			starpu_tag_declare_deps(STEP_TAG(FFT1),
					1, STEP_TAG(TWIST1));

			/* Create FFT1 task */
			plan->fft1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft1_1d_codelet);
			task->handles[0] = plan->twisted1_handle[z];
			task->handles[1] = plan->fft1_handle[z];
			task->handles[2] = plan->roots_handle[0];
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(FFT1);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that the join task will depend on the fft1 task. */
			starpu_tag_declare_deps(STEP_TAG_1D(plan, JOIN, 0),
					1, STEP_TAG(FFT1));
#undef STEP_TAG
		}

		/* Create the join task, only serving as a dependency point between
		 * fft1 and twist2 tasks */
		plan->join_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_1D(plan, JOIN, 0);
		task->use_tag = 1;
		task->destroy = 0;

		/* Create second-round tasks: DIV_1D batches of n2/DIV_1D twist2, fft2,
		 * and twist3 */
		for (z = 0; z < plan->totsize3; z++) {
			int jj = z;
#define STEP_TAG(step)	STEP_TAG_1D(plan, step, jj)

			plan->fft2_args[z].plan = plan;
			plan->fft2_args[z].jj = jj;

			/* Register n3 twisted2 buffers of size n1 */
			starpu_vector_data_register(&plan->twisted2_handle[z], 0, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
			starpu_vector_data_register(&plan->fft2_handle[z], 0, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));

			/* We'll need the result of fft2 on the CPU for the third
			 * twist anyway, so tell starpu to not keep the fft2 buffer in
			 * the GPU. */
			starpu_data_set_wt_mask(plan->fft2_handle[z], 1<<0);

			/* Tell that twisted2 depends on the join task */
			starpu_tag_declare_deps(STEP_TAG(TWIST2),
					1, STEP_TAG_1D(plan, JOIN, 0));

			/* Create twist2 task */
			plan->twist2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist2_1d_codelet);
			task->handles[0] = plan->twisted2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST2);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that fft2 depends on twisted2 */
			starpu_tag_declare_deps(STEP_TAG(FFT2),
					1, STEP_TAG(TWIST2));

			/* Create FFT2 task */
			plan->fft2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft2_1d_codelet);
			task->handles[0] = plan->twisted2_handle[z];
			task->handles[1] = plan->fft2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(FFT2);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that twist3 depends on fft2 */
			starpu_tag_declare_deps(STEP_TAG(TWIST3),
					1, STEP_TAG(FFT2));

			/* Create twist3 tasks */
			/* These run only on CPUs and thus write directly into the
			 * application output buffer. */
			plan->twist3_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist3_1d_codelet);
			task->handles[0] = plan->fft2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST3);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that to be completely finished we need to have finished
			 * this twisted3 */
			starpu_tag_declare_deps(STEP_TAG_1D(plan, END, 0),
					1, STEP_TAG(TWIST3));
#undef STEP_TAG
		}

		/* Create end task, only serving as a join point. */
		plan->end_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_1D(plan, END, 0);
		task->use_tag = 1;
		task->destroy = 0;
	}

	return plan;
}
/* Actually submit all the tasks.
 *
 * Returns the task whose completion marks the end of the fft (the end task
 * in the PARALLEL case, the single fft task otherwise), or NULL if no
 * worker can execute the tasks (-ENODEV). */
static struct starpu_task *
STARPUFFT(start1dC2C)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	STARPU_ASSERT(plan->type == C2C);
	int z;
	int ret;

	if (PARALLEL) {
		/* First round: twist1 + fft1 for each of the n1 chunks. */
		for (z=0; z < plan->totsize1; z++) {
			ret = starpu_task_submit(plan->twist1_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->fft1_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		/* Synchronization point between the two rounds. */
		ret = starpu_task_submit(plan->join_task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		/* Second round: twist2 + fft2 + twist3 for each of the
		 * DIV_1D batches. */
		for (z=0; z < plan->totsize3; z++) {
			ret = starpu_task_submit(plan->twist2_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->fft2_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->twist3_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_task_submit(plan->end_task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		return plan->end_task;
	} else /* !PARALLEL */ {
		struct starpu_task *task;

		/* Create FFT task */
		task = starpu_task_create();
		task->detach = 0;
		task->cl = &STARPUFFT(fft_1d_codelet);
		task->handles[0] = in;
		task->handles[1] = out;
		task->cl_arg = plan;

		ret = starpu_task_submit(task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		return task;
	}
}
  715. /* Free all the tags. The generic code handles freeing the buffers. */
  716. static void
  717. STARPUFFT(free_1d_tags)(STARPUFFT(plan) plan)
  718. {
  719. unsigned i;
  720. int n1 = plan->n1[0];
  721. if (!PARALLEL)
  722. return;
  723. for (i = 0; i < n1; i++) {
  724. starpu_tag_remove(STEP_TAG_1D(plan, TWIST1, i));
  725. starpu_tag_remove(STEP_TAG_1D(plan, FFT1, i));
  726. }
  727. starpu_tag_remove(STEP_TAG_1D(plan, JOIN, 0));
  728. for (i = 0; i < DIV_1D; i++) {
  729. starpu_tag_remove(STEP_TAG_1D(plan, TWIST2, i));
  730. starpu_tag_remove(STEP_TAG_1D(plan, FFT2, i));
  731. starpu_tag_remove(STEP_TAG_1D(plan, TWIST3, i));
  732. }
  733. starpu_tag_remove(STEP_TAG_1D(plan, END, 0));
  734. }