starpufftx1d.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012-2013 Inria
 * Copyright (C) 2010-2017 CNRS
 * Copyright (C) 2009-2014 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 *
 * Dumb parallel version
 *
 */

#define DIV_1D 64
/*
 * Overall strategy for an fft of size n:
 * - perform n1 ffts of size n2
 * - twiddle
 * - perform n2 ffts of size n1
 *
 * - n1 defaults to DIV_1D, thus n2 defaults to n / DIV_1D.
 *
 * Precise tasks:
 *
 * - twist1: twist the whole n-element input (called "in") into n1 chunks of
 *           size n2, by using n1 tasks taking the whole n-element input as a
 *           R parameter and one n2 output as a W parameter. The result is
 *           called twisted1.
 * - fft1:   perform n1 ffts of size n2, by using n1 tasks doing one fft each.
 *           Also twiddle the result to prepare for the fft2. The result is
 *           called fft1.
 * - join:   depends on all the fft1s, to gather the n1 results of size n2 in
 *           the fft1 vector.
 * - twist2: twist the fft1 vector into n2 chunks of size n1, called twisted2.
 *           Since n2 is typically very large, this step is divided into DIV_1D
 *           tasks, each of them performing n2/DIV_1D of the twists.
 * - fft2:   perform n2 ffts of size n1. This is divided into DIV_1D tasks of
 *           n2/DIV_1D ffts, to be performed in batches. The result is called
 *           fft2.
 * - twist3: twist back the result of the fft2s above into the output buffer.
 *           Only implemented on CPUs for simplicity of the gathering.
 *
 * The tag space thus uses 3 dimensions:
 * - the number of the plan.
 * - the step (TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END)
 * - an index i between 0 and DIV_1D-1.
 *
 * A sequential sketch of the same decomposition is given below for reference.
 */
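/*
 * For reference, a minimal single-threaded sketch of the same decomposition
 * (an illustration under the conventions above, not part of the library:
 * naive_dft and fft_1d_by_steps are hypothetical helpers, and the real code
 * uses FFTW/cuFFT batches instead of an O(m^2) DFT):
 */
#if 0
#include <complex.h>
#include <math.h>
#include <stdlib.h>

/* naive O(m^2) DFT of m contiguous elements; sign = -1 forward, 1 inverse */
static void naive_dft(complex double *x, int m, int sign)
{
	complex double *y = malloc(m * sizeof(*y));
	int k, j;
	for (k = 0; k < m; k++) {
		y[k] = 0;
		for (j = 0; j < m; j++)
			y[k] += x[j] * cexp(sign * 2 * M_PI * I * k * j / m);
	}
	for (k = 0; k < m; k++)
		x[k] = y[k];
	free(y);
}

/* full pipeline, sequentially: twist1, fft1 + twiddle, twist2, fft2, twist3 */
static void fft_1d_by_steps(const complex double *in, complex double *out,
			    int n1, int n2, int sign)
{
	int n = n1 * n2, i, j;
	complex double *fft1 = malloc(n * sizeof(*fft1));
	complex double *buf = malloc(n1 * sizeof(*buf));
	for (i = 0; i < n1; i++) {
		for (j = 0; j < n2; j++)
			fft1[i*n2 + j] = in[i + j*n1];	/* twist1 */
		naive_dft(&fft1[i*n2], n2, sign);	/* fft1 */
		for (j = 0; j < n2; j++)		/* twiddle by w^(ij) */
			fft1[i*n2 + j] *= cexp(sign * 2 * M_PI * I * i * j / n);
	}
	for (j = 0; j < n2; j++) {
		for (i = 0; i < n1; i++)
			buf[i] = fft1[i*n2 + j];	/* twist2 */
		naive_dft(buf, n1, sign);		/* fft2 */
		for (i = 0; i < n1; i++)
			out[i*n2 + j] = buf[i];		/* twist3 */
	}
	free(buf);
	free(fft1);
}
#endif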
#define STEP_TAG_1D(plan, step, i) _STEP_TAG(plan, step, i)
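/* _STEP_TAG is provided by the generic starpufftx code. The idea (the exact
 * layout is an assumption here; see its actual definition) is to pack the
 * three tag dimensions into a single starpu_tag_t, along the lines of
 *   ((starpu_tag_t) plan->number << (STEP_BITS + I_BITS))
 *     | ((starpu_tag_t) step << I_BITS) | (starpu_tag_t) i
 * which is why plan->number and n1 are asserted below to fit within
 * NUMBER_BITS and I_BITS bits respectively. */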
#ifdef __STARPU_USE_CUDA
/* twist1:
 *
 * Twist the full input vector (first parameter) into one chunk of size n2
 * (second parameter) */
static void
STARPUFFT(twist1_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);

	STARPUFFT(cuda_twist1_1d_host)(in, twisted1, i, n1, n2);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
/* fft1:
 *
 * Perform one fft of size n2 */
static void
STARPUFFT(fft1_1d_plan_gpu)(void *args)
{
	STARPUFFT(plan) plan = args;
	int n2 = plan->n2[0];
	int workerid = starpu_worker_get_id_check();
	cufftResult cures;

	cures = cufftPlan1d(&plan->plans[workerid].plan1_cuda, n2, _CUFFT_C2C, 1);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	cures = cufftSetStream(plan->plans[workerid].plan1_cuda, starpu_cuda_get_local_stream());
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
}
static void
STARPUFFT(fft1_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int n2 = plan->n2[0];
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	const _cufftComplex * restrict roots = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[2]);
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	STARPUFFT(cuda_twiddle_1d_host)(out, roots, n2, i);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
/* fft2:
 *
 * Perform n3 = n2/DIV_1D ffts of size n1 */
static void
STARPUFFT(fft2_1d_plan_gpu)(void *args)
{
	STARPUFFT(plan) plan = args;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int n3 = n2/DIV_1D;
	cufftResult cures;
	int workerid = starpu_worker_get_id_check();

	cures = cufftPlan1d(&plan->plans[workerid].plan2_cuda, n1, _CUFFT_C2C, n3);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	cures = cufftSetStream(plan->plans[workerid].plan2_cuda, starpu_cuda_get_local_stream());
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
}
static void
STARPUFFT(fft2_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	/* NOTE using batch support */
	cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif
/* twist1:
 *
 * Twist the full input vector (first parameter) into one chunk of size n2
 * (second parameter) */
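/* For instance, with n = 8, n1 = 4 and n2 = 2, the task for chunk i = 1
 * gathers in[1] and in[5] (stride n1) into its 2-element twisted1 chunk. */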
static void
STARPUFFT(twist1_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* printf("twist1 %d %g\n", i, (double) cabs(plan->in[i])); */

	for (j = 0; j < n2; j++)
		twisted1[j] = in[i+j*n1];
}
#ifdef STARPU_HAVE_FFTW
/* fft1:
 *
 * Perform one fft of size n2 */
static void
STARPUFFT(fft1_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int i = args->i;
	int j;
	int n2 = plan->n2[0];
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict fft1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* printf("fft1 %d %g\n", i, (double) cabs(twisted1[0])); */

	_FFTW(execute_dft)(plan->plans[workerid].plan1_cpu, twisted1, fft1);

	/* twiddle fft1 buffer */
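	/* plan->roots[0] was filled by compute_roots(); entry k is assumed to
	 * hold the k-th n-th root of unity for this sign, i.e.
	 * cexp(plan->sign * 2 * M_PI * I * k / n), so element j of chunk i is
	 * multiplied by w^(i*j), as the four-step algorithm requires. */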
	for (j = 0; j < n2; j++)
		fft1[j] = fft1[j] * plan->roots[0][i*j];
}
#endif
/* twist2:
 *
 * Twist the full vector (results of the fft1s) into one package of n2/DIV_1D
 * chunks of size n1 */
static void
STARPUFFT(twist2_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int jj = args->jj;	/* between 0 and DIV_1D */
	int jjj;		/* between 0 and n3 */
	int i;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int n3 = n2/DIV_1D;
	STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);

	/* printf("twist2 %d %g\n", jj, (double) cabs(plan->fft1[jj])); */

	for (jjj = 0; jjj < n3; jjj++) {
		int j = jj * n3 + jjj;
		for (i = 0; i < n1; i++)
			twisted2[jjj*n1+i] = plan->fft1[i*n2+j];
	}
}
#ifdef STARPU_HAVE_FFTW
/* fft2:
 *
 * Perform n3 = n2/DIV_1D ffts of size n1 */
static void
STARPUFFT(fft2_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	/* int jj = args->jj; */
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	/* printf("fft2 %d %g\n", jj, (double) cabs(twisted2[plan->totsize4-1])); */

	_FFTW(execute_dft)(plan->plans[workerid].plan2_cpu, twisted2, fft2);
}
#endif
/* twist3:
 *
 * Spread the package of n2/DIV_1D chunks of size n1 into the output vector */
static void
STARPUFFT(twist3_1d_kernel_cpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	int jj = args->jj;	/* between 0 and DIV_1D */
	int jjj;		/* between 0 and n3 */
	int i;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	int n3 = n2/DIV_1D;
	const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);

	/* printf("twist3 %d %g\n", jj, (double) cabs(fft2[0])); */

	for (jjj = 0; jjj < n3; jjj++) {
		int j = jj * n3 + jjj;
		for (i = 0; i < n1; i++)
			plan->out[i*n2+j] = fft2[jjj*n1+i];
	}
}
/* Performance models for the 5 kinds of tasks */
static struct starpu_perfmodel STARPUFFT(twist1_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist1_1d"
};

static struct starpu_perfmodel STARPUFFT(fft1_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft1_1d"
};

static struct starpu_perfmodel STARPUFFT(twist2_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist2_1d"
};

static struct starpu_perfmodel STARPUFFT(fft2_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft2_1d"
};

static struct starpu_perfmodel STARPUFFT(twist3_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist3_1d"
};
/* codelet pointers for the 5 kinds of tasks */
static struct starpu_codelet STARPUFFT(twist1_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
		STARPU_CPU,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(twist1_1d_kernel_gpu)},
#endif
	.cpu_funcs = {STARPUFFT(twist1_1d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist1_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "twist1_1d_codelet"
};

static struct starpu_codelet STARPUFFT(fft1_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft1_1d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft1_1d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft1_1d_model),
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_W, STARPU_R},
	.name = "fft1_1d_codelet"
};

static struct starpu_codelet STARPUFFT(twist2_1d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist2_1d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist2_1d_model),
	.nbuffers = 1,
	.modes = {STARPU_W},
	.name = "twist2_1d_codelet"
};

static struct starpu_codelet STARPUFFT(fft2_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft2_1d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft2_1d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft2_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "fft2_1d_codelet"
};

static struct starpu_codelet STARPUFFT(twist3_1d_codelet) = {
	.where = STARPU_CPU,
	.cpu_funcs = {STARPUFFT(twist3_1d_kernel_cpu)},
	CAN_EXECUTE
	.model = &STARPUFFT(twist3_1d_model),
	.nbuffers = 1,
	.modes = {STARPU_R},
	.name = "twist3_1d_codelet"
};
/*
 *
 * Sequential version
 *
 */
#ifdef __STARPU_USE_CUDA
/* Perform one fft of size n */
static void
STARPUFFT(fft_1d_plan_gpu)(void *args)
{
	STARPUFFT(plan) plan = args;
	cufftResult cures;
	int n = plan->n[0];
	int workerid = starpu_worker_get_id_check();

	cures = cufftPlan1d(&plan->plans[workerid].plan_cuda, n, _CUFFT_C2C, 1);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	cures = cufftSetStream(plan->plans[workerid].plan_cuda, starpu_cuda_get_local_stream());
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
}
static void
STARPUFFT(fft_1d_kernel_gpu)(void *descr[], void *args)
{
	STARPUFFT(plan) plan = args;
	cufftResult cures;
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
	_cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	cures = _cufftExecC2C(plan->plans[workerid].plan_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
	if (cures != CUFFT_SUCCESS)
		STARPU_CUFFT_REPORT_ERROR(cures);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif
#ifdef STARPU_HAVE_FFTW
/* Perform one fft of size n */
static void
STARPUFFT(fft_1d_kernel_cpu)(void *descr[], void *_args)
{
	STARPUFFT(plan) plan = _args;
	int workerid = starpu_worker_get_id_check();

	task_per_worker[workerid]++;

	STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
	STARPUFFT(complex) * restrict out = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);

	_FFTW(execute_dft)(plan->plans[workerid].plan_cpu, in, out);
}
#endif
static struct starpu_perfmodel STARPUFFT(fft_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft_1d"
};

static struct starpu_codelet STARPUFFT(fft_1d_codelet) = {
	.where =
#ifdef __STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef __STARPU_USE_CUDA
	.cuda_funcs = {STARPUFFT(fft_1d_kernel_gpu)},
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_funcs = {STARPUFFT(fft_1d_kernel_cpu)},
#endif
	CAN_EXECUTE
	.model = &STARPUFFT(fft_1d_model),
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_W},
	.name = "fft_1d_codelet"
};
/* Planning:
 *
 * - For each CPU worker, we need to plan the two fftw stages.
 * - For GPU workers, we need to do the planning in the CUDA context, so we do
 *   this lazily through the initialised1 and initialised2 flags; TODO: use
 *   starpu_execute_on_each_worker instead (done in the omp branch).
 * - We allocate all the temporary buffers and register them with starpu.
 * - We create all the tasks, but do not submit them yet. It will be possible
 *   to reuse them at will to perform several ffts with the same planning.
 */
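/*
 * For context, typical application-side use of such a plan through the
 * FFTW-like public interface (a sketch only; the exact prototypes and sign
 * constants are those declared in starpufft.h, and starpu_init() /
 * starpu_shutdown() are assumed to bracket this):
 *
 *	starpufft_complex *in = starpufft_malloc(n * sizeof(*in));
 *	starpufft_complex *out = starpufft_malloc(n * sizeof(*out));
 *	starpufft_plan p = starpufft_plan_dft_1d(n, -1, 0); // -1: forward; flags must be 0
 *	// ... fill in[0..n-1] ...
 *	starpufft_execute(p, in, out);
 *	starpufft_destroy_plan(p);
 *	starpufft_free(in);
 *	starpufft_free(out);
 */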
STARPUFFT(plan)
STARPUFFT(plan_dft_1d)(int n, int sign, unsigned flags)
{
	unsigned workerid;
	int n1 = DIV_1D;
	int n2 = n / n1;
	int n3;
	int z;
	struct starpu_task *task;

	if (PARALLEL) {
#ifdef __STARPU_USE_CUDA
		/* cufft 1D limited to 8M elements */
		while (n2 > (8 << 20)) {
			n1 *= 2;
			n2 /= 2;
		}
#endif
		STARPU_ASSERT(n == n1*n2);
		STARPU_ASSERT(n1 < (1ULL << I_BITS));

		/* distribute the n2 second ffts into DIV_1D packages */
		n3 = n2 / DIV_1D;
		STARPU_ASSERT(n2 == n3*DIV_1D);
	}
	/* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
	STARPU_ASSERT(flags == 0);

	STARPUFFT(plan) plan = malloc(sizeof(*plan));
	memset(plan, 0, sizeof(*plan));

	if (PARALLEL) {
		plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;

		/* The plan number has a limited size */
		STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));
	}

	/* Just one dimension */
	plan->dim = 1;
	plan->n = malloc(plan->dim * sizeof(*plan->n));
	plan->n[0] = n;

	if (PARALLEL) {
		check_dims(plan);

		plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
		plan->n1[0] = n1;
		plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
		plan->n2[0] = n2;
	}

	/* Note: this is for coherency with the 2D case */
	plan->totsize = n;

	if (PARALLEL) {
		plan->totsize1 = n1;
		plan->totsize2 = n2;
		plan->totsize3 = DIV_1D;
		plan->totsize4 = plan->totsize / plan->totsize3;
	}
	plan->type = C2C;
	plan->sign = sign;

	if (PARALLEL) {
		/* Compute the w^k just once. */
		compute_roots(plan);
	}
	/* Initialize per-worker working set */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			if (PARALLEL) {
				/* first fft plan: one fft of size n2.
				 * FFTW imposes that buffer pointers are known at
				 * planning time. */
				plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_1d)(n2, NULL, (void*) 1, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan1_cpu);

				/* second fft plan: n3 ffts of size n1 */
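				/* plan_many_dft's argument order is (rank, n,
				 * howmany, in, inembed, istride, idist, out,
				 * onembed, ostride, odist, sign, flags): here
				 * n3 transforms of size n1, each contiguous
				 * (stride 1) and packed back to back
				 * (dist = totsize1 = n1). */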
				plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
						plan->n1, n3,
						NULL, NULL, 1, plan->totsize1,
						(void*) 1, NULL, 1, plan->totsize1,
						sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
			} else {
				/* fft plan: one fft of size n. */
				plan->plans[workerid].plan_cpu = _FFTW(plan_dft_1d)(n, NULL, (void*) 1, sign, _FFTW_FLAGS);
				STARPU_ASSERT(plan->plans[workerid].plan_cpu);
			}
#else
/* #warning libstarpufft can not work correctly if libfftw3 is not installed */
#endif
			break;
		case STARPU_CUDA_WORKER:
			break;
		default:
			/* Do not care, we won't be executing anything there. */
			break;
		}
	}
#ifdef __STARPU_USE_CUDA
	if (PARALLEL) {
		starpu_execute_on_each_worker(STARPUFFT(fft1_1d_plan_gpu), plan, STARPU_CUDA);
		starpu_execute_on_each_worker(STARPUFFT(fft2_1d_plan_gpu), plan, STARPU_CUDA);
	} else {
		starpu_execute_on_each_worker(STARPUFFT(fft_1d_plan_gpu), plan, STARPU_CUDA);
	}
#endif
	if (PARALLEL) {
		/* Allocate buffers. */
		plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
		memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));

		plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
		memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));

		plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
		memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));

		plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
		memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));

		/* Allocate handle arrays */
		plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
		plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
		plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
		plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));

		/* Allocate task arrays */
		plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
		plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
		plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
		plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
		plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));

		/* Allocate codelet argument arrays */
		plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
		plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));

		/* Create first-round tasks: DIV_1D tasks of type twist1 and fft1 */
		for (z = 0; z < plan->totsize1; z++) {
			int i = z;
#define STEP_TAG(step)	STEP_TAG_1D(plan, step, i)

			/* TODO: get rid of tags */

			plan->fft1_args[z].plan = plan;
			plan->fft1_args[z].i = i;

			/* Register the twisted1 buffer of size n2. */
			starpu_vector_data_register(&plan->twisted1_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));

			/* Register the fft1 buffer of size n2. */
			starpu_vector_data_register(&plan->fft1_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));

			/* We'll need the result of fft1 on the CPU for the second
			 * twist anyway, so tell starpu not to keep the fft1 buffer in
			 * the GPU. */
			starpu_data_set_wt_mask(plan->fft1_handle[z], 1<<0);

			/* Create twist1 task */
			plan->twist1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist1_1d_codelet);
			/* task->handles[0] = to be filled at execution to point
			 * to the application input. */
			task->handles[1] = plan->twisted1_handle[z];
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(TWIST1);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that fft1 depends on twisted1 */
			starpu_tag_declare_deps(STEP_TAG(FFT1),
					1, STEP_TAG(TWIST1));

			/* Create FFT1 task */
			plan->fft1_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft1_1d_codelet);
			task->handles[0] = plan->twisted1_handle[z];
			task->handles[1] = plan->fft1_handle[z];
			task->handles[2] = plan->roots_handle[0];
			task->cl_arg = &plan->fft1_args[z];
			task->tag_id = STEP_TAG(FFT1);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that the join task will depend on the fft1 task. */
			starpu_tag_declare_deps(STEP_TAG_1D(plan, JOIN, 0),
					1, STEP_TAG(FFT1));
#undef STEP_TAG
		}
		/* Create the join task, only serving as a dependency point between
		 * fft1 and twist2 tasks */
		plan->join_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_1D(plan, JOIN, 0);
		task->use_tag = 1;
		task->destroy = 0;

		/* Create second-round tasks: DIV_1D batches of n2/DIV_1D twist2, fft2,
		 * and twist3 */
		for (z = 0; z < plan->totsize3; z++) {
			int jj = z;
#define STEP_TAG(step)	STEP_TAG_1D(plan, step, jj)

			plan->fft2_args[z].plan = plan;
			plan->fft2_args[z].jj = jj;

			/* Register n3 twisted2 buffers of size n1 */
			starpu_vector_data_register(&plan->twisted2_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
			starpu_vector_data_register(&plan->fft2_handle[z], STARPU_MAIN_RAM, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));

			/* We'll need the result of fft2 on the CPU for the third
			 * twist anyway, so tell starpu not to keep the fft2 buffer in
			 * the GPU. */
			starpu_data_set_wt_mask(plan->fft2_handle[z], 1<<0);

			/* Tell that twisted2 depends on the join task */
			starpu_tag_declare_deps(STEP_TAG(TWIST2),
					1, STEP_TAG_1D(plan, JOIN, 0));

			/* Create twist2 task */
			plan->twist2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist2_1d_codelet);
			task->handles[0] = plan->twisted2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST2);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that fft2 depends on twisted2 */
			starpu_tag_declare_deps(STEP_TAG(FFT2),
					1, STEP_TAG(TWIST2));

			/* Create FFT2 task */
			plan->fft2_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(fft2_1d_codelet);
			task->handles[0] = plan->twisted2_handle[z];
			task->handles[1] = plan->fft2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(FFT2);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that twist3 depends on fft2 */
			starpu_tag_declare_deps(STEP_TAG(TWIST3),
					1, STEP_TAG(FFT2));

			/* Create twist3 tasks */
			/* These run only on CPUs and thus write directly into the
			 * application output buffer. */
			plan->twist3_tasks[z] = task = starpu_task_create();
			task->cl = &STARPUFFT(twist3_1d_codelet);
			task->handles[0] = plan->fft2_handle[z];
			task->cl_arg = &plan->fft2_args[z];
			task->tag_id = STEP_TAG(TWIST3);
			task->use_tag = 1;
			task->destroy = 0;

			/* Tell that to be completely finished we need to have finished
			 * this twist3 */
			starpu_tag_declare_deps(STEP_TAG_1D(plan, END, 0),
					1, STEP_TAG(TWIST3));
#undef STEP_TAG
		}

		/* Create end task, only serving as a join point. */
		plan->end_task = task = starpu_task_create();
		task->cl = NULL;
		task->tag_id = STEP_TAG_1D(plan, END, 0);
		task->use_tag = 1;
		task->destroy = 0;
		task->detach = 0;
	}

	return plan;
}
/* Actually submit all the tasks. */
static struct starpu_task *
STARPUFFT(start1dC2C)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	STARPU_ASSERT(plan->type == C2C);
	int z;
	int ret;

	if (PARALLEL) {
		for (z = 0; z < plan->totsize1; z++) {
			ret = starpu_task_submit(plan->twist1_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->fft1_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_task_submit(plan->join_task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		for (z = 0; z < plan->totsize3; z++) {
			ret = starpu_task_submit(plan->twist2_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->fft2_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			ret = starpu_task_submit(plan->twist3_tasks[z]);
			if (ret == -ENODEV) return NULL;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_task_submit(plan->end_task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		return plan->end_task;
	} else /* !PARALLEL */ {
		struct starpu_task *task;

		/* Create FFT task */
		task = starpu_task_create();
		task->detach = 0;
		task->cl = &STARPUFFT(fft_1d_codelet);
		task->handles[0] = in;
		task->handles[1] = out;
		task->cl_arg = plan;

		ret = starpu_task_submit(task);
		if (ret == -ENODEV) return NULL;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

		return task;
	}
}
/* Free all the tags. The generic code handles freeing the buffers. */
static void
STARPUFFT(free_1d_tags)(STARPUFFT(plan) plan)
{
	int i;
	int n1 = plan->n1[0];

	if (!PARALLEL)
		return;

	for (i = 0; i < n1; i++) {
		starpu_tag_remove(STEP_TAG_1D(plan, TWIST1, i));
		starpu_tag_remove(STEP_TAG_1D(plan, FFT1, i));
	}
	starpu_tag_remove(STEP_TAG_1D(plan, JOIN, 0));
	for (i = 0; i < DIV_1D; i++) {
		starpu_tag_remove(STEP_TAG_1D(plan, TWIST2, i));
		starpu_tag_remove(STEP_TAG_1D(plan, FFT2, i));
		starpu_tag_remove(STEP_TAG_1D(plan, TWIST3, i));
	}
	starpu_tag_remove(STEP_TAG_1D(plan, END, 0));
}