/* starpufftx1d.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2011 Université de Bordeaux 1
  4. * Copyright (C) 2010 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #define PARALLEL
  18. #ifdef PARALLEL
  19. /* Dumb parallel version, disabled */
  20. #define DIV_1D 64
  21. /*
  22. * Overall strategy for an fft of size n:
  23. * - perform n1 ffts of size n2
  24. * - twiddle
  25. * - perform n2 ffts of size n1
  26. *
  27. * - n1 defaults to DIV_1D, thus n2 defaults to n / DIV_1D.
  28. *
  29. * Precise tasks:
  30. *
  31. * - twist1: twist the whole n-element input (called "in") into n1 chunks of
  32. * size n2, by using n1 tasks taking the whole n-element input as a
  33. * R parameter and one n2 output as a W parameter. The result is
  34. * called twisted1.
  35. * - fft1: perform n1 (n2) ffts, by using n1 tasks doing one fft each. Also
  36. * twiddle the result to prepare for the fft2. The result is called
  37. * fft1.
  38. * - join: depends on all the fft1s, to gather the n1 results of size n2 in
  39. * the fft1 vector.
  40. * - twist2: twist the fft1 vector into n2 chunks of size n1, called twisted2.
  41. * since n2 is typically very large, this step is divided in DIV_1D
  42. * tasks, each of them performing n2/DIV_1D of them
  43. * - fft2: perform n2 ffts of size n1. This is divided in DIV_1D tasks of
  44. * n2/DIV_1D ffts, to be performed in batches. The result is called
  45. * fft2.
  46. * - twist3: twist back the result of the fft2s above into the output buffer.
  47. * Only implemented on CPUs for simplicity of the gathering.
  48. *
  49. * The tag space thus uses 3 dimensions:
  50. * - the number of the plan.
  51. * - the step (TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END)
  52. * - an index i between 0 and DIV_1D-1.
  53. */
  54. #define STEP_TAG_1D(plan, step, i) _STEP_TAG(plan, step, i)
  55. #ifdef STARPU_USE_CUDA
  56. /* twist1:
  57. *
  58. * Twist the full input vector (first parameter) into one chunk of size n2
  59. * (second parameter) */
  60. static void
  61. STARPUFFT(twist1_1d_kernel_gpu)(void *descr[], void *_args)
  62. {
  63. struct STARPUFFT(args) *args = _args;
  64. STARPUFFT(plan) plan = args->plan;
  65. int i = args->i;
  66. int n1 = plan->n1[0];
  67. int n2 = plan->n2[0];
  68. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  69. _cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  70. STARPUFFT(cuda_twist1_1d_host)(in, twisted1, i, n1, n2);
  71. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  72. }
  73. /* fft1:
  74. *
  75. * Perform one fft of size n2 */
  76. static void
  77. STARPUFFT(fft1_1d_plan_gpu)(void *args)
  78. {
  79. STARPUFFT(plan) plan = args;
  80. cufftResult cures;
  81. int n2 = plan->n2[0];
  82. int workerid = starpu_worker_get_id();
  83. cures = cufftPlan1d(&plan->plans[workerid].plan1_cuda, n2, _CUFFT_C2C, 1);
  84. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  85. cufftSetStream(plan->plans[workerid].plan1_cuda, starpu_cuda_get_local_stream());
  86. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  87. }
  88. static void
  89. STARPUFFT(fft1_1d_kernel_gpu)(void *descr[], void *_args)
  90. {
  91. struct STARPUFFT(args) *args = _args;
  92. STARPUFFT(plan) plan = args->plan;
  93. int i = args->i;
  94. int n2 = plan->n2[0];
  95. cufftResult cures;
  96. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  97. _cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  98. const _cufftComplex * restrict roots = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[2]);
  99. int workerid = starpu_worker_get_id();
  100. task_per_worker[workerid]++;
  101. cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  102. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  103. STARPUFFT(cuda_twiddle_1d_host)(out, roots, n2, i);
  104. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  105. }
  106. /* fft2:
  107. *
  108. * Perform n3 = n2/DIV_1D ffts of size n1 */
  109. static void
  110. STARPUFFT(fft2_1d_plan_gpu)(void *args)
  111. {
  112. STARPUFFT(plan) plan = args;
  113. cufftResult cures;
  114. int n1 = plan->n1[0];
  115. int n2 = plan->n2[0];
  116. int n3 = n2/DIV_1D;
  117. int workerid = starpu_worker_get_id();
  118. cures = cufftPlan1d(&plan->plans[workerid].plan2_cuda, n1, _CUFFT_C2C, n3);
  119. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  120. cufftSetStream(plan->plans[workerid].plan2_cuda, starpu_cuda_get_local_stream());
  121. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  122. }
  123. static void
  124. STARPUFFT(fft2_1d_kernel_gpu)(void *descr[], void *_args)
  125. {
  126. struct STARPUFFT(args) *args = _args;
  127. STARPUFFT(plan) plan = args->plan;
  128. cufftResult cures;
  129. _cufftComplex * restrict in = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[0]);
  130. _cufftComplex * restrict out = (_cufftComplex *)STARPU_VECTOR_GET_PTR(descr[1]);
  131. int workerid = starpu_worker_get_id();
  132. task_per_worker[workerid]++;
  133. /* NOTE using batch support */
  134. cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  135. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  136. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  137. }
  138. #endif
  139. /* twist1:
  140. *
  141. * Twist the full input vector (first parameter) into one chunk of size n2
  142. * (second parameter) */
  143. static void
  144. STARPUFFT(twist1_1d_kernel_cpu)(void *descr[], void *_args)
  145. {
  146. struct STARPUFFT(args) *args = _args;
  147. STARPUFFT(plan) plan = args->plan;
  148. int i = args->i;
  149. int j;
  150. int n1 = plan->n1[0];
  151. int n2 = plan->n2[0];
  152. STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  153. STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  154. /* printf("twist1 %d %g\n", i, (double) cabs(plan->in[i])); */
  155. for (j = 0; j < n2; j++)
  156. twisted1[j] = in[i+j*n1];
  157. }
  158. #ifdef STARPU_HAVE_FFTW
  159. /* fft1:
  160. *
  161. * Perform one fft of size n2 */
  162. static void
  163. STARPUFFT(fft1_1d_kernel_cpu)(void *descr[], void *_args)
  164. {
  165. struct STARPUFFT(args) *args = _args;
  166. STARPUFFT(plan) plan = args->plan;
  167. int i = args->i;
  168. int j;
  169. int n2 = plan->n2[0];
  170. int workerid = starpu_worker_get_id();
  171. task_per_worker[workerid]++;
  172. const STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  173. STARPUFFT(complex) * restrict fft1 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  174. _fftw_complex * restrict worker_in1 = (STARPUFFT(complex) *)plan->plans[workerid].in1;
  175. _fftw_complex * restrict worker_out1 = (STARPUFFT(complex) *)plan->plans[workerid].out1;
  176. /* printf("fft1 %d %g\n", i, (double) cabs(twisted1[0])); */
  177. memcpy(worker_in1, twisted1, plan->totsize2 * sizeof(*worker_in1));
  178. _FFTW(execute)(plan->plans[workerid].plan1_cpu);
  179. /* twiddle while copying from fftw output buffer to fft1 buffer */
  180. for (j = 0; j < n2; j++)
  181. fft1[j] = worker_out1[j] * plan->roots[0][i*j];
  182. }
  183. #endif
  184. /* twist2:
  185. *
  186. * Twist the full vector (results of the fft1s) into one package of n2/DIV_1D
  187. * chunks of size n1 */
  188. static void
  189. STARPUFFT(twist2_1d_kernel_cpu)(void *descr[], void *_args)
  190. {
  191. struct STARPUFFT(args) *args = _args;
  192. STARPUFFT(plan) plan = args->plan;
  193. int jj = args->jj; /* between 0 and DIV_1D */
  194. int jjj; /* beetween 0 and n3 */
  195. int i;
  196. int n1 = plan->n1[0];
  197. int n2 = plan->n2[0];
  198. int n3 = n2/DIV_1D;
  199. STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  200. /* printf("twist2 %d %g\n", jj, (double) cabs(plan->fft1[jj])); */
  201. for (jjj = 0; jjj < n3; jjj++) {
  202. int j = jj * n3 + jjj;
  203. for (i = 0; i < n1; i++)
  204. twisted2[jjj*n1+i] = plan->fft1[i*n2+j];
  205. }
  206. }
  207. #ifdef STARPU_HAVE_FFTW
  208. /* fft2:
  209. *
  210. * Perform n3 = n2/DIV_1D ffts of size n1 */
  211. static void
  212. STARPUFFT(fft2_1d_kernel_cpu)(void *descr[], void *_args)
  213. {
  214. struct STARPUFFT(args) *args = _args;
  215. STARPUFFT(plan) plan = args->plan;
  216. /* int jj = args->jj; */
  217. int workerid = starpu_worker_get_id();
  218. task_per_worker[workerid]++;
  219. const STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  220. STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[1]);
  221. /* printf("fft2 %d %g\n", jj, (double) cabs(twisted2[plan->totsize4-1])); */
  222. _fftw_complex * restrict worker_in2 = (STARPUFFT(complex) *)plan->plans[workerid].in2;
  223. _fftw_complex * restrict worker_out2 = (STARPUFFT(complex) *)plan->plans[workerid].out2;
  224. memcpy(worker_in2, twisted2, plan->totsize4 * sizeof(*worker_in2));
  225. _FFTW(execute)(plan->plans[workerid].plan2_cpu);
  226. /* no twiddle */
  227. memcpy(fft2, worker_out2, plan->totsize4 * sizeof(*worker_out2));
  228. }
  229. #endif
  230. /* twist3:
  231. *
  232. * Spread the package of n2/DIV_1D chunks of size n1 into the output vector */
  233. static void
  234. STARPUFFT(twist3_1d_kernel_cpu)(void *descr[], void *_args)
  235. {
  236. struct STARPUFFT(args) *args = _args;
  237. STARPUFFT(plan) plan = args->plan;
  238. int jj = args->jj; /* between 0 and DIV_1D */
  239. int jjj; /* beetween 0 and n3 */
  240. int i;
  241. int n1 = plan->n1[0];
  242. int n2 = plan->n2[0];
  243. int n3 = n2/DIV_1D;
  244. const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_VECTOR_GET_PTR(descr[0]);
  245. /* printf("twist3 %d %g\n", jj, (double) cabs(fft2[0])); */
  246. for (jjj = 0; jjj < n3; jjj++) {
  247. int j = jj * n3 + jjj;
  248. for (i = 0; i < n1; i++)
  249. plan->out[i*n2+j] = fft2[jjj*n1+i];
  250. }
  251. }
  252. /* Performance models for the 5 kinds of tasks */
  253. static struct starpu_perfmodel STARPUFFT(twist1_1d_model) = {
  254. .type = STARPU_HISTORY_BASED,
  255. .symbol = TYPE"twist1_1d"
  256. };
  257. static struct starpu_perfmodel STARPUFFT(fft1_1d_model) = {
  258. .type = STARPU_HISTORY_BASED,
  259. .symbol = TYPE"fft1_1d"
  260. };
  261. static struct starpu_perfmodel STARPUFFT(twist2_1d_model) = {
  262. .type = STARPU_HISTORY_BASED,
  263. .symbol = TYPE"twist2_1d"
  264. };
  265. static struct starpu_perfmodel STARPUFFT(fft2_1d_model) = {
  266. .type = STARPU_HISTORY_BASED,
  267. .symbol = TYPE"fft2_1d"
  268. };
  269. static struct starpu_perfmodel STARPUFFT(twist3_1d_model) = {
  270. .type = STARPU_HISTORY_BASED,
  271. .symbol = TYPE"twist3_1d"
  272. };
  273. /* codelet pointers for the 5 kinds of tasks */
  274. static struct starpu_codelet STARPUFFT(twist1_1d_codelet) = {
  275. .where =
  276. #ifdef STARPU_USE_CUDA
  277. STARPU_CUDA|
  278. #endif
  279. STARPU_CPU,
  280. #ifdef STARPU_USE_CUDA
  281. .cuda_func = STARPUFFT(twist1_1d_kernel_gpu),
  282. #endif
  283. .cpu_func = STARPUFFT(twist1_1d_kernel_cpu),
  284. .model = &STARPUFFT(twist1_1d_model),
  285. .nbuffers = 2
  286. };
  287. static struct starpu_codelet STARPUFFT(fft1_1d_codelet) = {
  288. .where =
  289. #ifdef STARPU_USE_CUDA
  290. STARPU_CUDA|
  291. #endif
  292. #ifdef STARPU_HAVE_FFTW
  293. STARPU_CPU|
  294. #endif
  295. 0,
  296. #ifdef STARPU_USE_CUDA
  297. .cuda_func = STARPUFFT(fft1_1d_kernel_gpu),
  298. #endif
  299. #ifdef STARPU_HAVE_FFTW
  300. .cpu_func = STARPUFFT(fft1_1d_kernel_cpu),
  301. #endif
  302. .model = &STARPUFFT(fft1_1d_model),
  303. .nbuffers = 3
  304. };
  305. static struct starpu_codelet STARPUFFT(twist2_1d_codelet) = {
  306. .where = STARPU_CPU,
  307. .cpu_func = STARPUFFT(twist2_1d_kernel_cpu),
  308. .model = &STARPUFFT(twist2_1d_model),
  309. .nbuffers = 1
  310. };
  311. static struct starpu_codelet STARPUFFT(fft2_1d_codelet) = {
  312. .where =
  313. #ifdef STARPU_USE_CUDA
  314. STARPU_CUDA|
  315. #endif
  316. #ifdef STARPU_HAVE_FFTW
  317. STARPU_CPU|
  318. #endif
  319. 0,
  320. #ifdef STARPU_USE_CUDA
  321. .cuda_func = STARPUFFT(fft2_1d_kernel_gpu),
  322. #endif
  323. #ifdef STARPU_HAVE_FFTW
  324. .cpu_func = STARPUFFT(fft2_1d_kernel_cpu),
  325. #endif
  326. .model = &STARPUFFT(fft2_1d_model),
  327. .nbuffers = 2
  328. };
  329. static struct starpu_codelet STARPUFFT(twist3_1d_codelet) = {
  330. .where = STARPU_CPU,
  331. .cpu_func = STARPUFFT(twist3_1d_kernel_cpu),
  332. .model = &STARPUFFT(twist3_1d_model),
  333. .nbuffers = 1
  334. };
  335. #endif /* PARALLEL */
  336. /* Planning:
  337. *
  338. * - For each CPU worker, we need to plan the two fftw stages.
  339. * - For GPU workers, we need to do the planning in the CUDA context, so we do
  340. * this lazily through the initialised1 and initialised2 flags ; TODO: use
  341. * starpu_execute_on_each_worker instead (done in the omp branch).
  342. * - We allocate all the temporary buffers and register them to starpu.
  343. * - We create all the tasks, but do not submit them yet. It will be possible
  344. * to reuse them at will to perform several ffts with the same planning.
  345. */
  346. STARPUFFT(plan)
  347. STARPUFFT(plan_dft_1d)(int n, int sign, unsigned flags)
  348. {
  349. int workerid;
  350. int n1 = DIV_1D;
  351. int n2 = n / n1;
  352. int n3;
  353. int z;
  354. struct starpu_task *task;
  355. #ifdef STARPU_USE_CUDA
  356. /* cufft 1D limited to 8M elements */
  357. while (n2 > 8 << 20) {
  358. n1 *= 2;
  359. n2 /= 2;
  360. }
  361. #endif
  362. STARPU_ASSERT(n == n1*n2);
  363. STARPU_ASSERT(n1 < (1ULL << I_BITS));
  364. /* distribute the n2 second ffts into DIV_1D packages */
  365. n3 = n2 / DIV_1D;
  366. STARPU_ASSERT(n2 == n3*DIV_1D);
  367. /* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
  368. STARPU_ASSERT(flags == 0);
  369. STARPUFFT(plan) plan = malloc(sizeof(*plan));
  370. memset(plan, 0, sizeof(*plan));
  371. plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;
  372. /* The plan number has a limited size */
  373. STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));
  374. /* Just one dimension */
  375. plan->dim = 1;
  376. plan->n = malloc(plan->dim * sizeof(*plan->n));
  377. plan->n[0] = n;
  378. check_dims(plan);
  379. plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
  380. plan->n1[0] = n1;
  381. plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
  382. plan->n2[0] = n2;
  383. /* Note: this is for coherency with the 2D case */
  384. plan->totsize = n;
  385. plan->totsize1 = n1;
  386. plan->totsize2 = n2;
  387. plan->totsize3 = DIV_1D;
  388. plan->totsize4 = plan->totsize / plan->totsize3;
  389. plan->type = C2C;
  390. plan->sign = sign;
  391. /* Compute the w^k just once. */
  392. compute_roots(plan);
  393. /* Initialize per-worker working set */
  394. for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
  395. switch (starpu_worker_get_type(workerid)) {
  396. case STARPU_CPU_WORKER:
  397. #ifdef STARPU_HAVE_FFTW
  398. /* first fft plan: one fft of size n2.
  399. * FFTW imposes that buffer pointers are known at
  400. * planning time. */
  401. plan->plans[workerid].in1 = _FFTW(malloc)(plan->totsize2 * sizeof(_fftw_complex));
  402. memset(plan->plans[workerid].in1, 0, plan->totsize2 * sizeof(_fftw_complex));
  403. plan->plans[workerid].out1 = _FFTW(malloc)(plan->totsize2 * sizeof(_fftw_complex));
  404. memset(plan->plans[workerid].out1, 0, plan->totsize2 * sizeof(_fftw_complex));
  405. plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_1d)(n2, plan->plans[workerid].in1, plan->plans[workerid].out1, sign, _FFTW_FLAGS);
  406. STARPU_ASSERT(plan->plans[workerid].plan1_cpu);
  407. /* second fft plan: n3 ffts of size n1 */
  408. plan->plans[workerid].in2 = _FFTW(malloc)(plan->totsize4 * sizeof(_fftw_complex));
  409. memset(plan->plans[workerid].in2, 0, plan->totsize4 * sizeof(_fftw_complex));
  410. plan->plans[workerid].out2 = _FFTW(malloc)(plan->totsize4 * sizeof(_fftw_complex));
  411. memset(plan->plans[workerid].out2, 0, plan->totsize4 * sizeof(_fftw_complex));
  412. plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
  413. plan->n1, n3,
  414. /* input */ plan->plans[workerid].in2, NULL, 1, plan->totsize1,
  415. /* output */ plan->plans[workerid].out2, NULL, 1, plan->totsize1,
  416. sign, _FFTW_FLAGS);
  417. STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
  418. #else
  419. #warning libstarpufft can not work correctly if libfftw3 is not installed
  420. #endif
  421. break;
  422. case STARPU_CUDA_WORKER:
  423. break;
  424. default:
  425. STARPU_ABORT();
  426. break;
  427. }
  428. }
  429. #ifdef STARPU_USE_CUDA
  430. starpu_execute_on_each_worker(STARPUFFT(fft1_1d_plan_gpu), plan, STARPU_CUDA);
  431. starpu_execute_on_each_worker(STARPUFFT(fft2_1d_plan_gpu), plan, STARPU_CUDA);
  432. #endif
  433. /* Allocate buffers. */
  434. plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
  435. memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));
  436. plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
  437. memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));
  438. plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
  439. memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));
  440. plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
  441. memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));
  442. /* Allocate handle arrays */
  443. plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
  444. plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
  445. plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
  446. plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));
  447. /* Allocate task arrays */
  448. plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
  449. plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
  450. plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
  451. plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
  452. plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));
  453. /* Allocate codelet argument arrays */
  454. plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
  455. plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));
  456. /* Create first-round tasks: DIV_1D tasks of type twist1 and fft1 */
  457. for (z = 0; z < plan->totsize1; z++) {
  458. int i = z;
  459. #define STEP_TAG(step) STEP_TAG_1D(plan, step, i)
  460. plan->fft1_args[z].plan = plan;
  461. plan->fft1_args[z].i = i;
  462. /* Register the twisted1 buffer of size n2. */
  463. starpu_vector_data_register(&plan->twisted1_handle[z], 0, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));
  464. /* Register the fft1 buffer of size n2. */
  465. starpu_vector_data_register(&plan->fft1_handle[z], 0, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));
  466. /* We'll need the result of fft1 on the CPU for the second
  467. * twist anyway, so tell starpu to not keep the fft1 buffer in
  468. * the GPU. */
  469. starpu_data_set_wt_mask(plan->fft1_handle[z], 1<<0);
  470. /* Create twist1 task */
  471. plan->twist1_tasks[z] = task = starpu_task_create();
  472. task->cl = &STARPUFFT(twist1_1d_codelet);
  473. /* task->buffers[0].handle = to be filled at execution to point
  474. to the application input. */
  475. task->buffers[0].mode = STARPU_R;
  476. task->buffers[1].handle = plan->twisted1_handle[z];
  477. task->buffers[1].mode = STARPU_W;
  478. task->cl_arg = &plan->fft1_args[z];
  479. task->tag_id = STEP_TAG(TWIST1);
  480. task->use_tag = 1;
  481. task->detach = 1;
  482. task->destroy = 0;
  483. /* Tell that fft1 depends on twisted1 */
  484. starpu_tag_declare_deps(STEP_TAG(FFT1),
  485. 1, STEP_TAG(TWIST1));
  486. /* Create FFT1 task */
  487. plan->fft1_tasks[z] = task = starpu_task_create();
  488. task->cl = &STARPUFFT(fft1_1d_codelet);
  489. task->buffers[0].handle = plan->twisted1_handle[z];
  490. task->buffers[0].mode = STARPU_R;
  491. task->buffers[1].handle = plan->fft1_handle[z];
  492. task->buffers[1].mode = STARPU_W;
  493. task->buffers[2].handle = plan->roots_handle[0];
  494. task->buffers[2].mode = STARPU_R;
  495. task->cl_arg = &plan->fft1_args[z];
  496. task->tag_id = STEP_TAG(FFT1);
  497. task->use_tag = 1;
  498. task->detach = 1;
  499. task->destroy = 0;
  500. /* Tell that the join task will depend on the fft1 task. */
  501. starpu_tag_declare_deps(STEP_TAG_1D(plan, JOIN, 0),
  502. 1, STEP_TAG(FFT1));
  503. #undef STEP_TAG
  504. }
  505. /* Create the join task, only serving as a dependency point between
  506. * fft1 and twist2 tasks */
  507. plan->join_task = task = starpu_task_create();
  508. task->cl = NULL;
  509. task->tag_id = STEP_TAG_1D(plan, JOIN, 0);
  510. task->use_tag = 1;
  511. task->detach = 1;
  512. task->destroy = 0;
  513. /* Create second-round tasks: DIV_1D batches of n2/DIV_1D twist2, fft2,
  514. * and twist3 */
  515. for (z = 0; z < plan->totsize3; z++) {
  516. int jj = z;
  517. #define STEP_TAG(step) STEP_TAG_1D(plan, step, jj)
  518. plan->fft2_args[z].plan = plan;
  519. plan->fft2_args[z].jj = jj;
  520. /* Register n3 twisted2 buffers of size n1 */
  521. starpu_vector_data_register(&plan->twisted2_handle[z], 0, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
  522. starpu_vector_data_register(&plan->fft2_handle[z], 0, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));
  523. /* We'll need the result of fft2 on the CPU for the third
  524. * twist anyway, so tell starpu to not keep the fft2 buffer in
  525. * the GPU. */
  526. starpu_data_set_wt_mask(plan->fft2_handle[z], 1<<0);
  527. /* Tell that twisted2 depends on the join task */
  528. starpu_tag_declare_deps(STEP_TAG(TWIST2),
  529. 1, STEP_TAG_1D(plan, JOIN, 0));
  530. /* Create twist2 task */
  531. plan->twist2_tasks[z] = task = starpu_task_create();
  532. task->cl = &STARPUFFT(twist2_1d_codelet);
  533. task->buffers[0].handle = plan->twisted2_handle[z];
  534. task->buffers[0].mode = STARPU_W;
  535. task->cl_arg = &plan->fft2_args[z];
  536. task->tag_id = STEP_TAG(TWIST2);
  537. task->use_tag = 1;
  538. task->detach = 1;
  539. task->destroy = 0;
  540. /* Tell that fft2 depends on twisted2 */
  541. starpu_tag_declare_deps(STEP_TAG(FFT2),
  542. 1, STEP_TAG(TWIST2));
  543. /* Create FFT2 task */
  544. plan->fft2_tasks[z] = task = starpu_task_create();
  545. task->cl = &STARPUFFT(fft2_1d_codelet);
  546. task->buffers[0].handle = plan->twisted2_handle[z];
  547. task->buffers[0].mode = STARPU_R;
  548. task->buffers[1].handle = plan->fft2_handle[z];
  549. task->buffers[1].mode = STARPU_W;
  550. task->cl_arg = &plan->fft2_args[z];
  551. task->tag_id = STEP_TAG(FFT2);
  552. task->use_tag = 1;
  553. task->detach = 1;
  554. task->destroy = 0;
  555. /* Tell that twist3 depends on fft2 */
  556. starpu_tag_declare_deps(STEP_TAG(TWIST3),
  557. 1, STEP_TAG(FFT2));
  558. /* Create twist3 tasks */
  559. /* These run only on CPUs and thus write directly into the
  560. * application output buffer. */
  561. plan->twist3_tasks[z] = task = starpu_task_create();
  562. task->cl = &STARPUFFT(twist3_1d_codelet);
  563. task->buffers[0].handle = plan->fft2_handle[z];
  564. task->buffers[0].mode = STARPU_R;
  565. task->cl_arg = &plan->fft2_args[z];
  566. task->tag_id = STEP_TAG(TWIST3);
  567. task->use_tag = 1;
  568. task->detach = 1;
  569. task->destroy = 0;
  570. /* Tell that to be completely finished we need to have finished
  571. * this twisted3 */
  572. starpu_tag_declare_deps(STEP_TAG_1D(plan, END, 0),
  573. 1, STEP_TAG(TWIST3));
  574. #undef STEP_TAG
  575. }
  576. /* Create end task, only serving as a join point. */
  577. plan->end_task = task = starpu_task_create();
  578. task->cl = NULL;
  579. task->tag_id = STEP_TAG_1D(plan, END, 0);
  580. task->use_tag = 1;
  581. task->detach = 1;
  582. task->destroy = 0;
  583. return plan;
  584. }
  585. /* Actually submit all the tasks. */
  586. static starpu_tag_t
  587. STARPUFFT(start1dC2C)(STARPUFFT(plan) plan)
  588. {
  589. STARPU_ASSERT(plan->type == C2C);
  590. int z;
  591. for (z=0; z < plan->totsize1; z++) {
  592. starpu_task_submit(plan->twist1_tasks[z]);
  593. starpu_task_submit(plan->fft1_tasks[z]);
  594. }
  595. starpu_task_submit(plan->join_task);
  596. for (z=0; z < plan->totsize3; z++) {
  597. starpu_task_submit(plan->twist2_tasks[z]);
  598. starpu_task_submit(plan->fft2_tasks[z]);
  599. starpu_task_submit(plan->twist3_tasks[z]);
  600. }
  601. starpu_task_submit(plan->end_task);
  602. return STEP_TAG_1D(plan, END, 0);
  603. }
  604. /* Free all the tags. The generic code handles freeing the buffers. */
  605. static void
  606. STARPUFFT(free_1d_tags)(STARPUFFT(plan) plan)
  607. {
  608. unsigned i;
  609. int n1 = plan->n1[0];
  610. for (i = 0; i < n1; i++) {
  611. starpu_tag_remove(STEP_TAG_1D(plan, TWIST1, i));
  612. starpu_tag_remove(STEP_TAG_1D(plan, FFT1, i));
  613. }
  614. starpu_tag_remove(STEP_TAG_1D(plan, JOIN, 0));
  615. for (i = 0; i < DIV_1D; i++) {
  616. starpu_tag_remove(STEP_TAG_1D(plan, TWIST2, i));
  617. starpu_tag_remove(STEP_TAG_1D(plan, FFT2, i));
  618. starpu_tag_remove(STEP_TAG_1D(plan, TWIST3, i));
  619. }
  620. starpu_tag_remove(STEP_TAG_1D(plan, END, 0));
  621. }