starpufftx1d.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720
  1. /*
  2. * StarPU
  3. * Copyright (C) INRIA 2009 (see AUTHORS file)
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #define DIV_1D 64
  17. /*
  18. * Overall strategy for an fft of size n:
  19. * - perform n1 ffts of size n2
  20. * - twiddle
  21. * - perform n2 ffts of size n1
  22. *
  23. * - n1 defaults to DIV_1D, thus n2 defaults to n / DIV_1D.
  24. *
  25. * Precise tasks:
  26. *
  27. * - twist1: twist the whole n-element input (called "in") into n1 chunks of
  28. * size n2, by using n1 tasks taking the whole n-element input as a
  29. * R parameter and one n2 output as a W parameter. The result is
  30. * called twisted1.
  31. * - fft1: perform n1 (n2) ffts, by using n1 tasks doing one fft each. Also
  32. * twiddle the result to prepare for the fft2. The result is called
  33. * fft1.
  34. * - join: depends on all the fft1s, to gather the n1 results of size n2 in
  35. * the fft1 vector.
  36. * - twist2: twist the fft1 vector into n2 chunks of size n1, called twisted2.
  37. * since n2 is typically very large, this step is divided in DIV_1D
  38. * tasks, each of them performing n2/DIV_1D of them
  39. * - fft2: perform n2 ffts of size n1. This is divided in DIV_1D tasks of
  40. * n2/DIV_1D ffts, to be performed in batches. The result is called
  41. * fft2.
  42. * - twist3: twist back the result of the fft2s above into the output buffer.
  43. * Only implemented on CPUs for simplicity of the gathering.
  44. *
  45. * The tag space thus uses 3 dimensions:
  46. * - the number of the plan.
  47. * - the step (TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END)
  48. * - an index i between 0 and DIV_1D-1.
  49. */
  50. #define STEP_TAG_1D(plan, step, i) _STEP_TAG(plan, step, i)
  51. #ifdef STARPU_USE_CUDA
  52. /* twist1:
  53. *
  54. * Twist the full input vector (first parameter) into one chunk of size n2
  55. * (second parameter) */
  56. static void
  57. STARPUFFT(twist1_1d_kernel_gpu)(void *descr[], void *_args)
  58. {
  59. struct STARPUFFT(args) *args = _args;
  60. STARPUFFT(plan) plan = args->plan;
  61. int i = args->i;
  62. int n1 = plan->n1[0];
  63. int n2 = plan->n2[0];
  64. _cufftComplex * restrict in = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[0]);
  65. _cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[1]);
  66. cudaStream_t stream = STARPUFFT(get_local_stream)(plan, starpu_worker_get_id());
  67. STARPUFFT(cuda_twist1_1d_host)(in, twisted1, i, n1, n2, stream);
  68. cudaStreamSynchronize(stream);
  69. }
  70. /* fft1:
  71. *
  72. * Perform one fft of size n2 */
  73. static void
  74. STARPUFFT(fft1_1d_kernel_gpu)(void *descr[], void *_args)
  75. {
  76. struct STARPUFFT(args) *args = _args;
  77. STARPUFFT(plan) plan = args->plan;
  78. int i = args->i;
  79. int n2 = plan->n2[0];
  80. cufftResult cures;
  81. _cufftComplex * restrict in = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[0]);
  82. _cufftComplex * restrict out = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[1]);
  83. const _cufftComplex * restrict roots = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[2]);
  84. int workerid = starpu_worker_get_id();
  85. cudaStream_t stream;
  86. if (!plan->plans[workerid].initialized1) {
  87. cures = cufftPlan1d(&plan->plans[workerid].plan1_cuda, n2, _CUFFT_C2C, 1);
  88. stream = STARPUFFT(get_local_stream)(plan, workerid);
  89. cufftSetStream(plan->plans[workerid].plan1_cuda, stream);
  90. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  91. plan->plans[workerid].initialized1 = 1;
  92. }
  93. stream = plan->plans[workerid].stream;
  94. cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  95. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  96. STARPUFFT(cuda_twiddle_1d_host)(out, roots, n2, i, stream);
  97. cudaStreamSynchronize(plan->plans[workerid].stream);
  98. }
  99. /* fft2:
  100. *
  101. * Perform n3 = n2/DIV_1D ffts of size n1 */
  102. static void
  103. STARPUFFT(fft2_1d_kernel_gpu)(void *descr[], void *_args)
  104. {
  105. struct STARPUFFT(args) *args = _args;
  106. STARPUFFT(plan) plan = args->plan;
  107. int n1 = plan->n1[0];
  108. int n2 = plan->n2[0];
  109. int n3 = n2/DIV_1D;
  110. cufftResult cures;
  111. _cufftComplex * restrict in = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[0]);
  112. _cufftComplex * restrict out = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[1]);
  113. int workerid = starpu_worker_get_id();
  114. if (!plan->plans[workerid].initialized2) {
  115. cures = cufftPlan1d(&plan->plans[workerid].plan2_cuda, n1, _CUFFT_C2C, n3);
  116. cudaStream_t stream = STARPUFFT(get_local_stream)(plan, workerid);
  117. cufftSetStream(plan->plans[workerid].plan2_cuda, stream);
  118. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  119. plan->plans[workerid].initialized2 = 1;
  120. }
  121. /* NOTE using batch support */
  122. cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  123. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  124. cudaStreamSynchronize(plan->plans[workerid].stream);
  125. }
  126. #endif
  127. /* twist1:
  128. *
  129. * Twist the full input vector (first parameter) into one chunk of size n2
  130. * (second parameter) */
  131. static void
  132. STARPUFFT(twist1_1d_kernel_cpu)(void *descr[], void *_args)
  133. {
  134. struct STARPUFFT(args) *args = _args;
  135. STARPUFFT(plan) plan = args->plan;
  136. int i = args->i;
  137. int j;
  138. int n1 = plan->n1[0];
  139. int n2 = plan->n2[0];
  140. STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  141. STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[1]);
  142. //printf("twist1 %d %g\n", i, (double) cabs(plan->in[i]));
  143. for (j = 0; j < n2; j++)
  144. twisted1[j] = in[i+j*n1];
  145. }
  146. #ifdef STARPU_HAVE_FFTW
  147. /* fft1:
  148. *
  149. * Perform one fft of size n2 */
  150. static void
  151. STARPUFFT(fft1_1d_kernel_cpu)(void *descr[], void *_args)
  152. {
  153. struct STARPUFFT(args) *args = _args;
  154. STARPUFFT(plan) plan = args->plan;
  155. int i = args->i;
  156. int j;
  157. int n2 = plan->n2[0];
  158. int workerid = starpu_worker_get_id();
  159. const STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  160. STARPUFFT(complex) * restrict fft1 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[1]);
  161. _fftw_complex * restrict worker_in1 = (STARPUFFT(complex) *)plan->plans[workerid].in1;
  162. _fftw_complex * restrict worker_out1 = (STARPUFFT(complex) *)plan->plans[workerid].out1;
  163. //printf("fft1 %d %g\n", i, (double) cabs(twisted1[0]));
  164. memcpy(worker_in1, twisted1, plan->totsize2 * sizeof(*worker_in1));
  165. _FFTW(execute)(plan->plans[workerid].plan1_cpu);
  166. /* twiddle while copying from fftw output buffer to fft1 buffer */
  167. for (j = 0; j < n2; j++)
  168. fft1[j] = worker_out1[j] * plan->roots[0][i*j];
  169. }
  170. #endif
  171. /* twist2:
  172. *
  173. * Twist the full vector (results of the fft1s) into one package of n2/DIV_1D
  174. * chunks of size n1 */
  175. static void
  176. STARPUFFT(twist2_1d_kernel_cpu)(void *descr[], void *_args)
  177. {
  178. struct STARPUFFT(args) *args = _args;
  179. STARPUFFT(plan) plan = args->plan;
  180. int jj = args->jj; /* between 0 and DIV_1D */
  181. int jjj; /* beetween 0 and n3 */
  182. int i;
  183. int n1 = plan->n1[0];
  184. int n2 = plan->n2[0];
  185. int n3 = n2/DIV_1D;
  186. STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  187. //printf("twist2 %d %g\n", jj, (double) cabs(plan->fft1[jj]));
  188. for (jjj = 0; jjj < n3; jjj++) {
  189. int j = jj * n3 + jjj;
  190. for (i = 0; i < n1; i++)
  191. twisted2[jjj*n1+i] = plan->fft1[i*n2+j];
  192. }
  193. }
  194. #ifdef STARPU_HAVE_FFTW
  195. /* fft2:
  196. *
  197. * Perform n3 = n2/DIV_1D ffts of size n1 */
  198. static void
  199. STARPUFFT(fft2_1d_kernel_cpu)(void *descr[], void *_args)
  200. {
  201. struct STARPUFFT(args) *args = _args;
  202. STARPUFFT(plan) plan = args->plan;
  203. //int jj = args->jj;
  204. int workerid = starpu_worker_get_id();
  205. const STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  206. STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[1]);
  207. //printf("fft2 %d %g\n", jj, (double) cabs(twisted2[plan->totsize4-1]));
  208. _fftw_complex * restrict worker_in2 = (STARPUFFT(complex) *)plan->plans[workerid].in2;
  209. _fftw_complex * restrict worker_out2 = (STARPUFFT(complex) *)plan->plans[workerid].out2;
  210. memcpy(worker_in2, twisted2, plan->totsize4 * sizeof(*worker_in2));
  211. _FFTW(execute)(plan->plans[workerid].plan2_cpu);
  212. /* no twiddle */
  213. memcpy(fft2, worker_out2, plan->totsize4 * sizeof(*worker_out2));
  214. }
  215. #endif
  216. /* twist3:
  217. *
  218. * Spread the package of n2/DIV_1D chunks of size n1 into the output vector */
  219. static void
  220. STARPUFFT(twist3_1d_kernel_cpu)(void *descr[], void *_args)
  221. {
  222. struct STARPUFFT(args) *args = _args;
  223. STARPUFFT(plan) plan = args->plan;
  224. int jj = args->jj; /* between 0 and DIV_1D */
  225. int jjj; /* beetween 0 and n3 */
  226. int i;
  227. int n1 = plan->n1[0];
  228. int n2 = plan->n2[0];
  229. int n3 = n2/DIV_1D;
  230. const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  231. //printf("twist3 %d %g\n", jj, (double) cabs(fft2[0]));
  232. for (jjj = 0; jjj < n3; jjj++) {
  233. int j = jj * n3 + jjj;
  234. for (i = 0; i < n1; i++)
  235. plan->out[i*n2+j] = fft2[jjj*n1+i];
  236. }
  237. }
  238. /* Performance models for the 5 kinds of tasks */
  239. static struct starpu_perfmodel_t STARPUFFT(twist1_1d_model) = {
  240. .type = STARPU_HISTORY_BASED,
  241. .symbol = TYPE"twist1_1d"
  242. };
  243. static struct starpu_perfmodel_t STARPUFFT(fft1_1d_model) = {
  244. .type = STARPU_HISTORY_BASED,
  245. .symbol = TYPE"fft1_1d"
  246. };
  247. static struct starpu_perfmodel_t STARPUFFT(twist2_1d_model) = {
  248. .type = STARPU_HISTORY_BASED,
  249. .symbol = TYPE"twist2_1d"
  250. };
  251. static struct starpu_perfmodel_t STARPUFFT(fft2_1d_model) = {
  252. .type = STARPU_HISTORY_BASED,
  253. .symbol = TYPE"fft2_1d"
  254. };
  255. static struct starpu_perfmodel_t STARPUFFT(twist3_1d_model) = {
  256. .type = STARPU_HISTORY_BASED,
  257. .symbol = TYPE"twist3_1d"
  258. };
  259. /* codelet pointers for the 5 kinds of tasks */
  260. static starpu_codelet STARPUFFT(twist1_1d_codelet) = {
  261. .where =
  262. #ifdef STARPU_USE_CUDA
  263. STARPU_CUDA|
  264. #endif
  265. STARPU_CPU,
  266. #ifdef STARPU_USE_CUDA
  267. .cuda_func = STARPUFFT(twist1_1d_kernel_gpu),
  268. #endif
  269. .cpu_func = STARPUFFT(twist1_1d_kernel_cpu),
  270. .model = &STARPUFFT(twist1_1d_model),
  271. .nbuffers = 2
  272. };
  273. static starpu_codelet STARPUFFT(fft1_1d_codelet) = {
  274. .where =
  275. #ifdef STARPU_USE_CUDA
  276. STARPU_CUDA|
  277. #endif
  278. #ifdef STARPU_HAVE_FFTW
  279. STARPU_CPU|
  280. #endif
  281. 0,
  282. #ifdef STARPU_USE_CUDA
  283. .cuda_func = STARPUFFT(fft1_1d_kernel_gpu),
  284. #endif
  285. #ifdef STARPU_HAVE_FFTW
  286. .cpu_func = STARPUFFT(fft1_1d_kernel_cpu),
  287. #endif
  288. .model = &STARPUFFT(fft1_1d_model),
  289. .nbuffers = 3
  290. };
  291. static starpu_codelet STARPUFFT(twist2_1d_codelet) = {
  292. .where = STARPU_CPU,
  293. .cpu_func = STARPUFFT(twist2_1d_kernel_cpu),
  294. .model = &STARPUFFT(twist2_1d_model),
  295. .nbuffers = 1
  296. };
  297. static starpu_codelet STARPUFFT(fft2_1d_codelet) = {
  298. .where =
  299. #ifdef STARPU_USE_CUDA
  300. STARPU_CUDA|
  301. #endif
  302. #ifdef STARPU_HAVE_FFTW
  303. STARPU_CPU|
  304. #endif
  305. 0,
  306. #ifdef STARPU_USE_CUDA
  307. .cuda_func = STARPUFFT(fft2_1d_kernel_gpu),
  308. #endif
  309. #ifdef STARPU_HAVE_FFTW
  310. .cpu_func = STARPUFFT(fft2_1d_kernel_cpu),
  311. #endif
  312. .model = &STARPUFFT(fft2_1d_model),
  313. .nbuffers = 2
  314. };
  315. static starpu_codelet STARPUFFT(twist3_1d_codelet) = {
  316. .where = STARPU_CPU,
  317. .cpu_func = STARPUFFT(twist3_1d_kernel_cpu),
  318. .model = &STARPUFFT(twist3_1d_model),
  319. .nbuffers = 1
  320. };
  321. /* Planning:
  322. *
  323. * - For each CPU worker, we need to plan the two fftw stages.
  324. * - For GPU workers, we need to do the planning in the CUDA context, so we do
  325. * this lazily through the initialised1 and initialised2 flags ; TODO: use
  326. * starpu_execute_on_each_worker instead (done in the omp branch).
  327. * - We allocate all the temporary buffers and register them to starpu.
  328. * - We create all the tasks, but do not submit them yet. It will be possible
  329. * to reuse them at will to perform several ffts with the same planning.
  330. */
  331. STARPUFFT(plan)
  332. STARPUFFT(plan_dft_1d)(int n, int sign, unsigned flags)
  333. {
  334. int workerid;
  335. int n1 = DIV_1D;
  336. int n2 = n / n1;
  337. int n3;
  338. int z;
  339. struct starpu_task *task;
  340. #ifdef STARPU_USE_CUDA
  341. /* cufft 1D limited to 8M elements */
  342. while (n2 > 8 << 20) {
  343. n1 *= 2;
  344. n2 /= 2;
  345. }
  346. #endif
  347. STARPU_ASSERT(n == n1*n2);
  348. STARPU_ASSERT(n1 < (1ULL << I_BITS));
  349. /* distribute the n2 second ffts into DIV_1D packages */
  350. n3 = n2 / DIV_1D;
  351. STARPU_ASSERT(n2 == n3*DIV_1D);
  352. /* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
  353. STARPU_ASSERT(flags == 0);
  354. STARPUFFT(plan) plan = malloc(sizeof(*plan));
  355. memset(plan, 0, sizeof(*plan));
  356. plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;
  357. /* The plan number has a limited size */
  358. STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));
  359. /* Just one dimension */
  360. plan->dim = 1;
  361. plan->n = malloc(plan->dim * sizeof(*plan->n));
  362. plan->n[0] = n;
  363. check_dims(plan);
  364. plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
  365. plan->n1[0] = n1;
  366. plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
  367. plan->n2[0] = n2;
  368. /* Note: this is for coherency with the 2D case */
  369. plan->totsize = n;
  370. plan->totsize1 = n1;
  371. plan->totsize2 = n2;
  372. plan->totsize3 = DIV_1D;
  373. plan->totsize4 = plan->totsize / plan->totsize3;
  374. plan->type = C2C;
  375. plan->sign = sign;
  376. /* Compute the w^k just once. */
  377. compute_roots(plan);
  378. /* Initialize per-worker working set */
  379. for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
  380. switch (starpu_worker_get_type(workerid)) {
  381. case STARPU_CPU_WORKER:
  382. #ifdef STARPU_HAVE_FFTW
  383. /* first fft plan: one fft of size n2.
  384. * FFTW imposes that buffer pointers are known at
  385. * planning time. */
  386. plan->plans[workerid].in1 = _FFTW(malloc)(plan->totsize2 * sizeof(_fftw_complex));
  387. memset(plan->plans[workerid].in1, 0, plan->totsize2 * sizeof(_fftw_complex));
  388. plan->plans[workerid].out1 = _FFTW(malloc)(plan->totsize2 * sizeof(_fftw_complex));
  389. memset(plan->plans[workerid].out1, 0, plan->totsize2 * sizeof(_fftw_complex));
  390. plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_1d)(n2, plan->plans[workerid].in1, plan->plans[workerid].out1, sign, _FFTW_FLAGS);
  391. STARPU_ASSERT(plan->plans[workerid].plan1_cpu);
  392. /* second fft plan: n3 ffts of size n1 */
  393. plan->plans[workerid].in2 = _FFTW(malloc)(plan->totsize4 * sizeof(_fftw_complex));
  394. memset(plan->plans[workerid].in2, 0, plan->totsize4 * sizeof(_fftw_complex));
  395. plan->plans[workerid].out2 = _FFTW(malloc)(plan->totsize4 * sizeof(_fftw_complex));
  396. memset(plan->plans[workerid].out2, 0, plan->totsize4 * sizeof(_fftw_complex));
  397. plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
  398. plan->n1, n3,
  399. /* input */ plan->plans[workerid].in2, NULL, 1, plan->totsize1,
  400. /* output */ plan->plans[workerid].out2, NULL, 1, plan->totsize1,
  401. sign, _FFTW_FLAGS);
  402. STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
  403. #else
  404. #warning libstarpufft can not work correctly without libfftw3
  405. #endif
  406. break;
  407. case STARPU_CUDA_WORKER:
  408. #ifdef STARPU_USE_CUDA
  409. /* Perform CUFFT planning lazily. */
  410. plan->plans[workerid].initialized1 = 0;
  411. plan->plans[workerid].initialized2 = 0;
  412. #endif
  413. break;
  414. default:
  415. STARPU_ABORT();
  416. break;
  417. }
  418. }
  419. /* Allocate buffers. */
  420. plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
  421. memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));
  422. plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
  423. memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));
  424. plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
  425. memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));
  426. plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
  427. memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));
  428. /* Allocate handle arrays */
  429. plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
  430. plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
  431. plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
  432. plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));
  433. /* Allocate task arrays */
  434. plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
  435. plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
  436. plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
  437. plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
  438. plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));
  439. /* Allocate codelet argument arrays */
  440. plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
  441. plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));
  442. /* Create first-round tasks: DIV_1D tasks of type twist1 and fft1 */
  443. for (z = 0; z < plan->totsize1; z++) {
  444. int i = z;
  445. #define STEP_TAG(step) STEP_TAG_1D(plan, step, i)
  446. plan->fft1_args[z].plan = plan;
  447. plan->fft1_args[z].i = i;
  448. /* Register the twisted1 buffer of size n2. */
  449. starpu_vector_data_register(&plan->twisted1_handle[z], 0, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));
  450. /* Register the fft1 buffer of size n2. */
  451. starpu_vector_data_register(&plan->fft1_handle[z], 0, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));
  452. /* We'll need the result of fft1 on the CPU for the second
  453. * twist anyway, so tell starpu to not keep the fft1 buffer in
  454. * the GPU. */
  455. starpu_data_set_wb_mask(plan->fft1_handle[z], 1<<0);
  456. /* Create twist1 task */
  457. plan->twist1_tasks[z] = task = starpu_task_create();
  458. task->cl = &STARPUFFT(twist1_1d_codelet);
  459. //task->buffers[0].handle = to be filled at execution to point
  460. //to the application input.
  461. task->buffers[0].mode = STARPU_R;
  462. task->buffers[1].handle = plan->twisted1_handle[z];
  463. task->buffers[1].mode = STARPU_W;
  464. task->cl_arg = &plan->fft1_args[z];
  465. task->tag_id = STEP_TAG(TWIST1);
  466. task->use_tag = 1;
  467. task->detach = 1;
  468. task->destroy = 0;
  469. /* Tell that fft1 depends on twisted1 */
  470. starpu_tag_declare_deps(STEP_TAG(FFT1),
  471. 1, STEP_TAG(TWIST1));
  472. /* Create FFT1 task */
  473. plan->fft1_tasks[z] = task = starpu_task_create();
  474. task->cl = &STARPUFFT(fft1_1d_codelet);
  475. task->buffers[0].handle = plan->twisted1_handle[z];
  476. task->buffers[0].mode = STARPU_R;
  477. task->buffers[1].handle = plan->fft1_handle[z];
  478. task->buffers[1].mode = STARPU_W;
  479. task->buffers[2].handle = plan->roots_handle[0];
  480. task->buffers[2].mode = STARPU_R;
  481. task->cl_arg = &plan->fft1_args[z];
  482. task->tag_id = STEP_TAG(FFT1);
  483. task->use_tag = 1;
  484. task->detach = 1;
  485. task->destroy = 0;
  486. /* Tell that the join task will depend on the fft1 task. */
  487. starpu_tag_declare_deps(STEP_TAG_1D(plan, JOIN, 0),
  488. 1, STEP_TAG(FFT1));
  489. #undef STEP_TAG
  490. }
  491. /* Create the join task, only serving as a dependency point between
  492. * fft1 and twist2 tasks */
  493. plan->join_task = task = starpu_task_create();
  494. task->cl = NULL;
  495. task->tag_id = STEP_TAG_1D(plan, JOIN, 0);
  496. task->use_tag = 1;
  497. task->detach = 1;
  498. task->destroy = 0;
  499. /* Create second-round tasks: DIV_1D batches of n2/DIV_1D twist2, fft2,
  500. * and twist3 */
  501. for (z = 0; z < plan->totsize3; z++) {
  502. int jj = z;
  503. #define STEP_TAG(step) STEP_TAG_1D(plan, step, jj)
  504. plan->fft2_args[z].plan = plan;
  505. plan->fft2_args[z].jj = jj;
  506. /* Register n3 twisted2 buffers of size n1 */
  507. starpu_vector_data_register(&plan->twisted2_handle[z], 0, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
  508. starpu_vector_data_register(&plan->fft2_handle[z], 0, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));
  509. /* We'll need the result of fft2 on the CPU for the third
  510. * twist anyway, so tell starpu to not keep the fft2 buffer in
  511. * the GPU. */
  512. starpu_data_set_wb_mask(plan->fft2_handle[z], 1<<0);
  513. /* Tell that twisted2 depends on the join task */
  514. starpu_tag_declare_deps(STEP_TAG(TWIST2),
  515. 1, STEP_TAG_1D(plan, JOIN, 0));
  516. /* Create twist2 task */
  517. plan->twist2_tasks[z] = task = starpu_task_create();
  518. task->cl = &STARPUFFT(twist2_1d_codelet);
  519. task->buffers[0].handle = plan->twisted2_handle[z];
  520. task->buffers[0].mode = STARPU_W;
  521. task->cl_arg = &plan->fft2_args[z];
  522. task->tag_id = STEP_TAG(TWIST2);
  523. task->use_tag = 1;
  524. task->detach = 1;
  525. task->destroy = 0;
  526. /* Tell that fft2 depends on twisted2 */
  527. starpu_tag_declare_deps(STEP_TAG(FFT2),
  528. 1, STEP_TAG(TWIST2));
  529. /* Create FFT2 task */
  530. plan->fft2_tasks[z] = task = starpu_task_create();
  531. task->cl = &STARPUFFT(fft2_1d_codelet);
  532. task->buffers[0].handle = plan->twisted2_handle[z];
  533. task->buffers[0].mode = STARPU_R;
  534. task->buffers[1].handle = plan->fft2_handle[z];
  535. task->buffers[1].mode = STARPU_W;
  536. task->cl_arg = &plan->fft2_args[z];
  537. task->tag_id = STEP_TAG(FFT2);
  538. task->use_tag = 1;
  539. task->detach = 1;
  540. task->destroy = 0;
  541. /* Tell that twist3 depends on fft2 */
  542. starpu_tag_declare_deps(STEP_TAG(TWIST3),
  543. 1, STEP_TAG(FFT2));
  544. /* Create twist3 tasks */
  545. /* These run only on CPUs and thus write directly into the
  546. * application output buffer. */
  547. plan->twist3_tasks[z] = task = starpu_task_create();
  548. task->cl = &STARPUFFT(twist3_1d_codelet);
  549. task->buffers[0].handle = plan->fft2_handle[z];
  550. task->buffers[0].mode = STARPU_R;
  551. task->cl_arg = &plan->fft2_args[z];
  552. task->tag_id = STEP_TAG(TWIST3);
  553. task->use_tag = 1;
  554. task->detach = 1;
  555. task->destroy = 0;
  556. /* Tell that to be completely finished we need to have finished
  557. * this twisted3 */
  558. starpu_tag_declare_deps(STEP_TAG_1D(plan, END, 0),
  559. 1, STEP_TAG(TWIST3));
  560. #undef STEP_TAG
  561. }
  562. /* Create end task, only serving as a join point. */
  563. plan->end_task = task = starpu_task_create();
  564. task->cl = NULL;
  565. task->tag_id = STEP_TAG_1D(plan, END, 0);
  566. task->use_tag = 1;
  567. task->detach = 1;
  568. task->destroy = 0;
  569. return plan;
  570. }
  571. /* Actually submit all the tasks. */
  572. static starpu_tag_t
  573. STARPUFFT(start1dC2C)(STARPUFFT(plan) plan)
  574. {
  575. STARPU_ASSERT(plan->type == C2C);
  576. int z;
  577. for (z=0; z < plan->totsize1; z++) {
  578. starpu_task_submit(plan->twist1_tasks[z]);
  579. starpu_task_submit(plan->fft1_tasks[z]);
  580. }
  581. starpu_task_submit(plan->join_task);
  582. for (z=0; z < plan->totsize3; z++) {
  583. starpu_task_submit(plan->twist2_tasks[z]);
  584. starpu_task_submit(plan->fft2_tasks[z]);
  585. starpu_task_submit(plan->twist3_tasks[z]);
  586. }
  587. starpu_task_submit(plan->end_task);
  588. return STEP_TAG_1D(plan, END, 0);
  589. }
  590. /* Free all the tags. The generic code handles freeing the buffers. */
  591. static void
  592. STARPUFFT(free_1d_tags)(STARPUFFT(plan) plan)
  593. {
  594. unsigned i;
  595. int n1 = plan->n1[0];
  596. for (i = 0; i < n1; i++) {
  597. starpu_tag_remove(STEP_TAG_1D(plan, TWIST1, i));
  598. starpu_tag_remove(STEP_TAG_1D(plan, FFT1, i));
  599. }
  600. starpu_tag_remove(STEP_TAG_1D(plan, JOIN, 0));
  601. for (i = 0; i < DIV_1D; i++) {
  602. starpu_tag_remove(STEP_TAG_1D(plan, TWIST2, i));
  603. starpu_tag_remove(STEP_TAG_1D(plan, FFT2, i));
  604. starpu_tag_remove(STEP_TAG_1D(plan, TWIST3, i));
  605. }
  606. starpu_tag_remove(STEP_TAG_1D(plan, END, 0));
  607. }