/* starpufftx1d.c */
  1. /*
  2. * StarPU
  3. * Copyright (C) INRIA 2009 (see AUTHORS file)
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #define DIV_1D 64
  17. /*
  18. * Overall strategy for an fft of size n:
  19. * - perform n1 ffts of size n2
  20. * - twiddle
  21. * - perform n2 ffts of size n1
  22. *
  23. * - n1 defaults to DIV_1D, thus n2 defaults to n / DIV_1D.
  24. *
  25. * Precise tasks:
  26. *
  27. * - twist1: twist the whole n-element input (called "in") into n1 chunks of
  28. * size n2, by using n1 tasks taking the whole n-element input as a
  29. * R parameter and one n2 output as a W parameter. The result is
  30. * called twisted1.
  31. * - fft1: perform n1 (n2) ffts, by using n1 tasks doing one fft each. Also
  32. * twiddle the result to prepare for the fft2. The result is called
  33. * fft1.
  34. * - join: depends on all the fft1s, to gather the n1 results of size n2 in
  35. * the fft1 vector.
  36. * - twist2: twist the fft1 vector into n2 chunks of size n1, called twisted2.
  37. * since n2 is typically very large, this step is divided in DIV_1D
  38. * tasks, each of them performing n2/DIV_1D of them
  39. * - fft2: perform n2 ffts of size n1. This is divided in DIV_1D tasks of
  40. * n2/DIV_1D ffts, to be performed in batches. The result is called
  41. * fft2.
  42. * - twist3: twist back the result of the fft2s above into the output buffer.
  43. * Only implemented on CPUs for simplicity of the gathering.
  44. *
  45. * The tag space thus uses 3 dimensions:
  46. * - the number of the plan.
  47. * - the step (TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END)
  48. * - an index i between 0 and DIV_1D-1.
  49. */
  50. #define STEP_TAG_1D(plan, step, i) _STEP_TAG(plan, step, i)
  51. #ifdef STARPU_USE_CUDA
/* twist1:
 *
 * Twist the full input vector (first parameter) into one chunk of size n2
 * (second parameter) */
static void
STARPUFFT(twist1_1d_kernel_gpu)(void *descr[], void *_args)
{
	struct STARPUFFT(args) *args = _args;
	STARPUFFT(plan) plan = args->plan;
	/* Index of the chunk this task produces (0 <= i < n1). */
	int i = args->i;
	int n1 = plan->n1[0];
	int n2 = plan->n2[0];
	_cufftComplex * restrict in = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[0]);
	_cufftComplex * restrict twisted1 = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[1]);
	/* Launch the twist on this worker's private CUDA stream. */
	cudaStream_t stream = STARPUFFT(get_local_stream)(plan, starpu_worker_get_id());
	STARPUFFT(cuda_twist1_1d_host)(in, twisted1, i, n1, n2, stream);
	/* The task must be complete when we return, so wait for the kernel. */
	cudaStreamSynchronize(stream);
}
  70. /* fft1:
  71. *
  72. * Perform one fft of size n2 */
  73. static void
  74. STARPUFFT(fft1_1d_kernel_gpu)(void *descr[], void *_args)
  75. {
  76. struct STARPUFFT(args) *args = _args;
  77. STARPUFFT(plan) plan = args->plan;
  78. int i = args->i;
  79. int n2 = plan->n2[0];
  80. cufftResult cures;
  81. _cufftComplex * restrict in = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[0]);
  82. _cufftComplex * restrict out = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[1]);
  83. const _cufftComplex * restrict roots = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[2]);
  84. int workerid = starpu_worker_get_id();
  85. task_per_worker[workerid]++;
  86. cudaStream_t stream;
  87. if (!plan->plans[workerid].initialized1) {
  88. cures = cufftPlan1d(&plan->plans[workerid].plan1_cuda, n2, _CUFFT_C2C, 1);
  89. stream = STARPUFFT(get_local_stream)(plan, workerid);
  90. cufftSetStream(plan->plans[workerid].plan1_cuda, stream);
  91. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  92. plan->plans[workerid].initialized1 = 1;
  93. }
  94. stream = plan->plans[workerid].stream;
  95. cures = _cufftExecC2C(plan->plans[workerid].plan1_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  96. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  97. STARPUFFT(cuda_twiddle_1d_host)(out, roots, n2, i, stream);
  98. cudaStreamSynchronize(plan->plans[workerid].stream);
  99. }
  100. /* fft2:
  101. *
  102. * Perform n3 = n2/DIV_1D ffts of size n1 */
  103. static void
  104. STARPUFFT(fft2_1d_kernel_gpu)(void *descr[], void *_args)
  105. {
  106. struct STARPUFFT(args) *args = _args;
  107. STARPUFFT(plan) plan = args->plan;
  108. int n1 = plan->n1[0];
  109. int n2 = plan->n2[0];
  110. int n3 = n2/DIV_1D;
  111. cufftResult cures;
  112. _cufftComplex * restrict in = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[0]);
  113. _cufftComplex * restrict out = (_cufftComplex *)STARPU_GET_VECTOR_PTR(descr[1]);
  114. int workerid = starpu_worker_get_id();
  115. task_per_worker[workerid]++;
  116. if (!plan->plans[workerid].initialized2) {
  117. cures = cufftPlan1d(&plan->plans[workerid].plan2_cuda, n1, _CUFFT_C2C, n3);
  118. cudaStream_t stream = STARPUFFT(get_local_stream)(plan, workerid);
  119. cufftSetStream(plan->plans[workerid].plan2_cuda, stream);
  120. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  121. plan->plans[workerid].initialized2 = 1;
  122. }
  123. /* NOTE using batch support */
  124. cures = _cufftExecC2C(plan->plans[workerid].plan2_cuda, in, out, plan->sign == -1 ? CUFFT_FORWARD : CUFFT_INVERSE);
  125. STARPU_ASSERT(cures == CUFFT_SUCCESS);
  126. cudaStreamSynchronize(plan->plans[workerid].stream);
  127. }
  128. #endif
  129. /* twist1:
  130. *
  131. * Twist the full input vector (first parameter) into one chunk of size n2
  132. * (second parameter) */
  133. static void
  134. STARPUFFT(twist1_1d_kernel_cpu)(void *descr[], void *_args)
  135. {
  136. struct STARPUFFT(args) *args = _args;
  137. STARPUFFT(plan) plan = args->plan;
  138. int i = args->i;
  139. int j;
  140. int n1 = plan->n1[0];
  141. int n2 = plan->n2[0];
  142. STARPUFFT(complex) * restrict in = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  143. STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[1]);
  144. //printf("twist1 %d %g\n", i, (double) cabs(plan->in[i]));
  145. for (j = 0; j < n2; j++)
  146. twisted1[j] = in[i+j*n1];
  147. }
  148. #ifdef STARPU_HAVE_FFTW
  149. /* fft1:
  150. *
  151. * Perform one fft of size n2 */
  152. static void
  153. STARPUFFT(fft1_1d_kernel_cpu)(void *descr[], void *_args)
  154. {
  155. struct STARPUFFT(args) *args = _args;
  156. STARPUFFT(plan) plan = args->plan;
  157. int i = args->i;
  158. int j;
  159. int n2 = plan->n2[0];
  160. int workerid = starpu_worker_get_id();
  161. task_per_worker[workerid]++;
  162. const STARPUFFT(complex) * restrict twisted1 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  163. STARPUFFT(complex) * restrict fft1 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[1]);
  164. _fftw_complex * restrict worker_in1 = (STARPUFFT(complex) *)plan->plans[workerid].in1;
  165. _fftw_complex * restrict worker_out1 = (STARPUFFT(complex) *)plan->plans[workerid].out1;
  166. //printf("fft1 %d %g\n", i, (double) cabs(twisted1[0]));
  167. memcpy(worker_in1, twisted1, plan->totsize2 * sizeof(*worker_in1));
  168. _FFTW(execute)(plan->plans[workerid].plan1_cpu);
  169. /* twiddle while copying from fftw output buffer to fft1 buffer */
  170. for (j = 0; j < n2; j++)
  171. fft1[j] = worker_out1[j] * plan->roots[0][i*j];
  172. }
  173. #endif
  174. /* twist2:
  175. *
  176. * Twist the full vector (results of the fft1s) into one package of n2/DIV_1D
  177. * chunks of size n1 */
  178. static void
  179. STARPUFFT(twist2_1d_kernel_cpu)(void *descr[], void *_args)
  180. {
  181. struct STARPUFFT(args) *args = _args;
  182. STARPUFFT(plan) plan = args->plan;
  183. int jj = args->jj; /* between 0 and DIV_1D */
  184. int jjj; /* beetween 0 and n3 */
  185. int i;
  186. int n1 = plan->n1[0];
  187. int n2 = plan->n2[0];
  188. int n3 = n2/DIV_1D;
  189. STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  190. //printf("twist2 %d %g\n", jj, (double) cabs(plan->fft1[jj]));
  191. for (jjj = 0; jjj < n3; jjj++) {
  192. int j = jj * n3 + jjj;
  193. for (i = 0; i < n1; i++)
  194. twisted2[jjj*n1+i] = plan->fft1[i*n2+j];
  195. }
  196. }
  197. #ifdef STARPU_HAVE_FFTW
  198. /* fft2:
  199. *
  200. * Perform n3 = n2/DIV_1D ffts of size n1 */
  201. static void
  202. STARPUFFT(fft2_1d_kernel_cpu)(void *descr[], void *_args)
  203. {
  204. struct STARPUFFT(args) *args = _args;
  205. STARPUFFT(plan) plan = args->plan;
  206. //int jj = args->jj;
  207. int workerid = starpu_worker_get_id();
  208. task_per_worker[workerid]++;
  209. const STARPUFFT(complex) * restrict twisted2 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  210. STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[1]);
  211. //printf("fft2 %d %g\n", jj, (double) cabs(twisted2[plan->totsize4-1]));
  212. _fftw_complex * restrict worker_in2 = (STARPUFFT(complex) *)plan->plans[workerid].in2;
  213. _fftw_complex * restrict worker_out2 = (STARPUFFT(complex) *)plan->plans[workerid].out2;
  214. memcpy(worker_in2, twisted2, plan->totsize4 * sizeof(*worker_in2));
  215. _FFTW(execute)(plan->plans[workerid].plan2_cpu);
  216. /* no twiddle */
  217. memcpy(fft2, worker_out2, plan->totsize4 * sizeof(*worker_out2));
  218. }
  219. #endif
  220. /* twist3:
  221. *
  222. * Spread the package of n2/DIV_1D chunks of size n1 into the output vector */
  223. static void
  224. STARPUFFT(twist3_1d_kernel_cpu)(void *descr[], void *_args)
  225. {
  226. struct STARPUFFT(args) *args = _args;
  227. STARPUFFT(plan) plan = args->plan;
  228. int jj = args->jj; /* between 0 and DIV_1D */
  229. int jjj; /* beetween 0 and n3 */
  230. int i;
  231. int n1 = plan->n1[0];
  232. int n2 = plan->n2[0];
  233. int n3 = n2/DIV_1D;
  234. const STARPUFFT(complex) * restrict fft2 = (STARPUFFT(complex) *)STARPU_GET_VECTOR_PTR(descr[0]);
  235. //printf("twist3 %d %g\n", jj, (double) cabs(fft2[0]));
  236. for (jjj = 0; jjj < n3; jjj++) {
  237. int j = jj * n3 + jjj;
  238. for (i = 0; i < n1; i++)
  239. plan->out[i*n2+j] = fft2[jjj*n1+i];
  240. }
  241. }
/* Performance models for the 5 kinds of tasks.
 *
 * All history-based: StarPU calibrates each symbol from past executions
 * and uses the measured times for scheduling decisions. The TYPE prefix
 * distinguishes the float/double instantiations of this file. */
static struct starpu_perfmodel_t STARPUFFT(twist1_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist1_1d"
};
static struct starpu_perfmodel_t STARPUFFT(fft1_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft1_1d"
};
static struct starpu_perfmodel_t STARPUFFT(twist2_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist2_1d"
};
static struct starpu_perfmodel_t STARPUFFT(fft2_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"fft2_1d"
};
static struct starpu_perfmodel_t STARPUFFT(twist3_1d_model) = {
	.type = STARPU_HISTORY_BASED,
	.symbol = TYPE"twist3_1d"
};
/* codelet pointers for the 5 kinds of tasks */

/* twist1: input vector (R), twisted1 chunk (W). Has both CPU and CUDA
 * implementations. */
static starpu_codelet STARPUFFT(twist1_1d_codelet) = {
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
		STARPU_CPU,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(twist1_1d_kernel_gpu),
#endif
	.cpu_func = STARPUFFT(twist1_1d_kernel_cpu),
	.model = &STARPUFFT(twist1_1d_model),
	.nbuffers = 2
};

/* fft1: twisted1 (R), fft1 (W), roots (R). The CPU variant needs FFTW;
 * the trailing 0 keeps `.where` well-formed when neither backend is
 * compiled in. */
static starpu_codelet STARPUFFT(fft1_1d_codelet) = {
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(fft1_1d_kernel_gpu),
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_func = STARPUFFT(fft1_1d_kernel_cpu),
#endif
	.model = &STARPUFFT(fft1_1d_model),
	.nbuffers = 3
};

/* twist2: CPU-only; reads plan->fft1 directly, only twisted2 (W) is a
 * registered buffer. */
static starpu_codelet STARPUFFT(twist2_1d_codelet) = {
	.where = STARPU_CPU,
	.cpu_func = STARPUFFT(twist2_1d_kernel_cpu),
	.model = &STARPUFFT(twist2_1d_model),
	.nbuffers = 1
};

/* fft2: twisted2 (R), fft2 (W). Same backend availability logic as fft1. */
static starpu_codelet STARPUFFT(fft2_1d_codelet) = {
	.where =
#ifdef STARPU_USE_CUDA
		STARPU_CUDA|
#endif
#ifdef STARPU_HAVE_FFTW
		STARPU_CPU|
#endif
		0,
#ifdef STARPU_USE_CUDA
	.cuda_func = STARPUFFT(fft2_1d_kernel_gpu),
#endif
#ifdef STARPU_HAVE_FFTW
	.cpu_func = STARPUFFT(fft2_1d_kernel_cpu),
#endif
	.model = &STARPUFFT(fft2_1d_model),
	.nbuffers = 2
};

/* twist3: CPU-only; writes plan->out directly, only fft2 (R) is a
 * registered buffer. */
static starpu_codelet STARPUFFT(twist3_1d_codelet) = {
	.where = STARPU_CPU,
	.cpu_func = STARPUFFT(twist3_1d_kernel_cpu),
	.model = &STARPUFFT(twist3_1d_model),
	.nbuffers = 1
};
/* Planning:
 *
 * - For each CPU worker, we need to plan the two fftw stages.
 * - For GPU workers, we need to do the planning in the CUDA context, so we do
 *   this lazily through the initialised1 and initialised2 flags ; TODO: use
 *   starpu_execute_on_each_worker instead (done in the omp branch).
 * - We allocate all the temporary buffers and register them to starpu.
 * - We create all the tasks, but do not submit them yet. It will be possible
 *   to reuse them at will to perform several ffts with the same planning.
 *
 * Parameters:
 * - n: total transform size; must factor as n1*n2 with n2 a multiple of
 *   DIV_1D (asserted below).
 * - sign: -1 for forward, +1 for inverse (FFTW convention).
 * - flags: must be 0 for now (asserted below).
 * Returns the newly allocated plan. */
STARPUFFT(plan)
STARPUFFT(plan_dft_1d)(int n, int sign, unsigned flags)
{
	int workerid;
	int n1 = DIV_1D;
	int n2 = n / n1;
	int n3;
	int z;
	struct starpu_task *task;
#ifdef STARPU_USE_CUDA
	/* cufft 1D limited to 8M elements: grow n1 until n2 fits. */
	while (n2 > 8 << 20) {
		n1 *= 2;
		n2 /= 2;
	}
#endif
	STARPU_ASSERT(n == n1*n2);
	/* The chunk index must fit in the tag's i field. */
	STARPU_ASSERT(n1 < (1ULL << I_BITS));
	/* distribute the n2 second ffts into DIV_1D packages */
	n3 = n2 / DIV_1D;
	STARPU_ASSERT(n2 == n3*DIV_1D);
	/* TODO: flags? Automatically set FFTW_MEASURE on calibration? */
	STARPU_ASSERT(flags == 0);
	/* NOTE(review): malloc results are not checked anywhere in this
	 * function; a failed allocation would crash rather than return
	 * NULL — confirm whether that is acceptable project policy. */
	STARPUFFT(plan) plan = malloc(sizeof(*plan));
	memset(plan, 0, sizeof(*plan));
	/* Globally unique plan number, used in the tag space. */
	plan->number = STARPU_ATOMIC_ADD(&starpufft_last_plan_number, 1) - 1;
	/* The plan number has a limited size */
	STARPU_ASSERT(plan->number < (1ULL << NUMBER_BITS));
	/* Just one dimension */
	plan->dim = 1;
	plan->n = malloc(plan->dim * sizeof(*plan->n));
	plan->n[0] = n;
	check_dims(plan);
	plan->n1 = malloc(plan->dim * sizeof(*plan->n1));
	plan->n1[0] = n1;
	plan->n2 = malloc(plan->dim * sizeof(*plan->n2));
	plan->n2[0] = n2;
	/* Note: this is for coherency with the 2D case */
	plan->totsize = n;
	plan->totsize1 = n1;	/* number of first-round tasks */
	plan->totsize2 = n2;	/* size of each first-round fft */
	plan->totsize3 = DIV_1D;	/* number of second-round batches */
	plan->totsize4 = plan->totsize / plan->totsize3;	/* elements per batch */
	plan->type = C2C;
	plan->sign = sign;
	/* Compute the w^k just once. */
	compute_roots(plan);
	/* Initialize per-worker working set */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		switch (starpu_worker_get_type(workerid)) {
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			/* first fft plan: one fft of size n2.
			 * FFTW imposes that buffer pointers are known at
			 * planning time. */
			plan->plans[workerid].in1 = _FFTW(malloc)(plan->totsize2 * sizeof(_fftw_complex));
			memset(plan->plans[workerid].in1, 0, plan->totsize2 * sizeof(_fftw_complex));
			plan->plans[workerid].out1 = _FFTW(malloc)(plan->totsize2 * sizeof(_fftw_complex));
			memset(plan->plans[workerid].out1, 0, plan->totsize2 * sizeof(_fftw_complex));
			plan->plans[workerid].plan1_cpu = _FFTW(plan_dft_1d)(n2, plan->plans[workerid].in1, plan->plans[workerid].out1, sign, _FFTW_FLAGS);
			STARPU_ASSERT(plan->plans[workerid].plan1_cpu);
			/* second fft plan: n3 ffts of size n1 */
			plan->plans[workerid].in2 = _FFTW(malloc)(plan->totsize4 * sizeof(_fftw_complex));
			memset(plan->plans[workerid].in2, 0, plan->totsize4 * sizeof(_fftw_complex));
			plan->plans[workerid].out2 = _FFTW(malloc)(plan->totsize4 * sizeof(_fftw_complex));
			memset(plan->plans[workerid].out2, 0, plan->totsize4 * sizeof(_fftw_complex));
			plan->plans[workerid].plan2_cpu = _FFTW(plan_many_dft)(plan->dim,
					plan->n1, n3,
					/* input */ plan->plans[workerid].in2, NULL, 1, plan->totsize1,
					/* output */ plan->plans[workerid].out2, NULL, 1, plan->totsize1,
					sign, _FFTW_FLAGS);
			STARPU_ASSERT(plan->plans[workerid].plan2_cpu);
#else
#warning libstarpufft can not work correctly without libfftw3
#endif
			break;
		case STARPU_CUDA_WORKER:
#ifdef STARPU_USE_CUDA
			/* Perform CUFFT planning lazily. */
			plan->plans[workerid].initialized1 = 0;
			plan->plans[workerid].initialized2 = 0;
#endif
			break;
		default:
			/* Unknown worker type: cannot plan for it. */
			STARPU_ABORT();
			break;
		}
	}
	/* Allocate buffers. */
	plan->twisted1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted1));
	memset(plan->twisted1, 0, plan->totsize * sizeof(*plan->twisted1));
	plan->fft1 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft1));
	memset(plan->fft1, 0, plan->totsize * sizeof(*plan->fft1));
	plan->twisted2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->twisted2));
	memset(plan->twisted2, 0, plan->totsize * sizeof(*plan->twisted2));
	plan->fft2 = STARPUFFT(malloc)(plan->totsize * sizeof(*plan->fft2));
	memset(plan->fft2, 0, plan->totsize * sizeof(*plan->fft2));
	/* Allocate handle arrays */
	plan->twisted1_handle = malloc(plan->totsize1 * sizeof(*plan->twisted1_handle));
	plan->fft1_handle = malloc(plan->totsize1 * sizeof(*plan->fft1_handle));
	plan->twisted2_handle = malloc(plan->totsize3 * sizeof(*plan->twisted2_handle));
	plan->fft2_handle = malloc(plan->totsize3 * sizeof(*plan->fft2_handle));
	/* Allocate task arrays */
	plan->twist1_tasks = malloc(plan->totsize1 * sizeof(*plan->twist1_tasks));
	plan->fft1_tasks = malloc(plan->totsize1 * sizeof(*plan->fft1_tasks));
	plan->twist2_tasks = malloc(plan->totsize3 * sizeof(*plan->twist2_tasks));
	plan->fft2_tasks = malloc(plan->totsize3 * sizeof(*plan->fft2_tasks));
	plan->twist3_tasks = malloc(plan->totsize3 * sizeof(*plan->twist3_tasks));
	/* Allocate codelet argument arrays */
	plan->fft1_args = malloc(plan->totsize1 * sizeof(*plan->fft1_args));
	plan->fft2_args = malloc(plan->totsize3 * sizeof(*plan->fft2_args));
	/* Create first-round tasks: DIV_1D tasks of type twist1 and fft1 */
	for (z = 0; z < plan->totsize1; z++) {
		int i = z;
#define STEP_TAG(step)	STEP_TAG_1D(plan, step, i)
		plan->fft1_args[z].plan = plan;
		plan->fft1_args[z].i = i;
		/* Register the twisted1 buffer of size n2. */
		starpu_vector_data_register(&plan->twisted1_handle[z], 0, (uintptr_t) &plan->twisted1[z*plan->totsize2], plan->totsize2, sizeof(*plan->twisted1));
		/* Register the fft1 buffer of size n2. */
		starpu_vector_data_register(&plan->fft1_handle[z], 0, (uintptr_t) &plan->fft1[z*plan->totsize2], plan->totsize2, sizeof(*plan->fft1));
		/* We'll need the result of fft1 on the CPU for the second
		 * twist anyway, so tell starpu to not keep the fft1 buffer in
		 * the GPU. */
		starpu_data_set_wb_mask(plan->fft1_handle[z], 1<<0);
		/* Create twist1 task */
		plan->twist1_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist1_1d_codelet);
		//task->buffers[0].handle = to be filled at execution to point
		//to the application input.
		task->buffers[0].mode = STARPU_R;
		task->buffers[1].handle = plan->twisted1_handle[z];
		task->buffers[1].mode = STARPU_W;
		task->cl_arg = &plan->fft1_args[z];
		task->tag_id = STEP_TAG(TWIST1);
		task->use_tag = 1;
		/* detach + no destroy: tasks are kept so the plan can be
		 * resubmitted for several ffts. */
		task->detach = 1;
		task->destroy = 0;
		/* Tell that fft1 depends on twisted1 */
		starpu_tag_declare_deps(STEP_TAG(FFT1),
				1, STEP_TAG(TWIST1));
		/* Create FFT1 task */
		plan->fft1_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(fft1_1d_codelet);
		task->buffers[0].handle = plan->twisted1_handle[z];
		task->buffers[0].mode = STARPU_R;
		task->buffers[1].handle = plan->fft1_handle[z];
		task->buffers[1].mode = STARPU_W;
		task->buffers[2].handle = plan->roots_handle[0];
		task->buffers[2].mode = STARPU_R;
		task->cl_arg = &plan->fft1_args[z];
		task->tag_id = STEP_TAG(FFT1);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;
		/* Tell that the join task will depend on the fft1 task. */
		starpu_tag_declare_deps(STEP_TAG_1D(plan, JOIN, 0),
				1, STEP_TAG(FFT1));
#undef STEP_TAG
	}
	/* Create the join task, only serving as a dependency point between
	 * fft1 and twist2 tasks */
	plan->join_task = task = starpu_task_create();
	task->cl = NULL;
	task->tag_id = STEP_TAG_1D(plan, JOIN, 0);
	task->use_tag = 1;
	task->detach = 1;
	task->destroy = 0;
	/* Create second-round tasks: DIV_1D batches of n2/DIV_1D twist2, fft2,
	 * and twist3 */
	for (z = 0; z < plan->totsize3; z++) {
		int jj = z;
#define STEP_TAG(step)	STEP_TAG_1D(plan, step, jj)
		plan->fft2_args[z].plan = plan;
		plan->fft2_args[z].jj = jj;
		/* Register n3 twisted2 buffers of size n1 */
		starpu_vector_data_register(&plan->twisted2_handle[z], 0, (uintptr_t) &plan->twisted2[z*plan->totsize4], plan->totsize4, sizeof(*plan->twisted2));
		starpu_vector_data_register(&plan->fft2_handle[z], 0, (uintptr_t) &plan->fft2[z*plan->totsize4], plan->totsize4, sizeof(*plan->fft2));
		/* We'll need the result of fft2 on the CPU for the third
		 * twist anyway, so tell starpu to not keep the fft2 buffer in
		 * the GPU. */
		starpu_data_set_wb_mask(plan->fft2_handle[z], 1<<0);
		/* Tell that twisted2 depends on the join task */
		starpu_tag_declare_deps(STEP_TAG(TWIST2),
				1, STEP_TAG_1D(plan, JOIN, 0));
		/* Create twist2 task */
		plan->twist2_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist2_1d_codelet);
		task->buffers[0].handle = plan->twisted2_handle[z];
		task->buffers[0].mode = STARPU_W;
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(TWIST2);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;
		/* Tell that fft2 depends on twisted2 */
		starpu_tag_declare_deps(STEP_TAG(FFT2),
				1, STEP_TAG(TWIST2));
		/* Create FFT2 task */
		plan->fft2_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(fft2_1d_codelet);
		task->buffers[0].handle = plan->twisted2_handle[z];
		task->buffers[0].mode = STARPU_R;
		task->buffers[1].handle = plan->fft2_handle[z];
		task->buffers[1].mode = STARPU_W;
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(FFT2);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;
		/* Tell that twist3 depends on fft2 */
		starpu_tag_declare_deps(STEP_TAG(TWIST3),
				1, STEP_TAG(FFT2));
		/* Create twist3 tasks */
		/* These run only on CPUs and thus write directly into the
		 * application output buffer. */
		plan->twist3_tasks[z] = task = starpu_task_create();
		task->cl = &STARPUFFT(twist3_1d_codelet);
		task->buffers[0].handle = plan->fft2_handle[z];
		task->buffers[0].mode = STARPU_R;
		task->cl_arg = &plan->fft2_args[z];
		task->tag_id = STEP_TAG(TWIST3);
		task->use_tag = 1;
		task->detach = 1;
		task->destroy = 0;
		/* Tell that to be completely finished we need to have finished
		 * this twisted3 */
		starpu_tag_declare_deps(STEP_TAG_1D(plan, END, 0),
				1, STEP_TAG(TWIST3));
#undef STEP_TAG
	}
	/* Create end task, only serving as a join point. */
	plan->end_task = task = starpu_task_create();
	task->cl = NULL;
	task->tag_id = STEP_TAG_1D(plan, END, 0);
	task->use_tag = 1;
	task->detach = 1;
	task->destroy = 0;
	return plan;
}
  575. /* Actually submit all the tasks. */
  576. static starpu_tag_t
  577. STARPUFFT(start1dC2C)(STARPUFFT(plan) plan)
  578. {
  579. STARPU_ASSERT(plan->type == C2C);
  580. int z;
  581. for (z=0; z < plan->totsize1; z++) {
  582. starpu_task_submit(plan->twist1_tasks[z]);
  583. starpu_task_submit(plan->fft1_tasks[z]);
  584. }
  585. starpu_task_submit(plan->join_task);
  586. for (z=0; z < plan->totsize3; z++) {
  587. starpu_task_submit(plan->twist2_tasks[z]);
  588. starpu_task_submit(plan->fft2_tasks[z]);
  589. starpu_task_submit(plan->twist3_tasks[z]);
  590. }
  591. starpu_task_submit(plan->end_task);
  592. return STEP_TAG_1D(plan, END, 0);
  593. }
  594. /* Free all the tags. The generic code handles freeing the buffers. */
  595. static void
  596. STARPUFFT(free_1d_tags)(STARPUFFT(plan) plan)
  597. {
  598. unsigned i;
  599. int n1 = plan->n1[0];
  600. for (i = 0; i < n1; i++) {
  601. starpu_tag_remove(STEP_TAG_1D(plan, TWIST1, i));
  602. starpu_tag_remove(STEP_TAG_1D(plan, FFT1, i));
  603. }
  604. starpu_tag_remove(STEP_TAG_1D(plan, JOIN, 0));
  605. for (i = 0; i < DIV_1D; i++) {
  606. starpu_tag_remove(STEP_TAG_1D(plan, TWIST2, i));
  607. starpu_tag_remove(STEP_TAG_1D(plan, FFT2, i));
  608. starpu_tag_remove(STEP_TAG_1D(plan, TWIST3, i));
  609. }
  610. starpu_tag_remove(STEP_TAG_1D(plan, END, 0));
  611. }