starpufftx.c
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012, 2014  Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013, 2014  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#define PARALLEL 0

#include <math.h>
#include <unistd.h>
#include <sys/time.h>

#include <starpu.h>
#include <config.h>

#include "starpufft.h"

#ifdef STARPU_USE_CUDA
#define _externC extern
#include "cudax_kernels.h"

#if defined(FLOAT) || defined(STARPU_HAVE_CUFFTDOUBLECOMPLEX)
# define __STARPU_USE_CUDA
#else
# undef __STARPU_USE_CUDA
#endif
#endif

#define _FFTW_FLAGS FFTW_ESTIMATE

/* Steps for the parallel variant */
enum steps
{
	SPECIAL, TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END
};

#define NUMBER_BITS 5
#define NUMBER_SHIFT (64 - NUMBER_BITS)
#define STEP_BITS 3
#define STEP_SHIFT (NUMBER_SHIFT - STEP_BITS)

/* Tags for the steps of the parallel variant */
#define _STEP_TAG(plan, step, i) (((starpu_tag_t) plan->number << NUMBER_SHIFT) | ((starpu_tag_t)(step) << STEP_SHIFT) | (starpu_tag_t) (i))

#define I_BITS STEP_SHIFT
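
/*
 * Layout of a tag (illustrative sketch derived from the macros above):
 * on a 64-bit starpu_tag_t, the plan number occupies the top NUMBER_BITS
 * (bits 63-59), the step the next STEP_BITS (bits 58-56), and the task
 * index the remaining I_BITS (bits 55-0):
 *
 *    63       59 58    56 55                             0
 *   +-----------+--------+-------------------------------+
 *   |  number   |  step  |               i               |
 *   +-----------+--------+-------------------------------+
 *
 * For example, with plan->number == 2, _STEP_TAG(plan, FFT1, 4) evaluates
 * to ((starpu_tag_t) 2 << 59) | ((starpu_tag_t) FFT1 << 56) | 4.
 */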
enum type
{
	R2C,
	C2R,
	C2C
};

static unsigned task_per_worker[STARPU_NMAXWORKERS];
static unsigned samples_per_worker[STARPU_NMAXWORKERS];
static struct timeval start, submit_tasks, end;

/*
 *
 *	The actual kernels
 *
 */

struct STARPUFFT(plan)
{
	int number;	/* uniquely identifies the plan, for starpu tags */

	int *n;
	int *n1;
	int *n2;
	int totsize;
	int totsize1;	/* Number of first-round tasks */
	int totsize2;	/* Size of first-round tasks */
	int totsize3;	/* Number of second-round tasks */
	int totsize4;	/* Size of second-round tasks */
	int dim;
	enum type type;
	int sign;

	STARPUFFT(complex) *roots[2];
	starpu_data_handle_t roots_handle[2];

	/* For each worker, we need some data */
	struct
	{
#ifdef STARPU_USE_CUDA
		/* CUFFT plans */
		cufftHandle plan1_cuda, plan2_cuda;
		/* Sequential version */
		cufftHandle plan_cuda;
#endif
#ifdef STARPU_HAVE_FFTW
		/* FFTW plans */
		_fftw_plan plan1_cpu, plan2_cpu;
		/* Sequential version */
		_fftw_plan plan_cpu;
#endif
	} plans[STARPU_NMAXWORKERS];

	/* Buffers for codelets */
	STARPUFFT(complex) *in, *twisted1, *fft1, *twisted2, *fft2, *out;

	/* corresponding starpu DSM handles */
	starpu_data_handle_t in_handle, *twisted1_handle, *fft1_handle, *twisted2_handle, *fft2_handle, out_handle;

	/* Tasks */
	struct starpu_task **twist1_tasks, **fft1_tasks, **twist2_tasks, **fft2_tasks, **twist3_tasks;
	struct starpu_task *join_task, *end_task;

	/* Arguments for tasks */
	struct STARPUFFT(args) *fft1_args, *fft2_args;
};

struct STARPUFFT(args)
{
	struct STARPUFFT(plan) *plan;
	int i, j, jj, kk, ll, *iv, *kkv;
};

static void
check_dims(STARPUFFT(plan) plan)
{
	int dim;
	for (dim = 0; dim < plan->dim; dim++)
		if (plan->n[dim] & (plan->n[dim]-1))
		{
			fprintf(stderr, "can't cope with non-power-of-2\n");
			STARPU_ABORT();
		}
}

static void
compute_roots(STARPUFFT(plan) plan)
{
	int dim, k;

	/* Compute the n-roots and m-roots of unity for twiddling */
	for (dim = 0; dim < plan->dim; dim++)
	{
		STARPUFFT(complex) exp = (plan->sign * 2. * 4.*atan(1.)) * _Complex_I / (STARPUFFT(complex)) plan->n[dim];
		plan->roots[dim] = malloc(plan->n[dim] * sizeof(**plan->roots));
		for (k = 0; k < plan->n[dim]; k++)
			plan->roots[dim][k] = cexp(exp*k);
		starpu_vector_data_register(&plan->roots_handle[dim], STARPU_MAIN_RAM, (uintptr_t) plan->roots[dim], plan->n[dim], sizeof(**plan->roots));

#ifdef STARPU_USE_CUDA
		if (plan->n[dim] > 100000)
		{
			/* prefetch the big root array on GPUs */
			unsigned worker;
			unsigned nworkers = starpu_worker_get_count();
			for (worker = 0; worker < nworkers; worker++)
			{
				unsigned node = starpu_worker_get_memory_node(worker);
				if (starpu_worker_get_type(worker) == STARPU_CUDA_WORKER)
					starpu_data_prefetch_on_node(plan->roots_handle[dim], node, 0);
			}
		}
#endif
	}
}
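
/*
 * (Note, a sketch of the math above: since 4*atan(1) is pi, the factor
 * `exp` is sign * 2*pi*i / n[dim], so roots[dim][k] ends up holding
 * exp(sign * 2*pi*i * k / n[dim]), the k-th n[dim]-th root of unity used
 * for twiddling between the two FFT rounds.)
 */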
/* Only CUDA capability >= 1.3 supports doubles; rule old cards out. */
#ifdef DOUBLE
static int can_execute(unsigned workerid, struct starpu_task *task STARPU_ATTRIBUTE_UNUSED, unsigned nimpl STARPU_ATTRIBUTE_UNUSED)
{
	if (starpu_worker_get_type(workerid) == STARPU_CPU_WORKER)
		return 1;
#ifdef STARPU_USE_CUDA
	{
		/* Cuda device */
		const struct cudaDeviceProp *props;
		props = starpu_cuda_get_device_properties(workerid);
		if (props->major >= 2 || props->minor >= 3)
			/* At least compute capability 1.3, supports doubles */
			return 1;
		/* Old card does not support doubles */
		return 0;
	}
#endif
	return 0;
}
#define CAN_EXECUTE .can_execute = can_execute,
#else
#define CAN_EXECUTE
#endif
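
/*
 * CAN_EXECUTE is meant to be spliced into the codelet initializers of the
 * included starpufftx1d.c/starpufftx2d.c files, along the lines of this
 * sketch (the codelet name and the other fields are illustrative only):
 *
 *	static struct starpu_codelet fft_codelet =
 *	{
 *		CAN_EXECUTE
 *		.cpu_funcs = {...},
 *		...
 *	};
 *
 * so that, in the DOUBLE case, the scheduler skips CUDA devices that
 * cannot run double-precision kernels.
 */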
#include "starpufftx1d.c"
#include "starpufftx2d.c"

struct starpu_task *
STARPUFFT(start)(STARPUFFT(plan) plan, void *_in, void *_out)
{
	struct starpu_task *task;
	int z;

	plan->in = _in;
	plan->out = _out;

	switch (plan->dim)
	{
	case 1:
	{
		switch (plan->type)
		{
		case C2C:
			starpu_vector_data_register(&plan->in_handle, STARPU_MAIN_RAM, (uintptr_t) plan->in, plan->totsize, sizeof(STARPUFFT(complex)));
			if (!PARALLEL)
				starpu_vector_data_register(&plan->out_handle, STARPU_MAIN_RAM, (uintptr_t) plan->out, plan->totsize, sizeof(STARPUFFT(complex)));
			if (PARALLEL)
			{
				for (z = 0; z < plan->totsize1; z++)
					plan->twist1_tasks[z]->handles[0] = plan->in_handle;
			}
			task = STARPUFFT(start1dC2C)(plan, plan->in_handle, plan->out_handle);
			break;
		default:
			STARPU_ABORT();
			break;
		}
		break;
	}
	case 2:
		starpu_vector_data_register(&plan->in_handle, STARPU_MAIN_RAM, (uintptr_t) plan->in, plan->totsize, sizeof(STARPUFFT(complex)));
		if (!PARALLEL)
			starpu_vector_data_register(&plan->out_handle, STARPU_MAIN_RAM, (uintptr_t) plan->out, plan->totsize, sizeof(STARPUFFT(complex)));
		if (PARALLEL)
		{
			for (z = 0; z < plan->totsize1; z++)
				plan->twist1_tasks[z]->handles[0] = plan->in_handle;
		}
		task = STARPUFFT(start2dC2C)(plan, plan->in_handle, plan->out_handle);
		break;
	default:
		STARPU_ABORT();
		break;
	}
	return task;
}

void
STARPUFFT(cleanup)(STARPUFFT(plan) plan)
{
	if (plan->in_handle)
		starpu_data_unregister(plan->in_handle);
	if (!PARALLEL)
	{
		if (plan->out_handle)
			starpu_data_unregister(plan->out_handle);
	}
}

struct starpu_task *
STARPUFFT(start_handle)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	return STARPUFFT(start1dC2C)(plan, in, out);
}

int
STARPUFFT(execute)(STARPUFFT(plan) plan, void *in, void *out)
{
	int ret;

	memset(task_per_worker, 0, sizeof(task_per_worker));
	memset(samples_per_worker, 0, sizeof(samples_per_worker));
	gettimeofday(&start, NULL);

	struct starpu_task *task = STARPUFFT(start)(plan, in, out);

	gettimeofday(&submit_tasks, NULL);

	if (task)
	{
		ret = starpu_task_wait(task);
		STARPU_ASSERT(ret == 0);
	}

	STARPUFFT(cleanup)(plan);

	gettimeofday(&end, NULL);

	return (task == NULL ? -1 : 0);
}
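
/*
 * Typical call sequence from an application (a sketch; the plan
 * constructors such as STARPUFFT(plan_dft_1d) live in the included
 * starpufftx1d.c, and the sign follows the FFTW convention, -1 being a
 * forward transform):
 *
 *	STARPUFFT(plan) p = STARPUFFT(plan_dft_1d)(n, -1, 0);
 *	STARPUFFT(complex) *in = STARPUFFT(malloc)(n * sizeof(*in));
 *	STARPUFFT(complex) *out = STARPUFFT(malloc)(n * sizeof(*out));
 *	... fill in[] ...
 *	STARPUFFT(execute)(p, in, out);
 *	STARPUFFT(destroy_plan)(p);
 *	STARPUFFT(free)(in);
 *	STARPUFFT(free)(out);
 */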
int
STARPUFFT(execute_handle)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	int ret;

	struct starpu_task *task = STARPUFFT(start_handle)(plan, in, out);
	if (!task) return -1;

	ret = starpu_task_wait(task);
	STARPU_ASSERT(ret == 0);
	return 0;
}

/* Destroy FFTW plans, unregister and free buffers, and free tags */
void
STARPUFFT(destroy_plan)(STARPUFFT(plan) plan)
{
	unsigned workerid;
	int dim, i;

	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
	{
		switch (starpu_worker_get_type(workerid))
		{
		case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
			if (PARALLEL)
			{
				_FFTW(destroy_plan)(plan->plans[workerid].plan1_cpu);
				_FFTW(destroy_plan)(plan->plans[workerid].plan2_cpu);
			}
			else
			{
				_FFTW(destroy_plan)(plan->plans[workerid].plan_cpu);
			}
#endif
			break;
		case STARPU_CUDA_WORKER:
#ifdef STARPU_USE_CUDA
			/* FIXME: Can't deallocate */
#endif
			break;
		default:
			/* Do not care, we won't be executing anything there. */
			break;
		}
	}

	if (PARALLEL)
	{
		for (i = 0; i < plan->totsize1; i++)
		{
			starpu_data_unregister(plan->twisted1_handle[i]);
			free(plan->twist1_tasks[i]);
			starpu_data_unregister(plan->fft1_handle[i]);
			free(plan->fft1_tasks[i]);
		}

		free(plan->twisted1_handle);
		free(plan->twist1_tasks);
		free(plan->fft1_handle);
		free(plan->fft1_tasks);
		free(plan->fft1_args);

		free(plan->join_task);

		for (i = 0; i < plan->totsize3; i++)
		{
			starpu_data_unregister(plan->twisted2_handle[i]);
			free(plan->twist2_tasks[i]);
			starpu_data_unregister(plan->fft2_handle[i]);
			free(plan->fft2_tasks[i]);
			free(plan->twist3_tasks[i]);
		}

		free(plan->end_task);

		free(plan->twisted2_handle);
		free(plan->twist2_tasks);
		free(plan->fft2_handle);
		free(plan->fft2_tasks);
		free(plan->twist3_tasks);
		free(plan->fft2_args);

		for (dim = 0; dim < plan->dim; dim++)
		{
			starpu_data_unregister(plan->roots_handle[dim]);
			free(plan->roots[dim]);
		}

		switch (plan->dim)
		{
		case 1:
			STARPUFFT(free_1d_tags)(plan);
			break;
		case 2:
			STARPUFFT(free_2d_tags)(plan);
			break;
		default:
			STARPU_ABORT();
			break;
		}

		free(plan->n1);
		free(plan->n2);
		STARPUFFT(free)(plan->twisted1);
		STARPUFFT(free)(plan->fft1);
		STARPUFFT(free)(plan->twisted2);
		STARPUFFT(free)(plan->fft2);
	}

	free(plan->n);
	free(plan);
}

void *
STARPUFFT(malloc)(size_t n)
{
#ifdef STARPU_USE_CUDA
	void *res;
	starpu_malloc(&res, n);
	return res;
#else
# ifdef STARPU_HAVE_FFTW
	return _FFTW(malloc)(n);
# else
	return malloc(n);
# endif
#endif
}

void
STARPUFFT(free)(void *p)
{
#ifdef STARPU_USE_CUDA
	starpu_free(p);
#else
# ifdef STARPU_HAVE_FFTW
	_FFTW(free)(p);
# else
	free(p);
# endif
#endif
}

void
STARPUFFT(showstats)(FILE *out)
{
	unsigned worker;
	unsigned total;

#define TIMING(begin,end) (double)((end.tv_sec - begin.tv_sec)*1000000 + (end.tv_usec - begin.tv_usec))
#define MSTIMING(begin,end) (TIMING(begin,end)/1000.)
	double paratiming = TIMING(start,end);

	fprintf(out, "Tasks submission took %2.2f ms\n", MSTIMING(start,submit_tasks));
	fprintf(out, "Tasks termination took %2.2f ms\n", MSTIMING(submit_tasks,end));
	fprintf(out, "Total %2.2f ms\n", MSTIMING(start,end));

	for (worker = 0, total = 0; worker < starpu_worker_get_count(); worker++)
		total += task_per_worker[worker];

	for (worker = 0; worker < starpu_worker_get_count(); worker++)
	{
		if (task_per_worker[worker])
		{
			char name[32];
			starpu_worker_get_name(worker, name, sizeof(name));

			unsigned long bytes = sizeof(STARPUFFT(complex))*samples_per_worker[worker];

			fprintf(stderr, "\t%s -> %2.2f MB\t%2.2f\tMB/s\t%u %2.2f %%\n", name, (1.0*bytes)/(1024*1024), bytes/paratiming, task_per_worker[worker], (100.0*task_per_worker[worker])/total);
		}
	}
}

#ifdef STARPU_USE_CUDA
void
STARPUFFT(report_error)(const char *func, const char *file, int line, cufftResult status)
{
	char *errormsg;
	switch (status)
	{
	case CUFFT_SUCCESS:
		errormsg = "success"; /* It'd be weird to get here. */
		break;
	case CUFFT_INVALID_PLAN:
		errormsg = "invalid plan";
		break;
	case CUFFT_ALLOC_FAILED:
		errormsg = "alloc failed";
		break;
	case CUFFT_INVALID_TYPE:
		errormsg = "invalid type";
		break;
	case CUFFT_INVALID_VALUE:
		errormsg = "invalid value";
		break;
	case CUFFT_INTERNAL_ERROR:
		errormsg = "internal error";
		break;
	case CUFFT_EXEC_FAILED:
		errormsg = "exec failed";
		break;
	case CUFFT_SETUP_FAILED:
		errormsg = "setup failed";
		break;
	case CUFFT_INVALID_SIZE:
		errormsg = "invalid size";
		break;
	case CUFFT_UNALIGNED_DATA:
		errormsg = "unaligned data";
		break;
	default:
		errormsg = "unknown error";
		break;
	}
	fprintf(stderr, "oops in %s (%s:%d)... %d: %s\n",
			func, file, line, status, errormsg);
	STARPU_ABORT();
}
#endif /* STARPU_USE_CUDA */