starpufftx.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012 Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012, 2013, 2014 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#define PARALLEL 0

#include <math.h>
#include <unistd.h>
#include <sys/time.h>

#include <starpu.h>
#include <config.h>

#include "starpufft.h"

#ifdef STARPU_USE_CUDA
#define _externC extern
#include "cudax_kernels.h"

#if defined(FLOAT) || defined(STARPU_HAVE_CUFFTDOUBLECOMPLEX)
#  define __STARPU_USE_CUDA
#else
#  undef __STARPU_USE_CUDA
#endif
#endif

#define _FFTW_FLAGS FFTW_ESTIMATE

/* Steps for the parallel variant */
enum steps
{
	SPECIAL, TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END
};

#define NUMBER_BITS 5
#define NUMBER_SHIFT (64 - NUMBER_BITS)
#define STEP_BITS 3
#define STEP_SHIFT (NUMBER_SHIFT - STEP_BITS)

/* Tags for the steps of the parallel variant */
#define _STEP_TAG(plan, step, i) (((starpu_tag_t) plan->number << NUMBER_SHIFT) | ((starpu_tag_t)(step) << STEP_SHIFT) | (starpu_tag_t) (i))

#define I_BITS STEP_SHIFT
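/*
 * A step tag packs three fields into a 64-bit starpu_tag_t: the plan number
 * in the top NUMBER_BITS bits, the step in the next STEP_BITS bits, and the
 * per-step task index in the remaining I_BITS low bits.  For instance, for
 * plan number 1, step FFT1 (= 2) and task index 3, _STEP_TAG yields
 * ((starpu_tag_t) 1 << 59) | ((starpu_tag_t) 2 << 56) | 3.
 */
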
enum type
{
	R2C,
	C2R,
	C2C
};

static unsigned task_per_worker[STARPU_NMAXWORKERS];
static unsigned samples_per_worker[STARPU_NMAXWORKERS];
static struct timeval start, submit_tasks, end;

/*
 *
 *	The actual kernels
 *
 */

struct STARPUFFT(plan)
{
	int number;	/* uniquely identifies the plan, for starpu tags */

	int *n;
	int *n1;
	int *n2;
	int totsize;
	int totsize1;	/* Number of first-round tasks */
	int totsize2;	/* Size of first-round tasks */
	int totsize3;	/* Number of second-round tasks */
	int totsize4;	/* Size of second-round tasks */
	int dim;
	enum type type;
	int sign;

	STARPUFFT(complex) *roots[2];
	starpu_data_handle_t roots_handle[2];

	/* For each worker, we need some data */
	struct
	{
#ifdef STARPU_USE_CUDA
		/* CUFFT plans */
		cufftHandle plan1_cuda, plan2_cuda;
		/* Sequential version */
		cufftHandle plan_cuda;
#endif
#ifdef STARPU_HAVE_FFTW
		/* FFTW plans */
		_fftw_plan plan1_cpu, plan2_cpu;
		/* Sequential version */
		_fftw_plan plan_cpu;
#endif
	} plans[STARPU_NMAXWORKERS];

	/* Buffers for codelets */
	STARPUFFT(complex) *in, *twisted1, *fft1, *twisted2, *fft2, *out;

	/* corresponding starpu DSM handles */
	starpu_data_handle_t in_handle, *twisted1_handle, *fft1_handle, *twisted2_handle, *fft2_handle, out_handle;

	/* Tasks */
	struct starpu_task **twist1_tasks, **fft1_tasks, **twist2_tasks, **fft2_tasks, **twist3_tasks;
	struct starpu_task *join_task, *end_task;

	/* Arguments for tasks */
	struct STARPUFFT(args) *fft1_args, *fft2_args;
};

struct STARPUFFT(args)
{
	struct STARPUFFT(plan) *plan;
	int i, j, jj, kk, ll, *iv, *kkv;
};

static void
check_dims(STARPUFFT(plan) plan)
{
	int dim;
	for (dim = 0; dim < plan->dim; dim++)
		if (plan->n[dim] & (plan->n[dim]-1))
		{
			fprintf(stderr, "can't cope with non-power-of-2\n");
			STARPU_ABORT();
		}
}

static void
compute_roots(STARPUFFT(plan) plan)
{
	int dim, k;

	/* Compute the n-roots and m-roots of unity for twiddling */
	for (dim = 0; dim < plan->dim; dim++)
	{
		/* k-th root is cexp(sign * 2*pi*I * k / n[dim]); 4*atan(1.) == pi */
		STARPUFFT(complex) exp = (plan->sign * 2. * 4.*atan(1.)) * _Complex_I / (STARPUFFT(complex)) plan->n[dim];
		plan->roots[dim] = malloc(plan->n[dim] * sizeof(**plan->roots));
		for (k = 0; k < plan->n[dim]; k++)
			plan->roots[dim][k] = cexp(exp*k);
		starpu_vector_data_register(&plan->roots_handle[dim], STARPU_MAIN_RAM, (uintptr_t) plan->roots[dim], plan->n[dim], sizeof(**plan->roots));

#ifdef STARPU_USE_CUDA
		if (plan->n[dim] > 100000)
		{
			/* prefetch the big root array on GPUs */
			unsigned worker;
			unsigned nworkers = starpu_worker_get_count();
			for (worker = 0; worker < nworkers; worker++)
			{
				unsigned node = starpu_worker_get_memory_node(worker);
				if (starpu_worker_get_type(worker) == STARPU_CUDA_WORKER)
					starpu_data_prefetch_on_node(plan->roots_handle[dim], node, 0);
			}
		}
#endif
	}
}

/* Only CUDA capability >= 1.3 supports doubles, rule out old cards. */
#ifdef DOUBLE
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	if (starpu_worker_get_type(workerid) == STARPU_CPU_WORKER)
		return 1;
#ifdef STARPU_USE_CUDA
	{
		/* Cuda device */
		const struct cudaDeviceProp *props;
		props = starpu_cuda_get_device_properties(workerid);
		if (props->major >= 2 || props->minor >= 3)
			/* At least compute capability 1.3, supports doubles */
			return 1;
		/* Old card does not support doubles */
		return 0;
	}
#endif
	return 0;
}
#define CAN_EXECUTE .can_execute = can_execute,
#else
#define CAN_EXECUTE
#endif
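/*
 * CAN_EXECUTE is meant to be spliced into the codelet definitions of the
 * included 1D/2D implementations.  A minimal sketch of such a codelet, with
 * placeholder kernel names (fft_kernel_cpu / fft_kernel_cuda are not the
 * actual functions used below):
 *
 *	static struct starpu_codelet fft_cl =
 *	{
 *		.cpu_funcs = {fft_kernel_cpu},
 *		.cuda_funcs = {fft_kernel_cuda},
 *		CAN_EXECUTE
 *		.nbuffers = 2,
 *	};
 *
 * In the DOUBLE case this sets .can_execute so that old CUDA devices are
 * filtered out at scheduling time; otherwise it expands to nothing.
 */
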
#include "starpufftx1d.c"
#include "starpufftx2d.c"

struct starpu_task *
STARPUFFT(start)(STARPUFFT(plan) plan, void *_in, void *_out)
{
	struct starpu_task *task;
	int z;

	plan->in = _in;
	plan->out = _out;

	switch (plan->dim)
	{
		case 1:
		{
			switch (plan->type)
			{
				case C2C:
					starpu_vector_data_register(&plan->in_handle, STARPU_MAIN_RAM, (uintptr_t) plan->in, plan->totsize, sizeof(STARPUFFT(complex)));
					if (!PARALLEL)
						starpu_vector_data_register(&plan->out_handle, STARPU_MAIN_RAM, (uintptr_t) plan->out, plan->totsize, sizeof(STARPUFFT(complex)));
					if (PARALLEL)
					{
						for (z = 0; z < plan->totsize1; z++)
							plan->twist1_tasks[z]->handles[0] = plan->in_handle;
					}
					task = STARPUFFT(start1dC2C)(plan, plan->in_handle, plan->out_handle);
					break;
				default:
					STARPU_ABORT();
					break;
			}
			break;
		}
		case 2:
			starpu_vector_data_register(&plan->in_handle, STARPU_MAIN_RAM, (uintptr_t) plan->in, plan->totsize, sizeof(STARPUFFT(complex)));
			if (!PARALLEL)
				starpu_vector_data_register(&plan->out_handle, STARPU_MAIN_RAM, (uintptr_t) plan->out, plan->totsize, sizeof(STARPUFFT(complex)));
			if (PARALLEL)
			{
				for (z = 0; z < plan->totsize1; z++)
					plan->twist1_tasks[z]->handles[0] = plan->in_handle;
			}
			task = STARPUFFT(start2dC2C)(plan, plan->in_handle, plan->out_handle);
			break;
		default:
			STARPU_ABORT();
			break;
	}
	return task;
}

void
STARPUFFT(cleanup)(STARPUFFT(plan) plan)
{
	if (plan->in_handle)
		starpu_data_unregister(plan->in_handle);
	if (!PARALLEL)
	{
		if (plan->out_handle)
			starpu_data_unregister(plan->out_handle);
	}
}

struct starpu_task *
STARPUFFT(start_handle)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	return STARPUFFT(start1dC2C)(plan, in, out);
}

int
STARPUFFT(execute)(STARPUFFT(plan) plan, void *in, void *out)
{
	int ret;

	memset(task_per_worker, 0, sizeof(task_per_worker));
	memset(samples_per_worker, 0, sizeof(samples_per_worker));

	gettimeofday(&start, NULL);

	struct starpu_task *task = STARPUFFT(start)(plan, in, out);
	gettimeofday(&submit_tasks, NULL);
	if (task)
	{
		ret = starpu_task_wait(task);
		STARPU_ASSERT(ret == 0);
	}
	STARPUFFT(cleanup)(plan);

	gettimeofday(&end, NULL);

	return (task == NULL ? -1 : 0);
}

int
STARPUFFT(execute_handle)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
	int ret;

	struct starpu_task *task = STARPUFFT(start_handle)(plan, in, out);
	if (!task) return -1;

	ret = starpu_task_wait(task);
	STARPU_ASSERT(ret == 0);
	return 0;
}
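/*
 * Typical use of the blocking interface, as a sketch (plan_dft_1d is
 * provided by the included starpufftx1d.c; the size and the FFTW-style
 * sign value -1 below are just example values):
 *
 *	STARPUFFT(plan) p = STARPUFFT(plan_dft_1d)(1 << 16, -1, 0);
 *	STARPUFFT(complex) *x = STARPUFFT(malloc)((1 << 16) * sizeof(*x));
 *	STARPUFFT(complex) *y = STARPUFFT(malloc)((1 << 16) * sizeof(*y));
 *	... fill x ...
 *	STARPUFFT(execute)(p, x, y);
 *	STARPUFFT(destroy_plan)(p);
 *	STARPUFFT(free)(x);
 *	STARPUFFT(free)(y);
 */
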
/* Destroy FFTW plans, unregister and free buffers, and free tags */
void
STARPUFFT(destroy_plan)(STARPUFFT(plan) plan)
{
	int workerid, dim, i;

	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
	{
		switch (starpu_worker_get_type(workerid))
		{
			case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
				if (PARALLEL)
				{
					_FFTW(destroy_plan)(plan->plans[workerid].plan1_cpu);
					_FFTW(destroy_plan)(plan->plans[workerid].plan2_cpu);
				}
				else
				{
					_FFTW(destroy_plan)(plan->plans[workerid].plan_cpu);
				}
#endif
				break;
			case STARPU_CUDA_WORKER:
#ifdef STARPU_USE_CUDA
				/* FIXME: Can't deallocate */
#endif
				break;
			default:
				/* Do not care, we won't be executing anything there. */
				break;
		}
	}

	if (PARALLEL)
	{
		for (i = 0; i < plan->totsize1; i++)
		{
			starpu_data_unregister(plan->twisted1_handle[i]);
			free(plan->twist1_tasks[i]);
			starpu_data_unregister(plan->fft1_handle[i]);
			free(plan->fft1_tasks[i]);
		}

		free(plan->twisted1_handle);
		free(plan->twist1_tasks);
		free(plan->fft1_handle);
		free(plan->fft1_tasks);
		free(plan->fft1_args);

		free(plan->join_task);

		for (i = 0; i < plan->totsize3; i++)
		{
			starpu_data_unregister(plan->twisted2_handle[i]);
			free(plan->twist2_tasks[i]);
			starpu_data_unregister(plan->fft2_handle[i]);
			free(plan->fft2_tasks[i]);
			free(plan->twist3_tasks[i]);
		}

		free(plan->end_task);

		free(plan->twisted2_handle);
		free(plan->twist2_tasks);
		free(plan->fft2_handle);
		free(plan->fft2_tasks);
		free(plan->twist3_tasks);
		free(plan->fft2_args);

		for (dim = 0; dim < plan->dim; dim++)
		{
			starpu_data_unregister(plan->roots_handle[dim]);
			free(plan->roots[dim]);
		}

		switch (plan->dim)
		{
			case 1:
				STARPUFFT(free_1d_tags)(plan);
				break;
			case 2:
				STARPUFFT(free_2d_tags)(plan);
				break;
			default:
				STARPU_ABORT();
				break;
		}

		free(plan->n1);
		free(plan->n2);
		STARPUFFT(free)(plan->twisted1);
		STARPUFFT(free)(plan->fft1);
		STARPUFFT(free)(plan->twisted2);
		STARPUFFT(free)(plan->fft2);
	}
	free(plan->n);
	free(plan);
}
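/*
 * Allocation helpers.  When CUDA support is compiled in, buffers go through
 * starpu_malloc() so that they are pinned, which lets StarPU overlap GPU
 * data transfers with computation; otherwise FFTW's allocator (for proper
 * SIMD alignment) or plain malloc() is used.
 */
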
void *
STARPUFFT(malloc)(size_t n)
{
#ifdef STARPU_USE_CUDA
	void *res;
	starpu_malloc(&res, n);
	return res;
#else
#  ifdef STARPU_HAVE_FFTW
	return _FFTW(malloc)(n);
#  else
	return malloc(n);
#  endif
#endif
}

void
STARPUFFT(free)(void *p)
{
#ifdef STARPU_USE_CUDA
	starpu_free(p);
#else
#  ifdef STARPU_HAVE_FFTW
	_FFTW(free)(p);
#  else
	free(p);
#  endif
#endif
}

void
STARPUFFT(showstats)(FILE *out)
{
	int worker;
	unsigned total;

#define TIMING(begin,end) (double)((end.tv_sec - begin.tv_sec)*1000000 + (end.tv_usec - begin.tv_usec))
#define MSTIMING(begin,end) (TIMING(begin,end)/1000.)
	double paratiming = TIMING(start,end);
	fprintf(out, "Tasks submission took %2.2f ms\n", MSTIMING(start,submit_tasks));
	fprintf(out, "Tasks termination took %2.2f ms\n", MSTIMING(submit_tasks,end));
	fprintf(out, "Total %2.2f ms\n", MSTIMING(start,end));

	for (worker = 0, total = 0; worker < starpu_worker_get_count(); worker++)
		total += task_per_worker[worker];

	for (worker = 0; worker < starpu_worker_get_count(); worker++)
	{
		if (task_per_worker[worker])
		{
			char name[32];
			starpu_worker_get_name(worker, name, sizeof(name));

			unsigned long bytes = sizeof(STARPUFFT(complex))*samples_per_worker[worker];

			fprintf(stderr, "\t%s -> %2.2f MB\t%2.2f\tMB/s\t%u %2.2f %%\n", name, (1.0*bytes)/(1024*1024), bytes/paratiming, task_per_worker[worker], (100.0*task_per_worker[worker])/total);
		}
	}
}

#ifdef STARPU_USE_CUDA
void
STARPUFFT(report_error)(const char *func, const char *file, int line, cufftResult status)
{
	char *errormsg;
	switch (status)
	{
		case CUFFT_SUCCESS:
			errormsg = "success"; /* It'd be weird to get here. */
			break;
		case CUFFT_INVALID_PLAN:
			errormsg = "invalid plan";
			break;
		case CUFFT_ALLOC_FAILED:
			errormsg = "alloc failed";
			break;
		case CUFFT_INVALID_TYPE:
			errormsg = "invalid type";
			break;
		case CUFFT_INVALID_VALUE:
			errormsg = "invalid value";
			break;
		case CUFFT_INTERNAL_ERROR:
			errormsg = "internal error";
			break;
		case CUFFT_EXEC_FAILED:
			errormsg = "exec failed";
			break;
		case CUFFT_SETUP_FAILED:
			errormsg = "setup failed";
			break;
		case CUFFT_INVALID_SIZE:
			errormsg = "invalid size";
			break;
		case CUFFT_UNALIGNED_DATA:
			errormsg = "unaligned data";
			break;
		default:
			errormsg = "unknown error";
			break;
	}
	fprintf(stderr, "oops in %s (%s:%d)... %d: %s\n",
		func, file, line, status, errormsg);
	STARPU_ABORT();
}
#endif /* STARPU_USE_CUDA */