/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012 Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#define PARALLEL 0

#include <math.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/time.h>

#include <starpu.h>
#include <config.h>

#include "starpufft.h"

#ifdef STARPU_USE_CUDA
#define _externC extern
#include "cudax_kernels.h"

#if defined(FLOAT) || defined(STARPU_HAVE_CUFFTDOUBLECOMPLEX)
# define __STARPU_USE_CUDA
#else
# undef __STARPU_USE_CUDA
#endif
#endif

#define _FFTW_FLAGS FFTW_ESTIMATE

/* Steps for the parallel variant */
enum steps
{
        SPECIAL, TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END
};

#define NUMBER_BITS 5
#define NUMBER_SHIFT (64 - NUMBER_BITS)
#define STEP_BITS 3
#define STEP_SHIFT (NUMBER_SHIFT - STEP_BITS)

/* Tags for the steps of the parallel variant */
#define _STEP_TAG(plan, step, i) (((starpu_tag_t) plan->number << NUMBER_SHIFT) | ((starpu_tag_t)(step) << STEP_SHIFT) | (starpu_tag_t) (i))

#define I_BITS STEP_SHIFT
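
/*
 * Resulting 64-bit tag layout, derived from the shifts above:
 *
 *        bits 63..59   bits 58..56   bits 55..0
 *       +------------+-------------+------------+
 *       |   number   |    step     |     i      |
 *       |   5 bits   |   3 bits    |  56 bits   |
 *       +------------+-------------+------------+
 *
 * e.g. _STEP_TAG(plan, FFT1, i) evaluates to
 * ((starpu_tag_t) plan->number << 59) | ((starpu_tag_t) FFT1 << 56) | i.
 */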

enum type
{
        R2C,
        C2R,
        C2C
};

static unsigned task_per_worker[STARPU_NMAXWORKERS];
static unsigned samples_per_worker[STARPU_NMAXWORKERS];
static struct timeval start, submit_tasks, end;

/*
 *
 *      The actual kernels
 *
 */

struct STARPUFFT(plan)
{
        int number;     /* uniquely identifies the plan, for starpu tags */

        int *n;
        int *n1;
        int *n2;
        int totsize;
        int totsize1;   /* Number of first-round tasks */
        int totsize2;   /* Size of first-round tasks */
        int totsize3;   /* Number of second-round tasks */
        int totsize4;   /* Size of second-round tasks */
        int dim;
        enum type type;
        int sign;

        STARPUFFT(complex) *roots[2];
        starpu_data_handle_t roots_handle[2];

        /* For each worker, we need some data */
        struct
        {
#ifdef STARPU_USE_CUDA
                /* CUFFT plans */
                cufftHandle plan1_cuda, plan2_cuda;
                /* Sequential version */
                cufftHandle plan_cuda;
#endif
#ifdef STARPU_HAVE_FFTW
                /* FFTW plans */
                _fftw_plan plan1_cpu, plan2_cpu;
                /* Sequential version */
                _fftw_plan plan_cpu;
#endif
        } plans[STARPU_NMAXWORKERS];

        /* Buffers for codelets */
        STARPUFFT(complex) *in, *twisted1, *fft1, *twisted2, *fft2, *out;

        /* corresponding starpu DSM handles */
        starpu_data_handle_t in_handle, *twisted1_handle, *fft1_handle, *twisted2_handle, *fft2_handle, out_handle;

        /* Tasks */
        struct starpu_task **twist1_tasks, **fft1_tasks, **twist2_tasks, **fft2_tasks, **twist3_tasks;
        struct starpu_task *join_task, *end_task;

        /* Arguments for tasks */
        struct STARPUFFT(args) *fft1_args, *fft2_args;
};
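
/*
 * Rough data flow for the parallel variant: the user input `in' is split
 * into the `twisted1' chunks (TWIST1 tasks), transformed into `fft1' (FFT1
 * tasks), regrouped into the `twisted2' chunks (TWIST2 tasks), transformed
 * into `fft2' (FFT2 tasks), and finally written back to `out' (TWIST3
 * tasks). The *_handle fields are the StarPU data handles registered for
 * these buffers, and the *_tasks arrays hold one task per chunk.
 */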

struct STARPUFFT(args)
{
        struct STARPUFFT(plan) *plan;
        int i, j, jj, kk, ll, *iv, *kkv;
};

static void
check_dims(STARPUFFT(plan) plan)
{
        int dim;
        for (dim = 0; dim < plan->dim; dim++)
                if (plan->n[dim] & (plan->n[dim]-1))
                {
                        fprintf(stderr, "can't cope with non-power-of-2\n");
                        STARPU_ABORT();
                }
}

static void
compute_roots(STARPUFFT(plan) plan)
{
        int dim, k;

        /* Compute the n-roots and m-roots of unity for twiddling */
        for (dim = 0; dim < plan->dim; dim++)
        {
                STARPUFFT(complex) exp = (plan->sign * 2. * 4.*atan(1.)) * _Complex_I / (STARPUFFT(complex)) plan->n[dim];
                plan->roots[dim] = malloc(plan->n[dim] * sizeof(**plan->roots));
                for (k = 0; k < plan->n[dim]; k++)
                        plan->roots[dim][k] = cexp(exp*k);
                starpu_vector_data_register(&plan->roots_handle[dim], 0, (uintptr_t) plan->roots[dim], plan->n[dim], sizeof(**plan->roots));

#ifdef STARPU_USE_CUDA
                if (plan->n[dim] > 100000)
                {
                        /* prefetch the big root array on GPUs */
                        unsigned worker;
                        unsigned nworkers = starpu_worker_get_count();
                        for (worker = 0; worker < nworkers; worker++)
                        {
                                unsigned node = starpu_worker_get_memory_node(worker);
                                if (starpu_worker_get_type(worker) == STARPU_CUDA_WORKER)
                                        starpu_data_prefetch_on_node(plan->roots_handle[dim], node, 0);
                        }
                }
#endif
        }
}
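
/*
 * For reference, the array computed above is
 *      roots[dim][k] = cexp(plan->sign * 2*pi*I * k / n[dim]),
 * i.e. the k-th n[dim]-th root of unity (4.*atan(1.) == pi); the twist
 * kernels use these values as twiddle factors.
 */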

/* Only CUDA capability >= 1.3 supports doubles, rule old cards out. */
#ifdef DOUBLE
static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
        if (starpu_worker_get_type(workerid) == STARPU_CPU_WORKER)
                return 1;
#ifdef STARPU_USE_CUDA
        {
                /* CUDA device */
                const struct cudaDeviceProp *props;
                props = starpu_cuda_get_device_properties(workerid);
                if (props->major >= 2 || props->minor >= 3)
                        /* At least compute capability 1.3, supports doubles */
                        return 1;
                /* Old card does not support doubles */
                return 0;
        }
#endif
        return 0;
}
#define CAN_EXECUTE .can_execute = can_execute,
#else
#define CAN_EXECUTE
#endif
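
/*
 * CAN_EXECUTE is meant to be spliced into the codelet definitions of the
 * starpufftx1d.c/starpufftx2d.c files included below; in the
 * double-precision build it expands to a designated initializer, roughly
 * (the codelet name and other fields here are only illustrative):
 *
 *      static struct starpu_codelet some_fft_codelet =
 *      {
 *              CAN_EXECUTE
 *              .cpu_funcs = { ... },
 *              .cuda_funcs = { ... },
 *              .nbuffers = ...,
 *      };
 *
 * so that tasks are only dispatched to CUDA devices that handle doubles.
 */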

#include "starpufftx1d.c"
#include "starpufftx2d.c"

struct starpu_task *
STARPUFFT(start)(STARPUFFT(plan) plan, void *_in, void *_out)
{
        struct starpu_task *task;
        int z;

        plan->in = _in;
        plan->out = _out;

        switch (plan->dim)
        {
        case 1:
        {
                switch (plan->type)
                {
                case C2C:
                        starpu_vector_data_register(&plan->in_handle, 0, (uintptr_t) plan->in, plan->totsize, sizeof(STARPUFFT(complex)));
                        if (!PARALLEL)
                                starpu_vector_data_register(&plan->out_handle, 0, (uintptr_t) plan->out, plan->totsize, sizeof(STARPUFFT(complex)));
                        if (PARALLEL)
                        {
                                for (z = 0; z < plan->totsize1; z++)
                                        plan->twist1_tasks[z]->handles[0] = plan->in_handle;
                        }
                        task = STARPUFFT(start1dC2C)(plan, plan->in_handle, plan->out_handle);
                        break;
                default:
                        STARPU_ABORT();
                        break;
                }
                break;
        }
        case 2:
                starpu_vector_data_register(&plan->in_handle, 0, (uintptr_t) plan->in, plan->totsize, sizeof(STARPUFFT(complex)));
                if (!PARALLEL)
                        starpu_vector_data_register(&plan->out_handle, 0, (uintptr_t) plan->out, plan->totsize, sizeof(STARPUFFT(complex)));
                if (PARALLEL)
                {
                        for (z = 0; z < plan->totsize1; z++)
                                plan->twist1_tasks[z]->handles[0] = plan->in_handle;
                }
                task = STARPUFFT(start2dC2C)(plan, plan->in_handle, plan->out_handle);
                break;
        default:
                STARPU_ABORT();
                break;
        }
        return task;
}

void
STARPUFFT(cleanup)(STARPUFFT(plan) plan)
{
        if (plan->in_handle)
                starpu_data_unregister(plan->in_handle);
        if (!PARALLEL)
        {
                if (plan->out_handle)
                        starpu_data_unregister(plan->out_handle);
        }
}

struct starpu_task *
STARPUFFT(start_handle)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
        return STARPUFFT(start1dC2C)(plan, in, out);
}

int
STARPUFFT(execute)(STARPUFFT(plan) plan, void *in, void *out)
{
        int ret;

        memset(task_per_worker, 0, sizeof(task_per_worker));
        memset(samples_per_worker, 0, sizeof(samples_per_worker));

        gettimeofday(&start, NULL);

        struct starpu_task *task = STARPUFFT(start)(plan, in, out);
        gettimeofday(&submit_tasks, NULL);
        if (task)
        {
                ret = starpu_task_wait(task);
                STARPU_ASSERT(ret == 0);
        }
        STARPUFFT(cleanup)(plan);

        gettimeofday(&end, NULL);

        return (task == NULL ? -1 : 0);
}

int
STARPUFFT(execute_handle)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
{
        int ret;

        struct starpu_task *task = STARPUFFT(start_handle)(plan, in, out);
        if (!task)
                return -1;

        ret = starpu_task_wait(task);
        STARPU_ASSERT(ret == 0);
        return 0;
}
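
/*
 * Illustrative usage sketch (not part of this file): how an application
 * would typically drive the interface above, assuming the double-precision
 * entry points declared in starpufft.h (starpufft_plan_dft_1d(),
 * starpufft_execute(), starpufft_destroy_plan(), starpufft_malloc(),
 * starpufft_free() and the STARPUFFT_FORWARD sign constant):
 *
 *      #include <starpu.h>
 *      #include <starpufft.h>
 *
 *      int n = 1 << 16;
 *      starpu_init(NULL);
 *      starpufft_complex *in  = starpufft_malloc(n * sizeof(*in));
 *      starpufft_complex *out = starpufft_malloc(n * sizeof(*out));
 *      ... fill in[] with samples ...
 *      starpufft_plan p = starpufft_plan_dft_1d(n, STARPUFFT_FORWARD, 0);
 *      starpufft_execute(p, in, out);    blocks until the FFT tasks complete
 *      starpufft_destroy_plan(p);
 *      starpufft_free(in);
 *      starpufft_free(out);
 *      starpu_shutdown();
 */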

/* Destroy FFTW plans, unregister and free buffers, and free tags */
void
STARPUFFT(destroy_plan)(STARPUFFT(plan) plan)
{
        int workerid, dim, i;

        for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
        {
                switch (starpu_worker_get_type(workerid))
                {
                case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
                        if (PARALLEL)
                        {
                                _FFTW(destroy_plan)(plan->plans[workerid].plan1_cpu);
                                _FFTW(destroy_plan)(plan->plans[workerid].plan2_cpu);
                        }
                        else
                        {
                                _FFTW(destroy_plan)(plan->plans[workerid].plan_cpu);
                        }
#endif
                        break;
                case STARPU_CUDA_WORKER:
#ifdef STARPU_USE_CUDA
                        /* FIXME: Can't deallocate */
#endif
                        break;
                default:
                        /* Do not care, we won't be executing anything there. */
                        break;
                }
        }

        if (PARALLEL)
        {
                for (i = 0; i < plan->totsize1; i++)
                {
                        starpu_data_unregister(plan->twisted1_handle[i]);
                        free(plan->twist1_tasks[i]);
                        starpu_data_unregister(plan->fft1_handle[i]);
                        free(plan->fft1_tasks[i]);
                }
                free(plan->twisted1_handle);
                free(plan->twist1_tasks);
                free(plan->fft1_handle);
                free(plan->fft1_tasks);
                free(plan->fft1_args);

                free(plan->join_task);

                for (i = 0; i < plan->totsize3; i++)
                {
                        starpu_data_unregister(plan->twisted2_handle[i]);
                        free(plan->twist2_tasks[i]);
                        starpu_data_unregister(plan->fft2_handle[i]);
                        free(plan->fft2_tasks[i]);
                        free(plan->twist3_tasks[i]);
                }
                free(plan->end_task);

                free(plan->twisted2_handle);
                free(plan->twist2_tasks);
                free(plan->fft2_handle);
                free(plan->fft2_tasks);
                free(plan->twist3_tasks);
                free(plan->fft2_args);

                for (dim = 0; dim < plan->dim; dim++)
                {
                        starpu_data_unregister(plan->roots_handle[dim]);
                        free(plan->roots[dim]);
                }

                switch (plan->dim)
                {
                case 1:
                        STARPUFFT(free_1d_tags)(plan);
                        break;
                case 2:
                        STARPUFFT(free_2d_tags)(plan);
                        break;
                default:
                        STARPU_ABORT();
                        break;
                }

                free(plan->n1);
                free(plan->n2);
                STARPUFFT(free)(plan->twisted1);
                STARPUFFT(free)(plan->fft1);
                STARPUFFT(free)(plan->twisted2);
                STARPUFFT(free)(plan->fft2);
        }

        free(plan->n);
        free(plan);
}

void *
STARPUFFT(malloc)(size_t n)
{
#ifdef STARPU_USE_CUDA
        void *res;
        starpu_malloc(&res, n);
        return res;
#else
# ifdef STARPU_HAVE_FFTW
        return _FFTW(malloc)(n);
# else
        return malloc(n);
# endif
#endif
}

void
STARPUFFT(free)(void *p)
{
#ifdef STARPU_USE_CUDA
        starpu_free(p);
#else
# ifdef STARPU_HAVE_FFTW
        _FFTW(free)(p);
# else
        free(p);
# endif
#endif
}
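
/*
 * Note: with CUDA enabled these wrappers go through starpu_malloc() and
 * starpu_free(), which allocate memory suitable for fast CPU<->GPU
 * transfers (pinned when possible), so application buffers handed to
 * STARPUFFT(execute) are best allocated with STARPUFFT(malloc).
 */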

void
STARPUFFT(showstats)(FILE *out)
{
        int worker;
        unsigned total;

#define TIMING(begin,end) (double)((end.tv_sec - begin.tv_sec)*1000000 + (end.tv_usec - begin.tv_usec))
#define MSTIMING(begin,end) (TIMING(begin,end)/1000.)
        double paratiming = TIMING(start,end);
        fprintf(out, "Tasks submission took %2.2f ms\n", MSTIMING(start,submit_tasks));
        fprintf(out, "Tasks termination took %2.2f ms\n", MSTIMING(submit_tasks,end));
        fprintf(out, "Total %2.2f ms\n", MSTIMING(start,end));

        for (worker = 0, total = 0; worker < starpu_worker_get_count(); worker++)
                total += task_per_worker[worker];

        for (worker = 0; worker < starpu_worker_get_count(); worker++)
        {
                if (task_per_worker[worker])
                {
                        char name[32];
                        starpu_worker_get_name(worker, name, sizeof(name));

                        unsigned long bytes = sizeof(STARPUFFT(complex))*samples_per_worker[worker];

                        fprintf(stderr, "\t%s -> %2.2f MB\t%2.2f\tMB/s\t%u %2.2f %%\n", name, (1.0*bytes)/(1024*1024), bytes/paratiming, task_per_worker[worker], (100.0*task_per_worker[worker])/total);
                }
        }
}

#ifdef STARPU_USE_CUDA
void
STARPUFFT(report_error)(const char *func, const char *file, int line, cufftResult status)
{
        char *errormsg;
        switch (status)
        {
        case CUFFT_SUCCESS:
                errormsg = "success"; /* It'd be weird to get here. */
                break;
        case CUFFT_INVALID_PLAN:
                errormsg = "invalid plan";
                break;
        case CUFFT_ALLOC_FAILED:
                errormsg = "alloc failed";
                break;
        case CUFFT_INVALID_TYPE:
                errormsg = "invalid type";
                break;
        case CUFFT_INVALID_VALUE:
                errormsg = "invalid value";
                break;
        case CUFFT_INTERNAL_ERROR:
                errormsg = "internal error";
                break;
        case CUFFT_EXEC_FAILED:
                errormsg = "exec failed";
                break;
        case CUFFT_SETUP_FAILED:
                errormsg = "setup failed";
                break;
        case CUFFT_INVALID_SIZE:
                errormsg = "invalid size";
                break;
        case CUFFT_UNALIGNED_DATA:
                errormsg = "unaligned data";
                break;
        default:
                errormsg = "unknown error";
                break;
        }
        fprintf(stderr, "oops in %s (%s:%d)... %d: %s\n",
                func, file, line, status, errormsg);
        STARPU_ABORT();
}
#endif /* !STARPU_USE_CUDA */