/* starpufftx.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #define PARALLEL 0
  18. #include <math.h>
  19. #include <pthread.h>
  20. #include <unistd.h>
  21. #include <sys/time.h>
  22. #include <starpu.h>
  23. #include <config.h>
  24. #include "starpufft.h"
  25. #ifdef STARPU_USE_CUDA
  26. #define _externC extern
  27. #include "cudax_kernels.h"
  28. #if defined(FLOAT) || defined(STARPU_HAVE_CUFFTDOUBLECOMPLEX)
  29. # define __STARPU_USE_CUDA
  30. #else
  31. # undef __STARPU_USE_CUDA
  32. #endif
  33. #endif
/* Planning flags passed to FFTW when building CPU sub-plans. */
#define _FFTW_FLAGS FFTW_ESTIMATE
/* Steps for the parallel variant */
enum steps
{
	SPECIAL, TWIST1, FFT1, JOIN, TWIST2, FFT2, TWIST3, END
};
/* Bit layout of the 64-bit starpu_tag_t used by the parallel variant:
 * [ plan number : NUMBER_BITS | step : STEP_BITS | task index : I_BITS ] */
#define NUMBER_BITS 5
#define NUMBER_SHIFT (64 - NUMBER_BITS)
#define STEP_BITS 3
#define STEP_SHIFT (NUMBER_SHIFT - STEP_BITS)
/* Tags for the steps of the parallel variant */
#define _STEP_TAG(plan, step, i) (((starpu_tag_t) plan->number << NUMBER_SHIFT) | ((starpu_tag_t)(step) << STEP_SHIFT) | (starpu_tag_t) (i))
#define I_BITS STEP_SHIFT
/* Kind of transform performed by a plan. */
enum type
{
	R2C,
	C2R,
	C2C
};
/* Per-worker statistics, reset by STARPUFFT(execute) and reported by
 * STARPUFFT(showstats). */
static unsigned task_per_worker[STARPU_NMAXWORKERS];
static unsigned samples_per_worker[STARPU_NMAXWORKERS];
/* Timestamps taken around task submission/termination for showstats. */
static struct timeval start, submit_tasks, end;
  56. /*
  57. *
  58. * The actual kernels
  59. *
  60. */
/* A plan describes one FFT problem: its dimensions, the decomposition into
 * two rounds of smaller FFTs (parallel variant), the per-worker FFTW/CUFFT
 * sub-plans, and the StarPU handles/tasks used to run it. */
struct STARPUFFT(plan)
{
	int number; /* uniquely identifies the plan, for starpu tags */
	int *n;     /* size of each dimension (plan->dim entries) */
	int *n1;    /* first-round factor of each dimension */
	int *n2;    /* second-round factor of each dimension */
	int totsize;  /* total number of samples (product of n[]) */
	int totsize1; /* Number of first-round tasks */
	int totsize2; /* Size of first-round tasks */
	int totsize3; /* Number of second-round tasks */
	int totsize4; /* Size of second-round tasks */
	int dim;      /* dimensionality: 1 or 2 (see STARPUFFT(start)) */
	enum type type; /* R2C / C2R / C2C */
	int sign;       /* transform sign, used when computing twiddle roots */
	/* Roots of unity used for twiddling, one array per dimension. */
	STARPUFFT(complex) *roots[2];
	starpu_data_handle_t roots_handle[2];
	/* For each worker, we need some data */
	struct
	{
#ifdef STARPU_USE_CUDA
		/* CUFFT plans */
		cufftHandle plan1_cuda, plan2_cuda;
		/* Sequential version */
		cufftHandle plan_cuda;
#endif
#ifdef STARPU_HAVE_FFTW
		/* FFTW plans */
		_fftw_plan plan1_cpu, plan2_cpu;
		/* Sequential version */
		_fftw_plan plan_cpu;
#endif
	} plans[STARPU_NMAXWORKERS];
	/* Buffers for codelets */
	STARPUFFT(complex) *in, *twisted1, *fft1, *twisted2, *fft2, *out;
	/* corresponding starpu DSM handles */
	starpu_data_handle_t in_handle, *twisted1_handle, *fft1_handle, *twisted2_handle, *fft2_handle, out_handle;
	/* Tasks */
	struct starpu_task **twist1_tasks, **fft1_tasks, **twist2_tasks, **fft2_tasks, **twist3_tasks;
	struct starpu_task *join_task, *end_task;
	/* Arguments for tasks */
	struct STARPUFFT(args) *fft1_args, *fft2_args;
};
/* Per-task argument block passed to the FFT codelets. */
struct STARPUFFT(args)
{
	struct STARPUFFT(plan) *plan;
	int i, j, jj, kk, ll, *iv, *kkv;
};
  108. static void
  109. check_dims(STARPUFFT(plan) plan)
  110. {
  111. int dim;
  112. for (dim = 0; dim < plan->dim; dim++)
  113. if (plan->n[dim] & (plan->n[dim]-1))
  114. {
  115. fprintf(stderr,"can't cope with non-power-of-2\n");
  116. STARPU_ABORT();
  117. }
  118. }
  119. static void
  120. compute_roots(STARPUFFT(plan) plan)
  121. {
  122. int dim, k;
  123. /* Compute the n-roots and m-roots of unity for twiddling */
  124. for (dim = 0; dim < plan->dim; dim++)
  125. {
  126. STARPUFFT(complex) exp = (plan->sign * 2. * 4.*atan(1.)) * _Complex_I / (STARPUFFT(complex)) plan->n[dim];
  127. plan->roots[dim] = malloc(plan->n[dim] * sizeof(**plan->roots));
  128. for (k = 0; k < plan->n[dim]; k++)
  129. plan->roots[dim][k] = cexp(exp*k);
  130. starpu_vector_data_register(&plan->roots_handle[dim], 0, (uintptr_t) plan->roots[dim], plan->n[dim], sizeof(**plan->roots));
  131. #ifdef STARPU_USE_CUDA
  132. if (plan->n[dim] > 100000)
  133. {
  134. /* prefetch the big root array on GPUs */
  135. unsigned worker;
  136. unsigned nworkers = starpu_worker_get_count();
  137. for (worker = 0; worker < nworkers; worker++)
  138. {
  139. unsigned node = starpu_worker_get_memory_node(worker);
  140. if (starpu_worker_get_type(worker) == STARPU_CUDA_WORKER)
  141. starpu_data_prefetch_on_node(plan->roots_handle[dim], node, 0);
  142. }
  143. }
  144. #endif
  145. }
  146. }
  147. /* Only CUDA capability >= 1.3 supports doubles, rule old card out. */
  148. #ifdef DOUBLE
  149. static int can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl) {
  150. if (starpu_worker_get_type(workerid) == STARPU_CPU_WORKER)
  151. return 1;
  152. #ifdef STARPU_USE_CUDA
  153. {
  154. /* Cuda device */
  155. const struct cudaDeviceProp *props;
  156. props = starpu_cuda_get_device_properties(workerid);
  157. if (props->major >= 2 || props->minor >= 3)
  158. /* At least compute capability 1.3, supports doubles */
  159. return 1;
  160. /* Old card does not support doubles */
  161. return 0;
  162. }
  163. #endif
  164. return 0;
  165. }
  166. #define CAN_EXECUTE .can_execute = can_execute,
  167. #else
  168. #define CAN_EXECUTE
  169. #endif
  170. #include "starpufftx1d.c"
  171. #include "starpufftx2d.c"
/* Register the raw in/out buffers with StarPU and launch the transform.
 * Returns the terminating task of the submitted graph; the caller waits on
 * it (see STARPUFFT(execute)).  Only C2C is supported for now.
 * NOTE(review): PARALLEL is a compile-time 0 here, so only the !PARALLEL
 * branches (registering out_handle, no twist1 task wiring) are live. */
struct starpu_task *
STARPUFFT(start)(STARPUFFT(plan) plan, void *_in, void *_out)
{
	struct starpu_task *task;
	int z;
	plan->in = _in;
	plan->out = _out;
	switch (plan->dim)
	{
		case 1:
		{
			switch (plan->type)
			{
				case C2C:
					/* Expose the user buffers to StarPU as flat complex vectors. */
					starpu_vector_data_register(&plan->in_handle, 0, (uintptr_t) plan->in, plan->totsize, sizeof(STARPUFFT(complex)));
					if (!PARALLEL)
						starpu_vector_data_register(&plan->out_handle, 0, (uintptr_t) plan->out, plan->totsize, sizeof(STARPUFFT(complex)));
					if (PARALLEL)
					{
						/* First-round twist tasks all read the whole input. */
						for (z = 0; z < plan->totsize1; z++)
							plan->twist1_tasks[z]->handles[0] = plan->in_handle;
					}
					task = STARPUFFT(start1dC2C)(plan, plan->in_handle, plan->out_handle);
					break;
				default:
					STARPU_ABORT();
					break;
			}
			break;
		}
		case 2:
			starpu_vector_data_register(&plan->in_handle, 0, (uintptr_t) plan->in, plan->totsize, sizeof(STARPUFFT(complex)));
			if (!PARALLEL)
				starpu_vector_data_register(&plan->out_handle, 0, (uintptr_t) plan->out, plan->totsize, sizeof(STARPUFFT(complex)));
			if (PARALLEL)
			{
				for (z = 0; z < plan->totsize1; z++)
					plan->twist1_tasks[z]->handles[0] = plan->in_handle;
			}
			task = STARPUFFT(start2dC2C)(plan, plan->in_handle, plan->out_handle);
			break;
		default:
			STARPU_ABORT();
			break;
	}
	return task;
}
  219. void
  220. STARPUFFT(cleanup)(STARPUFFT(plan) plan)
  221. {
  222. if (plan->in_handle)
  223. starpu_data_unregister(plan->in_handle);
  224. if (!PARALLEL)
  225. {
  226. if (plan->out_handle)
  227. starpu_data_unregister(plan->out_handle);
  228. }
  229. }
  230. struct starpu_task *
  231. STARPUFFT(start_handle)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
  232. {
  233. return STARPUFFT(start1dC2C)(plan, in, out);
  234. }
  235. void
  236. STARPUFFT(execute)(STARPUFFT(plan) plan, void *in, void *out)
  237. {
  238. memset(task_per_worker, 0, sizeof(task_per_worker));
  239. memset(samples_per_worker, 0, sizeof(task_per_worker));
  240. gettimeofday(&start, NULL);
  241. struct starpu_task *task = STARPUFFT(start)(plan, in, out);
  242. gettimeofday(&submit_tasks, NULL);
  243. starpu_task_wait(task);
  244. STARPUFFT(cleanup)(plan);
  245. gettimeofday(&end, NULL);
  246. }
  247. void
  248. STARPUFFT(execute_handle)(STARPUFFT(plan) plan, starpu_data_handle_t in, starpu_data_handle_t out)
  249. {
  250. struct starpu_task *task = STARPUFFT(start_handle)(plan, in, out);
  251. starpu_task_wait(task);
  252. }
/* Destroy FFTW plans, unregister and free buffers, and free tags */
void
STARPUFFT(destroy_plan)(STARPUFFT(plan) plan)
{
	int workerid, dim, i;
	/* First tear down the per-worker FFTW sub-plans. */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
	{
		switch (starpu_worker_get_type(workerid))
		{
			case STARPU_CPU_WORKER:
#ifdef STARPU_HAVE_FFTW
				if (PARALLEL)
				{
					_FFTW(destroy_plan)(plan->plans[workerid].plan1_cpu);
					_FFTW(destroy_plan)(plan->plans[workerid].plan2_cpu);
				}
				else
				{
					_FFTW(destroy_plan)(plan->plans[workerid].plan_cpu);
				}
#endif
				break;
			case STARPU_CUDA_WORKER:
#ifdef STARPU_USE_CUDA
				/* FIXME: Can't deallocate */
#endif
				break;
			default:
				/* Do not care, we won't be executing anything there. */
				break;
		}
	}
	/* The remaining resources only exist in the parallel variant
	 * (PARALLEL is 0 here, so this whole section is currently dead). */
	if (PARALLEL)
	{
		/* First-round handles/tasks. */
		for (i = 0; i < plan->totsize1; i++)
		{
			starpu_data_unregister(plan->twisted1_handle[i]);
			free(plan->twist1_tasks[i]);
			starpu_data_unregister(plan->fft1_handle[i]);
			free(plan->fft1_tasks[i]);
		}
		free(plan->twisted1_handle);
		free(plan->twist1_tasks);
		free(plan->fft1_handle);
		free(plan->fft1_tasks);
		free(plan->fft1_args);
		free(plan->join_task);
		/* Second-round handles/tasks. */
		for (i = 0; i < plan->totsize3; i++)
		{
			starpu_data_unregister(plan->twisted2_handle[i]);
			free(plan->twist2_tasks[i]);
			starpu_data_unregister(plan->fft2_handle[i]);
			free(plan->fft2_tasks[i]);
			free(plan->twist3_tasks[i]);
		}
		free(plan->end_task);
		free(plan->twisted2_handle);
		free(plan->twist2_tasks);
		free(plan->fft2_handle);
		free(plan->fft2_tasks);
		free(plan->twist3_tasks);
		free(plan->fft2_args);
		/* Twiddle roots (see compute_roots). */
		for (dim = 0; dim < plan->dim; dim++)
		{
			starpu_data_unregister(plan->roots_handle[dim]);
			free(plan->roots[dim]);
		}
		/* Release the starpu tags allocated per dimension. */
		switch (plan->dim)
		{
			case 1:
				STARPUFFT(free_1d_tags)(plan);
				break;
			case 2:
				STARPUFFT(free_2d_tags)(plan);
				break;
			default:
				STARPU_ABORT();
				break;
		}
		free(plan->n1);
		free(plan->n2);
		/* Intermediate buffers were allocated with STARPUFFT(malloc). */
		STARPUFFT(free)(plan->twisted1);
		STARPUFFT(free)(plan->fft1);
		STARPUFFT(free)(plan->twisted2);
		STARPUFFT(free)(plan->fft2);
	}
	free(plan->n);
	free(plan);
}
  342. void *
  343. STARPUFFT(malloc)(size_t n)
  344. {
  345. #ifdef STARPU_USE_CUDA
  346. void *res;
  347. starpu_malloc(&res, n);
  348. return res;
  349. #else
  350. # ifdef STARPU_HAVE_FFTW
  351. return _FFTW(malloc)(n);
  352. # else
  353. return malloc(n);
  354. # endif
  355. #endif
  356. }
/* Release a buffer obtained from STARPUFFT(malloc), using the matching
 * deallocator for whichever backend performed the allocation. */
void
STARPUFFT(free)(void *p)
{
#ifdef STARPU_USE_CUDA
	starpu_free(p);
#else
#  ifdef STARPU_HAVE_FFTW
	_FFTW(free)(p);
#  else
	free(p);
#  endif
#endif
}
  370. void
  371. STARPUFFT(showstats)(FILE *out)
  372. {
  373. int worker;
  374. unsigned total;
  375. #define TIMING(begin,end) (double)((end.tv_sec - begin.tv_sec)*1000000 + (end.tv_usec - begin.tv_usec))
  376. #define MSTIMING(begin,end) (TIMING(begin,end)/1000.)
  377. double paratiming = TIMING(start,end);
  378. fprintf(out, "Tasks submission took %2.2f ms\n", MSTIMING(start,submit_tasks));
  379. fprintf(out, "Tasks termination took %2.2f ms\n", MSTIMING(submit_tasks,end));
  380. fprintf(out, "Total %2.2f ms\n", MSTIMING(start,end));
  381. for (worker = 0, total = 0; worker < starpu_worker_get_count(); worker++)
  382. total += task_per_worker[worker];
  383. for (worker = 0; worker < starpu_worker_get_count(); worker++)
  384. {
  385. if (task_per_worker[worker])
  386. {
  387. char name[32];
  388. starpu_worker_get_name(worker, name, sizeof(name));
  389. unsigned long bytes = sizeof(STARPUFFT(complex))*samples_per_worker[worker];
  390. fprintf(stderr, "\t%s -> %2.2f MB\t%2.2f\tMB/s\t%u %2.2f %%\n", name, (1.0*bytes)/(1024*1024), bytes/paratiming, task_per_worker[worker], (100.0*task_per_worker[worker])/total);
  391. }
  392. }
  393. }