pi_redux.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2015,2017 Université de Bordeaux
 * Copyright (C) 2012,2013,2015 Inria
 * Copyright (C) 2011-2013,2016,2017,2019 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This computes Pi by drawing random coordinates in [-1,1]^2 (with per-worker
 * erand48 state on CPU workers, and CURAND on CUDA devices) and checking
 * whether they fall within the unit disk. The proportion of hits gives an
 * approximation of Pi. Each task draws a number of coordinates, and we gather
 * the number of successful draws.
 *
 * This version uses a reduction to optimize gathering the number of
 * successful draws.
 */
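/*
 * Estimator used below: points are drawn uniformly in the square [-1,1]^2
 * (area 4); a point lands in the unit disk (area Pi) with probability Pi/4,
 * so with H hits out of N draws, Pi is approximated by 4*H/N. For example,
 * 785 hits out of 1000 draws would yield the estimate 3.14.
 */
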
#include <starpu.h>
#include <stdlib.h>
#include <string.h> /* for strcmp() in parse_args() */
#include <math.h>

#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)

#define PI 3.14159265358979323846

#if defined(STARPU_USE_CUDA) && !defined(STARPU_HAVE_CURAND)
#warning CURAND is required to run this example on CUDA devices
#endif

#ifdef STARPU_HAVE_CURAND
#include <cuda.h>
#include <curand.h>
#endif

static unsigned long long nshot_per_task = 16*1024*1024ULL;

/* default value */
static unsigned long ntasks = 1024;
static unsigned long ntasks_warmup = 0;

static unsigned use_redux = 1;
static unsigned do_warmup = 0;

/*
 * Initialization of the Random Number Generators (RNG)
 */

#ifdef STARPU_HAVE_CURAND
/* RNG for the CURAND library */
static curandGenerator_t curandgens[STARPU_NMAXWORKERS];
#endif

/* State for the erand48 function: note the huge padding to avoid false sharing */
#define PADDING 1024
static unsigned short xsubi[STARPU_NMAXWORKERS*PADDING];
static starpu_drand48_data randbuffer[STARPU_NMAXWORKERS*PADDING];
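/*
 * Each worker only uses the slot at index PADDING*workerid in the arrays
 * above, so the per-worker RNG states end up far apart in memory and never
 * share a cache line; that is what the padding is for.
 */
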
/* Function to initialize the random number generator in the current worker */
static void init_rng(void *arg)
{
        (void)arg;
#ifdef STARPU_HAVE_CURAND
        curandStatus_t res;
#endif

        int workerid = starpu_worker_get_id_check();

        switch (starpu_worker_get_type(workerid))
        {
        case STARPU_CPU_WORKER:
        case STARPU_MIC_WORKER:
                /* create a seed */
                starpu_srand48_r((long int)workerid, &randbuffer[PADDING*workerid]);

                xsubi[0 + PADDING*workerid] = (unsigned short)workerid;
                xsubi[1 + PADDING*workerid] = (unsigned short)workerid;
                xsubi[2 + PADDING*workerid] = (unsigned short)workerid;
                break;
#ifdef STARPU_HAVE_CURAND
        case STARPU_CUDA_WORKER:
                /* Create a RNG */
                res = curandCreateGenerator(&curandgens[workerid],
                                            CURAND_RNG_PSEUDO_DEFAULT);
                STARPU_ASSERT(res == CURAND_STATUS_SUCCESS);

                /* Seed it with worker's id */
                res = curandSetPseudoRandomGeneratorSeed(curandgens[workerid],
                                                         (unsigned long long)workerid);
                STARPU_ASSERT(res == CURAND_STATUS_SUCCESS);
                break;
#endif
        default:
                STARPU_ABORT();
                break;
        }
}
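/* init_rng() is run once on each CPU and CUDA worker through
 * starpu_execute_on_each_worker() in main(), before any task is submitted. */
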
/* The amount of work does not depend on the data size at all :) */
static size_t size_base(struct starpu_task *task, unsigned nimpl)
{
        (void)task;
        (void)nimpl;
        return nshot_per_task;
}

static void parse_args(int argc, char **argv)
{
        int i;
        for (i = 1; i < argc; i++)
        {
                if (strcmp(argv[i], "-ntasks") == 0)
                {
                        char *argptr;
                        ntasks = strtol(argv[++i], &argptr, 10);
                }

                if (strcmp(argv[i], "-nshot") == 0)
                {
                        char *argptr;
                        /* strtoull: nshot_per_task is an unsigned long long */
                        nshot_per_task = strtoull(argv[++i], &argptr, 10);
                }

                if (strcmp(argv[i], "-noredux") == 0)
                {
                        use_redux = 0;
                }

                if (strcmp(argv[i], "-warmup") == 0)
                {
                        do_warmup = 1;
                        ntasks_warmup = 8; /* arbitrary number of warmup tasks */
                }

                if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0)
                {
                        fprintf(stderr, "Usage: %s [-ntasks n] [-nshot n] [-noredux] [-warmup] [-h]\n", argv[0]);
                        exit(-1);
                }
        }
}
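/*
 * Example invocation (binary name and values are only illustrative):
 *   ./pi_redux -ntasks 2048 -nshot 1048576 -warmup
 * runs 2048 tasks of 1048576 shots each, preceded by a few warmup tasks;
 * add -noredux to use a plain STARPU_RW counter instead of STARPU_REDUX.
 * The defaults are 1024 tasks of 16M shots each, with reductions enabled.
 */
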
/*
 * Monte-carlo kernel
 */
void pi_func_cpu(void *descr[], void *cl_arg)
{
        (void)cl_arg;
        int workerid = starpu_worker_get_id_check();

        unsigned short *worker_xsub;
        worker_xsub = &xsubi[PADDING*workerid];

        starpu_drand48_data *buffer;
        buffer = &randbuffer[PADDING*workerid];

        unsigned long local_cnt = 0;

        /* Draw random points in [-1,1]^2 and count those falling within the unit disk */
        unsigned long long i;
        for (i = 0; i < nshot_per_task; i++)
        {
                double randx, randy;

                starpu_erand48_r(worker_xsub, buffer, &randx);
                starpu_erand48_r(worker_xsub, buffer, &randy);

                double x = (2.0*randx - 1.0);
                double y = (2.0*randy - 1.0);

                double dist = x*x + y*y;
                if (dist < 1.0)
                        local_cnt++;
        }

        /* Put the contribution of that task into the counter */
        unsigned long *cnt = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[1]);
        *cnt = *cnt + local_cnt;
}
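/* Note: the CPU implementation never touches the scratch vector in descr[0];
 * it draws its points from the per-worker erand48 state instead. descr[1] is
 * the counter: a per-worker private copy when STARPU_REDUX is used, or the
 * shared variable when STARPU_RW is used. */
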
extern void pi_redux_cuda_kernel(float *x, float *y, unsigned n, unsigned long *shot_cnt);

#ifdef STARPU_HAVE_CURAND
static void pi_func_cuda(void *descr[], void *cl_arg)
{
        (void)cl_arg;
        curandStatus_t res;

        int workerid = starpu_worker_get_id_check();

        /* CURAND is a bit silly: it assumes that any error is fatal. Calling
         * cudaGetLastError resets the last error value. */
        (void) cudaGetLastError();

        /* Fill the scratchpad with random numbers. Note that both the x and y
         * arrays are stored in the same vector. */
        float *scratchpad_xy = (float *)STARPU_VECTOR_GET_PTR(descr[0]);
        res = curandGenerateUniform(curandgens[workerid], scratchpad_xy, 2*nshot_per_task);
        STARPU_ASSERT(res == CURAND_STATUS_SUCCESS);

        float *x = &scratchpad_xy[0];
        float *y = &scratchpad_xy[nshot_per_task];

        unsigned long *shot_cnt = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[1]);
        pi_redux_cuda_kernel(x, y, nshot_per_task, shot_cnt);
}
#endif

static struct starpu_perfmodel pi_model =
{
        .type = STARPU_HISTORY_BASED,
        .size_base = size_base,
        .symbol = "monte_carlo_pi_scratch"
};

static struct starpu_codelet pi_cl =
{
        .cpu_funcs = {pi_func_cpu},
        .cpu_funcs_name = {"pi_func_cpu"},
#ifdef STARPU_HAVE_CURAND
        .cuda_funcs = {pi_func_cuda},
#endif
        .nbuffers = 2,
        .modes = {STARPU_SCRATCH, STARPU_RW},
        .model = &pi_model
};

static struct starpu_perfmodel pi_model_redux =
{
        .type = STARPU_HISTORY_BASED,
        .size_base = size_base,
        .symbol = "monte_carlo_pi_scratch_redux"
};

static struct starpu_codelet pi_cl_redux =
{
        .cpu_funcs = {pi_func_cpu},
        .cpu_funcs_name = {"pi_func_cpu"},
#ifdef STARPU_HAVE_CURAND
        .cuda_funcs = {pi_func_cuda},
#endif
        .nbuffers = 2,
        .modes = {STARPU_SCRATCH, STARPU_REDUX},
        .model = &pi_model_redux
};
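/*
 * The only difference between pi_cl and pi_cl_redux is the access mode of the
 * counter: with STARPU_RW every task needs exclusive access to the same
 * variable, which serializes the updates, whereas with STARPU_REDUX each
 * worker accumulates into its own private copy (initialized by init_codelet
 * below) and the copies are combined by redux_codelet at the end.
 */
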
/*
 * Codelets to implement reduction
 */
void init_cpu_func(void *descr[], void *cl_arg)
{
        (void)cl_arg;
        unsigned long *val = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[0]);
        *val = 0;
}

#ifdef STARPU_HAVE_CURAND
static void init_cuda_func(void *descr[], void *cl_arg)
{
        (void)cl_arg;
        unsigned long *val = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[0]);
        cudaMemsetAsync(val, 0, sizeof(unsigned long), starpu_cuda_get_local_stream());
}
#endif

static struct starpu_codelet init_codelet =
{
        .cpu_funcs = {init_cpu_func},
        .cpu_funcs_name = {"init_cpu_func"},
#ifdef STARPU_HAVE_CURAND
        .cuda_funcs = {init_cuda_func},
        .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
        .modes = {STARPU_W},
        .nbuffers = 1
};

#ifdef STARPU_HAVE_CURAND
/* Dummy implementation of the addition of two unsigned longs in CUDA */
static void redux_cuda_func(void *descr[], void *cl_arg)
{
        (void)cl_arg;
        unsigned long *d_a = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[0]);
        unsigned long *d_b = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[1]);

        unsigned long h_a, h_b;

        cudaMemcpyAsync(&h_a, d_a, sizeof(h_a), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
        cudaMemcpyAsync(&h_b, d_b, sizeof(h_b), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
        cudaStreamSynchronize(starpu_cuda_get_local_stream());

        h_a += h_b;

        cudaMemcpyAsync(d_a, &h_a, sizeof(h_a), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}
#endif

void redux_cpu_func(void *descr[], void *cl_arg)
{
        (void)cl_arg;
        unsigned long *a = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[0]);
        unsigned long *b = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[1]);

        *a = *a + *b;
}

static struct starpu_codelet redux_codelet =
{
        .cpu_funcs = {redux_cpu_func},
        .cpu_funcs_name = {"redux_cpu_func"},
#ifdef STARPU_HAVE_CURAND
        .cuda_funcs = {redux_cuda_func},
        .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
        .modes = {STARPU_RW, STARPU_R},
        .nbuffers = 2
};
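/* These two codelets are attached to the counter handle with
 * starpu_data_set_reduction_methods() in main(): init_codelet zeroes each
 * per-worker copy, and redux_codelet adds its second buffer (STARPU_R) into
 * its first (STARPU_RW). */
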
/*
 * Main program
 */
int main(int argc, char **argv)
{
        unsigned long i;
        int ret;

        /* Not supported yet */
        if (starpu_get_env_number_default("STARPU_GLOBAL_ARBITER", 0) > 0)
                return 77;

        parse_args(argc, argv);

        ret = starpu_init(NULL);
        if (ret == -ENODEV) return 77;
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

        /* Initialize a Random Number Generator (RNG) on each worker */
        starpu_execute_on_each_worker(init_rng, NULL, STARPU_CPU|STARPU_CUDA);

        /* Create a scratchpad vector (no home node: it is allocated on demand) */
        starpu_data_handle_t xy_scratchpad_handle;
        starpu_vector_data_register(&xy_scratchpad_handle, -1, (uintptr_t)NULL,
                                    2*nshot_per_task, sizeof(float));

        /* Create a variable that will be used to count the number of shots
         * that actually hit the unit disk when shooting randomly in
         * [-1,1]^2. */
        unsigned long shot_cnt = 0;
        starpu_data_handle_t shot_cnt_handle;
        starpu_variable_data_register(&shot_cnt_handle, STARPU_MAIN_RAM,
                                      (uintptr_t)&shot_cnt, sizeof(shot_cnt));

        starpu_data_set_reduction_methods(shot_cnt_handle,
                                          &redux_codelet, &init_codelet);

        double start;
        double end;

        for (i = 0; i < ntasks_warmup; i++)
        {
                struct starpu_task *task = starpu_task_create();

                task->cl = use_redux?&pi_cl_redux:&pi_cl;

                task->handles[0] = xy_scratchpad_handle;
                task->handles[1] = shot_cnt_handle;

                ret = starpu_task_submit(task);
                STARPU_ASSERT(!ret);
        }

        start = starpu_timing_now();

        for (i = 0; i < ntasks; i++)
        {
                struct starpu_task *task = starpu_task_create();

                task->cl = use_redux?&pi_cl_redux:&pi_cl;

                task->handles[0] = xy_scratchpad_handle;
                task->handles[1] = shot_cnt_handle;

                ret = starpu_task_submit(task);
                STARPU_ASSERT(!ret);
        }

        starpu_data_unregister(shot_cnt_handle);
        starpu_data_unregister(xy_scratchpad_handle);
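        /* Unregistering shot_cnt_handle waits for the submitted tasks,
         * performs any pending reduction, and writes the final value back to
         * shot_cnt in main memory, so the time measured below includes that
         * synchronization. */
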
        end = starpu_timing_now();

        double timing = end - start;

        /* Surface of the disk: Pi * r^2 = Pi * 1^2 = Pi; surface of the
         * enclosing square: 2^2 = 4; probability to hit the disk: Pi/4 */
        unsigned long total = (ntasks + ntasks_warmup)*nshot_per_task;
        double pi_approx = ((double)shot_cnt*4.0)/total;

        FPRINTF(stderr, "Reductions? %s\n", use_redux?"yes":"no");
        FPRINTF(stderr, "Pi approximation: %f (%lu / %lu)\n", pi_approx, shot_cnt, total);
        FPRINTF(stderr, "Error %e\n", pi_approx - PI);
        FPRINTF(stderr, "Total time: %f ms\n", timing/1000.0);
        FPRINTF(stderr, "Speed: %f GShot/s\n", total/(1e3*timing));

        starpu_shutdown();

        if (fabs(pi_approx - PI) > 1.0)
                return 1;

        return 0;
}