pi_redux.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2015, 2017 Université de Bordeaux
 * Copyright (C) 2016 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * This computes Pi by drawing random coordinates (with the erand48 functions
 * on CPU workers and the CURAND generator on CUDA devices) and checking
 * whether they fall within the unit disk inscribed in the [-1,1]^2 square.
 * The proportion of hits gives an approximation of Pi. Each task draws a
 * number of coordinates, and we gather the number of successful draws.
 *
 * This version uses a reduction to optimize gathering the number of
 * successful draws.
 */

#include <starpu.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <math.h>

#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)
#define PI 3.14159265358979323846

#if defined(STARPU_USE_CUDA) && !defined(STARPU_HAVE_CURAND)
#warning CURAND is required to run this example on CUDA devices
#endif

#ifdef STARPU_HAVE_CURAND
#include <cuda.h>
#include <curand.h>
#endif

static unsigned long long nshot_per_task = 16*1024*1024ULL;

/* default value */
static unsigned long ntasks = 1024;
static unsigned long ntasks_warmup = 0;

static unsigned use_redux = 1;
static unsigned do_warmup = 0;

/*
 * Initialization of the Random Number Generators (RNG)
 */
#ifdef STARPU_HAVE_CURAND
/* RNG for the CURAND library */
static curandGenerator_t curandgens[STARPU_NMAXWORKERS];
#endif

/* State for the erand48 functions: note the huge padding to avoid false sharing */
#define PADDING 1024
static unsigned short xsubi[STARPU_NMAXWORKERS*PADDING];
static starpu_drand48_data randbuffer[STARPU_NMAXWORKERS*PADDING];
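
/* Each worker only touches the slot at index PADDING*workerid in the two
 * arrays above, so the RNG states of different workers live far apart in
 * memory and never share a cache line. */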

/* Function to initialize the random number generator in the current worker */
static void init_rng(void *arg STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_HAVE_CURAND
        curandStatus_t res;
#endif
        int workerid = starpu_worker_get_id_check();

        switch (starpu_worker_get_type(workerid))
        {
                case STARPU_CPU_WORKER:
                case STARPU_MIC_WORKER:
                case STARPU_SCC_WORKER:
                        /* create a seed */
                        starpu_srand48_r((long int)workerid, &randbuffer[PADDING*workerid]);

                        xsubi[0 + PADDING*workerid] = (unsigned short)workerid;
                        xsubi[1 + PADDING*workerid] = (unsigned short)workerid;
                        xsubi[2 + PADDING*workerid] = (unsigned short)workerid;
                        break;
#ifdef STARPU_HAVE_CURAND
                case STARPU_CUDA_WORKER:
                        /* Create a RNG */
                        res = curandCreateGenerator(&curandgens[workerid],
                                                    CURAND_RNG_PSEUDO_DEFAULT);
                        STARPU_ASSERT(res == CURAND_STATUS_SUCCESS);

                        /* Seed it with worker's id */
                        res = curandSetPseudoRandomGeneratorSeed(curandgens[workerid],
                                                                 (unsigned long long)workerid);
                        STARPU_ASSERT(res == CURAND_STATUS_SUCCESS);
                        break;
#endif
                default:
                        STARPU_ABORT();
                        break;
        }
}

/* The amount of work does not depend on the data size at all :) */
static size_t size_base(struct starpu_task *task, unsigned nimpl)
{
        return nshot_per_task;
}

static void parse_args(int argc, char **argv)
{
        int i;
        for (i = 1; i < argc; i++)
        {
                if (strcmp(argv[i], "-ntasks") == 0)
                {
                        char *argptr;
                        ntasks = strtol(argv[++i], &argptr, 10);
                }

                if (strcmp(argv[i], "-nshot") == 0)
                {
                        char *argptr;
                        nshot_per_task = strtol(argv[++i], &argptr, 10);
                }

                if (strcmp(argv[i], "-noredux") == 0)
                {
                        use_redux = 0;
                }

                if (strcmp(argv[i], "-warmup") == 0)
                {
                        do_warmup = 1;
                        ntasks_warmup = 8; /* arbitrary number of warmup tasks */
                }

                if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0)
                {
                        fprintf(stderr, "Usage: %s [-ntasks n] [-nshot n] [-noredux] [-warmup] [-h]\n", argv[0]);
                        exit(-1);
                }
        }
}

/*
 * Monte Carlo kernel
 */
void pi_func_cpu(void *descr[], void *cl_arg STARPU_ATTRIBUTE_UNUSED)
{
        int workerid = starpu_worker_get_id_check();

        unsigned short *worker_xsub;
        worker_xsub = &xsubi[PADDING*workerid];

        starpu_drand48_data *buffer;
        buffer = &randbuffer[PADDING*workerid];

        unsigned long local_cnt = 0;

        /* Draw random coordinates in [-1,1]^2 and count those that fall
         * within the unit disk */
        unsigned long long i;
        for (i = 0; i < nshot_per_task; i++)
        {
                double randx, randy;

                starpu_erand48_r(worker_xsub, buffer, &randx);
                starpu_erand48_r(worker_xsub, buffer, &randy);

                double x = (2.0*randx - 1.0);
                double y = (2.0*randy - 1.0);

                double dist = x*x + y*y;
                if (dist < 1.0)
                        local_cnt++;
        }

        /* Put the contribution of that task into the counter */
        unsigned long *cnt = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[1]);
        *cnt = *cnt + local_cnt;
}
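
/* Note that the same CPU kernel serves both codelets defined below: depending
 * on the access mode declared for descr[1], *cnt is either the shared counter
 * itself (STARPU_RW) or a per-worker copy managed by the reduction
 * (STARPU_REDUX). */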

extern void pi_redux_cuda_kernel(float *x, float *y, unsigned n, unsigned long *shot_cnt);

#ifdef STARPU_HAVE_CURAND
static void pi_func_cuda(void *descr[], void *cl_arg STARPU_ATTRIBUTE_UNUSED)
{
        curandStatus_t res;

        int workerid = starpu_worker_get_id_check();

        /* CURAND is a bit silly: it assumes that any error is fatal. Calling
         * cudaGetLastError resets the last error value. */
        (void) cudaGetLastError();

        /* Fill the scratchpad with random numbers. Note that both the x and y
         * arrays are stored in the same vector. */
        float *scratchpad_xy = (float *)STARPU_VECTOR_GET_PTR(descr[0]);
        res = curandGenerateUniform(curandgens[workerid], scratchpad_xy, 2*nshot_per_task);
        STARPU_ASSERT(res == CURAND_STATUS_SUCCESS);

        float *x = &scratchpad_xy[0];
        float *y = &scratchpad_xy[nshot_per_task];

        unsigned long *shot_cnt = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[1]);
        pi_redux_cuda_kernel(x, y, nshot_per_task, shot_cnt);
}
#endif

static struct starpu_perfmodel pi_model =
{
        .type = STARPU_HISTORY_BASED,
        .size_base = size_base,
        .symbol = "monte_carlo_pi_scratch"
};

static struct starpu_codelet pi_cl =
{
        .cpu_funcs = {pi_func_cpu},
        .cpu_funcs_name = {"pi_func_cpu"},
#ifdef STARPU_HAVE_CURAND
        .cuda_funcs = {pi_func_cuda},
#endif
        .nbuffers = 2,
        .modes = {STARPU_SCRATCH, STARPU_RW},
        .model = &pi_model
};
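
/* With pi_cl, every task accesses the counter in STARPU_RW mode, so the
 * implicit data dependencies serialize the tasks on that variable. The first
 * buffer is a STARPU_SCRATCH temporary allocated on the worker's memory node,
 * with no data consistency, used by the CUDA implementation to store the
 * random draws. The variant below declares the counter as STARPU_REDUX
 * instead, so tasks can run concurrently on per-worker copies. */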

static struct starpu_perfmodel pi_model_redux =
{
        .type = STARPU_HISTORY_BASED,
        .size_base = size_base,
        .symbol = "monte_carlo_pi_scratch_redux"
};

static struct starpu_codelet pi_cl_redux =
{
        .cpu_funcs = {pi_func_cpu},
        .cpu_funcs_name = {"pi_func_cpu"},
#ifdef STARPU_HAVE_CURAND
        .cuda_funcs = {pi_func_cuda},
#endif
        .nbuffers = 2,
        .modes = {STARPU_SCRATCH, STARPU_REDUX},
        .model = &pi_model_redux
};

/*
 * Codelets to implement reduction
 */
void init_cpu_func(void *descr[], void *cl_arg)
{
        unsigned long *val = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[0]);
        *val = 0;
}

#ifdef STARPU_HAVE_CURAND
static void init_cuda_func(void *descr[], void *cl_arg)
{
        unsigned long *val = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[0]);
        cudaMemsetAsync(val, 0, sizeof(unsigned long), starpu_cuda_get_local_stream());
}
#endif

static struct starpu_codelet init_codelet =
{
        .cpu_funcs = {init_cpu_func},
        .cpu_funcs_name = {"init_cpu_func"},
#ifdef STARPU_HAVE_CURAND
        .cuda_funcs = {init_cuda_func},
        .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
        .modes = {STARPU_W},
        .nbuffers = 1
};
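
/* The init codelet writes the neutral element of the reduction (0) into a
 * fresh per-worker copy of the counter before that copy receives any
 * contribution in STARPU_REDUX mode. */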

#ifdef STARPU_HAVE_CURAND
/* Dummy implementation of the addition of two unsigned longs in CUDA */
static void redux_cuda_func(void *descr[], void *cl_arg)
{
        unsigned long *d_a = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[0]);
        unsigned long *d_b = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[1]);

        unsigned long h_a, h_b;

        cudaMemcpyAsync(&h_a, d_a, sizeof(h_a), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
        cudaMemcpyAsync(&h_b, d_b, sizeof(h_b), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
        cudaStreamSynchronize(starpu_cuda_get_local_stream());

        h_a += h_b;

        cudaMemcpyAsync(d_a, &h_a, sizeof(h_a), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}
#endif

void redux_cpu_func(void *descr[], void *cl_arg)
{
        unsigned long *a = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[0]);
        unsigned long *b = (unsigned long *)STARPU_VARIABLE_GET_PTR(descr[1]);

        *a = *a + *b;
}

static struct starpu_codelet redux_codelet =
{
        .cpu_funcs = {redux_cpu_func},
        .cpu_funcs_name = {"redux_cpu_func"},
#ifdef STARPU_HAVE_CURAND
        .cuda_funcs = {redux_cuda_func},
        .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
        .modes = {STARPU_RW, STARPU_R},
        .nbuffers = 2
};
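
/* The redux codelet folds the read-only contribution (descr[1]) into the
 * read-write one (descr[0]); StarPU uses it to merge the per-worker copies of
 * shot_cnt back into the application's variable. */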

/*
 * Main program
 */
int main(int argc, char **argv)
{
        unsigned long i;
        int ret;

        /* Not supported yet */
        if (starpu_get_env_number_default("STARPU_GLOBAL_ARBITER", 0) > 0)
                return 77;

        parse_args(argc, argv);

        ret = starpu_init(NULL);
        if (ret == -ENODEV) return 77;
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

        /* Initialize a Random Number Generator (RNG) on each worker */
        starpu_execute_on_each_worker(init_rng, NULL, STARPU_CPU|STARPU_CUDA);

        /* Create the scratchpad data */
        starpu_data_handle_t xy_scratchpad_handle;
        starpu_vector_data_register(&xy_scratchpad_handle, -1, (uintptr_t)NULL,
                                    2*nshot_per_task, sizeof(float));

        /* Create a variable that will be used to count the number of shots
         * that actually hit the unit disk when shooting randomly in
         * [-1,1]^2. */
        unsigned long shot_cnt = 0;
        starpu_data_handle_t shot_cnt_handle;
        starpu_variable_data_register(&shot_cnt_handle, STARPU_MAIN_RAM,
                                      (uintptr_t)&shot_cnt, sizeof(shot_cnt));

        starpu_data_set_reduction_methods(shot_cnt_handle,
                                          &redux_codelet, &init_codelet);
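
        /* The per-worker copies of shot_cnt are created and initialized
         * lazily; the reduction itself is performed once the handle is
         * accessed in a non-REDUX mode, here when it is unregistered after
         * all tasks have completed. */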

        double start;
        double end;

        for (i = 0; i < ntasks_warmup; i++)
        {
                struct starpu_task *task = starpu_task_create();

                task->cl = use_redux?&pi_cl_redux:&pi_cl;
                task->handles[0] = xy_scratchpad_handle;
                task->handles[1] = shot_cnt_handle;

                ret = starpu_task_submit(task);
                STARPU_ASSERT(!ret);
        }

        start = starpu_timing_now();

        for (i = 0; i < ntasks; i++)
        {
                struct starpu_task *task = starpu_task_create();

                task->cl = use_redux?&pi_cl_redux:&pi_cl;
                task->handles[0] = xy_scratchpad_handle;
                task->handles[1] = shot_cnt_handle;

                ret = starpu_task_submit(task);
                STARPU_ASSERT(!ret);
        }

        starpu_data_unregister(shot_cnt_handle);
        starpu_data_unregister(xy_scratchpad_handle);

        end = starpu_timing_now();
        double timing = end - start;
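
        /* starpu_timing_now() returns timestamps in microseconds, hence the
         * conversions below: timing/1000.0 gives milliseconds and
         * total/(1e3*timing) gives GShot/s. */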

        /* Area of the unit disk: Pi*r^2 = Pi*1^2; area of the [-1,1]^2
         * square: 2^2 = 4; so the probability of hitting the disk is Pi/4. */
        unsigned long total = (ntasks + ntasks_warmup)*nshot_per_task;
        double pi_approx = ((double)shot_cnt*4.0)/total;

        FPRINTF(stderr, "Reductions? %s\n", use_redux?"yes":"no");
        FPRINTF(stderr, "Pi approximation : %f (%lu / %lu)\n", pi_approx, shot_cnt, total);
        FPRINTF(stderr, "Error %e \n", pi_approx - PI);
        FPRINTF(stderr, "Total time : %f ms\n", timing/1000.0);
        FPRINTF(stderr, "Speed : %f GShot/s\n", total/(1e3*timing));

        starpu_shutdown();

        if (fabs(pi_approx - PI) > 1.0)
                return 1;

        return 0;
}