pi_kernel.cu

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012 Inria
 * Copyright (C) 2010,2012,2015 CNRS
 * Copyright (C) 2010,2013-2015 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/* First draw a series of coordinates, then count how many fall inside the
 * quarter circle of radius 1: the ratio of hits to shots tends to pi/4, so
 * pi is estimated as 4 * hits / shots. */

#include "SobolQRNG/sobol_gpu.h"
#include "pi.h"

#define MAXNBLOCKS		128
#define MAXTHREADSPERBLOCK	256
static __global__ void monte_carlo(TYPE *random_numbers_x, TYPE *random_numbers_y,
				   unsigned n, unsigned *output_cnt)
{
	__shared__ unsigned scnt[MAXTHREADSPERBLOCK];

	/* Do we have a successful shot ? */
	const int tid = threadIdx.x + blockIdx.x*blockDim.x;
	const int nthreads = gridDim.x * blockDim.x;

	/* Blank the shared mem buffer */
	if (threadIdx.x < MAXTHREADSPERBLOCK)
		scnt[threadIdx.x] = 0;

	__syncthreads();

	/* Grid-stride loop: each thread accumulates its own count of shots
	 * that fall inside the quarter circle. */
	int ind;
	for (ind = tid; ind < n; ind += nthreads)
	{
		TYPE x = random_numbers_x[ind];
		TYPE y = random_numbers_y[ind];

		TYPE dist = (x*x + y*y);

		unsigned success = (dist <= 1.0f)?1:0;
		scnt[threadIdx.x] += success;
	}

	__syncthreads();

	/* Perform a reduction to compute the sum on each thread within that block */
	/* NB: We assume that the number of threads per block is a power of 2 ! */
	unsigned s;
	for (s = blockDim.x/2; s != 0; s >>= 1)
	{
		if (threadIdx.x < s)
			scnt[threadIdx.x] += scnt[threadIdx.x + s];
		__syncthreads();
	}

	/* report the number of successful shots in the block */
	if (threadIdx.x == 0)
		output_cnt[blockIdx.x] = scnt[0];

	__syncthreads();
}
static __global__ void sum_per_block_cnt(unsigned *output_cnt, unsigned *cnt)
{
	__shared__ unsigned accumulator[MAXNBLOCKS];

	/* Load the values from global mem. This kernel runs as a single block
	 * of blockDim.x threads, so every thread redundantly copies the whole
	 * per-block array (at most MAXNBLOCKS entries). */
	unsigned i;
	for (i = 0; i < blockDim.x; i++)
		accumulator[i] = output_cnt[i];

	__syncthreads();

	/* Perform a reduction in shared memory */
	unsigned s;
	for (s = blockDim.x/2; s != 0; s >>= 1)
	{
		if (threadIdx.x < s)
			accumulator[threadIdx.x] += accumulator[threadIdx.x + s];
		__syncthreads();
	}

	/* Save the result in global memory */
	if (threadIdx.x == 0)
		*cnt = accumulator[0];
}
  79. extern "C" void cuda_kernel(void *descr[], void *cl_arg)
  80. {
  81. cudaError_t cures;
  82. unsigned *directions = (unsigned *)STARPU_VECTOR_GET_PTR(descr[0]);
  83. unsigned long long *nshot_per_task = (unsigned long long *) cl_arg;
  84. unsigned nx = *nshot_per_task;
  85. /* Generate Random numbers */
  86. float *random_numbers;
  87. cudaMalloc((void **)&random_numbers, 2*nx*sizeof(float));
  88. STARPU_ASSERT(random_numbers);
  89. sobolGPU(2*nx/n_dimensions, n_dimensions, directions, random_numbers);
  90. cudaStreamSynchronize(starpu_cuda_get_local_stream());
  91. TYPE *random_numbers_x = &random_numbers[0];
  92. TYPE *random_numbers_y = &random_numbers[nx];
  93. unsigned *cnt = (unsigned *)STARPU_VECTOR_GET_PTR(descr[1]);
  94. /* How many blocks do we use ? */
  95. unsigned nblocks = 128; // TODO
  96. STARPU_ASSERT(nblocks <= MAXNBLOCKS);
  97. unsigned *per_block_cnt;
  98. cudaMalloc((void **)&per_block_cnt, nblocks*sizeof(unsigned));
  99. STARPU_ASSERT((nx % nblocks) == 0);
  100. /* How many threads per block ? At most 256, but no more threads than
  101. * there are entries to process per block. */
  102. unsigned nthread_per_block = STARPU_MIN(MAXTHREADSPERBLOCK, (nx / nblocks));
  103. /* each entry of per_block_cnt contains the number of successful shots
  104. * in the corresponding block. */
  105. monte_carlo<<<nblocks, nthread_per_block, 0, starpu_cuda_get_local_stream()>>>(random_numbers_x, random_numbers_y, nx, per_block_cnt);
  106. /* Note that we do not synchronize between kernel calls because there is an implicit serialization */
  107. /* compute the total number of successful shots by adding the elements
  108. * of the per_block_cnt array */
  109. sum_per_block_cnt<<<1, nblocks, 0, starpu_cuda_get_local_stream()>>>(per_block_cnt, cnt);
  110. cures = cudaStreamSynchronize(starpu_cuda_get_local_stream());
  111. if (cures)
  112. STARPU_CUDA_REPORT_ERROR(cures);
  113. cudaFree(per_block_cnt);
  114. cudaFree(random_numbers);
  115. }
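
/* For reference, a minimal sketch of how this entry point might be hooked into
 * a StarPU codelet on the host side. This is an assumption for illustration,
 * not code from this file: the names pi_cl, cpu_kernel and the data handles are
 * placeholders, and the actual registration lives elsewhere in the example
 * (e.g. pi.c).
 *
 *	static struct starpu_codelet pi_cl =
 *	{
 *		.cpu_funcs = {cpu_kernel},	// CPU fallback for the same computation
 *		.cuda_funcs = {cuda_kernel},	// the function defined above
 *		.nbuffers = 2,			// descr[0]: Sobol directions, descr[1]: hit counter
 *		.modes = {STARPU_R, STARPU_W}
 *	};
 *
 *	struct starpu_task *task = starpu_task_create();
 *	task->cl = &pi_cl;
 *	task->cl_arg = &nshot_per_task;		// consumed above through cl_arg
 *	task->cl_arg_size = sizeof(nshot_per_task);
 *	task->handles[0] = directions_handle;	// e.g. registered with starpu_vector_data_register()
 *	task->handles[1] = cnt_handle;
 *	starpu_task_submit(task);
 */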