pi_kernel.cu

/*
 * StarPU
 * Copyright (C) INRIA 2008-2010 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "pi.h"

#define BLOCK_SIZE 256

static __global__ void monte_carlo(TYPE *random_numbers_x, TYPE *random_numbers_y,
                                   unsigned n, unsigned *output_cnt)
{
    __shared__ unsigned scnt[BLOCK_SIZE];

    const int tid = threadIdx.x + blockIdx.x*blockDim.x;

    /* Blank the shared mem buffer: each thread clears its own slot */
    scnt[threadIdx.x] = 0;
    __syncthreads();

    /* Do we have a successful shot? */
    TYPE x = random_numbers_x[tid];
    TYPE y = random_numbers_y[tid];
    TYPE dist = (x*x + y*y);

    scnt[threadIdx.x] = (dist <= 1.0f);
    __syncthreads();

    /* Perform a tree reduction to sum the per-thread counts within the block */
    unsigned s;
    for (s = blockDim.x/2; s != 0; s >>= 1)
    {
        if (threadIdx.x < s)
            scnt[threadIdx.x] += scnt[threadIdx.x + s];
        __syncthreads();
    }

    /* Report the number of successful shots in the block */
    if (threadIdx.x == 0)
        output_cnt[blockIdx.x] = scnt[0];
}
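/*
 * Illustration of the tree reduction above (a hypothetical trace, not part of
 * the original file): with blockDim.x = 8 and scnt = {1,0,1,1,0,1,0,1}, the
 * passes s = 4, 2, 1 leave the low halves as {1,1,1,2}, then {2,3}, then {5},
 * so scnt[0] ends up holding the block total of 5 successful shots.
 */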
static __global__ void sum_per_block_cnt(unsigned previous_nblocks,
                                         unsigned *output_cnt,
                                         unsigned *cnt)
{
    /* XXX this is totally unoptimized: we should do a reduction!
     * (a sketch follows below) */
    if (threadIdx.x == 0)
    {
        unsigned total_cnt = 0;
        unsigned i;
        for (i = 0; i < previous_nblocks; i++)
            total_cnt += output_cnt[i];

        *cnt = total_cnt;
    }
}
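/*
 * The reduction that the XXX note above asks for might look like the sketch
 * below. This kernel is NOT part of the original file: the name
 * sum_per_block_cnt_reduce and the single-block launch geometry are
 * assumptions made for illustration. Each thread first accumulates a strided
 * slice of output_cnt into a private counter, then a shared-memory tree
 * reduction (the same pattern as in monte_carlo) combines the per-thread
 * partial sums. Launch with one block, e.g.:
 *   sum_per_block_cnt_reduce<<<1, BLOCK_SIZE>>>(nblocks, output_cnt, cnt);
 */
static __global__ void sum_per_block_cnt_reduce(unsigned previous_nblocks,
                                                unsigned *output_cnt,
                                                unsigned *cnt)
{
    __shared__ unsigned ssum[BLOCK_SIZE];

    /* Strided pass: thread t sums entries t, t + blockDim.x, ... */
    unsigned local_cnt = 0;
    unsigned i;
    for (i = threadIdx.x; i < previous_nblocks; i += blockDim.x)
        local_cnt += output_cnt[i];

    ssum[threadIdx.x] = local_cnt;
    __syncthreads();

    /* Tree reduction over the per-thread partial sums */
    unsigned s;
    for (s = blockDim.x/2; s != 0; s >>= 1)
    {
        if (threadIdx.x < s)
            ssum[threadIdx.x] += ssum[threadIdx.x + s];
        __syncthreads();
    }

    if (threadIdx.x == 0)
        *cnt = ssum[0];
}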
extern "C" void cuda_kernel(void *descr[], void *cl_arg)
{
    TYPE *random_numbers_x = (TYPE *)STARPU_GET_VECTOR_PTR(descr[0]);
    TYPE *random_numbers_y = (TYPE *)STARPU_GET_VECTOR_PTR(descr[1]);
    unsigned nx = STARPU_GET_VECTOR_NX(descr[0]);

    unsigned *cnt = (unsigned *)STARPU_GET_VECTOR_PTR(descr[2]);

    /* Check the divisibility precondition before sizing the allocation */
    assert((nx % BLOCK_SIZE) == 0);

    unsigned *per_block_cnt;
    cudaMalloc((void **)&per_block_cnt, (nx/BLOCK_SIZE)*sizeof(unsigned));

    /* Each entry of per_block_cnt contains the number of successful shots
     * in the corresponding block. */
    monte_carlo<<<nx/BLOCK_SIZE, BLOCK_SIZE>>>(random_numbers_x, random_numbers_y, nx, per_block_cnt);
    cudaThreadSynchronize();

    /* Compute the total number of successful shots by adding the elements
     * of the per_block_cnt array */
    sum_per_block_cnt<<<1, 32>>>(nx/BLOCK_SIZE, per_block_cnt, cnt);
    cudaThreadSynchronize();

    cudaFree(per_block_cnt);
}
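/*
 * Hypothetical standalone driver, NOT part of the original file (which is
 * normally driven through a StarPU codelet): it shows how the two kernels
 * above combine into a Monte Carlo estimate of pi. The guard name
 * STANDALONE_PI_DEMO is made up for this sketch, shot coordinates are drawn
 * on the host with rand() for simplicity, and TYPE comes from pi.h.
 */
#ifdef STANDALONE_PI_DEMO
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* Number of shots; must be a multiple of BLOCK_SIZE */
    const unsigned nx = 1024*BLOCK_SIZE;

    /* Draw shot coordinates uniformly in [0,1) on the host */
    TYPE *x = (TYPE *)malloc(nx*sizeof(TYPE));
    TYPE *y = (TYPE *)malloc(nx*sizeof(TYPE));
    unsigned i;
    for (i = 0; i < nx; i++)
    {
        x[i] = (TYPE)rand() / (TYPE)RAND_MAX;
        y[i] = (TYPE)rand() / (TYPE)RAND_MAX;
    }

    /* Copy them to the device */
    TYPE *d_x, *d_y;
    unsigned *d_per_block_cnt, *d_cnt;
    cudaMalloc((void **)&d_x, nx*sizeof(TYPE));
    cudaMalloc((void **)&d_y, nx*sizeof(TYPE));
    cudaMalloc((void **)&d_per_block_cnt, (nx/BLOCK_SIZE)*sizeof(unsigned));
    cudaMalloc((void **)&d_cnt, sizeof(unsigned));
    cudaMemcpy(d_x, x, nx*sizeof(TYPE), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, nx*sizeof(TYPE), cudaMemcpyHostToDevice);

    /* Count the shots that fall inside the quarter disc, then total them */
    monte_carlo<<<nx/BLOCK_SIZE, BLOCK_SIZE>>>(d_x, d_y, nx, d_per_block_cnt);
    sum_per_block_cnt<<<1, 32>>>(nx/BLOCK_SIZE, d_per_block_cnt, d_cnt);

    /* cudaMemcpy on the default stream waits for the kernels to finish */
    unsigned cnt;
    cudaMemcpy(&cnt, d_cnt, sizeof(unsigned), cudaMemcpyDeviceToHost);

    /* The quarter disc has area pi/4, so pi ~= 4 * hits / shots */
    printf("pi ~= %f\n", 4.0 * (double)cnt / (double)nx);

    cudaFree(d_x); cudaFree(d_y);
    cudaFree(d_per_block_cnt); cudaFree(d_cnt);
    free(x); free(y);
    return 0;
}
#endif /* STANDALONE_PI_DEMO */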