cg_dot_kernel.cu

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2015 Université de Bordeaux
 * Copyright (C) 2010, 2012, 2015 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include "cg.h"

#define MAXNBLOCKS		128
#define MAXTHREADSPERBLOCK	256
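/* MAXNBLOCKS bounds the number of blocks launched by dot_device, and thus
 * the size of the per-block partial-sum array that gather_dot_device
 * reduces; MAXTHREADSPERBLOCK bounds the per-block shared buffer scnt. */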
/*
 * Dot product kernel
 *
 * dot_device first computes partial dot products in parallel, one per
 * block, and gather_dot_device then reduces those partial values into a
 * single result.
 */
static __global__ void dot_device(TYPE *vx, TYPE *vy, unsigned n, TYPE *dot_array)
{
	__shared__ TYPE scnt[MAXTHREADSPERBLOCK];

	const int tid = threadIdx.x + blockIdx.x*blockDim.x;
	const int nthreads = gridDim.x * blockDim.x;

	/* Blank the shared-memory buffer: blockDim.x is at most
	 * MAXTHREADSPERBLOCK, so each thread clears its own slot. */
	scnt[threadIdx.x] = (TYPE)0.0;

	__syncthreads();

	/* Each thread accumulates a strided slice of the element-wise
	 * products. */
	int ind;
	for (ind = tid; ind < n; ind += nthreads)
	{
		TYPE x = vx[ind];
		TYPE y = vy[ind];

		scnt[threadIdx.x] += (x*y);
	}

	__syncthreads();

	/* Perform a tree reduction to compute the sum over all threads
	 * within this block. NB: we assume that the number of threads per
	 * block is a power of 2! */
	unsigned s;
	for (s = blockDim.x/2; s != 0; s >>= 1)
	{
		if (threadIdx.x < s)
			scnt[threadIdx.x] += scnt[threadIdx.x + s];

		__syncthreads();
	}

	/* Thread 0 writes this block's partial dot product to global
	 * memory. */
	if (threadIdx.x == 0)
		dot_array[blockIdx.x] = scnt[0];
}
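/* Illustration of the reduction above: with blockDim.x = 8, the loop runs
 * for s = 4, 2, 1:
 *   s = 4:  scnt[0..3] += scnt[4..7]
 *   s = 2:  scnt[0..1] += scnt[2..3]
 *   s = 1:  scnt[0]    += scnt[1]
 * leaving the block-wide sum in scnt[0]. This is why blockDim.x must be a
 * power of 2: with an odd width, the trailing element of a pass would be
 * silently dropped. */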
static __global__ void gather_dot_device(TYPE *dot_array, TYPE *dot)
{
	__shared__ TYPE accumulator[MAXNBLOCKS];

	/* Load the per-block partial sums from global memory. This kernel
	 * runs as a single block with one thread per entry, so each thread
	 * fetches its own value. */
	accumulator[threadIdx.x] = dot_array[threadIdx.x];

	__syncthreads();

	/* Perform a tree reduction in shared memory; the number of threads
	 * (i.e. the number of blocks used by dot_device) is again assumed
	 * to be a power of 2. */
	unsigned s;
	for (s = blockDim.x/2; s != 0; s >>= 1)
	{
		if (threadIdx.x < s)
			accumulator[threadIdx.x] += accumulator[threadIdx.x + s];

		__syncthreads();
	}

	/* Accumulate the result into the dot variable in global memory. */
	if (threadIdx.x == 0)
		*dot = *dot + accumulator[0];
}
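/* Alternative sketch, not part of the original example: the two-kernel
 * scheme could be collapsed into a single kernel by having thread 0 of
 * each block atomically fold its partial sum into *dot. Caveats: atomicAdd
 * on double requires a device of compute capability >= 6.0 (it always
 * exists for float), and *dot must already hold the value to accumulate
 * into. */
static __global__ void dot_device_atomic(TYPE *vx, TYPE *vy, unsigned n, TYPE *dot)
{
	__shared__ TYPE scnt[MAXTHREADSPERBLOCK];

	const int tid = threadIdx.x + blockIdx.x*blockDim.x;
	const int nthreads = gridDim.x * blockDim.x;

	scnt[threadIdx.x] = (TYPE)0.0;
	__syncthreads();

	int ind;
	for (ind = tid; ind < n; ind += nthreads)
		scnt[threadIdx.x] += vx[ind]*vy[ind];

	__syncthreads();

	/* Same power-of-2 tree reduction as in dot_device */
	unsigned s;
	for (s = blockDim.x/2; s != 0; s >>= 1)
	{
		if (threadIdx.x < s)
			scnt[threadIdx.x] += scnt[threadIdx.x + s];

		__syncthreads();
	}

	/* One atomic per block replaces the gather_dot_device launch */
	if (threadIdx.x == 0)
		atomicAdd(dot, scnt[0]);
}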
extern "C" void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot)
{
	cudaError_t cures;

	/* How many blocks do we use ? */
	unsigned nblocks = 128; // TODO
	STARPU_ASSERT(nblocks <= MAXNBLOCKS);

	TYPE *per_block_sum;
	cures = cudaMalloc((void **)&per_block_sum, nblocks*sizeof(TYPE));
	if (cures != cudaSuccess)
		STARPU_CUDA_REPORT_ERROR(cures);

	STARPU_ASSERT((nelems % nblocks) == 0);

	/* How many threads per block ? At most 256, but no more threads
	 * than there are entries to process per block. */
	unsigned nthread_per_block = STARPU_MIN(MAXTHREADSPERBLOCK, (nelems / nblocks));

	/* Each entry of per_block_sum will contain the partial dot product
	 * computed by the corresponding block. */
	dot_device<<<nblocks, nthread_per_block, 0, starpu_cuda_get_local_stream()>>>(x, y, nelems, per_block_sum);

	/* Note that we do not synchronize between kernel calls because
	 * launches on the same stream are implicitly serialized. */
	gather_dot_device<<<1, nblocks, 0, starpu_cuda_get_local_stream()>>>(per_block_sum, dot);

	cures = cudaStreamSynchronize(starpu_cuda_get_local_stream());
	if (cures != cudaSuccess)
		STARPU_CUDA_REPORT_ERROR(cures);

	cudaFree(per_block_sum);
}
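/* Sketch of how dot_host could be exposed to StarPU as the CUDA
 * implementation of a codelet. This is an illustration only: the handle
 * layout (a variable accumulator followed by two vectors) and the names
 * dot_kernel_cuda / dot_kernel_cl are assumptions, not part of this file. */
static void dot_kernel_cuda(void *descr[], void *cl_arg)
{
	(void)cl_arg;

	/* Unpack the registered data handles */
	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
	TYPE *x = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
	TYPE *y = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);

	dot_host(x, y, n, dot);
}

static struct starpu_codelet dot_kernel_cl =
{
	.cuda_funcs = {dot_kernel_cuda},
	.nbuffers = 3,
	.modes = {STARPU_RW, STARPU_R, STARPU_R},
};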
/*
 * Fill a vector with zeroes
 */
static __global__ void zero_vector_device(TYPE *x, unsigned nelems)
{
	/* Grid-stride loop: each thread steps by the total number of
	 * threads in the grid, so the whole vector is covered exactly once
	 * whatever the launch geometry. (The original stepped by the
	 * per-thread element count, which made threads redundantly rewrite
	 * each other's elements.) */
	unsigned i;
	unsigned first_i = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned stride = blockDim.x * gridDim.x;

	for (i = first_i; i < nelems; i += stride)
		x[i] = (TYPE)0.0;
}
extern "C" void zero_vector(TYPE *x, unsigned nelems)
{
	unsigned nblocks = STARPU_MIN(128, nelems);
	unsigned nthread_per_block = STARPU_MIN(MAXTHREADSPERBLOCK, (nelems / nblocks));

	zero_vector_device<<<nblocks, nthread_per_block, 0, starpu_cuda_get_local_stream()>>>(x, nelems);
}
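/* Note: since the all-zero bit pattern represents 0.0 for IEEE-754 float
 * and double, the kernel above could equally be replaced by a single
 * runtime call on the worker's stream:
 *
 *   cudaMemsetAsync(x, 0, nelems*sizeof(TYPE), starpu_cuda_get_local_stream());
 *
 * which avoids maintaining a dedicated kernel; this is an observation
 * about an alternative, not what the example does. */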