
Use cublasdot and cudaMemsetAsync instead of self-baked implementations

Samuel Thibault, 8 years ago
Parent
Commit
5e9e6b367f
3 changed files with 13 additions and 186 deletions
  1. examples/Makefile.am (0 additions, 5 deletions)
  2. examples/cg/cg_dot_kernel.cu (0 additions, 156 deletions)
  3. examples/cg/cg_kernels.c (13 additions, 25 deletions)
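
The replacement boils down to two standard CUDA idioms: zeroing device memory with cudaMemsetAsync() on StarPU's per-worker stream, and letting cuBLAS write the dot product straight into device memory via CUBLAS_POINTER_MODE_DEVICE. Below is a minimal standalone sketch of both, outside StarPU and hard-coded to double precision; the function, handle, stream and buffer names are illustrative and not part of the commit.

#include <cuda_runtime.h>
#include <cublas_v2.h>

/* Zero an n-element double vector on the given stream, the way the
 * commit's bzero codelets now do instead of launching zero_vector(). */
static void bzero_sketch(double *d_v, size_t n, cudaStream_t stream)
{
	cudaMemsetAsync(d_v, 0, n * sizeof(*d_v), stream);
}

/* Compute d_dot = d_x . d_y with the result left in device memory,
 * the way dot_kernel_cuda now does instead of calling dot_host(). */
static void dot_sketch(cublasHandle_t handle, cudaStream_t stream,
                       const double *d_x, const double *d_y,
                       double *d_dot, int n)
{
	cublasSetStream(handle, stream);
	/* Device pointer mode: cuBLAS writes the scalar result to d_dot
	 * instead of returning it through a host pointer, so the value
	 * never has to travel back and forth between host and device. */
	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
	cublasDdot(handle, n, d_x, 1, d_y, 1, d_dot);
	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
}
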

+ 0 - 5
examples/Makefile.am

@@ -812,11 +812,6 @@ cg_cg_SOURCES =					\
 	cg/cg_kernels.c				\
 	common/blas.c
 
-if STARPU_USE_CUDA
-cg_cg_SOURCES +=				\
-	cg/cg_dot_kernel.cu
-endif
-
 cg_cg_LDADD =					\
 	$(STARPU_BLAS_LDFLAGS)
 

+ 0 - 156
examples/cg/cg_dot_kernel.cu

@@ -1,156 +0,0 @@
-/* StarPU --- Runtime system for heterogeneous multicore architectures.
- *
- * Copyright (C) 2010, 2015  Université de Bordeaux
- * Copyright (C) 2010, 2012, 2015  CNRS
- *
- * StarPU is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation; either version 2.1 of the License, or (at
- * your option) any later version.
- *
- * StarPU is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * See the GNU Lesser General Public License in COPYING.LGPL for more details.
- */
-
-#include <starpu.h>
-
-#include "cg.h"
-
-#define MAXNBLOCKS	128
-#define MAXTHREADSPERBLOCK	256
-
-/*
- * Dot product kernel
- * We first perform dot computation in parallel in dot_device, and then we
- * gather the dot values into one in gather_dot_device.
- */
-
-static __global__ void dot_device(TYPE *vx, TYPE *vy, unsigned n, TYPE *dot_array)
-{
-	__shared__ TYPE scnt[MAXTHREADSPERBLOCK];
-
-	/* Do we have a successful shot ? */
-	const int tid = threadIdx.x + blockIdx.x*blockDim.x;
-
-	const int nthreads = gridDim.x * blockDim.x;
-
-	/* Blank the shared mem buffer */
-	if (threadIdx.x < MAXTHREADSPERBLOCK)
-		scnt[threadIdx.x] = (TYPE)0.0;
-
-	__syncthreads();
-
-	int ind;
-	for (ind = tid; ind < n; ind += nthreads)
-	{
-		TYPE x = vx[ind];
-		TYPE y = vy[ind];
-
-		scnt[threadIdx.x] += (x*y);
-	}
-
-	__syncthreads();
-
-	/* Perform a reduction to compute the sum on each thread within that block */
-
-	/* NB: We assume that the number of threads per block is a power of 2 ! */
-	unsigned s;
-	for (s = blockDim.x/2; s!=0; s>>=1)
-	{
-		if (threadIdx.x < s)
-			scnt[threadIdx.x] += scnt[threadIdx.x + s];
-
-		__syncthreads();
-	}
-
-	/* report the number of successful shots in the block */
-	if (threadIdx.x == 0)
-		dot_array[blockIdx.x] = scnt[0];
-
-	__syncthreads();
-}
-
-static __global__ void gather_dot_device(TYPE *dot_array, TYPE *dot)
-{
-	__shared__ TYPE accumulator[MAXNBLOCKS];
-
-	unsigned i;
-
-	/* Load the values from global mem */
-	for (i = 0; i < blockDim.x; i++)
-		accumulator[i] = dot_array[i];
-
-	__syncthreads();
-
-	/* Perform a reduction in shared memory */
-	unsigned s;
-	for (s = blockDim.x/2; s!=0; s>>=1)
-	{
-		if (threadIdx.x < s)
-			accumulator[threadIdx.x] += accumulator[threadIdx.x + s];
-
-		__syncthreads();
-	}
-
-
-	/* Save the result in global memory */
-	if (threadIdx.x == 0)
-		*dot = *dot + accumulator[0];
-}
-
-extern "C" void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot)
-{
-	/* How many blocks do we use ? */ 
-	unsigned nblocks = 128; // TODO
-	STARPU_ASSERT(nblocks <= MAXNBLOCKS);
-	
-	TYPE *per_block_sum;
-	cudaMalloc((void **)&per_block_sum, nblocks*sizeof(TYPE));
-
-	STARPU_ASSERT((nelems % nblocks) == 0);
-
-	/* How many threads per block ? At most 256, but no more threads than
-	 * there are entries to process per block. */
-	unsigned nthread_per_block = STARPU_MIN(MAXTHREADSPERBLOCK, (nelems / nblocks));
-
-	/* each entry of per_block_sum contains the number of successful shots
-	 * in the corresponding block. */
-	dot_device<<<nblocks, nthread_per_block, 0, starpu_cuda_get_local_stream()>>>(x, y, nelems, per_block_sum);
-
-	/* Note that we do not synchronize between kernel calls because there
-	 * is an implicit serialization */
-	gather_dot_device<<<1, nblocks, 0, starpu_cuda_get_local_stream()>>>(per_block_sum, dot);
-
-	cudaError_t cures;
-	cures = cudaStreamSynchronize(starpu_cuda_get_local_stream());
-	if (cures)
-		STARPU_CUDA_REPORT_ERROR(cures);
-
-	cudaFree(per_block_sum);
-}
-
-/*
- * Fill a vector with zeroes
- */
-
-static __global__ void zero_vector_device(TYPE *x, unsigned nelems, unsigned nelems_per_thread)
-{
-	unsigned i;
-	unsigned first_i = blockDim.x * blockIdx.x + threadIdx.x;
-
-	for (i = first_i; i < nelems; i += nelems_per_thread)
-		x[i] = 0.0;
-}
-
-extern "C" void zero_vector(TYPE *x, unsigned nelems)
-{
-	unsigned nblocks = STARPU_MIN(128, nelems);
-	unsigned nthread_per_block = STARPU_MIN(MAXTHREADSPERBLOCK, (nelems / nblocks));
-
-	unsigned nelems_per_thread = nelems / (nblocks * nthread_per_block);
-
-	zero_vector_device<<<nblocks, nthread_per_block, 0, starpu_cuda_get_local_stream()>>>(x, nelems, nelems_per_thread);
-}

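For reference, cublasdot in the hunk below is not a cuBLAS entry point; it is presumably the example's precision-generic macro, defined alongside TYPE in cg.h or the common BLAS wrappers, along the lines of the following sketch (the DOUBLE switch is a guess):

#ifdef DOUBLE
#define cublasdot	cublasDdot
#else
#define cublasdot	cublasSdot
#endif
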
+ 13 - 25
examples/cg/cg_kernels.c

@@ -173,8 +173,9 @@ extern void zero_vector(TYPE *x, unsigned nelems);
 static void bzero_variable_cuda(void *descr[], void *cl_arg)
 {
 	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
+	size_t size = STARPU_VARIABLE_GET_ELEMSIZE(descr[0]);
 
-	zero_vector(v, 1);
+	cudaMemsetAsync(v, 0, size, starpu_cuda_get_local_stream());
 }
 #endif
 
@@ -209,8 +210,9 @@ static void bzero_vector_cuda(void *descr[], void *cl_arg)
 {
 	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
 	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
- 
-	zero_vector(v, n);
+	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);
+
+	cudaMemsetAsync(v, 0, n * elemsize, starpu_cuda_get_local_stream());
 }
 #endif
 
@@ -247,8 +249,6 @@ struct starpu_codelet bzero_vector_cl =
  */
 
 #ifdef STARPU_USE_CUDA
-extern void dot_host(TYPE *x, TYPE *y, unsigned nelems, TYPE *dot);
-
 static void dot_kernel_cuda(void *descr[], void *cl_arg)
 {
 	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]); 
@@ -257,26 +257,13 @@ static void dot_kernel_cuda(void *descr[], void *cl_arg)
 
 	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
 
-	int version;
-	cublasGetVersion(starpu_cublas_get_local_handle(), &version);
-
-	/* FIXME: check in Nvidia bug #1882017 when this gets fixed */
-	if (version < 99999)
-	{
-		/* This function puts its result directly in device memory, so
-		 * that we don't have to transfer that value back and forth. */
-		dot_host(v1, v2, n, dot);
-	}
-	else
-	{
-		/* Should be able to put result in GPU, but does not yet, see
-		 * Nvidia bug #1882017 */
-		cublasStatus_t status = cublasdot(starpu_cublas_get_local_handle(),
-			n, v1, 1, v2, 1, dot);
-		if (status != CUBLAS_STATUS_SUCCESS)
-			STARPU_CUBLAS_REPORT_ERROR(status);
-		cudaStreamSynchronize(starpu_cuda_get_local_stream());
-	}
+	cublasHandle_t handle = starpu_cublas_get_local_handle();
+	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
+	cublasStatus_t status = cublasdot(handle,
+		n, v1, 1, v2, 1, dot);
+	if (status != CUBLAS_STATUS_SUCCESS)
+		STARPU_CUBLAS_REPORT_ERROR(status);
+	cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
 }
 #endif
 
@@ -310,6 +297,7 @@ static struct starpu_codelet dot_kernel_cl =
 #ifdef STARPU_USE_CUDA
 	.cuda_funcs = {dot_kernel_cuda},
 #endif
+	.cuda_flags = {STARPU_CUDA_ASYNC},
 	.nbuffers = 3,
 	.model = &dot_kernel_model
 };
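
The new STARPU_CUDA_ASYNC flag is what lets the codelets drop explicit synchronization: the CUDA function only has to enqueue work on starpu_cuda_get_local_stream() and return, and StarPU detects completion of the stream on its own. A minimal sketch of the pattern, assuming a double-typed vector handle (the codelet and function names here are illustrative, not from the commit):

#include <starpu.h>

#ifdef STARPU_USE_CUDA
/* Enqueue an asynchronous memset on StarPU's per-worker CUDA stream and
 * return immediately: with STARPU_CUDA_ASYNC there is no explicit
 * cudaStreamSynchronize(); StarPU waits for the stream itself. */
static void bzero_async_cuda(void *descr[], void *cl_arg)
{
	double *v = (double *)STARPU_VECTOR_GET_PTR(descr[0]);
	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	(void)cl_arg;

	cudaMemsetAsync(v, 0, n * sizeof(*v), starpu_cuda_get_local_stream());
}
#endif

static struct starpu_codelet bzero_async_cl =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {bzero_async_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.nbuffers = 1,
	.modes = {STARPU_W},
};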