
Fix the CG example:
- Use a Hilbert matrix instead of a randomly generated matrix, so that we
have a positive definite but badly conditioned matrix (this ensures that
the algorithm runs for a long time); a standalone sketch follows this list.
- Fix the reduction-based implementation of GEMV
- Fix the bzero_vector_cl implementation
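
For context, here is a minimal standalone sketch (not part of the commit) of the Hilbert fill, assuming double precision and the column-major A[n*j + i] layout used in cg.c; H(i,j) = 1/(1+i+j) is symmetric positive definite but notoriously ill-conditioned, which is what forces CG to iterate for a long time. The reduction and bzero_vector_cl fixes themselves appear directly in the cg_kernels.c hunks below.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Small n just for illustration; the example defaults to n = 1024. */
	int n = 4;
	double *A = malloc(n * n * sizeof(double));
	int i, j;

	/* H(i,j) = 1/(1+i+j), stored column by column as in cg.c */
	for (j = 0; j < n; j++)
		for (i = 0; i < n; i++)
			A[n*j + i] = 1.0 / (1.0 + i + j);

	/* Print the matrix row by row */
	for (i = 0; i < n; i++)
	{
		for (j = 0; j < n; j++)
			printf("%8.5f ", A[n*j + i]);
		printf("\n");
	}

	free(A);
	return 0;
}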

Cédric Augonnet, 14 years ago
parent
commit
c750225a2b
3 changed files with 330 additions and 95 deletions
  1. examples/cg/cg.c (+50, -19)
  2. examples/cg/cg.h (+10, -2)
  3. examples/cg/cg_kernels.c (+270, -74)

+ 50 - 19
examples/cg/cg.c

@@ -15,6 +15,7 @@
  */
 #include <math.h>
 #include <assert.h>
+#include <sys/time.h>
 #include <starpu.h>
 #include <common/blas.h>
 
@@ -66,6 +67,7 @@
 
 static int long long n = 1024;
 static int nblocks = 8;
+static int use_reduction = 1;
 
 static starpu_data_handle A_handle, b_handle, x_handle;
 static TYPE *A, *b, *x;
@@ -79,6 +81,11 @@ static TYPE *r, *d, *q;
 static starpu_data_handle dtq_handle, rtr_handle;
 static TYPE dtq, rtr;
 
+extern starpu_codelet accumulate_variable_cl;
+extern starpu_codelet accumulate_vector_cl;
+extern starpu_codelet bzero_variable_cl;
+extern starpu_codelet bzero_vector_cl;
+
 /*
  *	Generate Input data
  */
@@ -100,21 +107,12 @@ static void generate_random_problem(void)
 		b[j] = (TYPE)1.0;
 		x[j] = (TYPE)0.0;
 
-#if 0
-		for (i = 0; i < n; i++)
-		{
-			A[n*j + i] = (i <= j)?1.0:0.0;
-		}
-
-#else
-
 		/* We use the Hilbert matrix, which is not well conditioned but positive definite: H(i,j) = 1/(1+i+j) */
 
 		for (i = 0; i < n; i++)
 		{
 			A[n*j + i] = (TYPE)(1.0/(1.0+i+j));
 		}
-#endif
 	}
 
 	/* Internal vectors */
@@ -140,6 +138,15 @@ static void register_data(void)
 
 	starpu_variable_data_register(&dtq_handle, 0, (uintptr_t)&dtq, sizeof(TYPE));
 	starpu_variable_data_register(&rtr_handle, 0, (uintptr_t)&rtr, sizeof(TYPE));
+
+	if (use_reduction)
+	{
+		starpu_data_set_reduction_methods(q_handle, &accumulate_vector_cl, &bzero_vector_cl);
+		starpu_data_set_reduction_methods(r_handle, &accumulate_vector_cl, &bzero_vector_cl);
+	
+		starpu_data_set_reduction_methods(dtq_handle, &accumulate_variable_cl, &bzero_variable_cl);
+		starpu_data_set_reduction_methods(rtr_handle, &accumulate_variable_cl, &bzero_variable_cl);
+	}
 }
 
 /*
@@ -235,13 +242,13 @@ static void cg(void)
 	copy_handle(r_handle, b_handle, nblocks);
 
 	/* r <- r - A x */
-	gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0, nblocks); 
+	gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0, nblocks, use_reduction); 
 
 	/* d <- r */
 	copy_handle(d_handle, r_handle, nblocks);
 
 	/* delta_new = dot(r,r) */
-	dot_kernel(r_handle, r_handle, rtr_handle, nblocks);
+	dot_kernel(r_handle, r_handle, rtr_handle, nblocks, use_reduction);
 
 	starpu_data_acquire(rtr_handle, STARPU_R);
 	delta_new = rtr;
@@ -251,13 +258,17 @@ static void cg(void)
 	fprintf(stderr, "*************** INITIAL ************ \n");
 	fprintf(stderr, "Delta 0: %e\n", delta_new);
 
+	struct timeval start;
+	struct timeval end;
+	gettimeofday(&start, NULL);
+
 	while ((i < i_max) && ((double)delta_new > (double)(eps*eps*delta_0)))
 	{
 		/* q <- A d */
-		gemv_kernel(q_handle, A_handle, d_handle, 0.0, 1.0, nblocks);
+		gemv_kernel(q_handle, A_handle, d_handle, 0.0, 1.0, nblocks, use_reduction);
 
 		/* dtq <- dot(d,q) */
-		dot_kernel(d_handle, q_handle, dtq_handle, nblocks);
+		dot_kernel(d_handle, q_handle, dtq_handle, nblocks, use_reduction);
 
 		/* alpha = delta_new / dtq */
 		starpu_data_acquire(dtq_handle, STARPU_R);
@@ -273,7 +284,7 @@ static void cg(void)
 			copy_handle(r_handle, b_handle, nblocks);
 		
 			/* r <- r - A x */
-			gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0, nblocks); 
+			gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0, nblocks, use_reduction); 
 		}
 		else {
 			/* r <- r - alpha q */
@@ -281,7 +292,7 @@ static void cg(void)
 		}
 
 		/* delta_new = dot(r,r) */
-		dot_kernel(r_handle, r_handle, rtr_handle, nblocks);
+		dot_kernel(r_handle, r_handle, rtr_handle, nblocks, use_reduction);
 
 		starpu_data_acquire(rtr_handle, STARPU_R);
 		delta_old = delta_new;
@@ -292,13 +303,22 @@ static void cg(void)
 		/* d <- beta d + r */
 		scal_axpy_kernel(d_handle, beta, r_handle, 1.0, nblocks);
 
-		/* We here take the error as ||r||_2 / (n||b||_2) */
-		double error = sqrt(delta_new/delta_0)/(1.0*n);
-		fprintf(stderr, "*****************************************\n");
-		fprintf(stderr, "iter %d DELTA %e - %e\n", i, delta_new, error);
+		if ((i % 10) == 0)
+		{
+			/* Here we take the error as ||r||_2 / (n||b||_2) */
+			double error = sqrt(delta_new/delta_0)/(1.0*n);
+			fprintf(stderr, "*****************************************\n");
+			fprintf(stderr, "iter %d DELTA %e - %e\n", i, delta_new, error);
+		}
 
 		i++;
 	}
+
+	gettimeofday(&end, NULL);
+
+	double timing = (double)(((double)end.tv_sec - (double)start.tv_sec)*1e6 + ((double)end.tv_usec - (double)start.tv_usec));
+	fprintf(stderr, "Total timing : %2.2f seconds\n", timing/1e6);
+	fprintf(stderr, "Seconds per iteration : %2.2e\n", timing/1e6/i);
 }
 
 static int check(void)
@@ -324,6 +344,17 @@ static void parse_args(int argc, char **argv)
 			nblocks = atoi(argv[++i]);
 			continue;
 		}
+
+	        if (strcmp(argv[i], "-no-reduction") == 0) {
+			use_reduction = 0;
+			continue;
+		}
+
+	        if (strcmp(argv[i], "-h") == 0) {
+			fprintf(stderr, "usage: %s [-h] [-nblocks #blocks] [-n problem_size] [-no-reduction]\n", argv[0]);
+			exit(-1);
+			continue;
+		}
         }
 }
 

+ 10 - 2
examples/cg/cg.h

@@ -20,8 +20,12 @@
 #include <starpu.h>
 #include <math.h>
 #include <common/blas.h>
+
+#ifdef STARPU_USE_CUDA
 #include <cuda.h>
 #include <cublas.h>
+#include <starpu_cuda.h>
+#endif
 
 #include <starpu.h>
 
@@ -38,6 +42,7 @@
 #define cublasscal	cublasDscal
 #define cublasaxpy	cublasDaxpy
 #define cublasgemv	cublasDgemv
+#define cublasscal	cublasDscal
 #else
 #define TYPE	float
 #define GEMV	SGEMV
@@ -49,18 +54,21 @@
 #define cublasscal	cublasSscal
 #define cublasaxpy	cublasSaxpy
 #define cublasgemv	cublasSgemv
+#define cublasscal	cublasSscal
 #endif
 
 void dot_kernel(starpu_data_handle v1,
                 starpu_data_handle v2,
                 starpu_data_handle s,
-		unsigned nblocks);
+		unsigned nblocks,
+		int use_reduction);
 
 void gemv_kernel(starpu_data_handle v1,
                 starpu_data_handle matrix, 
                 starpu_data_handle v2,
                 TYPE p1, TYPE p2,
-		unsigned nblocks);
+		unsigned nblocks,
+		int use_reduction);
 
 void axpy_kernel(starpu_data_handle v1,
 		starpu_data_handle v2, TYPE p1,

+ 270 - 74
examples/cg/cg_kernels.c

@@ -15,6 +15,173 @@
  */
 
 #include "cg.h"
+#include <math.h>
+
+struct kernel_params {
+	TYPE p1;
+	TYPE p2;
+};
+
+#if 0
+static void print_vector_from_descr(unsigned nx, TYPE *v)
+{
+	unsigned i;
+	for (i = 0; i < nx; i++)
+	{
+		fprintf(stderr, "%2.2e ", v[i]);
+	}
+	fprintf(stderr, "\n");
+}
+
+
+static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
+{
+	unsigned i, j;
+	for (j = 0; j < nx; j++)
+	{
+		for (i = 0; i < ny; i++)
+		{
+			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
+		}
+		fprintf(stderr, "\n");
+	}
+}
+#endif
+
+
+/*
+ *	Reduction accumulation methods
+ */
+
+static void accumulate_variable_cuda(void *descr[], void *cl_arg)
+{
+	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
+	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);
+ 
+	cublasaxpy(1, (TYPE)1.0, v_src, 1, v_dst, 1);
+	cudaError_t ret = cudaThreadSynchronize();
+	if (ret)
+		STARPU_CUDA_REPORT_ERROR(ret);
+}
+
+static void accumulate_variable_cpu(void *descr[], void *cl_arg)
+{
+	TYPE *v_dst = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
+	TYPE *v_src = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[1]);
+ 
+	*v_dst = *v_dst + *v_src;
+}
+
+static struct starpu_perfmodel_t accumulate_variable_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "accumulate_variable"
+};
+
+starpu_codelet accumulate_variable_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = accumulate_variable_cpu,
+	.cuda_func = accumulate_variable_cuda,
+	.nbuffers = 2,
+	.model = &accumulate_variable_model
+};
+
+static void accumulate_vector_cuda(void *descr[], void *cl_arg)
+{
+	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	cublasaxpy(n, (TYPE)1.0, v_src, 1, v_dst, 1);
+	cudaError_t ret = cudaThreadSynchronize();
+	if (ret)
+		STARPU_CUDA_REPORT_ERROR(ret);
+}
+
+static void accumulate_vector_cpu(void *descr[], void *cl_arg)
+{
+	TYPE *v_dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *v_src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	AXPY(n, (TYPE)1.0, v_src, 1, v_dst, 1);
+}
+
+static struct starpu_perfmodel_t accumulate_vector_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "accumulate_vector"
+};
+
+starpu_codelet accumulate_vector_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = accumulate_vector_cpu,
+	.cuda_func = accumulate_vector_cuda,
+	.nbuffers = 2,
+	.model = &accumulate_vector_model
+};
+
+/*
+ *	Reduction initialization methods
+ */
+
+static void bzero_variable_cuda(void *descr[], void *cl_arg)
+{
+	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
+ 
+	cublasscal (1, (TYPE)0.0, v, 1);
+	cudaThreadSynchronize();
+
+}
+
+static void bzero_variable_cpu(void *descr[], void *cl_arg)
+{
+	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
+	*v = (TYPE)0.0;
+}
+
+static struct starpu_perfmodel_t bzero_variable_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "bzero_variable"
+};
+
+starpu_codelet bzero_variable_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = bzero_variable_cpu,
+	.cuda_func = bzero_variable_cuda,
+	.nbuffers = 1,
+	.model = &bzero_variable_model
+};
+
+static void bzero_vector_cuda(void *descr[], void *cl_arg)
+{
+	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	cublasscal (n, (TYPE)0.0, v, 1);
+	cudaError_t ret = cudaThreadSynchronize();
+	if (ret)
+		STARPU_CUDA_REPORT_ERROR(ret);
+}
+
+static void bzero_vector_cpu(void *descr[], void *cl_arg)
+{
+	TYPE *v = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	memset(v, 0, n*sizeof(TYPE));
+}
+
+static struct starpu_perfmodel_t bzero_vector_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "bzero_vector"
+};
+
+starpu_codelet bzero_vector_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = bzero_vector_cpu,
+	.cuda_func = bzero_vector_cuda,
+	.nbuffers = 1,
+	.model = &bzero_vector_model
+};
 
 /*
  *	DOT kernel : s = dot(v1, v2)
@@ -22,6 +189,8 @@
 
 static void dot_kernel_cuda(void *descr[], void *cl_arg)
 {
+	cudaError_t ret;
+
 	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]); 
 	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
 	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
@@ -31,14 +200,20 @@ static void dot_kernel_cuda(void *descr[], void *cl_arg)
 	/* Get current value */
 	TYPE host_dot;
 	cudaMemcpy(&host_dot, dot, sizeof(TYPE), cudaMemcpyDeviceToHost);
-	cudaThreadSynchronize();
+	ret = cudaThreadSynchronize();
+	if (ret)
+		STARPU_CUDA_REPORT_ERROR(ret);
 
 	TYPE local_dot = cublasdot(n, v1, 1, v2, 1);
 	host_dot += local_dot;
-	cudaThreadSynchronize();
+	ret = cudaThreadSynchronize();
+	if (ret)
+		STARPU_CUDA_REPORT_ERROR(ret);
 
 	cudaMemcpy(dot, &host_dot, sizeof(TYPE), cudaMemcpyHostToDevice);
-	cudaThreadSynchronize();
+	ret = cudaThreadSynchronize();
+	if (ret)
+		STARPU_CUDA_REPORT_ERROR(ret);
 }
 
 static void dot_kernel_cpu(void *descr[], void *cl_arg)
@@ -49,10 +224,12 @@ static void dot_kernel_cpu(void *descr[], void *cl_arg)
 
 	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
 
-	TYPE local_dot;
-	local_dot = DOT(n, v1, 1, v2, 1);
+	TYPE local_dot = 0.0;
+	/* Note that we explicitly cast the result of the DOT kernel because
+	 * some BLAS libraries return a double for sdot, for instance. */
+	local_dot = (TYPE)DOT(n, v1, 1, v2, 1);
 
-	*dot += local_dot;
+	*dot = *dot + local_dot;
 }
 
 static struct starpu_perfmodel_t dot_kernel_model = {
@@ -68,39 +245,11 @@ static starpu_codelet dot_kernel_cl = {
 	.model = &dot_kernel_model
 };
 
-static void bzero_variable_cuda(void *descr[], void *cl_arg)
-{
-	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
- 
-	TYPE zero = 0.0;
-	cudaMemcpy(v, &zero, sizeof(TYPE), cudaMemcpyHostToDevice);
-	cudaThreadSynchronize();
-}
-
-static void bzero_variable_cpu(void *descr[], void *cl_arg)
-{
-	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
- 
-	memset(v, 0, sizeof(TYPE));
-}
-
-static struct starpu_perfmodel_t bzero_variable_model = {
-	.type = STARPU_HISTORY_BASED,
-	.symbol = "bzero_variable"
-};
-
-static starpu_codelet bzero_variable_cl = {
-	.where = STARPU_CPU|STARPU_CUDA,
-	.cpu_func = bzero_variable_cpu,
-	.cuda_func = bzero_variable_cuda,
-	.nbuffers = 1,
-	.model = &bzero_variable_model
-};
-
 void dot_kernel(starpu_data_handle v1,
 		starpu_data_handle v2,
 		starpu_data_handle s,
-		unsigned nblocks)
+		unsigned nblocks,
+		int use_reduction)
 {
 	int ret;
 	struct starpu_task *task;
@@ -113,6 +262,9 @@ void dot_kernel(starpu_data_handle v1,
 	ret = starpu_task_submit(task);
 	assert(!ret);
 
+	if (use_reduction)
+		starpu_task_wait_for_all();
+
 	unsigned b;
 	for (b = 0; b < nblocks; b++)
 	{
@@ -120,7 +272,7 @@ void dot_kernel(starpu_data_handle v1,
 
 		task->cl = &dot_kernel_cl;
 		task->buffers[0].handle = s;
-		task->buffers[0].mode = STARPU_RW;
+		task->buffers[0].mode = use_reduction?STARPU_REDUX:STARPU_RW;
 		task->buffers[1].handle = starpu_data_get_sub_data(v1, 1, b);
 		task->buffers[1].mode = STARPU_R;
 		task->buffers[2].handle = starpu_data_get_sub_data(v2, 1, b);
@@ -129,17 +281,62 @@ void dot_kernel(starpu_data_handle v1,
 		ret = starpu_task_submit(task);
 		assert(!ret);
 	}
+
+	if (use_reduction)
+		starpu_task_wait_for_all();
 }
 
 /*
- *	GEMV kernel : v1 = p1 * v1 + p2 * M v2 
+ *	SCAL kernel : v1 = p1 v1
  */
 
-struct kernel_params {
-	TYPE p1;
-	TYPE p2;
+static void scal_kernel_cuda(void *descr[], void *cl_arg)
+{
+	struct kernel_params *params = cl_arg;
+
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	/* v1 = p1 v1 */
+	TYPE alpha = params->p1;
+	cublasscal(n, alpha, v1, 1);
+	cudaThreadSynchronize();
+
+	free(params);
+}
+
+static void scal_kernel_cpu(void *descr[], void *cl_arg)
+{
+	struct kernel_params *params = cl_arg;
+
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
+
+	/* v1 = p1 v1 */
+	TYPE alpha = params->p1;
+
+	SCAL(n, alpha, v1, 1);
+
+	free(params);
+}
+
+static struct starpu_perfmodel_t scal_kernel_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "scal_kernel"
 };
 
+static starpu_codelet scal_kernel_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = scal_kernel_cpu,
+	.cuda_func = scal_kernel_cuda,
+	.nbuffers = 1,
+	.model = &scal_kernel_model
+};
+
+/*
+ *	GEMV kernel : v1 = p1 * v1 + p2 * M v2 
+ */
+
 static void gemv_kernel_cuda(void *descr[], void *cl_arg)
 {
 	struct kernel_params *params = cl_arg;
@@ -162,32 +359,6 @@ static void gemv_kernel_cuda(void *descr[], void *cl_arg)
 	free(params);
 }
 
-#if 0
-static void print_vector_from_descr(unsigned nx, TYPE *v)
-{
-	unsigned i;
-	for (i = 0; i < nx; i++)
-	{
-		fprintf(stderr, "%2.2e ", v[i]);
-	}
-	fprintf(stderr, "\n");
-}
-
-
-static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
-{
-	unsigned i, j;
-	for (j = 0; j < nx; j++)
-	{
-		for (i = 0; i < ny; i++)
-		{
-			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
-		}
-		fprintf(stderr, "\n");
-	}
-}
-#endif
-
 static void gemv_kernel_cpu(void *descr[], void *cl_arg)
 {
 	struct kernel_params *params = cl_arg;
@@ -226,12 +397,34 @@ void gemv_kernel(starpu_data_handle v1,
 		starpu_data_handle matrix,
 		starpu_data_handle v2,
 		TYPE p1, TYPE p2,
-		unsigned nblocks)
+		unsigned nblocks,
+		int use_reduction)
 {
 	int ret;
 
 	unsigned b1, b2;
 
+	if (use_reduction)
+		starpu_task_wait_for_all();
+
+	for (b2 = 0; b2 < nblocks; b2++)
+	{
+		struct starpu_task *task = starpu_task_create();
+		task->cl = &scal_kernel_cl;
+		task->buffers[0].handle = starpu_data_get_sub_data(v1, 1, b2);
+		task->buffers[0].mode = STARPU_RW;
+		
+		struct kernel_params *params = malloc(sizeof(struct kernel_params));
+		params->p1 = p1;
+
+		task->cl_arg = params;
+		ret = starpu_task_submit(task);
+		assert(!ret);
+	}
+
+	if (use_reduction)
+		starpu_task_wait_for_all();
+
 	for (b2 = 0; b2 < nblocks; b2++)
 	{
 		for (b1 = 0; b1 < nblocks; b1++)
@@ -240,7 +433,7 @@ void gemv_kernel(starpu_data_handle v1,
 		
 			task->cl = &gemv_kernel_cl;
 			task->buffers[0].handle = starpu_data_get_sub_data(v1, 1, b2);
-			task->buffers[0].mode = STARPU_RW;
+			task->buffers[0].mode = use_reduction?STARPU_REDUX:STARPU_RW;
 			task->buffers[1].handle = starpu_data_get_sub_data(matrix, 2, b2, b1);
 			task->buffers[1].mode = STARPU_R;
 			task->buffers[2].handle = starpu_data_get_sub_data(v2, 1, b1);
@@ -248,7 +441,7 @@ void gemv_kernel(starpu_data_handle v1,
 		
 			struct kernel_params *params = malloc(sizeof(struct kernel_params));
 			assert(params);
-			params->p1 = (b1==0)?p1:1.0;
+			params->p1 = 1.0;
 			params->p2 = p2;
 		
 			task->cl_arg = params;
@@ -257,6 +450,9 @@ void gemv_kernel(starpu_data_handle v1,
 			assert(!ret);
 		}
 	}
+
+	if (use_reduction)
+		starpu_task_wait_for_all();
 }
 
 /*
@@ -269,7 +465,7 @@ static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
 	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
 	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
 
-	unsigned n = STARPU_MATRIX_GET_NX(descr[0]);
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  
 	/* Compute v1 = p1 * v1 + p2 * v2.
 	 *	v1 = p1 v1
@@ -289,7 +485,7 @@ static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
 	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
 	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
 
-	unsigned nx = STARPU_MATRIX_GET_NX(descr[0]);
+	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
  
 	/* Compute v1 = p1 * v1 + p2 * v2.
 	 *	v1 = p1 v1
@@ -354,7 +550,7 @@ static void axpy_kernel_cuda(void *descr[], void *cl_arg)
 	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
 	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
 
-	unsigned n = STARPU_MATRIX_GET_NX(descr[0]);
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  
 	/* Compute v1 = v1 + p1 * v2.
 	 */
@@ -371,7 +567,7 @@ static void axpy_kernel_cpu(void *descr[], void *cl_arg)
 	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
 	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
 
-	unsigned nx = STARPU_MATRIX_GET_NX(descr[0]);
+	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
  
 	/* Compute v1 = p1 * v1 + p2 * v2.
 	 */