Browse Source

Implement cg with filters

Cédric Augonnet 14 years ago
parent
commit
14ebc94929
3 changed files with 378 additions and 138 deletions
  1. 143 35
      examples/cg/cg.c
  2. 11 5
      examples/cg/cg.h
  3. 224 98
      examples/cg/cg_kernels.c

+ 143 - 35
examples/cg/cg.c

@@ -13,12 +13,15 @@
  *
  * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  */
-
-#include <starpu.h>
 #include <math.h>
+#include <assert.h>
+#include <starpu.h>
 #include <common/blas.h>
+
+#ifdef STARPU_USE_CUDA
 #include <cuda.h>
 #include <cublas.h>
+#endif
 
 /*
  *	Conjugate Gradient
@@ -61,14 +64,14 @@
 
 #include "cg.h"
 
-/* TODO parse argc / argv */
-static int long long n = 16*1024;
+static int long long n = 1024;
+static int nblocks = 8;
 
 static starpu_data_handle A_handle, b_handle, x_handle;
 static TYPE *A, *b, *x;
 
-static int i_max = 20000;
-static TYPE eps = 0.000000001;
+static int i_max = 4000;
+static double eps = (10e-14);
 
 static starpu_data_handle r_handle, d_handle, q_handle;
 static TYPE *r, *d, *q;
@@ -94,16 +97,24 @@ static void generate_random_problem(void)
 	/* Create a random matrix (A) and two random vectors (x and b) */
 	for (j = 0; j < n; j++)
 	{
-		b[j] = (TYPE)drand48();
-		x[j] = (TYPE)b[j];
+		b[j] = (TYPE)1.0;
+		x[j] = (TYPE)0.0;
 
-		for (i = 0; i < j; i++)
+#if 0
+		for (i = 0; i < n; i++)
 		{
-			A[n*j + i] = (TYPE)(-2.0+drand48());	
-			A[n*i + j] = A[n*j + i];
+			A[n*j + i] = (i <= j)?1.0:0.0;
 		}
 
-		A[n*j + j] = (TYPE)30.0;
+#else
+
+		/* We use the Hilbert matrix, which is not well conditioned but is positive definite: H(i,j) = 1/(1+i+j) */
+
+		for (i = 0; i < n; i++)
+		{
+			A[n*j + i] = (TYPE)(1.0/(1.0+i+j));
+		}
+#endif
 	}
 
 	/* Internal vectors */
@@ -131,10 +142,83 @@ static void register_data(void)
 	starpu_variable_data_register(&rtr_handle, 0, (uintptr_t)&rtr, sizeof(TYPE));
 }
 
+/*
+ *	Data partitioning filters
+ */
+
+struct starpu_data_filter vector_filter;
+struct starpu_data_filter matrix_filter_1;
+struct starpu_data_filter matrix_filter_2;
+
 static void partition_data(void)
 {
+	assert(n % nblocks == 0);
+
+	/*
+	 *	Partition the A matrix
+	 */
+
+	/* Partition into contiguous parts */
+	matrix_filter_1.filter_func = starpu_block_filter_func;
+	matrix_filter_1.nchildren = nblocks;
+	/* Partition into non-contiguous parts */
+	matrix_filter_2.filter_func = starpu_vertical_block_filter_func;
+	matrix_filter_2.nchildren = nblocks;
+
+	/* A is in FORTRAN ordering, starpu_data_get_sub_data(A_handle, 2, i,
+	 * j) designates the block in column i and row j. */
+	starpu_data_map_filters(A_handle, 2, &matrix_filter_1, &matrix_filter_2);
+
+	/*
+	 *	Partition the vectors
+	 */
+
+	vector_filter.filter_func = starpu_block_filter_func_vector;
+	vector_filter.nchildren = nblocks;
+
+	starpu_data_partition(b_handle, &vector_filter);
+	starpu_data_partition(x_handle, &vector_filter);
+	starpu_data_partition(r_handle, &vector_filter);
+	starpu_data_partition(d_handle, &vector_filter);
+	starpu_data_partition(q_handle, &vector_filter);
+}
+
+/*
+ *	Debug
+ */
+
+#if 0
+static void display_vector(starpu_data_handle handle, TYPE *ptr)
+{
+	unsigned block_size = n / nblocks;
+
+	unsigned b, ind;
+	for (b = 0; b < nblocks; b++)
+	{
+		starpu_data_acquire(starpu_data_get_sub_data(handle, 1, b), STARPU_R);
+		for (ind = 0; ind < block_size; ind++)
+		{
+			fprintf(stderr, "%2.2e ", ptr[b*block_size + ind]);
+		}
+		fprintf(stderr, "| ");
+		starpu_data_release(starpu_data_get_sub_data(handle, 1, b));
+	}
+	fprintf(stderr, "\n");
+}
 
+static void display_matrix(void)
+{
+	unsigned i, j;
+	for (i = 0; i < n; i++)
+	{
+		for (j = 0; j < n; j++)
+		{
+			fprintf(stderr, "%2.2e ", A[j*n + i]);
+		}
+		fprintf(stderr, "\n");
+	}
 }
+#endif
 
 /*
  *	Main loop
@@ -142,66 +226,62 @@ static void partition_data(void)
 
 static void cg(void)
 {
-	TYPE delta_new, delta_old, delta_0;
+	double delta_new, delta_old, delta_0;
+	double alpha, beta;
 
 	int i = 0;
 
 	/* r <- b */
-	copy_handle(r_handle, b_handle);
-
-	starpu_task_wait_for_all();
+	copy_handle(r_handle, b_handle, nblocks);
 
 	/* r <- r - A x */
-	gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0); 
+	gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0, nblocks); 
 
 	/* d <- r */
-	copy_handle(d_handle, r_handle);
+	copy_handle(d_handle, r_handle, nblocks);
 
 	/* delta_new = dot(r,r) */
-	dot_kernel(r_handle, r_handle, rtr_handle);
+	dot_kernel(r_handle, r_handle, rtr_handle, nblocks);
 
 	starpu_data_acquire(rtr_handle, STARPU_R);
 	delta_new = rtr;
 	delta_0 = delta_new;
 	starpu_data_release(rtr_handle);
-	
-	fprintf(stderr, "DELTA %f\n", delta_new);
 
-	while ((i < i_max) && (delta_new > (eps*eps*delta_0)))
-	{
-		fprintf(stderr, "*****************************************\niter %d DELTA %e - %e\n", i, delta_new, sqrt(delta_new/n));
-		TYPE alpha, beta;
+	fprintf(stderr, "*************** INITIAL ************ \n");
+	fprintf(stderr, "Delta 0: %e\n", delta_new);
 
+	while ((i < i_max) && ((double)delta_new > (double)(eps*eps*delta_0)))
+	{
 		/* q <- A d */
-		gemv_kernel(q_handle, A_handle, d_handle, 0.0, 1.0);
+		gemv_kernel(q_handle, A_handle, d_handle, 0.0, 1.0, nblocks);
 
 		/* dtq <- dot(d,q) */
-		dot_kernel(d_handle, q_handle, dtq_handle);
+		dot_kernel(d_handle, q_handle, dtq_handle, nblocks);
 
 		/* alpha = delta_new / dtq */
 		starpu_data_acquire(dtq_handle, STARPU_R);
 		alpha = delta_new/dtq;
-//		fprintf(stderr, "ALPHA %e DELTA NEW %e DTQ %e\n", alpha, delta_new, dtq);
 		starpu_data_release(dtq_handle);
 		
 		/* x <- x + alpha d */
-		axpy_kernel(x_handle, d_handle, alpha);
+		axpy_kernel(x_handle, d_handle, alpha, nblocks);
 
 		if ((i % 50) == 0)
 		{
 			/* r <- b */
-			copy_handle(r_handle, b_handle);
+			copy_handle(r_handle, b_handle, nblocks);
 		
 			/* r <- r - A x */
-			gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0); 
+			gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0, nblocks); 
 		}
 		else {
 			/* r <- r - alpha q */
-			axpy_kernel(r_handle, q_handle, -alpha);
+			axpy_kernel(r_handle, q_handle, -alpha, nblocks);
 		}
 
 		/* delta_new = dot(r,r) */
-		dot_kernel(r_handle, r_handle, rtr_handle);
+		dot_kernel(r_handle, r_handle, rtr_handle, nblocks);
 
 		starpu_data_acquire(rtr_handle, STARPU_R);
 		delta_old = delta_new;
@@ -210,21 +290,49 @@ static void cg(void)
 		starpu_data_release(rtr_handle);
 
 		/* d <- beta d + r */
-		scal_axpy_kernel(d_handle, beta, r_handle, 1.0);
+		scal_axpy_kernel(d_handle, beta, r_handle, 1.0, nblocks);
+
+		/* Here we take the error as ||r||_2 / (n ||b||_2) */
+		double error = sqrt(delta_new/delta_0)/(1.0*n);
+		fprintf(stderr, "*****************************************\n");
+		fprintf(stderr, "iter %d DELTA %e - %e\n", i, delta_new, error);
 
 		i++;
 	}
 }
 
-int check(void)
+static int check(void)
 {
 	return 0;
 }
 
+static void parse_args(int argc, char **argv)
+{
+	int i;
+	for (i = 1; i < argc; i++) {
+	        if (strcmp(argv[i], "-n") == 0) {
+			n = (int long long)atoi(argv[++i]);
+			continue;
+		}
+
+	        if (strcmp(argv[i], "-maxiter") == 0) {
+			i_max = atoi(argv[++i]);
+			continue;
+		}
+
+	        if (strcmp(argv[i], "-nblocks") == 0) {
+			nblocks = atoi(argv[++i]);
+			continue;
+		}
+        }
+}
+
 int main(int argc, char **argv)
 {
 	int ret;
 
+	parse_args(argc, argv);
+
 	starpu_init(NULL);
 	starpu_helper_cublas_init();
 

+ 11 - 5
examples/cg/cg.h

@@ -53,19 +53,25 @@
 
 void dot_kernel(starpu_data_handle v1,
                 starpu_data_handle v2,
-                starpu_data_handle s);
+                starpu_data_handle s,
+		unsigned nblocks);
 
 void gemv_kernel(starpu_data_handle v1,
                 starpu_data_handle matrix, 
                 starpu_data_handle v2,
-                TYPE p1, TYPE p2);
+                TYPE p1, TYPE p2,
+		unsigned nblocks);
 
 void axpy_kernel(starpu_data_handle v1,
-		starpu_data_handle v2, TYPE p1);
+		starpu_data_handle v2, TYPE p1,
+		unsigned nblocks);
 
 void scal_axpy_kernel(starpu_data_handle v1, TYPE p1,
-			starpu_data_handle v2, TYPE p2);
+			starpu_data_handle v2, TYPE p2,
+			unsigned nblocks);
 
-void copy_handle(starpu_data_handle dst, starpu_data_handle src);
+void copy_handle(starpu_data_handle dst,
+		starpu_data_handle src,
+		unsigned nblocks);
 
 #endif // __STARPU_EXAMPLE_CG_H__

+ 224 - 98
examples/cg/cg_kernels.c

@@ -28,9 +28,16 @@ static void dot_kernel_cuda(void *descr[], void *cl_arg)
 
 	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
  
+	/* Get current value */
+	TYPE host_dot;
+	cudaMemcpy(&host_dot, dot, sizeof(TYPE), cudaMemcpyDeviceToHost);
+	cudaThreadSynchronize();
+
 	TYPE local_dot = cublasdot(n, v1, 1, v2, 1);
-//	fprintf(stderr, "DOT -> %e\n", local_dot);
-	cudaMemcpy(dot, &local_dot, sizeof(TYPE), cudaMemcpyHostToDevice);
+	host_dot += local_dot;
+	cudaThreadSynchronize();
+
+	cudaMemcpy(dot, &host_dot, sizeof(TYPE), cudaMemcpyHostToDevice);
 	cudaThreadSynchronize();
 }
 
@@ -42,48 +49,86 @@ static void dot_kernel_cpu(void *descr[], void *cl_arg)
 
 	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
 
-//	fprintf(stderr, "SDOT n = %d v1 %p v2 %p\n", n, v1, v2);
-//	fprintf(stderr, "v1:");
-//	print_vector(descr[1]);
-//	fprintf(stderr, "v2:");
-//	print_vector(descr[2]);
-
-	*dot = DOT(n, v1, 1, v2, 1);
-
-//	*dot = 0.0;
-//	TYPE local_dot = 0.0;
-//
-//	unsigned i;
-//	for (i =0; i < n; i++)
-//		local_dot += v1[i]*v2[i]; 
-//
-//	*dot = local_dot;
+	TYPE local_dot;
+	local_dot = DOT(n, v1, 1, v2, 1);
+
+	*dot += local_dot;
 }
 
+static struct starpu_perfmodel_t dot_kernel_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "dot_kernel"
+};
+
 static starpu_codelet dot_kernel_cl = {
 	.where = STARPU_CPU|STARPU_CUDA,
 	.cpu_func = dot_kernel_cpu,
 	.cuda_func = dot_kernel_cuda,
-	.nbuffers = 3
+	.nbuffers = 3,
+	.model = &dot_kernel_model
+};
+
+static void bzero_variable_cuda(void *descr[], void *cl_arg)
+{
+	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
+ 
+	TYPE zero = 0.0;
+	cudaMemcpy(v, &zero, sizeof(TYPE), cudaMemcpyHostToDevice);
+	cudaThreadSynchronize();
+}
+
+static void bzero_variable_cpu(void *descr[], void *cl_arg)
+{
+	TYPE *v = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]);
+ 
+	memset(v, 0, sizeof(TYPE));
+}
+
+static struct starpu_perfmodel_t bzero_variable_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "bzero_variable"
+};
+
+static starpu_codelet bzero_variable_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = bzero_variable_cpu,
+	.cuda_func = bzero_variable_cuda,
+	.nbuffers = 1,
+	.model = &bzero_variable_model
 };
 
 void dot_kernel(starpu_data_handle v1,
 		starpu_data_handle v2,
-		starpu_data_handle s)
+		starpu_data_handle s,
+		unsigned nblocks)
 {
 	int ret;
-	struct starpu_task *task = starpu_task_create();
+	struct starpu_task *task;
 
-	task->cl = &dot_kernel_cl;
+	/* Blank the accumulation variable */
+	task = starpu_task_create();
+	task->cl = &bzero_variable_cl;
 	task->buffers[0].handle = s;
 	task->buffers[0].mode = STARPU_W;
-	task->buffers[1].handle = v1;
-	task->buffers[1].mode = STARPU_R;
-	task->buffers[2].handle = v2;
-	task->buffers[2].mode = STARPU_R;
-
 	ret = starpu_task_submit(task);
 	assert(!ret);
+
+	unsigned b;
+	for (b = 0; b < nblocks; b++)
+	{
+		task = starpu_task_create();
+
+		task->cl = &dot_kernel_cl;
+		task->buffers[0].handle = s;
+		task->buffers[0].mode = STARPU_RW;
+		task->buffers[1].handle = starpu_data_get_sub_data(v1, 1, b);
+		task->buffers[1].mode = STARPU_R;
+		task->buffers[2].handle = starpu_data_get_sub_data(v2, 1, b);
+		task->buffers[2].mode = STARPU_R;
+
+		ret = starpu_task_submit(task);
+		assert(!ret);
+	}
 }
 
 /*
@@ -107,13 +152,42 @@ static void gemv_kernel_cuda(void *descr[], void *cl_arg)
 	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
 	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);
  
-	/* Compute v1 = p1 * v1 + p2 * M v2 */
-	cublasgemv('N', nx, ny, params->p2, M, ld, v2, 1, params->p1, v1, 1);
+	TYPE alpha = params->p2;
+	TYPE beta = params->p1;
+
+	/* Compute v1 = alpha M v2 + beta v1 */
+	cublasgemv('N', nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
 	cudaThreadSynchronize();
 
 	free(params);
 }
 
+#if 0
+static void print_vector_from_descr(unsigned nx, TYPE *v)
+{
+	unsigned i;
+	for (i = 0; i < nx; i++)
+	{
+		fprintf(stderr, "%2.2e ", v[i]);
+	}
+	fprintf(stderr, "\n");
+}
+
+
+static void print_matrix_from_descr(unsigned nx, unsigned ny, unsigned ld, TYPE *mat)
+{
+	unsigned i, j;
+	for (j = 0; j < nx; j++)
+	{
+		for (i = 0; i < ny; i++)
+		{
+			fprintf(stderr, "%2.2e ", mat[j+i*ld]);
+		}
+		fprintf(stderr, "\n");
+	}
+}
+#endif
+
 static void gemv_kernel_cpu(void *descr[], void *cl_arg)
 {
 	struct kernel_params *params = cl_arg;
@@ -126,44 +200,63 @@ static void gemv_kernel_cpu(void *descr[], void *cl_arg)
 	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
 	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);
 
-	/* Compute v1 = p1 * v1 + p2 * M v2 */
-	GEMV("N", nx, ny, params->p2, M, ld, v2, 1, params->p1, v1, 1);
+	TYPE alpha = params->p2;
+	TYPE beta = params->p1;
+
+	/* Compute v1 = alpha M v2 + beta v1 */
+	GEMV("N", nx, ny, alpha, M, ld, v2, 1, beta, v1, 1);
 
 	free(params);
 }
 
+static struct starpu_perfmodel_t gemv_kernel_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "gemv_kernel"
+};
+
 static starpu_codelet gemv_kernel_cl = {
 	.where = STARPU_CPU|STARPU_CUDA,
 	.cpu_func = gemv_kernel_cpu,
 	.cuda_func = gemv_kernel_cuda,
-	.nbuffers = 3
+	.nbuffers = 3,
+	.model = &gemv_kernel_model
 };
 
 void gemv_kernel(starpu_data_handle v1,
 		starpu_data_handle matrix,
 		starpu_data_handle v2,
-		TYPE p1, TYPE p2)
+		TYPE p1, TYPE p2,
+		unsigned nblocks)
 {
 	int ret;
-	struct starpu_task *task = starpu_task_create();
 
-	task->cl = &gemv_kernel_cl;
-	task->buffers[0].handle = v1;
-	task->buffers[0].mode = STARPU_RW;
-	task->buffers[1].handle = matrix;
-	task->buffers[1].mode = STARPU_R;
-	task->buffers[2].handle = v2;
-	task->buffers[2].mode = STARPU_R;
-
-	struct kernel_params *params = malloc(sizeof(struct kernel_params));
-	assert(params);
-	params->p1 = p1;
-	params->p2 = p2;
-
-	task->cl_arg = params;
-
-	ret = starpu_task_submit(task);
-	assert(!ret);
+	unsigned b1, b2;
+
+	for (b2 = 0; b2 < nblocks; b2++)
+	{
+		for (b1 = 0; b1 < nblocks; b1++)
+		{
+			struct starpu_task *task = starpu_task_create();
+		
+			task->cl = &gemv_kernel_cl;
+			task->buffers[0].handle = starpu_data_get_sub_data(v1, 1, b2);
+			task->buffers[0].mode = STARPU_RW;
+			task->buffers[1].handle = starpu_data_get_sub_data(matrix, 2, b2, b1);
+			task->buffers[1].mode = STARPU_R;
+			task->buffers[2].handle = starpu_data_get_sub_data(v2, 1, b1);
+			task->buffers[2].mode = STARPU_R;
+		
+			struct kernel_params *params = malloc(sizeof(struct kernel_params));
+			assert(params);
+			params->p1 = (b1==0)?p1:1.0;
+			params->p2 = p2;
+		
+			task->cl_arg = params;
+		
+			ret = starpu_task_submit(task);
+			assert(!ret);
+		}
+	}
 }
 
 /*
@@ -208,34 +301,46 @@ static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
 	free(params);
 }
 
+static struct starpu_perfmodel_t scal_axpy_kernel_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "scal_axpy_kernel"
+};
+
 static starpu_codelet scal_axpy_kernel_cl = {
 	.where = STARPU_CPU|STARPU_CUDA,
 	.cpu_func = scal_axpy_kernel_cpu,
 	.cuda_func = scal_axpy_kernel_cuda,
-	.nbuffers = 2
+	.nbuffers = 2,
+	.model = &scal_axpy_kernel_model
 };
 
 void scal_axpy_kernel(starpu_data_handle v1, TYPE p1,
-			starpu_data_handle v2, TYPE p2)
+			starpu_data_handle v2, TYPE p2,
+			unsigned nblocks)
 {
 	int ret;
-	struct starpu_task *task = starpu_task_create();
-
-	task->cl = &scal_axpy_kernel_cl;
-	task->buffers[0].handle = v1;
-	task->buffers[0].mode = STARPU_RW;
-	task->buffers[1].handle = v2;
-	task->buffers[1].mode = STARPU_R;
-
-	struct kernel_params *params = malloc(sizeof(struct kernel_params));
-	assert(params);
-	params->p1 = p1;
-	params->p2 = p2;
 
-	task->cl_arg = params;
+	unsigned b;
+	for (b = 0; b < nblocks; b++)
+	{
+		struct starpu_task *task = starpu_task_create();
 
-	ret = starpu_task_submit(task);
-	assert(!ret);
+		task->cl = &scal_axpy_kernel_cl;
+		task->buffers[0].handle = starpu_data_get_sub_data(v1, 1, b);
+		task->buffers[0].mode = STARPU_RW;
+		task->buffers[1].handle = starpu_data_get_sub_data(v2, 1, b);
+		task->buffers[1].mode = STARPU_R;
+	
+		struct kernel_params *params = malloc(sizeof(struct kernel_params));
+		assert(params);
+		params->p1 = p1;
+		params->p2 = p2;
+	
+		task->cl_arg = params;
+	
+		ret = starpu_task_submit(task);
+		assert(!ret);
+	}
 }
 
 
@@ -275,33 +380,45 @@ static void axpy_kernel_cpu(void *descr[], void *cl_arg)
 	free(params);
 }
 
+static struct starpu_perfmodel_t axpy_kernel_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "axpy_kernel"
+};
+
 static starpu_codelet axpy_kernel_cl = {
 	.where = STARPU_CPU|STARPU_CUDA,
 	.cpu_func = axpy_kernel_cpu,
 	.cuda_func = axpy_kernel_cuda,
-	.nbuffers = 2
+	.nbuffers = 2,
+	.model = &axpy_kernel_model
 };
 
 void axpy_kernel(starpu_data_handle v1,
-		starpu_data_handle v2, TYPE p1)
+		starpu_data_handle v2, TYPE p1,
+		unsigned nblocks)
 {
 	int ret;
-	struct starpu_task *task = starpu_task_create();
-
-	task->cl = &axpy_kernel_cl;
-	task->buffers[0].handle = v1;
-	task->buffers[0].mode = STARPU_RW;
-	task->buffers[1].handle = v2;
-	task->buffers[1].mode = STARPU_R;
+	unsigned b;
 
-	struct kernel_params *params = malloc(sizeof(struct kernel_params));
-	assert(params);
-	params->p1 = p1;
-
-	task->cl_arg = params;
-
-	ret = starpu_task_submit(task);
-	assert(!ret);
+	for (b = 0; b < nblocks; b++)
+	{
+		struct starpu_task *task = starpu_task_create();
+	
+		task->cl = &axpy_kernel_cl;
+		task->buffers[0].handle = starpu_data_get_sub_data(v1, 1, b);
+		task->buffers[0].mode = STARPU_RW;
+		task->buffers[1].handle = starpu_data_get_sub_data(v2, 1, b);
+		task->buffers[1].mode = STARPU_R;
+	
+		struct kernel_params *params = malloc(sizeof(struct kernel_params));
+		assert(params);
+		params->p1 = p1;
+	
+		task->cl_arg = params;
+	
+		ret = starpu_task_submit(task);
+		assert(!ret);
+	}
 }
 
 
@@ -318,8 +435,6 @@ static void copy_handle_cpu(void *descr[], void *cl_arg)
 	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);
 
 	memcpy(dst, src, nx*elemsize);
-
-//	fprintf(stderr, "MEMCPY %p -> %p length %d src[0] = %f\n", src, dst, nx*elemsize, src[0]);
 }
 
 static void copy_handle_cuda(void *descr[], void *cl_arg)
@@ -334,24 +449,35 @@ static void copy_handle_cuda(void *descr[], void *cl_arg)
 	cudaThreadSynchronize();
 }
 
+static struct starpu_perfmodel_t copy_handle_model = {
+	.type = STARPU_HISTORY_BASED,
+	.symbol = "copy_handle"
+};
+
 static starpu_codelet copy_handle_cl = {
 	.where = STARPU_CPU|STARPU_CUDA,
 	.cpu_func = copy_handle_cpu,
 	.cuda_func = copy_handle_cuda,
-	.nbuffers = 2
+	.nbuffers = 2,
+	.model = &copy_handle_model
 };
 
-void copy_handle(starpu_data_handle dst, starpu_data_handle src)
+void copy_handle(starpu_data_handle dst, starpu_data_handle src, unsigned nblocks)
 {
 	int ret;
-	struct starpu_task *task = starpu_task_create();
-
-	task->cl = &copy_handle_cl;
-	task->buffers[0].handle = dst;
-	task->buffers[0].mode = STARPU_W;
-	task->buffers[1].handle = src;
-	task->buffers[1].mode = STARPU_R;
+	unsigned b;
 
-	ret = starpu_task_submit(task);
-	assert(!ret);
+	for (b = 0; b < nblocks; b++)
+	{
+		struct starpu_task *task = starpu_task_create();
+	
+		task->cl = &copy_handle_cl;
+		task->buffers[0].handle = starpu_data_get_sub_data(dst, 1, b);
+		task->buffers[0].mode = STARPU_W;
+		task->buffers[1].handle = starpu_data_get_sub_data(src, 1, b);
+		task->buffers[1].mode = STARPU_R;
+	
+		ret = starpu_task_submit(task);
+		assert(!ret);
+	}
 }