Browse Source

Start to implement a conjugate gradient

Cédric Augonnet 14 years ago
parent
commit
9c959490e7
4 changed files with 689 additions and 1 deletions
  1. 18 1
      examples/Makefile.am
  2. 243 0
      examples/cg/cg.c
  3. 71 0
      examples/cg/cg.h
  4. 357 0
      examples/cg/cg_kernels.c

+ 18 - 1
examples/Makefile.am

@@ -14,7 +14,7 @@
 # See the GNU Lesser General Public License in COPYING.LGPL for more details.
 #
 
-AM_CFLAGS = $(HWLOC_CFLAGS)
+AM_CFLAGS = $(HWLOC_CFLAGS) -Wall
 LIBS = $(top_builddir)/src/libstarpu.la $(HWLOC_LIBS) @LIBS@
 AM_CPPFLAGS = -I$(top_srcdir)/include/ -I$(top_srcdir)/examples/ -I$(top_builddir)/include
 
@@ -101,6 +101,7 @@ examplebindir = $(libdir)/starpu/examples/
 examplebin_PROGRAMS =
 
 noinst_HEADERS = 				\
+	cg/cg.h					\
 	heat/lu_kernels_model.h			\
 	heat/dw_sparse_cg.h			\
 	heat/heat.h				\
@@ -464,6 +465,22 @@ heat_heat_SOURCES =				\
 
 endif
 
+##############
+# CG example #
+##############
+
+if !NO_BLAS_LIB
+
+examplebin_PROGRAMS += cg/cg
+
+cg_cg_SOURCES =					\
+	cg/cg.c					\
+	cg/cg_kernels.c				\
+	common/blas.c
+endif
+
+
+
 ################
 # Tag examples #
 ################

+ 243 - 0
examples/cg/cg.c

@@ -0,0 +1,243 @@
+/*
+ * StarPU
+ * Copyright (C) Université Bordeaux 1, CNRS 2008-2010 (see AUTHORS file)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+#include <starpu.h>
+#include <math.h>
+#include <common/blas.h>
+#include <cuda.h>
+#include <cublas.h>
+
+/*
+ *	Conjugate Gradient
+ *
+ *	Input:
+ *		- matrix A
+ *		- vector b
+ *		- vector x (starting value)
+ *		- int i_max, error tolerance eps < 1.
+ *	Output:
+ *		- vector x
+ *
+ *	Pseudo code:
+ *
+ *		i <- 0
+ *		r <- b - Ax
+ *		d <- r
+ *		delta_new <- dot(r,r)
+ *		delta_0 <- delta_new
+ *	
+ *		while (i < i_max && delta_new > eps^2 delta_0)
+ *		{
+ *			q <- Ad
+ *			alpha <- delta_new/dot(d, q)
+ *			x <- x + alpha d
+ *	
+ *			If (i is divisible by 50)
+ *				r <- b - Ax
+ *			else
+ *				r <- r - alpha q
+ *			
+ *			delta_old <- delta_new
+ *			delta_new <- dot(r,r)
+ *			beta <- delta_new/delta_old
+ *			d <- r + beta d
+ *			i <- i + 1
+ *		}
+ *	
+ */
+
+#include "cg.h"
+
+/* TODO parse argc / argv */
+/* Problem size: A is n x n, the vectors have n elements */
+static long long n = 16*1024;
+
+/* The system to solve: A x = b (x also holds the starting value) */
+static starpu_data_handle A_handle, b_handle, x_handle;
+static TYPE *A, *b, *x;
+
+/* Maximum number of iterations, and stopping tolerance: the loop ends
+ * once dot(r,r) <= eps^2 * dot(r0,r0) */
+static int i_max = 20000;
+static TYPE eps = 0.000000001;
+
+/* Work vectors of the iteration: residual r, direction d, and q = A d */
+static starpu_data_handle r_handle, d_handle, q_handle;
+static TYPE *r, *d, *q;
+
+/* Host scalars backing the dot-product results dot(d,q) and dot(r,r) */
+static starpu_data_handle dtq_handle, rtr_handle;
+static TYPE dtq, rtr;
+
+/*
+ *	Generate Input data
+ */
+
+/* Allocate the problem buffers (pinned when possible, which speeds up
+ * CPU<->GPU transfers) and fill them: a symmetric n x n matrix A, random
+ * vectors b and x, and the zeroed work vectors r, d and q. */
+static void generate_random_problem(void)
+{
+	srand48(0xdeadbeef);
+
+	int i, j;
+
+	starpu_data_malloc_pinned_if_possible((void **)&A, n*n*sizeof(TYPE));
+	starpu_data_malloc_pinned_if_possible((void **)&b, n*sizeof(TYPE));
+	starpu_data_malloc_pinned_if_possible((void **)&x, n*sizeof(TYPE));
+	assert(A && b && x);
+
+	/* Create a random matrix (A) and two random vectors (x and b) */
+	for (j = 0; j < n; j++)
+	{
+		b[j] = (TYPE)drand48();
+		x[j] = (TYPE)b[j];
+
+		/* Off-diagonal entries in (-2, -1], mirrored so A is symmetric */
+		for (i = 0; i < j; i++)
+		{
+			A[n*j + i] = (TYPE)(-2.0+drand48());	
+			A[n*i + j] = A[n*j + i];
+		}
+
+		/* Constant positive diagonal; presumably intended to make A
+		 * positive definite, which CG requires -- TODO confirm for
+		 * this value of n */
+		A[n*j + j] = (TYPE)30.0;
+	}
+
+	/* Internal vectors */
+	starpu_data_malloc_pinned_if_possible((void **)&r, n*sizeof(TYPE));
+	starpu_data_malloc_pinned_if_possible((void **)&d, n*sizeof(TYPE));
+	starpu_data_malloc_pinned_if_possible((void **)&q, n*sizeof(TYPE));
+	assert(r && d && q);
+
+	memset(r, 0, n*sizeof(TYPE));
+	memset(d, 0, n*sizeof(TYPE));
+	memset(q, 0, n*sizeof(TYPE));
+}
+
+/* Register every buffer with StarPU (home node 0, i.e. main memory) so
+ * that the runtime can manage transfers to and from the accelerators. */
+static void register_data(void)
+{
+	/* A is an n x n matrix with leading dimension n */
+	starpu_matrix_data_register(&A_handle, 0, (uintptr_t)A, n, n, n, sizeof(TYPE));
+	starpu_vector_data_register(&b_handle, 0, (uintptr_t)b, n, sizeof(TYPE));
+	starpu_vector_data_register(&x_handle, 0, (uintptr_t)x, n, sizeof(TYPE));
+
+	starpu_vector_data_register(&r_handle, 0, (uintptr_t)r, n, sizeof(TYPE));
+	starpu_vector_data_register(&d_handle, 0, (uintptr_t)d, n, sizeof(TYPE));
+	starpu_vector_data_register(&q_handle, 0, (uintptr_t)q, n, sizeof(TYPE));
+
+	/* Scalar variables receiving the dot-product results */
+	starpu_variable_data_register(&dtq_handle, 0, (uintptr_t)&dtq, sizeof(TYPE));
+	starpu_variable_data_register(&rtr_handle, 0, (uintptr_t)&rtr, sizeof(TYPE));
+}
+
+/* Placeholder: the data is not partitioned yet, so every kernel works on
+ * the whole vectors/matrix.  TODO: split the handles into blocks so the
+ * work can be spread over several workers. */
+static void partition_data(void)
+{
+
+}
+
+/*
+ *	Main loop
+ */
+
+/* Driver of the conjugate-gradient iteration (see the pseudo-code at the
+ * top of this file).  All kernels are submitted asynchronously; the only
+ * host synchronization points are the acquire/release pairs needed to
+ * read the scalar dot-product results. */
+static void cg(void)
+{
+	TYPE delta_new, delta_old, delta_0;
+
+	int i = 0;
+
+	/* r <- b */
+	copy_handle(r_handle, b_handle);
+
+	starpu_task_wait_for_all();
+
+	/* r <- r - A x */
+	gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0); 
+
+	/* d <- r */
+	copy_handle(d_handle, r_handle);
+
+	/* delta_new = dot(r,r) */
+	dot_kernel(r_handle, r_handle, rtr_handle);
+
+	/* Blocks until the dot task completed and rtr is valid in RAM */
+	starpu_data_acquire(rtr_handle, STARPU_R);
+	delta_new = rtr;
+	delta_0 = delta_new;
+	starpu_data_release(rtr_handle);
+	
+	fprintf(stderr, "DELTA %f\n", delta_new);
+
+	while ((i < i_max) && (delta_new > (eps*eps*delta_0)))
+	{
+		fprintf(stderr, "*****************************************\niter %d DELTA %e - %e\n", i, delta_new, sqrt(delta_new/n));
+		TYPE alpha, beta;
+
+		/* q <- A d */
+		gemv_kernel(q_handle, A_handle, d_handle, 0.0, 1.0);
+
+		/* dtq <- dot(d,q) */
+		dot_kernel(d_handle, q_handle, dtq_handle);
+
+		/* alpha = delta_new / dtq */
+		starpu_data_acquire(dtq_handle, STARPU_R);
+		alpha = delta_new/dtq;
+//		fprintf(stderr, "ALPHA %e DELTA NEW %e DTQ %e\n", alpha, delta_new, dtq);
+		starpu_data_release(dtq_handle);
+		
+		/* x <- x + alpha d */
+		axpy_kernel(x_handle, d_handle, alpha);
+
+		/* Every 50 iterations, recompute the true residual instead of
+		 * the recurrence, to limit accumulated rounding error */
+		if ((i % 50) == 0)
+		{
+			/* r <- b */
+			copy_handle(r_handle, b_handle);
+		
+			/* r <- r - A x */
+			gemv_kernel(r_handle, A_handle, x_handle, 1.0, -1.0); 
+		}
+		else {
+			/* r <- r - alpha q */
+			axpy_kernel(r_handle, q_handle, -alpha);
+		}
+
+		/* delta_new = dot(r,r) */
+		dot_kernel(r_handle, r_handle, rtr_handle);
+
+		starpu_data_acquire(rtr_handle, STARPU_R);
+		delta_old = delta_new;
+		delta_new = rtr;
+		beta = delta_new / delta_old;
+		starpu_data_release(rtr_handle);
+
+		/* d <- beta d + r */
+		scal_axpy_kernel(d_handle, beta, r_handle, 1.0);
+
+		i++;
+	}
+}
+
+/* Verify the solution by computing the residual ||b - A x|| on the CPU
+ * and comparing it (relatively to ||b||) against the solver tolerance.
+ * Returns 0 on success, non-zero when the residual is too large (which
+ * also catches the case where the loop stopped at i_max without
+ * converging).  The original version was an empty stub returning 0. */
+int check(void)
+{
+	long long i, j;
+	TYPE err = 0.0;
+	TYPE norm_b = 0.0;
+
+	/* Make sure the StarPU-managed copies of x and b are synchronized
+	 * back to the main-memory buffers before reading them.  A is only
+	 * ever accessed in read mode, so its home copy is still valid. */
+	starpu_data_acquire(x_handle, STARPU_R);
+	starpu_data_acquire(b_handle, STARPU_R);
+
+	for (j = 0; j < n; j++)
+	{
+		TYPE r_j = b[j];
+		for (i = 0; i < n; i++)
+			r_j -= A[n*j + i] * x[i];
+		err += r_j * r_j;
+		norm_b += b[j] * b[j];
+	}
+
+	starpu_data_release(b_handle);
+	starpu_data_release(x_handle);
+
+	/* The solver iterates until dot(r,r) <= eps^2 * dot(r0,r0), so the
+	 * final residual should be of the order of eps relative to b; allow
+	 * generous slack for rounding and for the recurrence drift. */
+	return (sqrt(err) > 1000.0 * eps * sqrt(norm_b));
+}
+
+/* Entry point: initialize StarPU and CUBLAS, build and register the
+ * problem, run the CG solver, then check the result and clean up.
+ * Returns the (non-zero on failure) status of check(). */
+int main(int argc, char **argv)
+{
+	int ret;
+
+	/* The original version ignored the starpu_init() return value */
+	ret = starpu_init(NULL);
+	assert(!ret);
+	starpu_helper_cublas_init();
+
+	generate_random_problem();
+	register_data();
+	partition_data();
+
+	cg();
+
+	ret = check();
+
+	/* Unregister the data: this waits for the tasks still using the
+	 * handles and flushes cached copies back to main memory before the
+	 * runtime is shut down. */
+	starpu_data_unregister(A_handle);
+	starpu_data_unregister(b_handle);
+	starpu_data_unregister(x_handle);
+	starpu_data_unregister(r_handle);
+	starpu_data_unregister(d_handle);
+	starpu_data_unregister(q_handle);
+	starpu_data_unregister(dtq_handle);
+	starpu_data_unregister(rtr_handle);
+
+	starpu_helper_cublas_shutdown();
+	starpu_shutdown();
+
+	return ret;
+}

+ 71 - 0
examples/cg/cg.h

@@ -0,0 +1,71 @@
+/*
+ * StarPU
+ * Copyright (C) Université Bordeaux 1, CNRS 2008-2010 (see AUTHORS file)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+#ifndef __STARPU_EXAMPLE_CG_H__
+#define __STARPU_EXAMPLE_CG_H__
+
+#include <starpu.h>
+#include <math.h>
+#include <common/blas.h>
+#include <cuda.h>
+#include <cublas.h>
+
+/* Comment this out to use single precision instead of double precision */
+#define DOUBLE
+
+/* Map the generic kernel names onto the BLAS/CUBLAS routines of the
+ * selected precision.  (The original version defined GEMV twice in each
+ * branch and included <starpu.h> twice.) */
+#ifdef DOUBLE
+#define TYPE	double
+#define GEMV	DGEMV
+#define DOT	DDOT
+#define AXPY	DAXPY
+#define SCAL	DSCAL
+#define cublasdot	cublasDdot
+#define cublasscal	cublasDscal
+#define cublasaxpy	cublasDaxpy
+#define cublasgemv	cublasDgemv
+#else
+#define TYPE	float
+#define GEMV	SGEMV
+#define DOT	SDOT
+#define AXPY	SAXPY
+#define SCAL	SSCAL
+#define cublasdot	cublasSdot
+#define cublasscal	cublasSscal
+#define cublasaxpy	cublasSaxpy
+#define cublasgemv	cublasSgemv
+#endif
+
+/* s = dot(v1, v2) (asynchronous task submission) */
+void dot_kernel(starpu_data_handle v1,
+                starpu_data_handle v2,
+                starpu_data_handle s);
+
+/* v1 = p1 * v1 + p2 * matrix * v2 */
+void gemv_kernel(starpu_data_handle v1,
+                starpu_data_handle matrix, 
+                starpu_data_handle v2,
+                TYPE p1, TYPE p2);
+
+/* v1 = v1 + p1 * v2 */
+void axpy_kernel(starpu_data_handle v1,
+		starpu_data_handle v2, TYPE p1);
+
+/* v1 = p1 * v1 + p2 * v2 */
+void scal_axpy_kernel(starpu_data_handle v1, TYPE p1,
+			starpu_data_handle v2, TYPE p2);
+
+/* vector dst <- vector src */
+void copy_handle(starpu_data_handle dst, starpu_data_handle src);
+
+#endif // __STARPU_EXAMPLE_CG_H__

+ 357 - 0
examples/cg/cg_kernels.c

@@ -0,0 +1,357 @@
+/*
+ * StarPU
+ * Copyright (C) Université Bordeaux 1, CNRS 2008-2010 (see AUTHORS file)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+#include "cg.h"
+
+/*
+ *	DOT kernel : s = dot(v1, v2)
+ */
+
+/* CUDA implementation of s = dot(v1, v2).  With the legacy CUBLAS API,
+ * cublasdot returns its result to the host, so it has to be copied back
+ * into the device-side variable backing descr[0]. */
+static void dot_kernel_cuda(void *descr[], void *cl_arg)
+{
+	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]); 
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
+
+	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
+ 
+	TYPE local_dot = cublasdot(n, v1, 1, v2, 1);
+//	fprintf(stderr, "DOT -> %e\n", local_dot);
+	cudaMemcpy(dot, &local_dot, sizeof(TYPE), cudaMemcpyHostToDevice);
+	cudaThreadSynchronize();
+}
+
+/* CPU implementation of s = dot(v1, v2), delegated to the BLAS DOT
+ * routine.  (The commented-out naive loop and debug traces of the
+ * original version were removed.) */
+static void dot_kernel_cpu(void *descr[], void *cl_arg)
+{
+	TYPE *dot = (TYPE *)STARPU_VARIABLE_GET_PTR(descr[0]); 
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
+
+	unsigned n = STARPU_VECTOR_GET_NX(descr[1]);
+
+	*dot = DOT(n, v1, 1, v2, 1);
+}
+
+/* The dot kernel may run either on a CPU or on a CUDA device */
+static starpu_codelet dot_kernel_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = dot_kernel_cpu,
+	.cuda_func = dot_kernel_cuda,
+	.nbuffers = 3
+};
+
+/* Submit an asynchronous task computing s = dot(v1, v2): the result
+ * handle s is accessed in write mode, the input vectors in read mode. */
+void dot_kernel(starpu_data_handle v1,
+		starpu_data_handle v2,
+		starpu_data_handle s)
+{
+	int ret;
+	struct starpu_task *task = starpu_task_create();
+
+	task->cl = &dot_kernel_cl;
+	task->buffers[0].handle = s;
+	task->buffers[0].mode = STARPU_W;
+	task->buffers[1].handle = v1;
+	task->buffers[1].mode = STARPU_R;
+	task->buffers[2].handle = v2;
+	task->buffers[2].mode = STARPU_R;
+
+	ret = starpu_task_submit(task);
+	assert(!ret);
+}
+
+/*
+ *	GEMV kernel : v1 = p1 * v1 + p2 * M v2 
+ */
+
+/* Scalar coefficients passed to the kernels through cl_arg.  They are
+ * heap-allocated by the submission functions and freed by the kernel
+ * implementation once the task has run. */
+struct kernel_params {
+	TYPE p1;
+	TYPE p2;
+};
+
+/* CUDA implementation of v1 = p1 * v1 + p2 * M v2 (descr[0] = v1,
+ * descr[1] = M, descr[2] = v2).  Frees the cl_arg parameter block. */
+static void gemv_kernel_cuda(void *descr[], void *cl_arg)
+{
+	struct kernel_params *params = cl_arg;
+
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
+	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);
+
+	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
+	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
+	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);
+ 
+	/* Compute v1 = p1 * v1 + p2 * M v2 */
+	cublasgemv('N', nx, ny, params->p2, M, ld, v2, 1, params->p1, v1, 1);
+	cudaThreadSynchronize();
+
+	free(params);
+}
+
+/* CPU implementation of v1 = p1 * v1 + p2 * M v2, delegated to the BLAS
+ * GEMV routine.  Frees the cl_arg parameter block. */
+static void gemv_kernel_cpu(void *descr[], void *cl_arg)
+{
+	struct kernel_params *params = cl_arg;
+
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[2]);
+	TYPE *M = (TYPE *)STARPU_MATRIX_GET_PTR(descr[1]);
+
+	unsigned ld = STARPU_MATRIX_GET_LD(descr[1]);
+	unsigned nx = STARPU_MATRIX_GET_NX(descr[1]);
+	unsigned ny = STARPU_MATRIX_GET_NY(descr[1]);
+
+	/* Compute v1 = p1 * v1 + p2 * M v2 */
+	GEMV("N", nx, ny, params->p2, M, ld, v2, 1, params->p1, v1, 1);
+
+	free(params);
+}
+
+static starpu_codelet gemv_kernel_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = gemv_kernel_cpu,
+	.cuda_func = gemv_kernel_cuda,
+	.nbuffers = 3
+};
+
+/* Submit an asynchronous task computing v1 = p1 * v1 + p2 * matrix * v2.
+ * The scalar parameters are heap-allocated here and freed by the kernel
+ * implementation once the task has run. */
+void gemv_kernel(starpu_data_handle v1,
+		starpu_data_handle matrix,
+		starpu_data_handle v2,
+		TYPE p1, TYPE p2)
+{
+	int ret;
+	struct starpu_task *task = starpu_task_create();
+
+	task->cl = &gemv_kernel_cl;
+	task->buffers[0].handle = v1;
+	task->buffers[0].mode = STARPU_RW;
+	task->buffers[1].handle = matrix;
+	task->buffers[1].mode = STARPU_R;
+	task->buffers[2].handle = v2;
+	task->buffers[2].mode = STARPU_R;
+
+	struct kernel_params *params = malloc(sizeof(struct kernel_params));
+	assert(params);
+	params->p1 = p1;
+	params->p2 = p2;
+
+	task->cl_arg = params;
+
+	ret = starpu_task_submit(task);
+	assert(!ret);
+}
+
+/*
+ *	AXPY + SCAL kernel : v1 = p1 * v1 + p2 * v2 
+ */
+/* CUDA implementation of v1 = p1 * v1 + p2 * v2, performed as a SCAL
+ * followed by an AXPY.  Frees the cl_arg parameter block. */
+static void scal_axpy_kernel_cuda(void *descr[], void *cl_arg)
+{
+	struct kernel_params *params = cl_arg;
+
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+
+	/* descr[0] uses the vector interface, so query its length with the
+	 * vector accessor (the original used STARPU_MATRIX_GET_NX here) */
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	/* Compute v1 = p1 * v1 + p2 * v2.
+	 *	v1 = p1 v1
+	 *	v1 = v1 + p2 v2
+	 */
+	cublasscal(n, params->p1, v1, 1);
+	cublasaxpy(n, params->p2, v2, 1, v1, 1);
+	cudaThreadSynchronize();
+
+	free(params);
+}
+
+/* CPU implementation of v1 = p1 * v1 + p2 * v2, performed as a SCAL
+ * followed by an AXPY.  Frees the cl_arg parameter block. */
+static void scal_axpy_kernel_cpu(void *descr[], void *cl_arg)
+{
+	struct kernel_params *params = cl_arg;
+
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+
+	/* descr[0] uses the vector interface, so query its length with the
+	 * vector accessor (the original used STARPU_MATRIX_GET_NX here) */
+	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	/* Compute v1 = p1 * v1 + p2 * v2.
+	 *	v1 = p1 v1
+	 *	v1 = v1 + p2 v2
+	 */
+	SCAL(nx, params->p1, v1, 1);
+	AXPY(nx, params->p2, v2, 1, v1, 1);
+
+	free(params);
+}
+
+static starpu_codelet scal_axpy_kernel_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = scal_axpy_kernel_cpu,
+	.cuda_func = scal_axpy_kernel_cuda,
+	.nbuffers = 2
+};
+
+/* Submit an asynchronous task computing v1 = p1 * v1 + p2 * v2.  The
+ * scalar parameters are heap-allocated here and freed by the kernel
+ * implementation once the task has run. */
+void scal_axpy_kernel(starpu_data_handle v1, TYPE p1,
+			starpu_data_handle v2, TYPE p2)
+{
+	int ret;
+	struct starpu_task *task = starpu_task_create();
+
+	task->cl = &scal_axpy_kernel_cl;
+	task->buffers[0].handle = v1;
+	task->buffers[0].mode = STARPU_RW;
+	task->buffers[1].handle = v2;
+	task->buffers[1].mode = STARPU_R;
+
+	struct kernel_params *params = malloc(sizeof(struct kernel_params));
+	assert(params);
+	params->p1 = p1;
+	params->p2 = p2;
+
+	task->cl_arg = params;
+
+	ret = starpu_task_submit(task);
+	assert(!ret);
+}
+
+
+/*
+ *	AXPY kernel : v1 = v1 + p1 * v2 
+ */
+/* CUDA implementation of v1 = v1 + p1 * v2.  Frees the cl_arg parameter
+ * block (only p1 is used by this kernel). */
+static void axpy_kernel_cuda(void *descr[], void *cl_arg)
+{
+	struct kernel_params *params = cl_arg;
+
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+
+	/* descr[0] uses the vector interface, so query its length with the
+	 * vector accessor (the original used STARPU_MATRIX_GET_NX here) */
+	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	/* Compute v1 = v1 + p1 * v2.
+	 */
+	cublasaxpy(n, params->p1, v2, 1, v1, 1);
+	cudaThreadSynchronize();
+
+	free(params);
+}
+
+/* CPU implementation of v1 = v1 + p1 * v2, delegated to the BLAS AXPY
+ * routine.  Frees the cl_arg parameter block (only p1 is used).  The
+ * original comment wrongly described this as v1 = p1 * v1 + p2 * v2. */
+static void axpy_kernel_cpu(void *descr[], void *cl_arg)
+{
+	struct kernel_params *params = cl_arg;
+
+	TYPE *v1 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *v2 = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+
+	/* descr[0] uses the vector interface, so query its length with the
+	 * vector accessor (the original used STARPU_MATRIX_GET_NX here) */
+	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
+ 
+	/* Compute v1 = v1 + p1 * v2.
+	 */
+	AXPY(nx, params->p1, v2, 1, v1, 1);
+
+	free(params);
+}
+
+static starpu_codelet axpy_kernel_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = axpy_kernel_cpu,
+	.cuda_func = axpy_kernel_cuda,
+	.nbuffers = 2
+};
+
+/* Submit an asynchronous task computing v1 = v1 + p1 * v2.  Note that
+ * params->p2 is deliberately left unset: the axpy kernels only read p1. */
+void axpy_kernel(starpu_data_handle v1,
+		starpu_data_handle v2, TYPE p1)
+{
+	int ret;
+	struct starpu_task *task = starpu_task_create();
+
+	task->cl = &axpy_kernel_cl;
+	task->buffers[0].handle = v1;
+	task->buffers[0].mode = STARPU_RW;
+	task->buffers[1].handle = v2;
+	task->buffers[1].mode = STARPU_R;
+
+	struct kernel_params *params = malloc(sizeof(struct kernel_params));
+	assert(params);
+	params->p1 = p1;
+
+	task->cl_arg = params;
+
+	ret = starpu_task_submit(task);
+	assert(!ret);
+}
+
+
+/*
+ *	COPY kernel : vector_dst <- vector_src
+ */
+
+/* CPU implementation of dst <- src: a plain memcpy between the two
+ * main-memory vector buffers */
+static void copy_handle_cpu(void *descr[], void *cl_arg)
+{
+	TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+
+	/* Byte length of the destination vector */
+	size_t len = STARPU_VECTOR_GET_NX(descr[0]) * STARPU_VECTOR_GET_ELEMSIZE(descr[0]);
+
+	memcpy(dst, src, len);
+}
+
+/* CUDA implementation of dst <- src: both buffers live in device memory */
+static void copy_handle_cuda(void *descr[], void *cl_arg)
+{
+	TYPE *dst = (TYPE *)STARPU_VECTOR_GET_PTR(descr[0]);
+	TYPE *src = (TYPE *)STARPU_VECTOR_GET_PTR(descr[1]);
+	
+	unsigned nx = STARPU_VECTOR_GET_NX(descr[0]);
+	size_t elemsize = STARPU_VECTOR_GET_ELEMSIZE(descr[0]);
+
+	cudaMemcpy(dst, src, nx*elemsize, cudaMemcpyDeviceToDevice);
+	cudaThreadSynchronize();
+}
+
+static starpu_codelet copy_handle_cl = {
+	.where = STARPU_CPU|STARPU_CUDA,
+	.cpu_func = copy_handle_cpu,
+	.cuda_func = copy_handle_cuda,
+	.nbuffers = 2
+};
+
+/* Submit an asynchronous task copying vector src into vector dst */
+void copy_handle(starpu_data_handle dst, starpu_data_handle src)
+{
+	int ret;
+	struct starpu_task *task = starpu_task_create();
+
+	task->cl = &copy_handle_cl;
+	task->buffers[0].handle = dst;
+	task->buffers[0].mode = STARPU_W;
+	task->buffers[1].handle = src;
+	task->buffers[1].mode = STARPU_R;
+
+	ret = starpu_task_submit(task);
+	assert(!ret);
+}