Browse Source

Add 4D tensor data interface

Samuel Thibault 5 years ago
parent
commit
788e9e80f9

+ 1 - 0
ChangeLog

@@ -24,6 +24,7 @@ New features:
     variable size to express their maximal potential size.
   * New offline tool to draw graph showing elapsed time between sent
     or received data and their use by tasks
+  * Add 4D tensor data interface.
 
 Small changes:
   * Use the S4U interface of Simgrid instead of xbt and MSG.

+ 89 - 0
include/fstarpu_mod.f90

@@ -784,6 +784,95 @@ module fstarpu_mod
 
                 ! void *starpu_data_get_interface_on_node(starpu_data_handle_t handle, unsigned memory_node);
 
+                ! == starpu_data_interface.h: tensor ==
+
+                ! void starpu_tensor_data_register(starpu_data_handle_t *handle, int home_node, uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t ldt, uint32_t nx, uint32_t ny, uint32_t nz, uint32_t nt, size_t elemsize);
+                subroutine fstarpu_tensor_data_register(dh, home_node, ptr, ldy, ldz, ldt, nx, ny, nz, nt, elt_size) &
+                                bind(C,name="starpu_tensor_data_register")
+                        use iso_c_binding, only: c_ptr, c_int, c_size_t
+                        type(c_ptr), intent(out) :: dh
+                        integer(c_int), value, intent(in) :: home_node
+                        type(c_ptr), value, intent(in) :: ptr
+                        integer(c_int), value, intent(in) :: ldy
+                        integer(c_int), value, intent(in) :: ldz
+                        integer(c_int), value, intent(in) :: ldt
+                        integer(c_int), value, intent(in) :: nx
+                        integer(c_int), value, intent(in) :: ny
+                        integer(c_int), value, intent(in) :: nz
+                        integer(c_int), value, intent(in) :: nt
+                        integer(c_size_t), value, intent(in) :: elt_size
+                end subroutine fstarpu_tensor_data_register
+
+                ! void starpu_tensor_ptr_register(starpu_data_handle_t handle, unsigned node, uintptr_t ptr, uintptr_t dev_handle, size_t offset, uint32_t ldy, uint32_t ldz, uint32_t ldt);
+                subroutine fstarpu_tensor_ptr_register(dh, node, ptr, dev_handle, offset, ldy, ldz, ldt) &
+                                bind(C,name="starpu_tensor_ptr_register")
+                        use iso_c_binding, only: c_ptr, c_int, c_size_t
+                        type(c_ptr), value, intent(in) :: dh ! C takes the handle by value, so "value" is required
+                        integer(c_int), value, intent(in) :: node
+                        type(c_ptr), value, intent(in) :: ptr
+                        type(c_ptr), value, intent(in) :: dev_handle
+                        integer(c_size_t), value, intent(in) :: offset
+                        integer(c_int), value, intent(in) :: ldy
+                        integer(c_int), value, intent(in) :: ldz
+                        integer(c_int), value, intent(in) :: ldt
+                end subroutine fstarpu_tensor_ptr_register
+
+                function fstarpu_tensor_get_ptr(buffers, i) bind(C)
+                        use iso_c_binding, only: c_ptr, c_int
+                        type(c_ptr) :: fstarpu_tensor_get_ptr
+                        type(c_ptr), value, intent(in) :: buffers
+                        integer(c_int), value, intent(in) :: i
+                end function fstarpu_tensor_get_ptr
+
+                function fstarpu_tensor_get_ldy(buffers, i) bind(C)
+                        use iso_c_binding, only: c_ptr, c_int
+                        integer(c_int) :: fstarpu_tensor_get_ldy
+                        type(c_ptr), value, intent(in) :: buffers
+                        integer(c_int), value, intent(in) :: i
+                end function fstarpu_tensor_get_ldy
+
+                function fstarpu_tensor_get_ldz(buffers, i) bind(C)
+                        use iso_c_binding, only: c_ptr, c_int
+                        integer(c_int) :: fstarpu_tensor_get_ldz
+                        type(c_ptr), value, intent(in) :: buffers
+                        integer(c_int), value, intent(in) :: i
+                end function fstarpu_tensor_get_ldz
+
+                function fstarpu_tensor_get_ldt(buffers, i) bind(C)
+                        use iso_c_binding, only: c_ptr, c_int
+                        integer(c_int) :: fstarpu_tensor_get_ldt
+                        type(c_ptr), value, intent(in) :: buffers
+                        integer(c_int), value, intent(in) :: i
+                end function fstarpu_tensor_get_ldt
+
+                function fstarpu_tensor_get_nx(buffers, i) bind(C)
+                        use iso_c_binding, only: c_ptr, c_int
+                        integer(c_int) :: fstarpu_tensor_get_nx
+                        type(c_ptr), value, intent(in) :: buffers
+                        integer(c_int), value, intent(in) :: i
+                end function fstarpu_tensor_get_nx
+
+                function fstarpu_tensor_get_ny(buffers, i) bind(C)
+                        use iso_c_binding, only: c_ptr, c_int
+                        integer(c_int) :: fstarpu_tensor_get_ny
+                        type(c_ptr), value, intent(in) :: buffers
+                        integer(c_int), value, intent(in) :: i
+                end function fstarpu_tensor_get_ny
+
+                function fstarpu_tensor_get_nz(buffers, i) bind(C)
+                        use iso_c_binding, only: c_ptr, c_int
+                        integer(c_int) :: fstarpu_tensor_get_nz
+                        type(c_ptr), value, intent(in) :: buffers
+                        integer(c_int), value, intent(in) :: i
+                end function fstarpu_tensor_get_nz
+
+                function fstarpu_tensor_get_nt(buffers, i) bind(C)
+                        use iso_c_binding, only: c_ptr, c_int
+                        integer(c_int) :: fstarpu_tensor_get_nt
+                        type(c_ptr), value, intent(in) :: buffers
+                        integer(c_int), value, intent(in) :: i
+                end function fstarpu_tensor_get_nt
+
                 ! == starpu_data_interface.h: block ==
 
                 ! void starpu_block_data_register(starpu_data_handle_t *handle, unsigned home_node, uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t nx, uint32_t ny, uint32_t nz, size_t elemsize);

+ 183 - 1
include/starpu_data_interfaces.h

@@ -358,7 +358,8 @@ enum starpu_data_interface_id
 	STARPU_VOID_INTERFACE_ID=6, /**< Identifier for the void data interface*/
 	STARPU_MULTIFORMAT_INTERFACE_ID=7, /**< Identifier for the multiformat data interface*/
 	STARPU_COO_INTERFACE_ID=8, /**< Identifier for the COO data interface*/
-	STARPU_MAX_INTERFACE_ID=9 /**< Maximum number of data interfaces */
+	STARPU_TENSOR_INTERFACE_ID=9, /**< Identifier for the tensor data interface*/
+	STARPU_MAX_INTERFACE_ID=10 /**< Maximum number of data interfaces */
 };
 
 /**
@@ -1165,6 +1166,187 @@ designated by \p interface.
 /** @} */
 
 /**
+   @name Tensor Data Interface
+   @{
+*/
+
+extern struct starpu_data_interface_ops starpu_interface_tensor_ops;
+
+/* TODO: rename to 4dtensor? */
+/* TODO: add allocsize support */
+/**
+   Tensor interface for 4D dense tensors
+*/
+struct starpu_tensor_interface
+{
+	enum starpu_data_interface_id id; /**< identifier of the interface */
+
+	uintptr_t ptr;                    /**< local pointer of the tensor */
+	uintptr_t dev_handle;             /**< device handle of the tensor. */
+	size_t offset;                    /**< offset in the tensor. */
+	uint32_t nx;                      /**< number of elements on the x-axis of the tensor. */
+	uint32_t ny;                      /**< number of elements on the y-axis of the tensor. */
+	uint32_t nz;                      /**< number of elements on the z-axis of the tensor. */
+	uint32_t nt;                      /**< number of elements on the t-axis of the tensor. */
+	uint32_t ldy;                     /**< number of elements between two lines */
+	uint32_t ldz;                     /**< number of elements between two planes */
+	uint32_t ldt;                     /**< number of elements between two cubes */
+	size_t elemsize;                  /**< size of the elements of the tensor. */
+};
+
+/**
+   Register the \p nx x \p ny x \p nz x \p nt 4D tensor of \p elemsize byte elements
+   pointed by \p ptr and initialize \p handle to represent it. Again, \p ldy,
+   \p ldz, and \p ldt specify the number of elements between rows, between z planes and between t cubes.
+
+   Here an example of how to use the function.
+   \code{.c}
+   float *tensor;
+   starpu_data_handle_t tensor_handle;
+   tensor = (float*)malloc(nx*ny*nz*nt*sizeof(float));
+   starpu_tensor_data_register(&tensor_handle, STARPU_MAIN_RAM, (uintptr_t)tensor, nx, nx*ny, nx*ny*nz, nx, ny, nz, nt, sizeof(float));
+   \endcode
+*/
+void starpu_tensor_data_register(starpu_data_handle_t *handle, int home_node, uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t ldt, uint32_t nx, uint32_t ny, uint32_t nz, uint32_t nt, size_t elemsize);
+
+/**
+   Register into the \p handle that to store data on node \p node it should use the
+   buffer located at \p ptr, or device handle \p dev_handle and offset \p offset
+   (for OpenCL, notably), with \p ldy elements between rows, and \p ldz
+   elements between z planes, and \p ldt elements between t cubes.
+*/
+void starpu_tensor_ptr_register(starpu_data_handle_t handle, unsigned node, uintptr_t ptr, uintptr_t dev_handle, size_t offset, uint32_t ldy, uint32_t ldz, uint32_t ldt);
+
+/**
+   Return the number of elements on the x-axis of the tensor
+   designated by \p handle.
+ */
+uint32_t starpu_tensor_get_nx(starpu_data_handle_t handle);
+
+/**
+   Return the number of elements on the y-axis of the tensor
+   designated by \p handle.
+ */
+uint32_t starpu_tensor_get_ny(starpu_data_handle_t handle);
+
+/**
+   Return the number of elements on the z-axis of the tensor
+   designated by \p handle.
+ */
+uint32_t starpu_tensor_get_nz(starpu_data_handle_t handle);
+
+/**
+   Return the number of elements on the t-axis of the tensor
+   designated by \p handle.
+ */
+uint32_t starpu_tensor_get_nt(starpu_data_handle_t handle);
+
+/**
+   Return the number of elements between each row of the tensor
+   designated by \p handle, in the format of the current memory node.
+*/
+uint32_t starpu_tensor_get_local_ldy(starpu_data_handle_t handle);
+
+/**
+   Return the number of elements between each z plane of the tensor
+   designated by \p handle, in the format of the current memory node.
+ */
+uint32_t starpu_tensor_get_local_ldz(starpu_data_handle_t handle);
+
+/**
+   Return the number of elements between each t cubes of the tensor
+   designated by \p handle, in the format of the current memory node.
+ */
+uint32_t starpu_tensor_get_local_ldt(starpu_data_handle_t handle);
+
+/**
+   Return the local pointer associated with \p handle.
+ */
+uintptr_t starpu_tensor_get_local_ptr(starpu_data_handle_t handle);
+
+/**
+   Return the size of the elements of the tensor designated by
+   \p handle.
+ */
+size_t starpu_tensor_get_elemsize(starpu_data_handle_t handle);
+
+#if defined(STARPU_HAVE_STATEMENT_EXPRESSIONS) && defined(STARPU_DEBUG)
+#define STARPU_TENSOR_CHECK(interface)           STARPU_ASSERT_MSG((((struct starpu_tensor_interface *)(interface))->id) == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.")
+#define STARPU_TENSOR_GET_PTR(interface)	        ({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->ptr) ; })
+#define STARPU_TENSOR_GET_DEV_HANDLE(interface)	({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->dev_handle) ; })
+#define STARPU_TENSOR_GET_OFFSET(interface)	({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->offset) ; })
+#define STARPU_TENSOR_GET_NX(interface)	        ({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->nx) ; })
+#define STARPU_TENSOR_GET_NY(interface)	        ({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->ny) ; })
+#define STARPU_TENSOR_GET_NZ(interface)	        ({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->nz) ; })
+#define STARPU_TENSOR_GET_NT(interface)	        ({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->nt) ; })
+#define STARPU_TENSOR_GET_LDY(interface)	        ({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->ldy) ; })
+#define STARPU_TENSOR_GET_LDZ(interface)	        ({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->ldz) ; })
+#define STARPU_TENSOR_GET_LDT(interface)	        ({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->ldt) ; })
+#define STARPU_TENSOR_GET_ELEMSIZE(interface)	({ STARPU_TENSOR_CHECK(interface); (((struct starpu_tensor_interface *)(interface))->elemsize) ; })
+#else
+/**
+   Return a pointer to the tensor designated by \p interface.
+ */
+#define STARPU_TENSOR_GET_PTR(interface)	        (((struct starpu_tensor_interface *)(interface))->ptr)
+/**
+   Return a device handle for the tensor designated by \p interface,
+   to be used on OpenCL. The offset returned by
+   ::STARPU_TENSOR_GET_OFFSET has to be used in
+   addition to this.
+ */
+#define STARPU_TENSOR_GET_DEV_HANDLE(interface)	(((struct starpu_tensor_interface *)(interface))->dev_handle)
+/**
+   Return the offset in the tensor designated by \p interface, to be
+   used with the device handle.
+ */
+#define STARPU_TENSOR_GET_OFFSET(interface)	(((struct starpu_tensor_interface *)(interface))->offset)
+/**
+   Return the number of elements on the x-axis of the tensor
+   designated by \p interface.
+ */
+#define STARPU_TENSOR_GET_NX(interface)	        (((struct starpu_tensor_interface *)(interface))->nx)
+/**
+   Return the number of elements on the y-axis of the tensor
+   designated by \p interface.
+ */
+#define STARPU_TENSOR_GET_NY(interface)	        (((struct starpu_tensor_interface *)(interface))->ny)
+/**
+Return the number of elements on the z-axis of the tensor
+designated by \p interface.
+ */
+#define STARPU_TENSOR_GET_NZ(interface)	        (((struct starpu_tensor_interface *)(interface))->nz)
+/**
+Return the number of elements on the t-axis of the tensor
+designated by \p interface.
+ */
+#define STARPU_TENSOR_GET_NT(interface)	        (((struct starpu_tensor_interface *)(interface))->nt)
+/**
+   Return the number of elements between each row of the tensor
+   designated by \p interface. May be equal to nx when there is no padding.
+ */
+#define STARPU_TENSOR_GET_LDY(interface)	        (((struct starpu_tensor_interface *)(interface))->ldy)
+/**
+   Return the number of elements between each z plane of the tensor
+   designated by \p interface. May be equal to nx*ny when there is no
+   padding.
+ */
+#define STARPU_TENSOR_GET_LDZ(interface)	        (((struct starpu_tensor_interface *)(interface))->ldz)
+/**
+   Return the number of elements between each t cubes of the tensor
+   designated by \p interface. May be equal to nx*ny*nz when there is no
+   padding.
+ */
+#define STARPU_TENSOR_GET_LDT(interface)	        (((struct starpu_tensor_interface *)(interface))->ldt)
+/**
+   Return the size of the elements of the tensor designated by
+   \p interface.
+ */
+#define STARPU_TENSOR_GET_ELEMSIZE(interface)	(((struct starpu_tensor_interface *)(interface))->elemsize)
+#endif
+
+/** @} */
+
+/**
    @name Vector Data Interface
    @{
 */

+ 1 - 0
src/Makefile.am

@@ -245,6 +245,7 @@ libstarpu_@STARPU_EFFECTIVE_VERSION@_la_SOURCES = 		\
 	datawizard/interfaces/matrix_interface.c		\
 	datawizard/interfaces/block_filters.c			\
 	datawizard/interfaces/block_interface.c			\
+	datawizard/interfaces/tensor_interface.c		\
 	datawizard/interfaces/vector_interface.c		\
 	datawizard/interfaces/bcsr_filters.c			\
 	datawizard/interfaces/csr_filters.c			\

+ 832 - 0
src/datawizard/interfaces/tensor_interface.c

@@ -0,0 +1,832 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2009-2019                                Université de Bordeaux
+ * Copyright (C) 2011,2012,2017                           Inria
+ * Copyright (C) 2010-2017,2019                           CNRS
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+#include <starpu.h>
+
+#ifdef STARPU_USE_CUDA
+static int copy_ram_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
+static int copy_cuda_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
+static int copy_ram_to_cuda_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream);
+static int copy_cuda_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream);
+static int copy_cuda_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
+#endif
+#ifdef STARPU_USE_OPENCL
+static int copy_ram_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
+static int copy_opencl_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
+static int copy_opencl_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
+static int copy_ram_to_opencl_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cl_event *event);
+static int copy_opencl_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cl_event *event);
+static int copy_opencl_to_opencl_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cl_event *event);
+#endif
+static int copy_any_to_any(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, void *async_data);
+
+static const struct starpu_data_copy_methods tensor_copy_data_methods_s =
+{
+#ifdef STARPU_USE_CUDA
+	.ram_to_cuda = copy_ram_to_cuda,
+	.cuda_to_ram = copy_cuda_to_ram,
+	.ram_to_cuda_async = copy_ram_to_cuda_async,
+	.cuda_to_ram_async = copy_cuda_to_ram_async,
+	.cuda_to_cuda = copy_cuda_to_cuda,
+#endif
+#ifdef STARPU_USE_OPENCL
+	.ram_to_opencl = copy_ram_to_opencl,
+	.opencl_to_ram = copy_opencl_to_ram,
+	.opencl_to_opencl = copy_opencl_to_opencl,
+        .ram_to_opencl_async = copy_ram_to_opencl_async,
+	.opencl_to_ram_async = copy_opencl_to_ram_async,
+	.opencl_to_opencl_async = copy_opencl_to_opencl_async,
+#endif
+	.any_to_any = copy_any_to_any,
+};
+
+
+static void register_tensor_handle(starpu_data_handle_t handle, unsigned home_node, void *data_interface);
+static void *tensor_to_pointer(void *data_interface, unsigned node);
+static int tensor_pointer_is_inside(void *data_interface, unsigned node, void *ptr);
+static starpu_ssize_t allocate_tensor_buffer_on_node(void *data_interface_, unsigned dst_node);
+static void free_tensor_buffer_on_node(void *data_interface, unsigned node);
+static size_t tensor_interface_get_size(starpu_data_handle_t handle);
+static uint32_t footprint_tensor_interface_crc32(starpu_data_handle_t handle);
+static int tensor_compare(void *data_interface_a, void *data_interface_b);
+static void display_tensor_interface(starpu_data_handle_t handle, FILE *f);
+static int pack_tensor_handle(starpu_data_handle_t handle, unsigned node, void **ptr, starpu_ssize_t *count);
+static int unpack_tensor_handle(starpu_data_handle_t handle, unsigned node, void *ptr, size_t count);
+static starpu_ssize_t describe(void *data_interface, char *buf, size_t size);
+
+struct starpu_data_interface_ops starpu_interface_tensor_ops =
+{
+	.register_data_handle = register_tensor_handle,
+	.allocate_data_on_node = allocate_tensor_buffer_on_node,
+	.to_pointer = tensor_to_pointer,
+	.pointer_is_inside = tensor_pointer_is_inside,
+	.free_data_on_node = free_tensor_buffer_on_node,
+	.copy_methods = &tensor_copy_data_methods_s,
+	.get_size = tensor_interface_get_size,
+	.footprint = footprint_tensor_interface_crc32,
+	.compare = tensor_compare,
+	.interfaceid = STARPU_TENSOR_INTERFACE_ID,
+	.interface_size = sizeof(struct starpu_tensor_interface),
+	.display = display_tensor_interface,
+	.pack_data = pack_tensor_handle,
+	.unpack_data = unpack_tensor_handle,
+	.describe = describe,
+	.name = "STARPU_TENSOR_INTERFACE"
+};
+
+static void *tensor_to_pointer(void *data_interface, unsigned node)
+{
+	(void) node;
+	struct starpu_tensor_interface *tensor_interface = data_interface;
+
+	return (void*) tensor_interface->ptr;
+}
+
+static int tensor_pointer_is_inside(void *data_interface, unsigned node, void *ptr)
+{
+	(void) node;
+	struct starpu_tensor_interface *tensor_interface = data_interface;
+	uint32_t ldy = tensor_interface->ldy;
+	uint32_t ldz = tensor_interface->ldz;
+	uint32_t ldt = tensor_interface->ldt;
+	uint32_t nx = tensor_interface->nx;
+	uint32_t ny = tensor_interface->ny;
+	uint32_t nz = tensor_interface->nz;
+	uint32_t nt = tensor_interface->nt;
+	size_t elemsize = tensor_interface->elemsize;
+
+	return (char*) ptr >= (char*) tensor_interface->ptr &&
+		(char*) ptr < (char*) tensor_interface->ptr + (nt-1)*ldt*elemsize + (nz-1)*ldz*elemsize + (ny-1)*ldy*elemsize + nx*elemsize;
+}
+
+static void register_tensor_handle(starpu_data_handle_t handle, unsigned home_node, void *data_interface)
+{
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *) data_interface;
+
+	unsigned node;
+	for (node = 0; node < STARPU_MAXNODES; node++)
+	{
+		struct starpu_tensor_interface *local_interface = (struct starpu_tensor_interface *)
+			starpu_data_get_interface_on_node(handle, node);
+
+		if (node == home_node)
+		{
+			local_interface->ptr = tensor_interface->ptr;
+                        local_interface->dev_handle = tensor_interface->dev_handle;
+                        local_interface->offset = tensor_interface->offset;
+			local_interface->ldy  = tensor_interface->ldy;
+			local_interface->ldz  = tensor_interface->ldz;
+			local_interface->ldt  = tensor_interface->ldt;
+		}
+		else
+		{
+			local_interface->ptr = 0;
+                        local_interface->dev_handle = 0;
+                        local_interface->offset = 0;
+			local_interface->ldy  = 0;
+			local_interface->ldz  = 0;
+			local_interface->ldt  = 0;
+		}
+
+		local_interface->id = tensor_interface->id;
+		local_interface->nx = tensor_interface->nx;
+		local_interface->ny = tensor_interface->ny;
+		local_interface->nz = tensor_interface->nz;
+		local_interface->nt = tensor_interface->nt;
+		local_interface->elemsize = tensor_interface->elemsize;
+	}
+}
+
+/* declare a new data with the tensor interface */
+void starpu_tensor_data_register(starpu_data_handle_t *handleptr, int home_node,
+				uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t ldt, uint32_t nx,
+				uint32_t ny, uint32_t nz, uint32_t nt, size_t elemsize)
+{
+	struct starpu_tensor_interface tensor_interface =
+	{
+		.id = STARPU_TENSOR_INTERFACE_ID,
+		.ptr = ptr,
+                .dev_handle = ptr,
+                .offset = 0,
+		.ldy = ldy,
+		.ldz = ldz,
+		.ldt = ldt,
+		.nx = nx,
+		.ny = ny,
+		.nz = nz,
+		.nt = nt,
+		.elemsize = elemsize
+	};
+#ifndef STARPU_SIMGRID
+	if (home_node >= 0 && starpu_node_get_kind(home_node) == STARPU_CPU_RAM)
+	{
+		STARPU_ASSERT_ACCESSIBLE(ptr);
+		STARPU_ASSERT_ACCESSIBLE(ptr + (nt-1)*ldt*elemsize + (nz-1)*ldz*elemsize + (ny-1)*ldy*elemsize + nx*elemsize - 1);
+	}
+#endif
+
+	starpu_data_register(handleptr, home_node, &tensor_interface, &starpu_interface_tensor_ops);
+}
+
+void starpu_tensor_ptr_register(starpu_data_handle_t handle, unsigned node,
+				  uintptr_t ptr, uintptr_t dev_handle, size_t offset, uint32_t ldy, uint32_t ldz, uint32_t ldt)
+{
+	struct starpu_tensor_interface *tensor_interface = starpu_data_get_interface_on_node(handle, node);
+	starpu_data_ptr_register(handle, node);
+	tensor_interface->ptr = ptr;
+	tensor_interface->dev_handle = dev_handle;
+	tensor_interface->offset = offset;
+	tensor_interface->ldy = ldy;
+	tensor_interface->ldz = ldz;
+	tensor_interface->ldt = ldt;
+}
+
+static uint32_t footprint_tensor_interface_crc32(starpu_data_handle_t handle)
+{
+	uint32_t hash;
+
+	hash = starpu_hash_crc32c_be(starpu_tensor_get_nx(handle), 0);
+	hash = starpu_hash_crc32c_be(starpu_tensor_get_ny(handle), hash);
+	hash = starpu_hash_crc32c_be(starpu_tensor_get_nz(handle), hash);
+	hash = starpu_hash_crc32c_be(starpu_tensor_get_nt(handle), hash);
+
+	return hash;
+}
+
+static int tensor_compare(void *data_interface_a, void *data_interface_b)
+{
+	struct starpu_tensor_interface *tensor_a = (struct starpu_tensor_interface *) data_interface_a;
+	struct starpu_tensor_interface *tensor_b = (struct starpu_tensor_interface *) data_interface_b;
+
+	/* Two tensors are considered compatible if they have the same size */
+	return (tensor_a->nx == tensor_b->nx)
+		&& (tensor_a->ny == tensor_b->ny)
+		&& (tensor_a->nz == tensor_b->nz)
+		&& (tensor_a->nt == tensor_b->nt)
+		&& (tensor_a->elemsize == tensor_b->elemsize);
+}
+
+static void display_tensor_interface(starpu_data_handle_t handle, FILE *f)
+{
+	struct starpu_tensor_interface *tensor_interface;
+
+	tensor_interface = (struct starpu_tensor_interface *) starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
+
+	fprintf(f, "%u\t%u\t%u\t%u\t", tensor_interface->nx, tensor_interface->ny, tensor_interface->nz, tensor_interface->nt);
+}
+
+static int pack_tensor_handle(starpu_data_handle_t handle, unsigned node, void **ptr, starpu_ssize_t *count)
+{
+	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
+
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, node);
+
+	*count = tensor_interface->nx*tensor_interface->ny*tensor_interface->nz*tensor_interface->nt*tensor_interface->elemsize;
+
+	if (ptr != NULL)
+	{
+		uint32_t t, z, y;
+		char *block = (void *)tensor_interface->ptr;
+
+		*ptr = (void *)starpu_malloc_on_node_flags(node, *count, 0);
+
+		char *cur = *ptr;
+		char *block_t = block;
+		for(t=0 ; t<tensor_interface->nt ; t++)
+		{
+		    char *block_z = block_t;
+		    for(z=0 ; z<tensor_interface->nz ; z++)
+		    {
+			char *block_y = block_z;
+			for(y=0 ; y<tensor_interface->ny ; y++)
+			{
+				memcpy(cur, block_y, tensor_interface->nx*tensor_interface->elemsize);
+				cur += tensor_interface->nx*tensor_interface->elemsize;
+				block_y += tensor_interface->ldy * tensor_interface->elemsize;
+			}
+			block_z += tensor_interface->ldz * tensor_interface->elemsize;
+		    }
+		    block_t += tensor_interface->ldt * tensor_interface->elemsize;
+		}
+	}
+
+	return 0;
+}
+
+static int unpack_tensor_handle(starpu_data_handle_t handle, unsigned node, void *ptr, size_t count)
+{
+	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
+
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, node);
+
+	STARPU_ASSERT(count == tensor_interface->elemsize * tensor_interface->nx * tensor_interface->ny * tensor_interface->nz * tensor_interface->nt);
+
+	uint32_t t, z, y;
+	char *cur = ptr;
+	char *block = (void *)tensor_interface->ptr;
+	char *block_t = block;
+	for(t=0 ; t<tensor_interface->nt ; t++)
+	{
+	    char *block_z = block_t;
+	    for(z=0 ; z<tensor_interface->nz ; z++)
+	    {
+		char *block_y = block_z;
+		for(y=0 ; y<tensor_interface->ny ; y++)
+		{
+			memcpy(block_y, cur, tensor_interface->nx*tensor_interface->elemsize);
+			cur += tensor_interface->nx*tensor_interface->elemsize;
+			block_y += tensor_interface->ldy * tensor_interface->elemsize;
+		}
+		block_z += tensor_interface->ldz * tensor_interface->elemsize;
+	    }
+	    block_t += tensor_interface->ldt * tensor_interface->elemsize;
+	}
+
+	starpu_free_on_node_flags(node, (uintptr_t)ptr, count, 0);
+
+	return 0;
+}
+
+
+static size_t tensor_interface_get_size(starpu_data_handle_t handle)
+{
+	size_t size;
+	struct starpu_tensor_interface *tensor_interface;
+
+	tensor_interface = (struct starpu_tensor_interface *) starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a block.");
+#endif
+
+	size = tensor_interface->nx*tensor_interface->ny*tensor_interface->nz*tensor_interface->nt*tensor_interface->elemsize;
+
+	return size;
+}
+
+/* offer an access to the data parameters */
+uint32_t starpu_tensor_get_nx(starpu_data_handle_t handle)
+{
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a block.");
+#endif
+
+	return tensor_interface->nx;
+}
+
+uint32_t starpu_tensor_get_ny(starpu_data_handle_t handle)
+{
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a block.");
+#endif
+
+	return tensor_interface->ny;
+}
+
+uint32_t starpu_tensor_get_nz(starpu_data_handle_t handle)
+{
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a block.");
+#endif
+
+	return tensor_interface->nz;
+}
+
+uint32_t starpu_tensor_get_nt(starpu_data_handle_t handle)
+{
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a block.");
+#endif
+
+	return tensor_interface->nt;
+}
+
+uint32_t starpu_tensor_get_local_ldy(starpu_data_handle_t handle)
+{
+	unsigned node;
+	node = starpu_worker_get_local_memory_node();
+
+	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
+
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, node);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a block.");
+#endif
+
+	return tensor_interface->ldy;
+}
+
+/* Return the Z leading dimension (in elements) of the local copy of the
+ * tensor. Must only be called when the data is allocated on the calling
+ * worker's memory node. */
+uint32_t starpu_tensor_get_local_ldz(starpu_data_handle_t handle)
+{
+	unsigned node;
+	node = starpu_worker_get_local_memory_node();
+
+	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
+
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, node);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
+#endif
+
+	return tensor_interface->ldz;
+}
+
+/* Return the T leading dimension (in elements) of the local copy of the
+ * tensor. Must only be called when the data is allocated on the calling
+ * worker's memory node. */
+uint32_t starpu_tensor_get_local_ldt(starpu_data_handle_t handle)
+{
+	unsigned node;
+	node = starpu_worker_get_local_memory_node();
+
+	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
+
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, node);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
+#endif
+
+	return tensor_interface->ldt;
+}
+
+/* Return the base address of the local copy of the tensor. Must only be
+ * called when the data is allocated on the calling worker's memory node. */
+uintptr_t starpu_tensor_get_local_ptr(starpu_data_handle_t handle)
+{
+	unsigned node;
+	node = starpu_worker_get_local_memory_node();
+
+	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
+
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, node);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
+#endif
+
+	return tensor_interface->ptr;
+}
+
+/* Return the size in bytes of one element of the tensor. */
+size_t starpu_tensor_get_elemsize(starpu_data_handle_t handle)
+{
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
+		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
+
+#ifdef STARPU_DEBUG
+	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
+#endif
+
+	return tensor_interface->elemsize;
+}
+
+
+/* memory allocation/deallocation primitives for the TENSOR interface */
+
+/* returns the size of the allocated area */
+/* Allocate a contiguous buffer holding the whole tensor on memory node
+ * dst_node, and update the interface (ptr/dev_handle/lds) accordingly.
+ * Returns the number of bytes allocated, or -ENOMEM on failure. */
+static starpu_ssize_t allocate_tensor_buffer_on_node(void *data_interface_, unsigned dst_node)
+{
+	uintptr_t addr = 0, handle;
+
+	struct starpu_tensor_interface *dst_tensor = (struct starpu_tensor_interface *) data_interface_;
+
+	uint32_t nx = dst_tensor->nx;
+	uint32_t ny = dst_tensor->ny;
+	uint32_t nz = dst_tensor->nz;
+	uint32_t nt = dst_tensor->nt;
+	size_t elemsize = dst_tensor->elemsize;
+
+	starpu_ssize_t allocated_memory;
+
+	/* Widen to size_t before multiplying: nx*ny*nz*nt would be computed in
+	 * uint32_t and could overflow for large tensors. */
+	size_t buffer_size = (size_t) nx * ny * nz * nt * elemsize;
+
+	handle = starpu_malloc_on_node(dst_node, buffer_size);
+
+	if (!handle)
+		return -ENOMEM;
+
+	/* On OpenCL nodes the handle is an opaque cl_mem, not an address. */
+	if (starpu_node_get_kind(dst_node) != STARPU_OPENCL_RAM)
+		addr = handle;
+
+	allocated_memory = buffer_size;
+
+	/* The freshly allocated buffer is contiguous: set the leading
+	 * dimensions to the dense layout. */
+	dst_tensor->ptr = addr;
+	dst_tensor->dev_handle = handle;
+	dst_tensor->offset = 0;
+	dst_tensor->ldy = nx;
+	dst_tensor->ldz = nx*ny;
+	dst_tensor->ldt = nx*ny*nz;
+
+	return allocated_memory;
+}
+
+/* Release the tensor buffer previously allocated on the given node. */
+static void free_tensor_buffer_on_node(void *data_interface, unsigned node)
+{
+	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *) data_interface;
+	uint32_t nx = tensor_interface->nx;
+	uint32_t ny = tensor_interface->ny;
+	uint32_t nz = tensor_interface->nz;
+	uint32_t nt = tensor_interface->nt;
+	size_t elemsize = tensor_interface->elemsize;
+
+	/* Same size computation as the allocation: widen before multiplying
+	 * to avoid uint32_t overflow. */
+	starpu_free_on_node(node, tensor_interface->dev_handle, (size_t) nx * ny * nz * nt * elemsize);
+}
+
+#ifdef STARPU_USE_CUDA
+/* Synchronous CUDA tensor copy; `kind` selects the transfer direction. */
+static int copy_cuda_common(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, enum cudaMemcpyKind kind)
+{
+	struct starpu_tensor_interface *src_block = src_interface;
+	struct starpu_tensor_interface *dst_block = dst_interface;
+
+	uint32_t nx = src_block->nx;
+	uint32_t ny = src_block->ny;
+	uint32_t nz = src_block->nz;
+	uint32_t nt = src_block->nt;
+	size_t elemsize = src_block->elemsize;
+
+	cudaError_t cures;
+
+	if (src_block->ldy == dst_block->ldy && src_block->ldz == dst_block->ldz && src_block->ldt == dst_block->ldt
+		&& nx*ny*nz == src_block->ldt)
+	{
+		/* Same lds on both sides, and contiguous, simple */
+		starpu_cuda_copy_async_sync((void *)src_block->ptr, src_node, (void *)dst_block->ptr, dst_node, nx*ny*nz*nt*elemsize, NULL, kind);
+	}
+	else
+	{
+		/* TODO: use cudaMemcpy2D for whole 3D blocks etc. when they are contiguous */
+
+		/* Default case: we transfer all z-planes one by one: nt*nz transfers */
+		/* TODO: use cudaMemcpy3D now that it works (except on cuda 4.2) */
+		unsigned t;
+		for (t = 0; t < src_block->nt; t++)
+		{
+		    unsigned z;
+		    for (z = 0; z < src_block->nz; z++)
+		    {
+			uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + t*src_block->ldt*src_block->elemsize + z*src_block->ldz*src_block->elemsize;
+			/* Fix: use the destination's elemsize for the destination
+			 * offset (the original mixed in src_block->elemsize). */
+			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + t*dst_block->ldt*dst_block->elemsize + z*dst_block->ldz*dst_block->elemsize;
+
+			cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
+                                             (char *)src_ptr, src_block->ldy*elemsize,
+                                             nx*elemsize, ny, kind);
+
+			if (!cures)
+				cures = cudaThreadSynchronize();
+			if (STARPU_UNLIKELY(cures))
+				STARPU_CUDA_REPORT_ERROR(cures);
+		    }
+		}
+	}
+
+	starpu_interface_data_copy(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize);
+
+	return 0;
+}
+
+/* Asynchronous CUDA tensor copy on `stream`; `kind` selects the direction.
+ * Returns -EAGAIN when the transfer was enqueued asynchronously, 0 when it
+ * completed synchronously. If an async plane copy fails partway, the whole
+ * tensor is re-copied synchronously (planes already enqueued are copied
+ * twice; wasteful but correct). */
+static int copy_cuda_async_common(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream, enum cudaMemcpyKind kind)
+{
+	struct starpu_tensor_interface *src_block = src_interface;
+	struct starpu_tensor_interface *dst_block = dst_interface;
+
+	uint32_t nx = src_block->nx;
+	uint32_t ny = src_block->ny;
+	uint32_t nz = src_block->nz;
+	uint32_t nt = src_block->nt;
+	size_t elemsize = src_block->elemsize;
+
+	cudaError_t cures;
+
+	int ret;
+
+	if (src_block->ldy == dst_block->ldy && src_block->ldz == dst_block->ldz && src_block->ldt == dst_block->ldt
+		&& nx*ny*nz == src_block->ldt)
+	{
+		/* Same lds on both sides, and contiguous, simple */
+		ret = starpu_cuda_copy_async_sync((void *)src_block->ptr, src_node, (void *)dst_block->ptr, dst_node, nx*ny*nz*nt*elemsize, stream, kind);
+	}
+	else
+	{
+		/* TODO: use cudaMemcpy2D for whole 3D blocks etc. when they are contiguous */
+
+		/* Default case: we transfer all blocks one by one: nz transfers */
+		/* TODO: use cudaMemcpy3D now that it works (except on cuda 4.2) */
+		unsigned t;
+		for (t = 0; t < src_block->nt; t++)
+		{
+		    unsigned z;
+		    for (z = 0; z < src_block->nz; z++)
+		    {
+			uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + t*src_block->ldt*src_block->elemsize + z*src_block->ldz*src_block->elemsize;
+			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + t*dst_block->ldt*dst_block->elemsize + z*dst_block->ldz*dst_block->elemsize;
+			double start;
+
+			/* Bracket the enqueue with driver timing probes. */
+			starpu_interface_start_driver_copy_async(src_node, dst_node, &start);
+			cures = cudaMemcpy2DAsync((char *)dst_ptr, dst_block->ldy*elemsize,
+                                                  (char *)src_ptr, src_block->ldy*elemsize,
+                                                  nx*elemsize, ny, kind, stream);
+			starpu_interface_end_driver_copy_async(src_node, dst_node, start);
+
+			if (STARPU_UNLIKELY(cures))
+			{
+				/* I don't know how to do that "better" */
+				goto no_async_default;
+			}
+		    }
+		}
+
+		ret = -EAGAIN;
+
+	}
+
+	starpu_interface_data_copy(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize);
+
+	return ret;
+
+no_async_default:
+
+	/* Synchronous fallback: redo the whole transfer plane by plane. */
+	{
+	unsigned t;
+	for (t = 0; t < src_block->nt; t++)
+	{
+	    unsigned z;
+	    for (z = 0; z < src_block->nz; z++)
+	    {
+		uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + t*src_block->ldt*src_block->elemsize + z*src_block->ldz*src_block->elemsize;
+		uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + t*dst_block->ldt*dst_block->elemsize + z*dst_block->ldz*dst_block->elemsize;
+
+		cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
+                                     (char *)src_ptr, src_block->ldy*elemsize,
+                                     nx*elemsize, ny, kind);
+
+		if (!cures)
+			cures = cudaThreadSynchronize();
+		if (STARPU_UNLIKELY(cures))
+			STARPU_CUDA_REPORT_ERROR(cures);
+	    }
+	}
+
+	starpu_interface_data_copy(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize);
+	return 0;
+	}
+}
+
+/* Synchronous device-to-host tensor copy. */
+static int copy_cuda_to_ram(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node)
+{
+	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyDeviceToHost);
+}
+
+/* Synchronous host-to-device tensor copy. */
+static int copy_ram_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
+{
+	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyHostToDevice);
+}
+
+/* Synchronous device-to-device tensor copy. */
+static int copy_cuda_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
+{
+	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyDeviceToDevice);
+}
+
+/* Asynchronous device-to-host tensor copy on the given stream. */
+static int copy_cuda_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream)
+{
+	return copy_cuda_async_common(src_interface, src_node, dst_interface, dst_node, stream, cudaMemcpyDeviceToHost);
+}
+
+/* Asynchronous host-to-device tensor copy on the given stream. */
+static int copy_ram_to_cuda_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream)
+{
+	return copy_cuda_async_common(src_interface, src_node, dst_interface, dst_node, stream, cudaMemcpyHostToDevice);
+}
+#endif // STARPU_USE_CUDA
+
+#ifdef STARPU_USE_OPENCL
+/* OpenCL tensor copy (all directions): one big transfer when both sides are
+ * contiguous with identical lds, otherwise line-by-line (nt*nz*ny transfers). */
+static int copy_opencl_common(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cl_event *event)
+{
+	struct starpu_tensor_interface *src_block = src_interface;
+	struct starpu_tensor_interface *dst_block = dst_interface;
+        int ret = 0;
+
+	uint32_t nx = src_block->nx;
+	uint32_t ny = src_block->ny;
+	uint32_t nz = src_block->nz;
+
+	/* We may have a contiguous buffer for the entire tensor, or contiguous
+	 * planes within the tensor, we can avoid many small transfers that way */
+	if (src_block->ldy == dst_block->ldy && src_block->ldz == dst_block->ldz && src_block->ldt == dst_block->ldt
+		&& nx*ny*nz == src_block->ldt)
+	{
+		ret = starpu_opencl_copy_async_sync(src_block->dev_handle, src_block->offset, src_node,
+						    dst_block->dev_handle, dst_block->offset, dst_node,
+						    src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize,
+						    event);
+	}
+	else
+	{
+		/* Default case: we transfer all lines one by one: nt*nz*ny transfers */
+		/* TODO: rect support */
+		unsigned t;
+		for (t = 0; t < src_block->nt; t++)
+		{
+		    unsigned z;
+		    for (z = 0; z < src_block->nz; z++)
+		    {
+                        unsigned j;
+                        for(j=0 ; j<src_block->ny ; j++)
+			{
+				ret = starpu_opencl_copy_async_sync(src_block->dev_handle,
+								    src_block->offset + t*src_block->ldt*src_block->elemsize + z*src_block->ldz*src_block->elemsize + j*src_block->ldy*src_block->elemsize,
+								    src_node,
+								    dst_block->dev_handle,
+								    dst_block->offset + t*dst_block->ldt*dst_block->elemsize + z*dst_block->ldz*dst_block->elemsize + j*dst_block->ldy*dst_block->elemsize,
+								    dst_node,
+								    src_block->nx*src_block->elemsize,
+								    event);
+                        }
+		    }
+                }
+        }
+
+	/* Account for the whole 4D tensor: the original stat omitted nt. */
+	starpu_interface_data_copy(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize);
+
+	return ret;
+}
+
+/* Asynchronous host-to-OpenCL tensor copy. */
+static int copy_ram_to_opencl_async(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cl_event *event)
+{
+	return copy_opencl_common(src_interface, src_node, dst_interface, dst_node, event);
+}
+
+/* Asynchronous OpenCL-to-host tensor copy. */
+static int copy_opencl_to_ram_async(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cl_event *event)
+{
+	return copy_opencl_common(src_interface, src_node, dst_interface, dst_node, event);
+}
+
+/* Asynchronous OpenCL-to-OpenCL tensor copy. */
+static int copy_opencl_to_opencl_async(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cl_event *event)
+{
+	return copy_opencl_common(src_interface, src_node, dst_interface, dst_node, event);
+}
+
+/* Synchronous host-to-OpenCL tensor copy (no completion event). */
+static int copy_ram_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
+{
+        return copy_ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, NULL);
+}
+
+/* Synchronous OpenCL-to-host tensor copy (no completion event). */
+static int copy_opencl_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
+{
+        return copy_opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, NULL);
+}
+
+/* Synchronous OpenCL-to-OpenCL tensor copy (no completion event). */
+static int copy_opencl_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
+{
+	return copy_opencl_to_opencl_async(src_interface, src_node, dst_interface, dst_node, NULL);
+}
+
+#endif
+
+/* Generic fallback copy between any two nodes through starpu_interface_copy().
+ * Uses the largest contiguous chunks possible: whole tensor, y-planes, or
+ * single x-lines, depending on the leading dimensions of both sides.
+ * Returns -EAGAIN when some transfer is still pending. */
+static int copy_any_to_any(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, void *async_data)
+{
+	struct starpu_tensor_interface *src_block = (struct starpu_tensor_interface *) src_interface;
+	struct starpu_tensor_interface *dst_block = (struct starpu_tensor_interface *) dst_interface;
+	int ret = 0;
+
+	uint32_t nx = dst_block->nx;
+	uint32_t ny = dst_block->ny;
+	uint32_t nz = dst_block->nz;
+	uint32_t nt = dst_block->nt;
+	size_t elemsize = dst_block->elemsize;
+
+	uint32_t ldy_src = src_block->ldy;
+	uint32_t ldz_src = src_block->ldz;
+	uint32_t ldt_src = src_block->ldt;
+	uint32_t ldy_dst = dst_block->ldy;
+	uint32_t ldz_dst = dst_block->ldz;
+	uint32_t ldt_dst = dst_block->ldt;
+
+	if (ldy_src == nx && ldy_dst == nx && ldz_src == nx*ny && ldz_dst == nx*ny && ldt_src == nx*ny*nz && ldt_dst == nx*ny*nz)
+	{
+		/* Optimise non-partitioned and t-partitioned case: one transfer */
+		if (starpu_interface_copy(src_block->dev_handle, src_block->offset, src_node,
+		                          dst_block->dev_handle, dst_block->offset, dst_node,
+		                          (size_t) nx*ny*nz*nt*elemsize, async_data))
+				ret = -EAGAIN;
+	}
+	else
+	{
+		unsigned t;
+		for (t = 0; t < nt; t++)
+		{
+		    unsigned z;
+		    for (z = 0; z < nz; z++)
+		    {
+			if (ldy_src == nx && ldy_dst == nx)
+			{
+				/* Optimise y-partitioned case: copy a contiguous plane.
+				 * Compute offsets in size_t: uint32_t would overflow
+				 * on large tensors. */
+				size_t src_offset = ((size_t) t*ldt_src + (size_t) z*ldz_src)*elemsize;
+				size_t dst_offset = ((size_t) t*ldt_dst + (size_t) z*ldz_dst)*elemsize;
+
+				if (starpu_interface_copy(src_block->dev_handle, src_block->offset + src_offset, src_node,
+							  dst_block->dev_handle, dst_block->offset + dst_offset, dst_node,
+							  nx*ny*elemsize, async_data))
+					ret = -EAGAIN;
+			}
+			else
+			{
+				unsigned y;
+				for (y = 0; y < ny; y++)
+				{
+					/* Eerf, x-partitioned case: line by line */
+					size_t src_offset = ((size_t) y*ldy_src + (size_t) z*ldz_src + (size_t) t*ldt_src)*elemsize;
+					size_t dst_offset = ((size_t) y*ldy_dst + (size_t) z*ldz_dst + (size_t) t*ldt_dst)*elemsize;
+
+					if (starpu_interface_copy(src_block->dev_handle, src_block->offset + src_offset, src_node,
+								  dst_block->dev_handle, dst_block->offset + dst_offset, dst_node,
+								  nx*elemsize, async_data))
+						ret = -EAGAIN;
+				}
+			}
+		    }
+		}
+	}
+
+	/* Account for the whole 4D tensor: the original stat omitted nt. */
+	starpu_interface_data_copy(src_node, dst_node, (size_t) nx*ny*nz*nt*elemsize);
+
+	return ret;
+}
+
+/* Write a short human-readable description of the tensor into buf,
+ * e.g. "T16x16x16x16x4" (dimensions then element size). */
+static starpu_ssize_t describe(void *data_interface, char *buf, size_t size)
+{
+	struct starpu_tensor_interface *tensor = (struct starpu_tensor_interface *) data_interface;
+	return snprintf(buf, size, "T%ux%ux%ux%ux%u",
+			(unsigned) tensor->nx,
+			(unsigned) tensor->ny,
+			(unsigned) tensor->nz,
+			(unsigned) tensor->nt,
+			(unsigned) tensor->elemsize);
+}

+ 20 - 0
tests/Makefile.am

@@ -299,6 +299,7 @@ myPROGRAMS +=				\
 	datawizard/interfaces/multiformat/advanced/multiformat_worker \
 	datawizard/interfaces/multiformat/advanced/multiformat_handle_conversion \
 	datawizard/interfaces/multiformat/advanced/same_handle \
+	datawizard/interfaces/tensor/tensor_interface \
 	datawizard/interfaces/variable/variable_interface    \
 	datawizard/interfaces/vector/vector_interface   \
 	datawizard/interfaces/void/void_interface \
@@ -778,6 +779,25 @@ nobase_STARPU_OPENCL_DATA_DATA += \
 	datawizard/interfaces/block/block_opencl_kernel.cl
 endif
 
+####################
+# Tensor interface #
+####################
+datawizard_interfaces_tensor_tensor_interface_SOURCES= \
+	datawizard/interfaces/test_interfaces.c  \
+	datawizard/interfaces/tensor/tensor_interface.c
+
+if STARPU_USE_CUDA
+datawizard_interfaces_tensor_tensor_interface_SOURCES+= \
+	datawizard/interfaces/tensor/tensor_cuda.cu
+endif
+
+if STARPU_USE_OPENCL
+datawizard_interfaces_tensor_tensor_interface_SOURCES+= \
+	datawizard/interfaces/tensor/tensor_opencl.c
+nobase_STARPU_OPENCL_DATA_DATA += \
+	datawizard/interfaces/tensor/tensor_opencl_kernel.cl
+endif
+
 ##################
 # BSCR interface #
 ##################

+ 12 - 1
tests/datawizard/interfaces/copy_interfaces.c

@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2012,2013                                Inria
  * Copyright (C) 2012,2015,2017                           CNRS
- * Copyright (C) 2013,2016,2017                           Université de Bordeaux
+ * Copyright (C) 2013,2016,2017,2019                      Université de Bordeaux
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -91,6 +91,17 @@ int main(int argc, char **argv)
 
 	if (ret == 0)
 	{
+		int NX=3;
+		int NY=2;
+		int NZ=4;
+		int NT=3;
+		int tensor[NX*NY*NZ*NT];
+		starpu_tensor_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)tensor, NX, NX*NY, NX*NY*NZ, NX, NY, NZ, NT, sizeof(tensor[0]));
+		ret = check_copy(handle, "tensor");
+	}
+
+	if (ret == 0)
+	{
 		uint32_t nnz = 2;
 		unsigned nrow = 5;
 		float nzvalA[nnz];

+ 85 - 0
tests/datawizard/interfaces/tensor/tensor_cuda.cu

@@ -0,0 +1,85 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2011,2012                                Inria
+ * Copyright (C) 2012,2015,2017                           CNRS
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+#include <starpu.h>
+#include "../test_interfaces.h"
+
+extern struct test_config tensor_config;
+
+/* Check that the tensor contains factor*0, factor*1, ... in x-fastest order,
+ * then negate each element; sets *err to 1 on the first mismatch.
+ * Launched <<<1,1>>>: the sequential 'val' counter forbids parallelism.
+ * 'factor' is int, matching the int the host passes and the CPU reference
+ * kernel (the original declared it float). */
+static __global__ void tensor_cuda(int *tensor,
+				  int nx, int ny, int nz, int nt,
+				  unsigned ldy, unsigned ldz, unsigned ldt,
+				  int factor, int *err)
+{
+        int i, j, k, l;
+	int val = 0;
+
+        for (l = 0; l < nt ;l++)
+	{
+	    for (k = 0; k < nz ;k++)
+	    {
+                for (j = 0; j < ny ;j++)
+		{
+                        for(i = 0; i < nx ;i++)
+			{
+				if (tensor[(l*ldt)+(k*ldz)+(j*ldy)+i] != factor * val)
+				{
+					*err = 1;
+					return;
+				}
+				else
+				{
+					tensor[(l*ldt)+(k*ldz)+(j*ldy)+i] *= -1;
+					val++;
+				}
+			}
+                }
+	    }
+        }
+}
+
+/* CUDA codelet for the generic interface test: runs the check kernel on the
+ * tensor buffer and copies the pass/fail flag back into tensor_config.
+ * All operations are ordered on the worker's local stream, so the final
+ * cudaStreamSynchronize guarantees copy_failed is up to date on return. */
+extern "C" void test_tensor_cuda_func(void *buffers[], void *args)
+{
+	cudaError_t error;
+	int *ret;
+
+	/* Device-side flag, seeded with the current copy_failed value. */
+	error = cudaMalloc(&ret, sizeof(int));
+	if (error != cudaSuccess)
+		STARPU_CUDA_REPORT_ERROR(error);
+
+	error = cudaMemcpyAsync(ret, &tensor_config.copy_failed, sizeof(int), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
+	if (error != cudaSuccess)
+		STARPU_CUDA_REPORT_ERROR(error);
+
+	int nx = STARPU_TENSOR_GET_NX(buffers[0]);
+	int ny = STARPU_TENSOR_GET_NY(buffers[0]);
+	int nz = STARPU_TENSOR_GET_NZ(buffers[0]);
+	int nt = STARPU_TENSOR_GET_NT(buffers[0]);
+        unsigned ldy = STARPU_TENSOR_GET_LDY(buffers[0]);
+        unsigned ldz = STARPU_TENSOR_GET_LDZ(buffers[0]);
+        unsigned ldt = STARPU_TENSOR_GET_LDT(buffers[0]);
+	int *tensor = (int *) STARPU_TENSOR_GET_PTR(buffers[0]);
+	int factor = *(int*) args;
+
+        tensor_cuda<<<1,1, 0, starpu_cuda_get_local_stream()>>>
+		(tensor, nx, ny, nz, nt, ldy, ldz, ldt, factor, ret);
+	error = cudaMemcpyAsync(&tensor_config.copy_failed, ret, sizeof(int), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
+	if (error != cudaSuccess)
+		STARPU_CUDA_REPORT_ERROR(error);
+
+	cudaFree(ret);
+	cudaStreamSynchronize(starpu_cuda_get_local_stream());
+}

+ 175 - 0
tests/datawizard/interfaces/tensor/tensor_interface.c

@@ -0,0 +1,175 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2011-2013                                Inria
+ * Copyright (C) 2012,2013,2015,2017,2019                 CNRS
+ * Copyright (C) 2012,2013, 2019                                Université de Bordeaux
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+#include <starpu.h>
+#include "../test_interfaces.h"
+#include "../../../helper.h"
+
+#define NX 16
+#define NY NX
+#define NZ NX
+#define NT NX
+
+/* Prototypes */
+static void register_data(void);
+static void unregister_data(void);
+void test_tensor_cpu_func(void *buffers[], void *args);
+#ifdef STARPU_USE_CUDA
+extern void test_tensor_cuda_func(void *buffers[], void *_args);
+#endif
+#ifdef STARPU_USE_OPENCL
+extern void test_tensor_opencl_func(void *buffers[], void *args);
+#endif
+
+static starpu_data_handle_t _tensor_handle;
+static starpu_data_handle_t _tensor2_handle;
+
+/* Description of the tensor-interface test, consumed by the generic driver in
+ * test_interfaces.c: one check kernel per supported architecture, the two
+ * registered handles to exchange, and the copy_failed flag the kernels set. */
+struct test_config tensor_config =
+{
+	.cpu_func      = test_tensor_cpu_func,
+#ifdef STARPU_USE_CUDA
+	.cuda_func     = test_tensor_cuda_func,
+#endif
+#ifdef STARPU_USE_OPENCL
+	.opencl_func   = test_tensor_opencl_func,
+#endif
+#ifdef STARPU_USE_MIC
+	.cpu_func_name = "test_tensor_cpu_func",
+#endif
+	.handle        = &_tensor_handle,
+	.dummy_handle  = &_tensor2_handle,
+	.copy_failed   = SUCCESS,
+	.name          = "tensor_interface"
+};
+
+static int _tensor[NX*NY*NZ*NT];
+static int _tensor2[NX*NY*NZ*NT];
+
+/* Fill the reference tensor and register both tensors with StarPU. */
+static void
+register_data(void)
+{
+	/* The buffer is dense and x-fastest (ldy=NX, ldz=NX*NY, ldt=NX*NY*NZ),
+	 * so filling it with consecutive values through a single flat loop is
+	 * equivalent to the nested 4D initialization. */
+	int idx;
+	for (idx = 0; idx < NX*NY*NZ*NT; idx++)
+		_tensor[idx] = idx;
+
+	/* Register both tensors with contiguous leading dimensions. */
+	starpu_tensor_data_register(&_tensor_handle, STARPU_MAIN_RAM,
+				    (uintptr_t)_tensor,
+				    NX, NX * NY, NX * NY * NZ,
+				    NX, NY, NZ, NT,
+				    sizeof(_tensor[0]));
+	starpu_tensor_data_register(&_tensor2_handle, STARPU_MAIN_RAM,
+				    (uintptr_t)_tensor2,
+				    NX, NX * NY, NX * NY * NZ,
+				    NX, NY, NZ, NT,
+				    sizeof(_tensor2[0]));
+}
+
+/* Unregister both tensors (blocks until StarPU hands the data back). */
+static void
+unregister_data(void)
+{
+	starpu_data_unregister(_tensor_handle);
+	starpu_data_unregister(_tensor2_handle);
+}
+
+/* CPU codelet for the generic interface test: verifies the tensor contains
+ * factor*0, factor*1, ... in x-fastest order, negating each element as it
+ * goes. Sets tensor_config.copy_failed to FAILURE on the first mismatch. */
+void test_tensor_cpu_func(void *buffers[], void *args)
+{
+	STARPU_SKIP_IF_VALGRIND;
+
+	int factor = *(int*)args;
+	int nx = STARPU_TENSOR_GET_NX(buffers[0]);
+	int ny = STARPU_TENSOR_GET_NY(buffers[0]);
+	int nz = STARPU_TENSOR_GET_NZ(buffers[0]);
+	int nt = STARPU_TENSOR_GET_NT(buffers[0]);
+        unsigned ldy = STARPU_TENSOR_GET_LDY(buffers[0]);
+        unsigned ldz = STARPU_TENSOR_GET_LDZ(buffers[0]);
+        unsigned ldt = STARPU_TENSOR_GET_LDT(buffers[0]);
+	int *tensor = (int *) STARPU_TENSOR_GET_PTR(buffers[0]);
+	int i, j, k, l;
+	int val = 0;
+	tensor_config.copy_failed = SUCCESS;
+	for (l = 0; l < nt; l++)
+	{
+	    for (k = 0; k < nz; k++)
+	    {
+		for (j = 0; j < ny; j++)
+		{
+			for (i = 0; i < nx; i++)
+			{
+				/* Address through the lds: the local copy may be strided. */
+				if (tensor[(l*ldt)+(k*ldz)+(j*ldy)+i] != factor * val)
+				{
+					tensor_config.copy_failed = FAILURE;
+					return;
+				}
+				else
+				{
+					tensor[(l*ldt)+(k*ldz)+(j*ldy)+i] *= -1;
+					val++;
+				}
+			}
+		}
+	    }
+	}
+}
+
+/* Entry point: initialize StarPU, run the generic interface tests on the
+ * tensor handles, and report the summary. Skips when no device is available
+ * or no CPU worker exists. */
+int
+main(int argc, char **argv)
+{
+	struct data_interface_test_summary summary;
+	struct starpu_conf conf;
+	int ret;
+	starpu_conf_init(&conf);
+	conf.ncuda = 2;
+	conf.nopencl = 1;
+	conf.nmic = -1;
+
+	ret = starpu_initialize(&conf, &argc, &argv);
+	if (ret == -ENODEV)
+		goto enodev;
+	if (starpu_cpu_worker_get_count() == 0)
+	{
+		/* Initialization succeeded: shut StarPU down before skipping,
+		 * otherwise worker threads and memory would be leaked
+		 * (the original jumped straight to enodev). */
+		starpu_shutdown();
+		goto enodev;
+	}
+
+	register_data();
+
+	run_tests(&tensor_config, &summary);
+
+	unregister_data();
+
+	starpu_shutdown();
+
+	data_interface_test_summary_print(stderr, &summary);
+
+	return data_interface_test_summary_success(&summary);
+
+enodev:
+	return STARPU_TEST_SKIPPED;
+}
+

+ 126 - 0
tests/datawizard/interfaces/tensor/tensor_opencl.c

@@ -0,0 +1,126 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2011,2012                                Inria
+ * Copyright (C) 2012,2015-2017                           CNRS
+ * Copyright (C) 2011, 2019                                     Université de Bordeaux
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+#include <starpu.h>
+#include "../test_interfaces.h"
+
+#define KERNEL_LOCATION "tests/datawizard/interfaces/tensor/tensor_opencl_kernel.cl"
+extern struct test_config tensor_config;
+static struct starpu_opencl_program opencl_program;
+
+/* OpenCL codelet for the generic interface test: builds the kernel, binds the
+ * tensor buffer plus its dimensions/lds/factor/fail-flag, runs it, and reads
+ * the pass/fail flag back into tensor_config.copy_failed. */
+void
+test_tensor_opencl_func(void *buffers[], void *args)
+{
+	STARPU_SKIP_IF_VALGRIND;
+
+	int id, devid, ret;
+	int factor = *(int *) args;
+
+        cl_int             err;
+	cl_kernel          kernel;
+	cl_command_queue   queue;
+	cl_event           event;
+
+	ret = starpu_opencl_load_opencl_from_file(KERNEL_LOCATION, &opencl_program, NULL);
+	STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
+
+	int nx = STARPU_TENSOR_GET_NX(buffers[0]);
+	int ny = STARPU_TENSOR_GET_NY(buffers[0]);
+	int nz = STARPU_TENSOR_GET_NZ(buffers[0]);
+	int nt = STARPU_TENSOR_GET_NT(buffers[0]);
+        unsigned ldy = STARPU_TENSOR_GET_LDY(buffers[0]);
+        unsigned ldz = STARPU_TENSOR_GET_LDZ(buffers[0]);
+        unsigned ldt = STARPU_TENSOR_GET_LDT(buffers[0]);
+	cl_mem tensor = (cl_mem) STARPU_TENSOR_GET_DEV_HANDLE(buffers[0]);
+
+	cl_context context;
+	id = starpu_worker_get_id_check();
+	devid = starpu_worker_get_devid(id);
+	starpu_opencl_get_context(devid, &context);
+
+	/* Device-side fail flag, seeded with the current copy_failed value. */
+	cl_mem fail = clCreateBuffer(context, CL_MEM_COPY_HOST_PTR,
+		sizeof(int), &tensor_config.copy_failed, &err);
+
+	if (err != CL_SUCCESS)
+		STARPU_OPENCL_REPORT_ERROR(err);
+
+
+	err = starpu_opencl_load_kernel(&kernel,
+					&queue,
+					&opencl_program,
+					"tensor_opencl",
+					devid);
+	if (err != CL_SUCCESS)
+		STARPU_OPENCL_REPORT_ERROR(err);
+
+	int nargs;
+	nargs = starpu_opencl_set_kernel_args(&err, &kernel,
+					      sizeof(tensor), &tensor,
+					      sizeof(nx), &nx,
+					      sizeof(ny), &ny,
+					      sizeof(nz), &nz,
+					      sizeof(nt), &nt,
+					      sizeof(ldy), &ldy,
+					      sizeof(ldz), &ldz,
+					      sizeof(ldt), &ldt,
+					      sizeof(factor), &factor,
+					      sizeof(fail), &fail,
+					      0);
+
+	/* 10 arguments are set above; the original compared against 8, which
+	 * let a kernel/host signature mismatch go unreported. */
+	if (nargs != 10)
+	{
+		fprintf(stderr, "Failed to set argument #%d\n", nargs);
+		STARPU_OPENCL_REPORT_ERROR(err);
+	}
+
+	{
+		size_t global = nx * ny * nz * nt;
+		err = clEnqueueNDRangeKernel(queue,
+					     kernel,
+					     1,
+					     NULL,
+					     &global,
+					     NULL,
+					     0,
+					     NULL,
+					     &event);
+
+		if (err != CL_SUCCESS)
+			STARPU_OPENCL_REPORT_ERROR(err);
+	}
+
+	err = clEnqueueReadBuffer(queue,
+				  fail,
+				  CL_TRUE,
+				  0,
+				  sizeof(int),
+				  &tensor_config.copy_failed,
+				  0,
+				  NULL,
+				  NULL);
+	if (err != CL_SUCCESS)
+		STARPU_OPENCL_REPORT_ERROR(err);
+
+	clFinish(queue);
+	starpu_opencl_collect_stats(event);
+	clReleaseEvent(event);
+
+	/* Release the fail-flag buffer: the original leaked it. */
+	clReleaseMemObject(fail);
+
+	starpu_opencl_release_kernel(kernel);
+        ret = starpu_opencl_unload_opencl(&opencl_program);
+        STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_unload_opencl");
+}

+ 47 - 0
tests/datawizard/interfaces/tensor/tensor_opencl_kernel.cl

@@ -0,0 +1,47 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2011,2012                                Inria
+ * Copyright (C) 2012,2015,2017                           CNRS
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+/* Check that the 4D tensor contains factor*0, factor*1, ... in x-fastest
+ * order, then negate each element; sets *err to 1 on the first mismatch.
+ * The signature takes 10 arguments (tensor, nx, ny, nz, nt, ldy, ldz, ldt,
+ * factor, err), matching the host-side starpu_opencl_set_kernel_args call:
+ * the original still had the 3D block signature (no nt/ldt), so the host
+ * arguments mis-bound. Only work-item 0 runs: 'val' is sequential. */
+__kernel void tensor_opencl(__global int *tensor,
+			   int nx, int ny, int nz, int nt,
+			   int ldy, int ldz, int ldt,
+			   int factor, __global int *err)
+{
+        const int id = get_global_id(0);
+	if (id > 0)
+		return;
+
+	unsigned int i, j, k, l;
+	int val = 0;
+	for (l = 0; l < nt; l++)
+	{
+	    for (k = 0; k < nz; k++)
+	    {
+		for (j = 0; j < ny; j++)
+		{
+			for (i = 0; i < nx; i++)
+			{
+                                if (tensor[(l*ldt)+(k*ldz)+(j*ldy)+i] != factor * val)
+				{
+					*err = 1;
+					return;
+				}
+				else
+				{
+					tensor[(l*ldt)+(k*ldz)+(j*ldy)+i] *= -1;
+					val++;
+				}
+			}
+		}
+	    }
+	}
+}