
Support the "block" interface with our mpi lib.
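
The block interface is mapped onto MPI with two nested derived datatypes: an MPI_Type_vector describing one 2D layer of the block, wrapped in an MPI_Type_hvector that repeats the layer across the third dimension. Below is a minimal standalone sketch of that construction, assuming only plain MPI; the function name and the way the dimensions are passed are illustrative, not part of the StarPU API.

#include <mpi.h>
#include <stddef.h>

/* Sketch: build a datatype covering an nx x ny x nz sub-block stored
 * inside a larger 3D buffer whose rows are ldy elements apart and whose
 * 2D layers are ldz elements apart. */
static void build_block_datatype(unsigned nx, unsigned ny, unsigned nz,
				 unsigned ldy, unsigned ldz,
				 size_t elemsize, MPI_Datatype *datatype)
{
	MPI_Datatype layer;

	/* One 2D layer: ny rows of nx*elemsize contiguous bytes,
	 * consecutive rows starting ldy*elemsize bytes apart. */
	MPI_Type_vector(ny, nx*elemsize, ldy*elemsize, MPI_BYTE, &layer);

	/* The full block: nz such layers, ldz*elemsize bytes apart
	 * (MPI_Type_hvector takes its stride in bytes). */
	MPI_Type_hvector(nz, 1, ldz*elemsize, layer, datatype);

	MPI_Type_commit(datatype);
	MPI_Type_free(&layer);
}

Newer MPI standards spell the second call MPI_Type_create_hvector; the MPI-1 name is kept here to match the code in this commit.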

Cédric Augonnet, 15 years ago
parent commit bbea7f8d4b
3 changed files with 174 additions and 3 deletions
  1. mpi/Makefile.am (+8 -1)
  2. mpi/starpu_mpi_datatype.c (+29 -2)
  3. mpi/tests/block_interface.c (+137 -0)

mpi/Makefile.am (+8 -1)

@@ -51,7 +51,8 @@ check_PROGRAMS =
 
 check_PROGRAMS +=					\
 	tests/pingpong					\
-	tests/ring
+	tests/ring					\
+	tests/block_interface
 
 tests_pingpong_LDADD =					\
 	libstarpumpi.la
@@ -65,6 +66,12 @@ tests_ring_LDADD =					\
 tests_ring_SOURCES =					\
 	tests/ring.c
 
+tests_block_interface_LDADD =				\
+	libstarpumpi.la
+
+tests_block_interface_SOURCES =				\
+	tests/block_interface.c
+
 if USE_CUDA
 tests_ring_SOURCES += tests/ring_kernel.cu
 endif

mpi/starpu_mpi_datatype.c (+29 -2)

@@ -46,6 +46,33 @@ static void *handle_to_ptr_blas(starpu_data_handle data_handle)
 	return (void *)starpu_get_blas_local_ptr(data_handle);
 }
 
+/*
+ * 	Block
+ */
+
+static int handle_to_datatype_block(starpu_data_handle data_handle, MPI_Datatype *datatype)
+{
+	unsigned nx = starpu_get_block_nx(data_handle);
+	unsigned ny = starpu_get_block_ny(data_handle);
+	unsigned nz = starpu_get_block_nz(data_handle);
+	unsigned ldy = starpu_get_block_local_ldy(data_handle);
+	unsigned ldz = starpu_get_block_local_ldz(data_handle);
+	size_t elemsize = starpu_get_block_elemsize(data_handle);
+
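+	/* One 2D layer of the block: ny rows of nx*elemsize contiguous
+	 * bytes, with consecutive rows ldy*elemsize bytes apart. */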
+	MPI_Datatype datatype_2dlayer;
+	MPI_Type_vector(ny, nx*elemsize, ldy*elemsize, MPI_BYTE, &datatype_2dlayer);
+
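+	/* The full block: nz such layers, each starting ldz*elemsize bytes
+	 * after the previous one (MPI_Type_hvector strides in bytes). */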
+	MPI_Type_hvector(nz, 1, ldz*elemsize, datatype_2dlayer, datatype);
+
+	MPI_Type_commit(datatype);
+
+	/* The intermediate layer type is no longer needed once the
+	 * composite block type has been built. */
+	MPI_Type_free(&datatype_2dlayer);
+
+	return 0;
+}
+
+static void *handle_to_ptr_block(starpu_data_handle data_handle)
+{
+	return (void *)starpu_get_block_local_ptr(data_handle);
+}
 
 /*
  * 	Vector
@@ -73,7 +100,7 @@ static void *handle_to_ptr_vector(starpu_data_handle data_handle)
 
 static handle_to_datatype_func handle_to_datatype_funcs[STARPU_NINTERFACES_ID] = {
 	[STARPU_BLAS_INTERFACE_ID]	= handle_to_datatype_blas,
-	[STARPU_BLOCK_INTERFACE_ID]	= NULL,
+	[STARPU_BLOCK_INTERFACE_ID]	= handle_to_datatype_block,
 	[STARPU_VECTOR_INTERFACE_ID]	= handle_to_datatype_vector,
 	[STARPU_CSR_INTERFACE_ID]	= NULL,
 	[STARPU_CSC_INTERFACE_ID]	= NULL,
@@ -82,7 +109,7 @@ static handle_to_datatype_func handle_to_datatype_funcs[STARPU_NINTERFACES_ID] =
 
 static handle_to_ptr_func handle_to_ptr_funcs[STARPU_NINTERFACES_ID] = {
 	[STARPU_BLAS_INTERFACE_ID]	= handle_to_ptr_blas,
-	[STARPU_BLOCK_INTERFACE_ID]	= NULL,
+	[STARPU_BLOCK_INTERFACE_ID]	= handle_to_ptr_block,
 	[STARPU_VECTOR_INTERFACE_ID]	= handle_to_ptr_vector,
 	[STARPU_CSR_INTERFACE_ID]	= NULL,
 	[STARPU_CSC_INTERFACE_ID]	= NULL,

mpi/tests/block_interface.c (+137 -0)

@@ -0,0 +1,137 @@
+/*
+ * StarPU
+ * Copyright (C) INRIA 2008-2009 (see AUTHORS file)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+#include <starpu_mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define NITER	2048
+
+#define BIGSIZE	128
+#define SIZE	64
+
+int main(int argc, char **argv)
+{
+	MPI_Init(NULL, NULL);
+
+	int rank, size;
+
+	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+	MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+	if (size < 2)
+	{
+		if (rank == 0)
+			fprintf(stderr, "We need at least 2 processes.\n");
+
+		MPI_Finalize();
+		return 0;
+	}
+
+	/* We only use 2 nodes for this test */
+	if (rank >= 2)
+	{
+		MPI_Finalize();
+		return 0;
+	}
+
+	starpu_init(NULL);
+	starpu_mpi_initialize();
+
+	/* Node 0 will allocate a big block and only register an inner part of
+	 * it as the block data, Node 1 will allocate a block of small size and
+	 * register it directly. Node 0 and 1 will then exchange the content of
+	 * their blocks. */
+
+	float *block;
+	starpu_data_handle block_handle;
+
+	if (rank == 0)
+	{
+		block = calloc(BIGSIZE*BIGSIZE*BIGSIZE, sizeof(float));
+		assert(block);
+
+		/* fill the inner block */
+		unsigned i, j, k;
+		for (k = 0; k < SIZE; k++)
+		for (j = 0; j < SIZE; j++)
+		for (i = 0; i < SIZE; i++)
+		{
+			block[i + j*BIGSIZE + k*BIGSIZE*BIGSIZE] = 1.0f;
+		}
+
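+		/* Register the SIZE x SIZE x SIZE sub-block at the origin of
+		 * the BIGSIZE^3 allocation: ldy = BIGSIZE and ldz =
+		 * BIGSIZE*BIGSIZE describe the enclosing buffer's layout. */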
+		starpu_register_block_data(&block_handle, 0,
+			(uintptr_t)block, BIGSIZE, BIGSIZE*BIGSIZE,
+			SIZE, SIZE, SIZE, sizeof(float));
+	}
+	else /* rank == 1 */
+	{
+		block = calloc(SIZE*SIZE*SIZE, sizeof(float));
+		assert(block);
+
+		starpu_register_block_data(&block_handle, 0,
+			(uintptr_t)block, SIZE, SIZE*SIZE,
+			SIZE, SIZE, SIZE, sizeof(float));
+	}
+
+	if (rank == 0)
+	{
+		starpu_mpi_send(block_handle, 1, 0x42, MPI_COMM_WORLD);
+
+		MPI_Status status;
+		starpu_mpi_recv(block_handle, 1, 0x1337, MPI_COMM_WORLD, &status);
+
+		/* check the content of the block */
+		starpu_sync_data_with_mem(block_handle, STARPU_R);
+		unsigned i, j, k;
+		for (k = 0; k < SIZE; k++)
+		for (j = 0; j < SIZE; j++)
+		for (i = 0; i < SIZE; i++)
+		{
+			assert(block[i + j*BIGSIZE + k*BIGSIZE*BIGSIZE] == 33.0f);
+		}
+		starpu_release_data_from_mem(block_handle);
+
+	}
+	else /* rank == 1 */
+	{
+		MPI_Status status;
+		starpu_mpi_recv(block_handle, 0, 0x42, MPI_COMM_WORLD, &status);
+
+		/* check the content of the block and modify it */
+		starpu_sync_data_with_mem(block_handle, STARPU_RW);
+		unsigned i, j, k;
+		for (k = 0; k < SIZE; k++)
+		for (j = 0; j < SIZE; j++)
+		for (i = 0; i < SIZE; i++)
+		{
+			assert(block[i + j*SIZE + k*SIZE*SIZE] == 1.0f);
+			block[i + j*SIZE + k*SIZE*SIZE] = 33.0f;
+		}
+		starpu_release_data_from_mem(block_handle);
+
+		starpu_mpi_send(block_handle, 0, 0x1337, MPI_COMM_WORLD);
+	}
+
+	fprintf(stdout, "Rank %d is done\n", rank);
+	fflush(stdout);
+
+	starpu_mpi_shutdown();
+	starpu_shutdown();
+
+	MPI_Finalize();
+
+	return 0;
+}
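
With at least two MPI processes the test should run as something like "mpirun -np 2 mpi/tests/block_interface" (path per the Makefile.am above); ranks beyond the first two simply finalize and exit, as handled at the top of main.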