瀏覽代碼

mpi/examples: new application with several MPI communicators

Nathalie Furmento 10 年之前
父節點
當前提交
867604f6df
共有 2 個文件被更改,包括 171 次插入、2 次刪除
  1. 6 2
      mpi/examples/Makefile.am
  2. 165 0
      mpi/examples/comm/mix_comm.c

+ 6 - 2
mpi/examples/Makefile.am

@@ -254,13 +254,17 @@ endif
 
 if BUILD_EXAMPLES
 examplebin_PROGRAMS +=			\
-	comm/comm
+	comm/comm			\
+	comm/mix_comm
 
 comm_comm_LDADD =		\
 	../src/libstarpumpi-@STARPU_EFFECTIVE_VERSION@.la
+comm_mix_comm_LDADD =		\
+	../src/libstarpumpi-@STARPU_EFFECTIVE_VERSION@.la
 
 starpu_mpi_EXAMPLES	+=			\
-	comm/comm
+	comm/comm				\
+	comm/mix_comm
 endif
 
 

+ 165 - 0
mpi/examples/comm/mix_comm.c

@@ -0,0 +1,165 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2015  Centre National de la Recherche Scientifique
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+/*
+ * This example splits the whole set of communicators in subgroups,
+ * communications take place both within each subgroups and MPI_COMM_WORLD.
+ */
+
+#include <starpu_mpi.h>
+#include "../helper.h"
+
+void func_cpu(void *descr[], STARPU_ATTRIBUTE_UNUSED void *_args)
+{
+	int *value = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
+	int rank;
+
+	starpu_codelet_unpack_args(_args, &rank);
+	FPRINTF_MPI(stderr, "Executing codelet with value %d and rank %d\n", *value, rank);
+	STARPU_ASSERT_MSG(*value == rank, "Received value %d is not the expected value %d\n", *value, rank);
+}
+
+struct starpu_codelet mycodelet =
+{
+	.cpu_funcs = {func_cpu},
+	.nbuffers = 1,
+	.modes = {STARPU_RW}
+};
+
+int main(int argc, char **argv)
+{
+	int size, x;
+	int color;
+	MPI_Comm newcomm;
+	int rank, newrank;
+	int ret;
+	starpu_data_handle_t data[3];
+
+        MPI_Init(&argc, &argv);
+        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+        MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+        if (size < 4)
+        {
+		FPRINTF(stderr, "We need at least 4 processes.\n");
+                MPI_Finalize();
+                return STARPU_TEST_SKIPPED;
+        }
+
+	color = rank%2;
+	MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm);
+	MPI_Comm_rank(newcomm, &newrank);
+	FPRINTF(stderr, "[%d][%d] color %d\n", rank, newrank, color);
+
+	if (newrank == 0)
+	{
+		FPRINTF(stderr, "[%d][%d] sending %d\n", rank, newrank, rank);
+		MPI_Send(&rank, 1, MPI_INT, 1, 10, newcomm);
+	}
+	else if (newrank == 1)
+	{
+		MPI_Recv(&x, 1, MPI_INT, 0, 10, newcomm, NULL);
+		FPRINTF(stderr, "[%d][%d] received %d\n", rank, newrank, x);
+	}
+
+        ret = starpu_init(NULL);
+        STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
+        ret = starpu_mpi_init(NULL, NULL, 0);
+        STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
+
+	if (rank == 0)
+	{
+		int value = 90;
+		starpu_variable_data_register(&data[2], STARPU_MAIN_RAM, (uintptr_t)&value, sizeof(unsigned));
+	}
+	else
+		starpu_variable_data_register(&data[2], -1, (uintptr_t)NULL, sizeof(unsigned));
+	starpu_mpi_data_register_comm(data[2], 44, 0, MPI_COMM_WORLD);
+
+	if (newrank == 0)
+	{
+		starpu_variable_data_register(&data[0], STARPU_MAIN_RAM, (uintptr_t)&rank, sizeof(unsigned));
+		starpu_variable_data_register(&data[1], STARPU_MAIN_RAM, (uintptr_t)&rank, sizeof(unsigned));
+		starpu_mpi_data_register_comm(data[1], 22, 0, newcomm);
+	}
+	else
+		starpu_variable_data_register(&data[0], -1, (uintptr_t)NULL, sizeof(unsigned));
+	starpu_mpi_data_register_comm(data[0], 12, 0, newcomm);
+
+	if (newrank == 0)
+	{
+		starpu_mpi_req req[2];
+		starpu_mpi_issend(data[1], &req[0], 1, 22, newcomm);
+		starpu_mpi_isend(data[0], &req[1], 1, 12, newcomm);
+		starpu_mpi_wait(&req[0], NULL);
+		starpu_mpi_wait(&req[1], NULL);
+	}
+	else if (newrank == 1)
+	{
+		int *xx;
+
+		starpu_mpi_recv(data[0], 0, 12, newcomm, NULL);
+		xx = (int *)starpu_variable_get_local_ptr(data[0]);
+		FPRINTF(stderr, "[%d][%d] received %d\n", rank, newrank, *xx);
+		STARPU_ASSERT_MSG(x==*xx, "Received value %d is incorrect (should be %d)\n", *xx, x);
+
+		starpu_variable_data_register(&data[1], -1, (uintptr_t)NULL, sizeof(unsigned));
+		starpu_mpi_data_register_comm(data[1], 22, 0, newcomm);
+		starpu_mpi_recv(data[0], 0, 22, newcomm, NULL);
+		xx = (int *)starpu_variable_get_local_ptr(data[0]);
+		FPRINTF(stderr, "[%d][%d] received %d\n", rank, newrank, *xx);
+		STARPU_ASSERT_MSG(x==*xx, "Received value %d is incorrect (should be %d)\n", *xx, x);
+	}
+
+	if (rank == 0)
+	{
+		int value = *((int *)starpu_variable_get_local_ptr(data[2]));
+		FPRINTF_MPI(stderr, "sending value %d to %d and receiving from %d\n", value, 1, size-1);
+		starpu_mpi_send(data[2], 1, 44, MPI_COMM_WORLD);
+		starpu_mpi_recv(data[2], size-1, 44, MPI_COMM_WORLD, NULL);
+		int *xx = (int *)starpu_variable_get_local_ptr(data[2]);
+		FPRINTF_MPI(stderr, "Value back is %d\n", *xx);
+		STARPU_ASSERT_MSG(*xx == value + (2*(size-1)), "Received value %d is incorrect (should be %d)\n", *xx, value + (2*(size-1)));
+	}
+	else
+	{
+		int next = (rank == size-1) ? 0 : rank+1;
+		FPRINTF_MPI(stderr, "receiving from %d and sending to %d\n", rank-1, next);
+		starpu_mpi_recv(data[2], rank-1, 44, MPI_COMM_WORLD, NULL);
+		int *xx = (int *)starpu_variable_get_local_ptr(data[2]);
+		*xx = *xx + 2;
+		starpu_mpi_send(data[2], next, 44, MPI_COMM_WORLD);
+	}
+
+	if (newrank == 0 || newrank == 1)
+	{
+		starpu_mpi_insert_task(newcomm, &mycodelet,
+				       STARPU_RW, data[0],
+				       STARPU_VALUE, &x, sizeof(x),
+				       STARPU_EXECUTE_ON_NODE, 1,
+				       0);
+
+		starpu_task_wait_for_all();
+		starpu_data_unregister(data[0]);
+		starpu_data_unregister(data[1]);
+	}
+	starpu_data_unregister(data[2]);
+
+	starpu_mpi_shutdown();
+	starpu_shutdown();
+        MPI_Finalize();
+	return 0;
+}