
make starpu_data_partition wait for previous tasks if any
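The pattern this change makes safe, as a minimal sketch (scale_cl is a hypothetical user codelet that scales a STARPU_RW vector argument, and NX and NPARTS are placeholder constants; only the StarPU calls shown are real API):

	unsigned v[NX];
	starpu_data_handle_t handle;
	struct starpu_data_filter f =
	{
		.filter_func = starpu_vector_filter_block,
		.nchildren = NPARTS,
	};
	unsigned i;

	starpu_vector_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)v, NX, sizeof(v[0]));

	/* Asynchronous task working on the whole vector */
	starpu_task_insert(&scale_cl, STARPU_RW, handle, 0);

	/* Previously this could start splitting the data while the task above
	 * was still running; it now acquires the handle in RW mode first,
	 * which waits for all previously submitted tasks on it. */
	starpu_data_partition(handle, &f);

	for (i = 0; i < NPARTS; i++)
		starpu_task_insert(&scale_cl, STARPU_RW, starpu_data_get_sub_data(handle, 1, i), 0);

	starpu_task_wait_for_all();
	starpu_data_unpartition(handle, STARPU_MAIN_RAM);
	starpu_data_unregister(handle);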

Samuel Thibault 9 years ago
parent
commit
65a36146f4
3 changed files with 122 additions and 2 deletions
  1. + 5 - 1	src/datawizard/filters.c
  2. + 14 - 1	tests/Makefile.am
  3. + 103 - 0	tests/datawizard/partition_dep.c

+ 5 - 1
src/datawizard/filters.c

@@ -1,6 +1,6 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
- * Copyright (C) 2010-2015  Université de Bordeaux
+ * Copyright (C) 2010-2016  Université de Bordeaux
  * Copyright (C) 2010  Mehdi Juhoor <mjuhoor@gmail.com>
  * Copyright (C) 2010, 2011, 2012, 2013, 2015  CNRS
  * Copyright (C) 2012 INRIA
@@ -127,6 +127,10 @@ static void _starpu_data_partition(starpu_data_handle_t initial_handle, starpu_d
 	unsigned i;
 	unsigned node;
 
+	/* Make sure to wait for previous tasks working on the whole data */
+	starpu_data_acquire_on_node(initial_handle, -1, STARPU_RW);
+	starpu_data_release_on_node(initial_handle, -1);
+
 	/* first take care to properly lock the data header */
 	_starpu_spin_lock(&initial_handle->header_lock);
 

+ 14 - 1
tests/Makefile.am

@@ -1,6 +1,6 @@
 # StarPU --- Runtime system for heterogeneous multicore architectures.
 #
-# Copyright (C) 2009-2015  Université de Bordeaux
+# Copyright (C) 2009-2016  Université de Bordeaux
 # Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015  CNRS
 # Copyright (C) 2010, 2011, 2012  INRIA
 #
@@ -208,6 +208,7 @@ noinst_PROGRAMS =				\
 	datawizard/interfaces/vector/test_vector_interface   \
 	datawizard/interfaces/void/void_interface \
 	datawizard/in_place_partition   	\
+	datawizard/partition_dep   		\
 	datawizard/partition_lazy		\
 	datawizard/gpu_register   		\
 	datawizard/gpu_ptr_register   		\
@@ -422,6 +423,18 @@ datawizard_in_place_partition_SOURCES +=	\
 	datawizard/scal_opencl.cl
 endif
 
+datawizard_partition_dep_SOURCES =	\
+	datawizard/partition_dep.c	\
+	datawizard/scal.c
+if STARPU_USE_CUDA
+datawizard_partition_dep_SOURCES +=	\
+	datawizard/scal_cuda.cu
+endif
+if STARPU_USE_OPENCL
+datawizard_partition_dep_SOURCES +=	\
+	datawizard/scal_opencl.cl
+endif
+
 datawizard_partition_lazy_SOURCES =	\
 	datawizard/partition_lazy.c	\
 	datawizard/scal.c

+ 103 - 0
tests/datawizard/partition_dep.c

@@ -0,0 +1,103 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2011, 2015-2016  Université de Bordeaux
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+#include <starpu.h>
+#include "../helper.h"
+#include "scal.h"
+
+int main(int argc, char **argv)
+{
+	unsigned *foo;
+	starpu_data_handle_t handle;
+	int ret;
+	unsigned n, i, size;
+
+	ret = starpu_initialize(NULL, &argc, &argv);
+	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
+	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
+
+#ifdef STARPU_USE_OPENCL
+	ret = starpu_opencl_load_opencl_from_file("tests/datawizard/scal_opencl.cl", &opencl_program, NULL);
+	STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
+#endif
+
+	n = starpu_worker_get_count();
+	if (n == 1)
+	{
+		starpu_shutdown();
+		return STARPU_TEST_SKIPPED;
+	}
+
+	size = 10 * n;
+
+	foo = (unsigned *) calloc(size, sizeof(*foo));
+	for (i = 0; i < size; i++)
+		foo[i] = i;
+
+	starpu_vector_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)foo, size, sizeof(*foo));
+
+	starpu_task_insert(&scal_codelet, STARPU_RW, handle, 0);
+
+	struct starpu_data_filter f =
+	{
+		.filter_func = starpu_vector_filter_block,
+		.nchildren = n,
+	};
+
+	starpu_data_partition(handle, &f);
+
+	for (i = 0; i < f.nchildren; i++)
+	{
+		struct starpu_task *task = starpu_task_create();
+
+		task->handles[0] = starpu_data_get_sub_data(handle, 1, i);
+		task->cl = &scal_codelet;
+		task->execute_on_a_specific_worker = 1;
+		task->workerid = i;
+
+		ret = starpu_task_submit(task);
+		if (ret == -ENODEV) goto enodev;
+		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
+	}
+
+	ret = starpu_task_wait_for_all();
+	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");
+
+	starpu_data_unpartition(handle, STARPU_MAIN_RAM);
+	starpu_data_unregister(handle);
+	starpu_shutdown();
+
+	ret = EXIT_SUCCESS;
+	for (i = 0; i < size; i++)
+	{
+		if (foo[i] != i*2*2)
+		{
+			FPRINTF(stderr, "value %u is %u instead of %u\n", i, foo[i], i*2*2);
+			ret = EXIT_FAILURE;
+		}
+	}
+	free(foo);
+
+	return ret;
+
+enodev:
+	starpu_data_unregister(handle);
+	fprintf(stderr, "WARNING: No one can execute this task\n");
+	/* yes, we do not perform the computation but we did detect that no one
+	 * could perform the kernel, so this is not an error from StarPU */
+	starpu_shutdown();
+	return STARPU_TEST_SKIPPED;
+}