@@ -0,0 +1,254 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2010-2016 Université de Bordeaux
+ * Copyright (C) 2010-2013 CNRS
+ * Copyright (C) 2011 INRIA
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
+/*
+ * This is just a test policy for using task graph information.
+ *
+ * We keep tasks in a fifo queue and record the task graph until we get the
+ * do_schedule call from the application, which tells us that all tasks have
+ * been queued. We can then compute task depths and let a simple
+ * central-queue greedy algorithm proceed.
+ *
+ * TODO: let workers start running tasks before the whole graph is submitted?
+ */
+
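+/*
+ * Usage sketch (illustration only): an application is expected to pick this
+ * policy, submit its whole task graph, and then trigger the do_schedule hook
+ * so that depths get computed.  The snippet below assumes the usual public
+ * StarPU API (starpu_conf_init, starpu_task_insert, starpu_do_schedule,
+ * starpu_task_wait_for_all); adapt it to the actual application code:
+ *
+ *	struct starpu_conf conf;
+ *	starpu_conf_init(&conf);
+ *	conf.sched_policy_name = "graph_test";
+ *	starpu_init(&conf);
+ *	... submit all tasks with starpu_task_insert() ...
+ *	starpu_do_schedule();        // lets this policy compute depths
+ *	starpu_task_wait_for_all();
+ *	starpu_shutdown();
+ */
+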
+#include <starpu_scheduler.h>
+#include <sched_policies/fifo_queues.h>
+#include <sched_policies/prio_deque.h>
+#include <common/graph.h>
+#include <common/thread.h>
+#include <starpu_bitmap.h>
+#include <core/task.h>
+
+struct _starpu_graph_test_policy_data
+{
+	struct _starpu_fifo_taskq *fifo;	/* Bag of tasks which are ready before do_schedule is called */
+	struct _starpu_prio_deque prio;		/* Tasks ordered by priority once depths have been computed */
+	starpu_pthread_mutex_t policy_mutex;	/* Protects the two queues above */
+	struct starpu_bitmap *waiters;		/* Workers currently waiting for a task */
+	unsigned computed;			/* Whether do_schedule was called and depths are computed */
+};
+
+static void initialize_graph_test_policy(unsigned sched_ctx_id)
+{
+	struct _starpu_graph_test_policy_data *data = (struct _starpu_graph_test_policy_data*)malloc(sizeof(struct _starpu_graph_test_policy_data));
+
+	/* there is only a single queue in this trivial design */
+	data->fifo = _starpu_create_fifo();
+	_starpu_prio_deque_init(&data->prio);
+	data->waiters = starpu_bitmap_create();
+	data->computed = 0;
+
+	/* Make the core record the task graph while tasks are submitted */
+	_starpu_graph_record = 1;
+
+	/* Tell helgrind that it's fine to check for empty fifo in
+	 * pop_task_graph_test_policy without actual mutex (it's just an integer)
+	 */
+	STARPU_HG_DISABLE_CHECKING(data->fifo->ntasks);
+
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)data);
+	STARPU_PTHREAD_MUTEX_INIT(&data->policy_mutex, NULL);
+}
+
+static void deinitialize_graph_test_policy(unsigned sched_ctx_id)
+{
+	struct _starpu_graph_test_policy_data *data = (struct _starpu_graph_test_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_fifo_taskq *fifo = data->fifo;
+
+	STARPU_ASSERT(starpu_task_list_empty(&fifo->taskq));
+
+	/* deallocate the job queues */
+	_starpu_destroy_fifo(fifo);
+	_starpu_prio_deque_destroy(&data->prio);
+	starpu_bitmap_destroy(data->waiters);
+
+	/* Stop recording the task graph, counterpart of initialization above */
+	_starpu_graph_record = 0;
+
+	STARPU_PTHREAD_MUTEX_DESTROY(&data->policy_mutex);
+	free(data);
+}
+
+/* Use the depth computed by the graph pass as the task priority */
+static void set_priority(void *_data STARPU_ATTRIBUTE_UNUSED, struct _starpu_job *job)
+{
+	job->task->priority = job->depth;
+}
+
+static void do_schedule_graph_test_policy(unsigned sched_ctx_id)
+{
+	struct _starpu_graph_test_policy_data *data = (struct _starpu_graph_test_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+
+	STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
+	_starpu_graph_compute_depths();
+	data->computed = 1;
+	_starpu_graph_foreach(set_priority, NULL);
+
+	/* Now that we have priorities, move tasks from the bag to the priority queue */
+	while (!_starpu_fifo_empty(data->fifo))
+	{
+		struct starpu_task *task = _starpu_fifo_pop_task(data->fifo, -1);
+		_starpu_prio_deque_push_back_task(&data->prio, task);
+	}
+
+	/* And unleash the beast! */
+	unsigned worker;
+	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
+	struct starpu_sched_ctx_iterator it;
+	workers->init_iterator(workers, &it);
+	while (workers->has_next(workers, &it))
+	{
+		/* Tell each worker it shouldn't sleep any more */
+		worker = workers->get_next(workers, &it);
+#ifdef STARPU_NON_BLOCKING_DRIVERS
+		starpu_bitmap_unset(data->waiters, worker);
+#endif
+	}
+	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
+
+#if !defined(STARPU_NON_BLOCKING_DRIVERS) || defined(STARPU_SIMGRID)
+	workers->init_iterator(workers, &it);
+	while (workers->has_next(workers, &it))
+	{
+		/* Wake each worker */
+		worker = workers->get_next(workers, &it);
+		starpu_wake_worker(worker);
+	}
+#endif
+}
+
+static int push_task_graph_test_policy(struct starpu_task *task)
+{
+	unsigned sched_ctx_id = task->sched_ctx;
+	struct _starpu_graph_test_policy_data *data = (struct _starpu_graph_test_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+
+	STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
+	if (!data->computed)
+	{
+		/* Priorities are not computed yet, leave the task in the bag for now */
+		starpu_task_list_push_back(&data->fifo->taskq, task);
+		data->fifo->ntasks++;
+		data->fifo->nprocessed++;
+		starpu_push_task_end(task);
+		STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
+		return 0;
+	}
+
+	/* Priorities are computed, we can push to execution */
+	_starpu_prio_deque_push_back_task(&data->prio, task);
+
+	starpu_push_task_end(task);
+
+	/* Wake workers that are waiting for a task */
+	unsigned worker = 0;
+	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
+
+	struct starpu_sched_ctx_iterator it;
+#ifndef STARPU_NON_BLOCKING_DRIVERS
+	char dowake[STARPU_NMAXWORKERS] = { 0 };
+#endif
+
+	workers->init_iterator_for_parallel_tasks(workers, &it, task);
+	while (workers->has_next(workers, &it))
+	{
+		worker = workers->get_next(workers, &it);
+
+#ifdef STARPU_NON_BLOCKING_DRIVERS
+		if (!starpu_bitmap_get(data->waiters, worker))
+			/* This worker is not waiting for a task */
+			continue;
+#endif
+
+		if (starpu_worker_can_execute_task_first_impl(worker, task, NULL))
+		{
+			/* It can execute this one, tell it! */
+#ifdef STARPU_NON_BLOCKING_DRIVERS
+			starpu_bitmap_unset(data->waiters, worker);
+			/* We really woke at least somebody, no need to wake somebody else */
+			break;
+#else
+			dowake[worker] = 1;
+#endif
+		}
+	}
+	/* Let the task free */
+	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
+
+#if !defined(STARPU_NON_BLOCKING_DRIVERS) || defined(STARPU_SIMGRID)
+	/* Now that we have a list of potential workers, try to wake one */
+
+	workers->init_iterator_for_parallel_tasks(workers, &it, task);
+	while (workers->has_next(workers, &it))
+	{
+		worker = workers->get_next(workers, &it);
+		if (dowake[worker])
+			if (starpu_wake_worker(worker))
+				break; /* wake up a single worker */
+	}
+#endif
+
+	starpu_sched_ctx_list_task_counters_increment_all(task, sched_ctx_id);
+
+	return 0;
+}
+
+static struct starpu_task *pop_task_graph_test_policy(unsigned sched_ctx_id)
+{
+	struct starpu_task *chosen_task = NULL;
+	unsigned workerid = starpu_worker_get_id();
+	struct _starpu_graph_test_policy_data *data = (struct _starpu_graph_test_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+
+	/* block until some event happens */
+	/* Here helgrind would shout that this is unprotected, but this is just
+	 * an integer access, and we hold the sched mutex, so we cannot miss any
+	 * wake up. */
+	if (!STARPU_RUNNING_ON_VALGRIND && _starpu_prio_deque_is_empty(&data->prio))
+		return NULL;
+
+#ifdef STARPU_NON_BLOCKING_DRIVERS
+	if (!STARPU_RUNNING_ON_VALGRIND && !data->computed)
+		/* Not computed yet */
+		return NULL;
+	if (!STARPU_RUNNING_ON_VALGRIND && starpu_bitmap_get(data->waiters, workerid))
+		/* Nobody woke us, avoid bothering the mutex */
+		return NULL;
+#endif
+
+	STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
+	if (!data->computed)
+	{
+		STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
+		return NULL;
+	}
+
+	chosen_task = _starpu_prio_deque_pop_task_for_worker(&data->prio, workerid);
+	if (!chosen_task)
+		/* Tell pushers that we are waiting for tasks */
+		starpu_bitmap_set(data->waiters, workerid);
+
+	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
+
+	return chosen_task;
+}
+
+struct starpu_sched_policy _starpu_sched_graph_test_policy =
+{
+	.init_sched = initialize_graph_test_policy,
+	.deinit_sched = deinitialize_graph_test_policy,
+	.do_schedule = do_schedule_graph_test_policy,
+	.push_task = push_task_graph_test_policy,
+	.pop_task = pop_task_graph_test_policy,
+	.policy_name = "graph_test",
+	.policy_description = "test policy for using graphs in scheduling decisions",
+	.worker_type = STARPU_WORKER_LIST,
+};
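+
+/*
+ * Selection note (informal sketch): assuming this structure gets referenced
+ * in the table of predefined policies, the policy would presumably be
+ * selectable at run time through the standard STARPU_SCHED environment
+ * variable, which picks a built-in scheduler by its policy_name, e.g.:
+ *
+ *	STARPU_SCHED=graph_test ./my_application
+ */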