
reenable sched_policies with respect to sleep synchro update
tighten relax regions to the minimum necessary span

Olivier Aumage, 8 years ago
Commit 5bcd3a9486
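
For context, the relax bracketing being tightened works roughly as in the following before/after sketch, distilled from the hunks below (the surrounding pop_task logic is elided; only the lock/relax ordering is shown):

/* Before: the worker stayed in the "relaxed" state for the whole
 * critical section, not just while blocking on the lock. */
_starpu_worker_relax_on();
_starpu_sched_ctx_lock_write(sched_ctx_id);
/* ... task bookkeeping under the sched_ctx write lock ... */
_starpu_sched_ctx_unlock_write(sched_ctx_id);
_starpu_worker_relax_off();

/* After: the relax window covers only the blocking lock acquisition,
 * so the worker is advertised as sleeping for the minimum necessary span. */
_starpu_worker_relax_on();
_starpu_sched_ctx_lock_write(sched_ctx_id);
_starpu_worker_relax_off();
/* ... task bookkeeping under the sched_ctx write lock ... */
_starpu_sched_ctx_unlock_write(sched_ctx_id);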

+ 21 - 22
src/core/sched_policy.c

@@ -52,33 +52,32 @@ int starpu_get_prefetch_flag(void)
 
 static struct starpu_sched_policy *predefined_policies[] =
 {
-#warning update policies with sleeping synchro
-	//&_starpu_sched_modular_eager_policy,
-	//&_starpu_sched_modular_eager_prefetching_policy,
-	//&_starpu_sched_modular_prio_policy,
-	//&_starpu_sched_modular_prio_prefetching_policy,
-	//&_starpu_sched_modular_random_policy,
-	//&_starpu_sched_modular_random_prio_policy,
-	//&_starpu_sched_modular_random_prefetching_policy,
-	//&_starpu_sched_modular_random_prio_prefetching_policy,
-	//&_starpu_sched_modular_ws_policy,
-	//&_starpu_sched_modular_heft_policy,
-	//&_starpu_sched_modular_heft_prio_policy,
-	//&_starpu_sched_modular_heft2_policy,
+	&_starpu_sched_modular_eager_policy,
+	&_starpu_sched_modular_eager_prefetching_policy,
+	&_starpu_sched_modular_prio_policy,
+	&_starpu_sched_modular_prio_prefetching_policy,
+	&_starpu_sched_modular_random_policy,
+	&_starpu_sched_modular_random_prio_policy,
+	&_starpu_sched_modular_random_prefetching_policy,
+	&_starpu_sched_modular_random_prio_prefetching_policy,
+	&_starpu_sched_modular_ws_policy,
+	&_starpu_sched_modular_heft_policy,
+	&_starpu_sched_modular_heft_prio_policy,
+	&_starpu_sched_modular_heft2_policy,
 	&_starpu_sched_eager_policy,
 	&_starpu_sched_prio_policy,
 	&_starpu_sched_random_policy,
 	&_starpu_sched_lws_policy,
 	&_starpu_sched_ws_policy,
-	//&_starpu_sched_dm_policy,
-	//&_starpu_sched_dmda_policy,
-	//&_starpu_sched_dmda_ready_policy,
-	//&_starpu_sched_dmda_sorted_policy,
-	//&_starpu_sched_dmda_sorted_decision_policy,
-	//&_starpu_sched_parallel_heft_policy,
-	//&_starpu_sched_peager_policy,
-	//&_starpu_sched_heteroprio_policy,
-	//&_starpu_sched_graph_test_policy,
+	&_starpu_sched_dm_policy,
+	&_starpu_sched_dmda_policy,
+	&_starpu_sched_dmda_ready_policy,
+	&_starpu_sched_dmda_sorted_policy,
+	&_starpu_sched_dmda_sorted_decision_policy,
+	&_starpu_sched_parallel_heft_policy,
+	&_starpu_sched_peager_policy,
+	&_starpu_sched_heteroprio_policy,
+	&_starpu_sched_graph_test_policy,
 	NULL
 };
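
With these entries restored, the modular and dm* policies become selectable again through StarPU's usual policy-selection mechanism. A minimal usage sketch, assuming the standard public API; "modular-heft" is just one of the re-enabled policy names:

#include <starpu.h>

int main(void)
{
	struct starpu_conf conf;
	starpu_conf_init(&conf);
	/* Pick one of the re-enabled policies by name; setting
	 * STARPU_SCHED=modular-heft in the environment has the same effect. */
	conf.sched_policy_name = "modular-heft";
	if (starpu_init(&conf) != 0)
		return 1;
	/* ... submit tasks ... */
	starpu_shutdown();
	return 0;
}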
 

+ 2 - 2
src/sched_policies/eager_central_policy.c

@@ -179,11 +179,12 @@ static struct starpu_task *pop_task_eager_policy(unsigned sched_ctx_id)
 		/* Tell pushers that we are waiting for tasks for us */
 		starpu_bitmap_set(data->waiters, workerid);
 
-	_starpu_worker_relax_on();
 	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
 	if(chosen_task)
 	{
+		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
+		_starpu_worker_relax_off();
 		starpu_sched_ctx_list_task_counters_decrement_all_ctx_locked(chosen_task, sched_ctx_id);
 
 		unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
@@ -195,7 +196,6 @@ static struct starpu_task *pop_task_eager_policy(unsigned sched_ctx_id)
 		}
 		_starpu_sched_ctx_unlock_write(sched_ctx_id);
 	}
-	_starpu_worker_relax_off();
 
 	return chosen_task;
 }

+ 2 - 3
src/sched_policies/eager_central_priority_policy.c

@@ -276,12 +276,12 @@ static struct starpu_task *_starpu_priority_pop_task(unsigned sched_ctx_id)
 		/* Tell pushers that we are waiting for tasks for us */
 		starpu_bitmap_set(data->waiters, workerid);
 
-	_starpu_worker_relax_on();
 	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
-
 	if(chosen_task)
 	{
+		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
+		_starpu_worker_relax_off();
 		starpu_sched_ctx_list_task_counters_decrement_all_ctx_locked(chosen_task, sched_ctx_id);
 
                 unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
@@ -293,7 +293,6 @@ static struct starpu_task *_starpu_priority_pop_task(unsigned sched_ctx_id)
 		}
 		_starpu_sched_ctx_unlock_write(sched_ctx_id);
 	}
-	_starpu_worker_relax_off();
 
 	return chosen_task;
 }

+ 1 - 1
src/sched_policies/heteroprio.c

@@ -614,6 +614,7 @@ done:		;
 	{
 		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
+		_starpu_worker_relax_off();
 		unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
 		if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
 		{
@@ -622,7 +623,6 @@ done:		;
 			task = NULL;
 		}
 		_starpu_sched_ctx_unlock_write(sched_ctx_id);
-		_starpu_worker_relax_off();
 		return task;
 	}
 

+ 3 - 1
src/sched_policies/parallel_eager.c

@@ -179,7 +179,6 @@ static int push_task_peager_policy(struct starpu_task *task)
 
 static struct starpu_task *pop_task_peager_policy(unsigned sched_ctx_id)
 {
-	_starpu_worker_relax_on();
 	struct _starpu_peager_data *data = (struct _starpu_peager_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid = starpu_worker_get_id_check();
@@ -188,6 +187,7 @@ static struct starpu_task *pop_task_peager_policy(unsigned sched_ctx_id)
 	if (starpu_worker_get_type(workerid) != STARPU_CPU_WORKER && starpu_worker_get_type(workerid) != STARPU_MIC_WORKER)
 	{
 		struct starpu_task *task = NULL;
+		_starpu_worker_relax_on();
 		STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
 		_starpu_worker_relax_off();
 		task = _starpu_fifo_pop_task(data->fifo, workerid);
@@ -205,6 +205,7 @@ static struct starpu_task *pop_task_peager_policy(unsigned sched_ctx_id)
 	if (master == workerid)
 	{
 		/* The worker is a master */
+		_starpu_worker_relax_on();
 		STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
 		_starpu_worker_relax_off();
 		task = _starpu_fifo_pop_task(data->fifo, workerid);
@@ -278,6 +279,7 @@ static struct starpu_task *pop_task_peager_policy(unsigned sched_ctx_id)
 	else
 	{
 		/* The worker is a slave */
+		_starpu_worker_relax_on();
 		STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
 		_starpu_worker_relax_off();
 		task = _starpu_fifo_pop_task(data->local_fifo[workerid], workerid);

+ 3 - 5
src/sched_policies/work_stealing_policy.c

@@ -539,6 +539,7 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 		ws->per_worker[workerid].busy = 1;
 		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
+		_starpu_worker_relax_off();
 		starpu_sched_ctx_list_task_counters_decrement(sched_ctx_id, workerid);
 		unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
 		if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
@@ -548,14 +549,11 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 			task = NULL;
 		}
 		_starpu_sched_ctx_unlock_write(sched_ctx_id);
-		_starpu_worker_relax_off();
 		return task;
 	}
 
-	/* While stealing, relieve mutex used to synchronize with pushers */
-	_starpu_worker_relax_on();
-
 	/* we need to steal someone's job */
+	_starpu_worker_relax_on();
 	int victim = ws->select_victim(ws, sched_ctx_id, workerid);
 	_starpu_worker_relax_off();
 	if (victim == -1)
@@ -604,6 +602,7 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 	{
 		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
+		_starpu_worker_relax_off();
 		unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
 		if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
 		{
@@ -614,7 +613,6 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 			return NULL;
 		}
 		_starpu_sched_ctx_unlock_write(sched_ctx_id);
-		_starpu_worker_relax_off();
 	}
 	ws->per_worker[workerid].busy = !!task;
 	return task;