Commit 7ca4b1d3e3 — "Avoid taking sched_ctx lock when there is just one context"
Author: Samuel Thibault, 6 years ago (parent commit view)
Diffstat for first file: +23 −16
src/sched_policies/deque_modeling_policy_data_aware.c

@@ -349,15 +349,18 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	/* make sure someone could execute that task ! */
 	/* make sure someone could execute that task ! */
 	STARPU_ASSERT(best_workerid != -1);
 	STARPU_ASSERT(best_workerid != -1);
 
 
-	_starpu_worker_relax_on();
-	_starpu_sched_ctx_lock_write(sched_ctx_id);
-	_starpu_worker_relax_off();
-	if (_starpu_sched_ctx_worker_is_master_for_child_ctx(sched_ctx_id, best_workerid, task))
-		task = NULL;
-	_starpu_sched_ctx_unlock_write(sched_ctx_id);
+	if (_starpu_get_nsched_ctxs() > 1)
+	{
+		_starpu_worker_relax_on();
+		_starpu_sched_ctx_lock_write(sched_ctx_id);
+		_starpu_worker_relax_off();
+		if (_starpu_sched_ctx_worker_is_master_for_child_ctx(sched_ctx_id, best_workerid, task))
+			task = NULL;
+		_starpu_sched_ctx_unlock_write(sched_ctx_id);
 
 
-	if (!task)
-                return 0;
+		if (!task)
+			return 0;
+	}
 
 
 	struct _starpu_fifo_taskq *fifo = dt->queue_array[best_workerid];
 	struct _starpu_fifo_taskq *fifo = dt->queue_array[best_workerid];
 
 
@@ -428,15 +431,19 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 		starpu_prefetch_task_input_for(task, best_workerid);
 		starpu_prefetch_task_input_for(task, best_workerid);
 
 
 	STARPU_AYU_ADDTOTASKQUEUE(starpu_task_get_job_id(task), best_workerid);
 	STARPU_AYU_ADDTOTASKQUEUE(starpu_task_get_job_id(task), best_workerid);
-	unsigned stream_ctx_id = starpu_worker_get_sched_ctx_id_stream(best_workerid);
-	if(stream_ctx_id != STARPU_NMAX_SCHED_CTXS)
+
+	if (_starpu_get_nsched_ctxs() > 1)
 	{
 	{
-		_starpu_worker_relax_on();
-		_starpu_sched_ctx_lock_write(sched_ctx_id);
-		_starpu_worker_relax_off();
-		starpu_sched_ctx_move_task_to_ctx_locked(task, stream_ctx_id, 0);
-		starpu_sched_ctx_revert_task_counters_ctx_locked(sched_ctx_id, task->flops);
-		_starpu_sched_ctx_unlock_write(sched_ctx_id);
+		unsigned stream_ctx_id = starpu_worker_get_sched_ctx_id_stream(best_workerid);
+		if(stream_ctx_id != STARPU_NMAX_SCHED_CTXS)
+		{
+			_starpu_worker_relax_on();
+			_starpu_sched_ctx_lock_write(sched_ctx_id);
+			_starpu_worker_relax_off();
+			starpu_sched_ctx_move_task_to_ctx_locked(task, stream_ctx_id, 0);
+			starpu_sched_ctx_revert_task_counters_ctx_locked(sched_ctx_id, task->flops);
+			_starpu_sched_ctx_unlock_write(sched_ctx_id);
+		}
 	}
 	}
 
 
 	int ret = 0;
 	int ret = 0;

+ 1 - 1
src/sched_policies/eager_central_policy.c

@@ -188,7 +188,7 @@ static struct starpu_task *pop_task_eager_policy(unsigned sched_ctx_id)
 		starpu_bitmap_set(data->waiters, workerid);
 		starpu_bitmap_set(data->waiters, workerid);
 
 
 	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
 	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
-	if(chosen_task)
+	if(chosen_task &&_starpu_get_nsched_ctxs() > 1)
 	{
 	{
 		_starpu_worker_relax_on();
 		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
 		_starpu_sched_ctx_lock_write(sched_ctx_id);

+ 1 - 1
src/sched_policies/eager_central_priority_policy.c

@@ -213,7 +213,7 @@ static struct starpu_task *_starpu_priority_pop_task(unsigned sched_ctx_id)
 		starpu_bitmap_set(data->waiters, workerid);
 		starpu_bitmap_set(data->waiters, workerid);
 
 
 	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
 	STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
-	if(chosen_task)
+	if(chosen_task &&_starpu_get_nsched_ctxs() > 1)
 	{
 	{
 		_starpu_worker_relax_on();
 		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
 		_starpu_sched_ctx_lock_write(sched_ctx_id);

+ 1 - 1
src/sched_policies/heteroprio.c

@@ -620,7 +620,7 @@ done:		;
 	}
 	}
 	STARPU_PTHREAD_MUTEX_UNLOCK(&hp->policy_mutex);
 	STARPU_PTHREAD_MUTEX_UNLOCK(&hp->policy_mutex);
 
 
-	if(task)
+	if(task &&_starpu_get_nsched_ctxs() > 1)
 	{
 	{
 		_starpu_worker_relax_on();
 		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
 		_starpu_sched_ctx_lock_write(sched_ctx_id);

+ 12 - 9
src/sched_policies/work_stealing_policy.c

@@ -543,17 +543,20 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 			locality_popped_task(ws, task, workerid, sched_ctx_id);
 			locality_popped_task(ws, task, workerid, sched_ctx_id);
 	}
 	}
 
 
-	if (task)
+	if(task)
 	{
 	{
 		/* there was a local task */
 		/* there was a local task */
 		ws->per_worker[workerid].busy = 1;
 		ws->per_worker[workerid].busy = 1;
-		_starpu_worker_relax_on();
-		_starpu_sched_ctx_lock_write(sched_ctx_id);
-		_starpu_worker_relax_off();
-		starpu_sched_ctx_list_task_counters_decrement(sched_ctx_id, workerid);
-		if (_starpu_sched_ctx_worker_is_master_for_child_ctx(sched_ctx_id, workerid, task))
-			task = NULL;
-		_starpu_sched_ctx_unlock_write(sched_ctx_id);
+		if (_starpu_get_nsched_ctxs() > 1)
+		{
+			_starpu_worker_relax_on();
+			_starpu_sched_ctx_lock_write(sched_ctx_id);
+			_starpu_worker_relax_off();
+			starpu_sched_ctx_list_task_counters_decrement(sched_ctx_id, workerid);
+			if (_starpu_sched_ctx_worker_is_master_for_child_ctx(sched_ctx_id, workerid, task))
+				task = NULL;
+			_starpu_sched_ctx_unlock_write(sched_ctx_id);
+		}
 		return task;
 		return task;
 	}
 	}
 
 
@@ -605,7 +608,7 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 	}
 	}
 #endif
 #endif
 
 
-	if (task)
+	if (task &&_starpu_get_nsched_ctxs() > 1)
 	{
 	{
 		_starpu_worker_relax_on();
 		_starpu_worker_relax_on();
 		_starpu_sched_ctx_lock_write(sched_ctx_id);
 		_starpu_sched_ctx_lock_write(sched_ctx_id);