
use shorter function names

Olivier Aumage, 8 years ago
commit 95659bdc31

+ 1 - 1
src/core/jobs.c

@@ -714,7 +714,7 @@ int _starpu_push_local_task(struct _starpu_worker *worker, struct starpu_task *t
 	if (STARPU_UNLIKELY(!(worker->worker_mask & task->cl->where)))
 		return -ENODEV;
 
-	_starpu_worker_lock_for_observation_relax(worker->workerid);
+	_starpu_worker_lock(worker->workerid);
 
 	if (task->execute_on_a_specific_worker && task->workerorder)
 	{

+ 3 - 3
src/core/sched_ctx.c

@@ -2308,7 +2308,7 @@ void starpu_sched_ctx_list_task_counters_decrement_all_ctx_locked(struct starpu_
 			struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 			if (worker->nsched_ctxs > 1)
 			{
-				_starpu_worker_lock_for_observation_relax(workerid);
+				_starpu_worker_lock(workerid);
 				starpu_sched_ctx_list_task_counters_decrement(sched_ctx_id, workerid);
 				_starpu_worker_unlock_for_observation(workerid);
 			}
@@ -2330,7 +2330,7 @@ void starpu_sched_ctx_list_task_counters_decrement_all(struct starpu_task *task,
 			struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 			if (worker->nsched_ctxs > 1)
 			{
-				_starpu_worker_lock_for_observation_relax(workerid);
+				_starpu_worker_lock(workerid);
 				starpu_sched_ctx_list_task_counters_decrement(sched_ctx_id, workerid);
 				_starpu_worker_unlock_for_observation(workerid);
 			}
@@ -2353,7 +2353,7 @@ void starpu_sched_ctx_list_task_counters_reset_all(struct starpu_task *task, uns
 			struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 			if (worker->nsched_ctxs > 1)
 			{
-				_starpu_worker_lock_for_observation_relax(workerid);
+				_starpu_worker_lock(workerid);
 				starpu_sched_ctx_list_task_counters_reset(sched_ctx_id, workerid);
 				_starpu_worker_unlock_for_observation(workerid);
 			}

+ 1 - 1
src/core/workers.c

@@ -1726,7 +1726,7 @@ unsigned starpu_worker_is_blocked_in_parallel(int workerid)
 
 unsigned starpu_worker_is_slave_somewhere(int workerid)
 {
-	_starpu_worker_lock_for_observation_relax(workerid);
+	_starpu_worker_lock(workerid);
 	unsigned ret = _starpu_config.workers[workerid].is_slave_somewhere;
 	_starpu_worker_unlock_for_observation(workerid);
 	return ret;

+ 2 - 2
src/core/workers.h

@@ -839,7 +839,7 @@ static inline void _starpu_worker_leave_changing_ctx_op(struct _starpu_worker *
  *
  * notes:
  * - if the observed worker is not in state_safe_for_observation, the function blocks until the state is reached */
-static inline void _starpu_worker_lock_for_observation_relax(int workerid)
+static inline void _starpu_worker_lock(int workerid)
 {
 	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 	STARPU_ASSERT(worker != NULL);
@@ -865,7 +865,7 @@ static inline void _starpu_worker_lock_for_observation_relax(int workerid)
 	}
 }
 
-static inline int _starpu_worker_trylock_for_observation(int workerid)
+static inline int _starpu_worker_trylock(int workerid)
 {
 	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 	STARPU_ASSERT(worker != NULL);

+ 3 - 3
src/sched_policies/component_worker.c

@@ -445,7 +445,7 @@ static void simple_worker_can_pull(struct starpu_sched_component * worker_compon
 {
 	struct _starpu_worker * worker = _starpu_sched_component_worker_get_worker(worker_component);
 	int workerid = worker->workerid;
-	_starpu_worker_lock_for_observation_relax(workerid);
+	_starpu_worker_lock(workerid);
 	if(_starpu_sched_component_worker_is_reset_status(worker_component))
 		_starpu_sched_component_worker_set_changed_status(worker_component);
 	if(workerid != _starpu_worker_get_id() && _starpu_sched_component_worker_is_sleeping_status(worker_component))
@@ -658,7 +658,7 @@ static void combined_worker_can_pull(struct starpu_sched_component * component)
 		if((unsigned) i == workerid)
 			continue;
 		int workerid = data->combined_worker->combined_workerid[i];
-		_starpu_worker_lock_for_observation_relax(workerid);
+		_starpu_worker_lock(workerid);
 		if(_starpu_sched_component_worker_is_sleeping_status(component))
 		{
 			starpu_wake_worker_locked(workerid);
@@ -815,7 +815,7 @@ void _starpu_sched_component_lock_all_workers(void)
 {
 	unsigned i;
 	for(i = 0; i < starpu_worker_get_count(); i++)
-		_starpu_worker_lock_for_observation_relax(i);
+		_starpu_worker_lock(i);
 }
 void _starpu_sched_component_unlock_all_workers(void)
 {

+ 5 - 5
src/sched_policies/deque_modeling_policy_data_aware.c

@@ -359,7 +359,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	starpu_sched_ctx_call_pushed_task_cb(best_workerid, sched_ctx_id);
 #endif //STARPU_USE_SC_HYPERVISOR
 
-	_starpu_worker_lock_for_observation_relax(best_workerid);
+	_starpu_worker_lock(best_workerid);
 
         /* Sometimes workers didn't take the tasks as early as we expected */
 	fifo->exp_start = isnan(fifo->exp_start) ? starpu_timing_now() + fifo->pipeline_len : STARPU_MAX(fifo->exp_start, starpu_timing_now());
@@ -433,7 +433,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	int ret = 0;
 	if (prio)
 	{
-		_starpu_worker_lock_for_observation_relax(best_workerid);
+		_starpu_worker_lock(best_workerid);
 		ret =_starpu_fifo_push_sorted_task(dt->queue_array[best_workerid], task);
 		if(dt->num_priorities != -1)
 		{
@@ -452,7 +452,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	}
 	else
 	{
-		_starpu_worker_lock_for_observation_relax(best_workerid);
+		_starpu_worker_lock(best_workerid);
 		starpu_task_list_push_back (&dt->queue_array[best_workerid]->taskq, task);
 		dt->queue_array[best_workerid]->ntasks++;
 		dt->queue_array[best_workerid]->nprocessed++;
@@ -686,7 +686,7 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 				}
 				else
 				{
-					_starpu_worker_lock_for_observation_relax(workerid);
+					_starpu_worker_lock(workerid);
 					prev_exp_len = _starpu_fifo_get_exp_len_prev_task_list(fifo, task, workerid, nimpl, &fifo_ntasks);
 					_starpu_worker_unlock_for_observation(workerid);
 				}
@@ -1135,7 +1135,7 @@ static void dmda_push_task_notify(struct starpu_task *task, int workerid, int pe
 	double predicted_transfer = starpu_task_expected_data_transfer_time(memory_node, task);
 
 	/* Update the predictions */
-	_starpu_worker_lock_for_observation_relax(workerid);
+	_starpu_worker_lock(workerid);
 	/* Sometimes workers didn't take the tasks as early as we expected */
 	fifo->exp_start = isnan(fifo->exp_start) ? starpu_timing_now() + fifo->pipeline_len : STARPU_MAX(fifo->exp_start, starpu_timing_now());
 	fifo->exp_end = fifo->exp_start + fifo->exp_len;

+ 1 - 1
src/sched_policies/heteroprio.c

@@ -583,7 +583,7 @@ static struct starpu_task *pop_task_heteroprio_policy(unsigned sched_ctx_id)
 				   && hp->workers_heteroprio[victim].tasks_queue->ntasks)
 				{
 					/* ensure the worker is not currently prefetching its data */
-					_starpu_worker_lock_for_observation_relax(victim);
+					_starpu_worker_lock(victim);
 
 					if(hp->workers_heteroprio[victim].arch_index == worker->arch_index
 					   && hp->workers_heteroprio[victim].tasks_queue->ntasks)

+ 3 - 3
src/sched_policies/parallel_heft.c

@@ -116,7 +116,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 
 	if (!starpu_worker_is_combined_worker(best_workerid))
 	{
-		_starpu_worker_lock_for_observation_relax(best_workerid);
+		_starpu_worker_lock(best_workerid);
 		task->predicted = exp_end_predicted - worker_exp_end[best_workerid];
 		/* TODO */
 		task->predicted_transfer = 0;
@@ -162,7 +162,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 			/* TODO */
 			alias->predicted_transfer = 0;
 			alias->destroy = 1;
-			_starpu_worker_lock_for_observation_relax(local_combined_workerid);
+			_starpu_worker_lock(local_combined_workerid);
 			worker_exp_len[local_combined_workerid] += alias->predicted;
 			worker_exp_end[local_combined_workerid] = exp_end_predicted;
 			worker_exp_start[local_combined_workerid] = exp_end_predicted - worker_exp_len[local_combined_workerid];
@@ -312,7 +312,7 @@ static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, uns
 		if(!starpu_worker_is_combined_worker(workerid))
 		{
 			/* Sometimes workers didn't take the tasks as early as we expected */
-			_starpu_worker_lock_for_observation_relax(workerid);
+			_starpu_worker_lock(workerid);
 			worker_exp_start[workerid] = STARPU_MAX(worker_exp_start[workerid], starpu_timing_now());
 			worker_exp_end[workerid] = worker_exp_start[workerid] + worker_exp_len[workerid];
 			if (worker_exp_end[workerid] > max_exp_end)

+ 2 - 2
src/sched_policies/work_stealing_policy.c

@@ -563,7 +563,7 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 		return NULL;
 	}
 
-	if (_starpu_worker_trylock_for_observation(victim))
+	if (_starpu_worker_trylock(victim))
 	{
 		/* victim is busy, don't bother it, come back later */
 		return NULL;
@@ -636,7 +636,7 @@ int ws_push_task(struct starpu_task *task)
 	if (workerid == -1 || !starpu_sched_ctx_contains_worker(workerid, sched_ctx_id) ||
 			!starpu_worker_can_execute_task_first_impl(workerid, task, NULL))
 		workerid = select_worker(ws, task, sched_ctx_id);
-	_starpu_worker_lock_for_observation_relax(workerid);
+	_starpu_worker_lock(workerid);
 	STARPU_AYU_ADDTOTASKQUEUE(starpu_task_get_job_id(task), workerid);
 	_STARPU_TASK_BREAK_ON(task, sched);
 	record_data_locality(task, workerid);