
some renamings: drop the _in_ctx suffix from the worker-list identifiers (workerids_ctx, nworkerids_ctx), rename nworkers_in_ctx to nworkers, workerid[] to workerids[], nctxs to nsched_ctxs, and _starpu_get_index_in_ctx_of_workerid to _starpu_get_index_ctx_of_workerid

Andra Hugo, 13 years ago
commit 30f8bc561c

+ 4 - 4
include/starpu_scheduler.h

@@ -107,13 +107,13 @@ struct starpu_sched_policy_s {
 	const char *policy_description;
 };
 
-unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids_in_ctx, int nworkerids_in_ctx, const char *sched_name);
+unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids_ctx, int nworkerids_ctx, const char *sched_name);
 
 void starpu_delete_sched_ctx(unsigned sched_ctx_id, unsigned inheritor_sched_ctx_id);
 
-void starpu_add_workers_to_sched_ctx(int *workerids_in_ctx, int nworkerids_in_ctx, unsigned sched_ctx);
+void starpu_add_workers_to_sched_ctx(int *workerids_ctx, int nworkerids_ctx, unsigned sched_ctx);
 
-void starpu_remove_workers_from_sched_ctx(int *workerids_in_ctx, int nworkerids_in_ctx, unsigned sched_ctx);
+void starpu_remove_workers_from_sched_ctx(int *workerids_ctx, int nworkerids_ctx, unsigned sched_ctx);
 
 /* When there is no available task for a worker, StarPU blocks this worker on a
 condition variable. This function specifies which condition variable (and the
@@ -197,6 +197,6 @@ double starpu_task_expected_power(struct starpu_task *task, enum starpu_perf_arc
 int starpu_wait_for_all_tasks_of_worker(int workerid);
 
 /* Waits until all the tasks of a bunch of workers have been executed */
-int starpu_wait_for_all_tasks_of_workers(int *workerids_in_ctx, int nworkerids_in_ctx);
+int starpu_wait_for_all_tasks_of_workers(int *workerids_ctx, int nworkerids_ctx);
 
 #endif /* __STARPU_SCHEDULER_H__ */
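
For context, a minimal usage sketch of the renamed public API; the worker ids, the "heft" policy name, the "my_ctx" label and the inheritor context id 0 are illustrative assumptions, not values taken from this commit:

#include <starpu.h>
#include <starpu_scheduler.h>

int main(void)
{
	starpu_init(NULL);

	/* hypothetical worker ids; a real application would pick ids
	 * below starpu_worker_get_count() */
	int workerids_ctx[] = {0, 1, 2};
	int nworkerids_ctx = 3;

	/* create a context driven by the "heft" policy on these workers */
	unsigned ctx = starpu_create_sched_ctx("heft", workerids_ctx, nworkerids_ctx, "my_ctx");

	/* grow the context with one more (hypothetical) worker */
	int extra[] = {3};
	starpu_add_workers_to_sched_ctx(extra, 1, ctx);

	/* ... submit tasks to be scheduled within this context ... */

	/* drain the context's workers, then shrink it again */
	starpu_wait_for_all_tasks_of_workers(workerids_ctx, nworkerids_ctx);
	starpu_remove_workers_from_sched_ctx(extra, 1, ctx);

	/* delete the context; its workers are handed over to context 0,
	 * assumed here to be the initial/global context */
	starpu_delete_sched_ctx(ctx, 0);

	starpu_shutdown();
	return 0;
}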

+ 76 - 76
src/core/sched_ctx.c

@@ -22,7 +22,7 @@ pthread_key_t sched_ctx_key;
 
 static unsigned _starpu_get_first_available_sched_ctx_id(struct starpu_machine_config_s *config);
 static unsigned _starpu_get_first_free_sched_ctx_in_worker_list(struct starpu_worker_s *worker);
-static void _starpu_rearange_sched_ctx_workerids(struct starpu_sched_ctx *sched_ctx, int old_nworkerids_in_ctx);
+static void _starpu_rearange_sched_ctx_workerids(struct starpu_sched_ctx *sched_ctx, int old_nworkerids_ctx);
 
 struct sched_ctx_info {
 	unsigned sched_ctx_id;
@@ -41,7 +41,7 @@ static void update_workers_func(void *buffers[] __attribute__ ((unused)), void *
 	  {
 		/* add context to worker */
 		worker->sched_ctx[sched_ctx_info_args->sched_ctx_id] = current_sched_ctx;
-		worker->nctxs++;
+		worker->nsched_ctxs++;
 	  }
 	else
 	  {
@@ -51,15 +51,15 @@ static void update_workers_func(void *buffers[] __attribute__ ((unused)), void *
 			if(worker->sched_ctx[i] != NULL && worker->sched_ctx[i]->id == sched_ctx_id)
 			  {
 				worker->sched_ctx[i] = NULL;
-				worker->nctxs--;
+				worker->nsched_ctxs--;
 			  }
 	  }
 }
 
-static void _starpu_update_workers(int *workerids_in_ctx, int nworkerids_in_ctx, 
+static void _starpu_update_workers(int *workerids_ctx, int nworkerids_ctx, 
 				   int sched_ctx_id, struct starpu_sched_ctx *sched_ctx)
 {
-	struct starpu_task *tasks[nworkerids_in_ctx];
+	struct starpu_task *tasks[nworkerids_ctx];
 
 	struct starpu_codelet_t sched_ctx_info_cl = {
 		.where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
@@ -70,11 +70,11 @@ static void _starpu_update_workers(int *workerids_in_ctx, int nworkerids_in_ctx,
 	};
 
 	int i, ret;
-	struct starpu_worker_s *worker[nworkerids_in_ctx];
-	struct sched_ctx_info sched_info_args[nworkerids_in_ctx];
-	for(i = 0; i < nworkerids_in_ctx; i++)
+	struct starpu_worker_s *worker[nworkerids_ctx];
+	struct sched_ctx_info sched_info_args[nworkerids_ctx];
+	for(i = 0; i < nworkerids_ctx; i++)
 	  {
-		worker[i] = _starpu_get_worker_struct(workerids_in_ctx[i]);
+		worker[i] = _starpu_get_worker_struct(workerids_ctx[i]);
 		
 		sched_info_args[i].sched_ctx_id = sched_ctx_id == -1  ? 
 			_starpu_get_first_free_sched_ctx_in_worker_list(worker[i]) : 
@@ -87,7 +87,7 @@ static void _starpu_update_workers(int *workerids_in_ctx, int nworkerids_in_ctx,
 		tasks[i]->cl = &sched_ctx_info_cl;
 		tasks[i]->cl_arg = &sched_info_args[i];
 		tasks[i]->execute_on_a_specific_worker = 1;
-		tasks[i]->workerid = workerids_in_ctx[i];
+		tasks[i]->workerid = workerids_ctx[i];
 		tasks[i]->detach = 0;
 		tasks[i]->destroy = 0;
 
@@ -104,7 +104,7 @@ static void _starpu_update_workers(int *workerids_in_ctx, int nworkerids_in_ctx,
 		  }
 	  }
 
-	for (i = 0; i < nworkerids_in_ctx; i++)
+	for (i = 0; i < nworkerids_ctx; i++)
 	  {
 	    if (tasks[i])
 	      {
@@ -115,8 +115,8 @@ static void _starpu_update_workers(int *workerids_in_ctx, int nworkerids_in_ctx,
 	  }
 
 }
-struct starpu_sched_ctx*  _starpu_create_sched_ctx(const char *policy_name, int *workerids_in_ctx, 
-				  int nworkerids_in_ctx, unsigned is_initial_sched,
+struct starpu_sched_ctx*  _starpu_create_sched_ctx(const char *policy_name, int *workerids_ctx, 
+				  int nworkerids_ctx, unsigned is_initial_sched,
 				  const char *sched_name)
 {
 	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
@@ -128,10 +128,10 @@ struct starpu_sched_ctx*  _starpu_create_sched_ctx(const char *policy_name, int
 	sched_ctx->id = id;
 	int nworkers = config->topology.nworkers;
 	
-	STARPU_ASSERT(nworkerids_in_ctx <= nworkers);
+	STARPU_ASSERT(nworkerids_ctx <= nworkers);
   
-	sched_ctx->nworkers_in_ctx = nworkerids_in_ctx;
-	sched_ctx->temp_nworkers_in_ctx = -1;
+	sched_ctx->nworkers = nworkerids_ctx;
+	sched_ctx->temp_nworkers = -1;
 	PTHREAD_MUTEX_INIT(&sched_ctx->changing_ctx_mutex, NULL);
 
 	sched_ctx->sched_policy = malloc(sizeof(struct starpu_sched_policy_s));
@@ -142,22 +142,22 @@ struct starpu_sched_ctx*  _starpu_create_sched_ctx(const char *policy_name, int
 
 	int j;
 	/* if NULL, add all the workers to the context */
-	if(workerids_in_ctx == NULL)
+	if(workerids_ctx == NULL)
 	  {
 		for(j = 0; j < nworkers; j++)
 		  {
-			sched_ctx->workerid[j] = j;
+			sched_ctx->workerids[j] = j;
 		  }
-		sched_ctx->nworkers_in_ctx = nworkers;
+		sched_ctx->nworkers = nworkers;
 	  } 
 	else 
 	  {
 		int i;
-		for(i = 0; i < nworkerids_in_ctx; i++)
+		for(i = 0; i < nworkerids_ctx; i++)
 		  {
 			/* the user should not ask for a resource that does not exist */
-			STARPU_ASSERT( workerids_in_ctx[i] >= 0 &&  workerids_in_ctx[i] <= nworkers);		    
-			sched_ctx->workerid[i] = workerids_in_ctx[i];
+			STARPU_ASSERT( workerids_ctx[i] >= 0 &&  workerids_ctx[i] <= nworkers);		    
+			sched_ctx->workerids[i] = workerids_ctx[i];
 
 		  }
 	  }
@@ -176,9 +176,9 @@ struct starpu_sched_ctx*  _starpu_create_sched_ctx(const char *policy_name, int
 	if(is_initial_sched)
 	  {
 	    int i;
-	    for(i = 0; i < sched_ctx->nworkers_in_ctx; i++)
+	    for(i = 0; i < sched_ctx->nworkers; i++)
 	      {
-		struct starpu_worker_s *worker = _starpu_get_worker_struct(sched_ctx->workerid[i]);
+		struct starpu_worker_s *worker = _starpu_get_worker_struct(sched_ctx->workerids[i]);
 		worker->sched_ctx[_starpu_get_first_free_sched_ctx_in_worker_list(worker)] = sched_ctx;
 	      }
 	  }
@@ -187,11 +187,11 @@ struct starpu_sched_ctx*  _starpu_create_sched_ctx(const char *policy_name, int
 }
 
 
-unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids_in_ctx, 
-			    int nworkerids_in_ctx, const char *sched_name)
+unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids_ctx, 
+			    int nworkerids_ctx, const char *sched_name)
 {
-	struct starpu_sched_ctx *sched_ctx = _starpu_create_sched_ctx(policy_name, workerids_in_ctx, nworkerids_in_ctx, 0, sched_name);
-	_starpu_update_workers(sched_ctx->workerid, sched_ctx->nworkers_in_ctx, -1, sched_ctx);
+	struct starpu_sched_ctx *sched_ctx = _starpu_create_sched_ctx(policy_name, workerids_ctx, nworkerids_ctx, 0, sched_name);
+	_starpu_update_workers(sched_ctx->workerids, sched_ctx->nworkers, -1, sched_ctx);
 	return sched_ctx->id;
 }
 
@@ -199,8 +199,8 @@ unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids_in_ctx,
 static unsigned _starpu_worker_belongs_to_ctx(int workerid, struct starpu_sched_ctx *sched_ctx)
 {
 	int i;
-	for(i = 0; i < sched_ctx->nworkers_in_ctx; i++)
-	  if(sched_ctx->workerid[i] == workerid)
+	for(i = 0; i < sched_ctx->nworkers; i++)
+	  if(sched_ctx->workerids[i] == workerid)
 		  return 1;
 	return 0;
 }
@@ -223,7 +223,7 @@ static void free_sched_ctx_mem(struct starpu_sched_ctx *sched_ctx)
 
 static void _starpu_manage_delete_sched_ctx(struct starpu_sched_ctx *sched_ctx)
 {
-	_starpu_update_workers(sched_ctx->workerid, sched_ctx->nworkers_in_ctx, 
+	_starpu_update_workers(sched_ctx->workerids, sched_ctx->nworkers, 
 			       sched_ctx->id, NULL);
 }
 
@@ -232,7 +232,7 @@ static void _starpu_add_workers_to_sched_ctx(int *new_workers, int nnew_workers,
 {
         struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
         int ntotal_workers = config->topology.nworkers;
-        int nworkerids_already_in_ctx =  sched_ctx->nworkers_in_ctx;
+        int nworkerids_already_ctx =  sched_ctx->nworkers;
 	
 	int n_added_workers = 0;
 	int added_workers[ntotal_workers];
@@ -244,7 +244,7 @@ static void _starpu_add_workers_to_sched_ctx(int *new_workers, int nnew_workers,
                 for(j = 0; j < ntotal_workers; j++)
                         if(!_starpu_worker_belongs_to_ctx(j, sched_ctx))
 			  {
-                                sched_ctx->workerid[++nworkerids_already_in_ctx] = j;
+                                sched_ctx->workerids[++nworkerids_already_ctx] = j;
 				added_workers[n_added_workers] = j;
 			  }
                           
@@ -261,7 +261,7 @@ static void _starpu_add_workers_to_sched_ctx(int *new_workers, int nnew_workers,
 			if(!_starpu_worker_belongs_to_ctx(new_workers[i], sched_ctx))
 			  {
 			    /* add worker to context */
-			    sched_ctx->workerid[ nworkerids_already_in_ctx + n_added_workers] = new_workers[i];
+			    sched_ctx->workerids[ nworkerids_already_ctx + n_added_workers] = new_workers[i];
 			    added_workers[n_added_workers] = new_workers[i];
 			    n_added_workers++;
 			  }
@@ -290,8 +290,8 @@ void starpu_delete_sched_ctx(unsigned sched_ctx_id, unsigned inheritor_sched_ctx
 		struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
 		int ntotal_workers = config->topology.nworkers;
 
-		if(!(sched_ctx->nworkers_in_ctx == ntotal_workers && sched_ctx->nworkers_in_ctx == inheritor_sched_ctx->nworkers_in_ctx))
-		  _starpu_add_workers_to_sched_ctx(sched_ctx->workerid, sched_ctx->nworkers_in_ctx, inheritor_sched_ctx);
+		if(!(sched_ctx->nworkers == ntotal_workers && sched_ctx->nworkers == inheritor_sched_ctx->nworkers))
+		  _starpu_add_workers_to_sched_ctx(sched_ctx->workerids, sched_ctx->nworkers, inheritor_sched_ctx);
 		free_sched_ctx_mem(sched_ctx);
 
 	  }		
@@ -312,23 +312,23 @@ void _starpu_delete_all_sched_ctxs()
 	return;
 }
 
-void starpu_add_workers_to_sched_ctx(int *workerids_in_ctx, int nworkerids_in_ctx,
+void starpu_add_workers_to_sched_ctx(int *workerids_ctx, int nworkerids_ctx,
 				     unsigned sched_ctx_id)
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
-	_starpu_add_workers_to_sched_ctx(workerids_in_ctx, nworkerids_in_ctx, sched_ctx);
+	_starpu_add_workers_to_sched_ctx(workerids_ctx, nworkerids_ctx, sched_ctx);
 
 	return;
 }
 
-static void _starpu_remove_workers_from_sched_ctx(int *workerids_in_ctx, int nworkerids_in_ctx, 
+static void _starpu_remove_workers_from_sched_ctx(int *workerids_ctx, int nworkerids_ctx, 
 					  struct starpu_sched_ctx *sched_ctx)
 {
   	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
 	int nworkers = config->topology.nworkers;
-	int nworkerids_already_in_ctx =  sched_ctx->nworkers_in_ctx;
+	int nworkerids_already_ctx =  sched_ctx->nworkers;
 
-	STARPU_ASSERT(nworkerids_in_ctx  <= nworkerids_already_in_ctx);
+	STARPU_ASSERT(nworkerids_ctx  <= nworkerids_already_ctx);
 
 	int i, workerid;
 
@@ -336,38 +336,38 @@ static void _starpu_remove_workers_from_sched_ctx(int *workerids_in_ctx, int nwo
 	int removed_workers[nworkers];
 
 	/*if null remove all the workers that belong to this ctx*/
-	if(workerids_in_ctx == NULL)
+	if(workerids_ctx == NULL)
 	  {
-		for(i = 0; i < nworkerids_already_in_ctx; i++)
+		for(i = 0; i < nworkerids_already_ctx; i++)
 		  {
-			removed_workers[i] = sched_ctx->workerid[i];
-			sched_ctx->workerid[i] = -1;
+			removed_workers[i] = sched_ctx->workerids[i];
+			sched_ctx->workerids[i] = -1;
 		  }
 
-		sched_ctx->nworkers_in_ctx = 0;
-		nremoved_workers = nworkerids_already_in_ctx;
+		sched_ctx->nworkers = 0;
+		nremoved_workers = nworkerids_already_ctx;
 	  } 
 	else 
 	  {
-		for(i = 0; i < nworkerids_in_ctx; i++)
+		for(i = 0; i < nworkerids_ctx; i++)
 		  {
-		    	workerid = workerids_in_ctx[i]; 
+		    	workerid = workerids_ctx[i]; 
 			/* make sure the user does not ask for a resource that does not exist */
 			STARPU_ASSERT( workerid >= 0 &&  workerid <= nworkers);
-			removed_workers[i] = sched_ctx->workerid[i];
+			removed_workers[i] = sched_ctx->workerids[i];
 
 			int j;
 			/* don't leave a valid workerid in this slot now that we no longer use it */
-			for(j = 0; j < nworkerids_already_in_ctx; j++)
-				if(sched_ctx->workerid[j] == workerid)				 
-					sched_ctx->workerid[j] = -1;
+			for(j = 0; j < nworkerids_already_ctx; j++)
+				if(sched_ctx->workerids[j] == workerid)				 
+					sched_ctx->workerids[j] = -1;
 		  }
 
-		nremoved_workers = nworkerids_in_ctx;
-		sched_ctx->nworkers_in_ctx -= nworkerids_in_ctx;
+		nremoved_workers = nworkerids_ctx;
+		sched_ctx->nworkers -= nworkerids_ctx;
 		/* reorder the context's list of workers in order to avoid
 		   holes in the list after removing some elements */
-		_starpu_rearange_sched_ctx_workerids(sched_ctx, nworkerids_already_in_ctx);
+		_starpu_rearange_sched_ctx_workerids(sched_ctx, nworkerids_already_ctx);
 	  }
 
 	_starpu_update_workers(removed_workers, nremoved_workers, sched_ctx->id, NULL);
@@ -375,17 +375,17 @@ static void _starpu_remove_workers_from_sched_ctx(int *workerids_in_ctx, int nwo
 	return;
 }
 
-void starpu_remove_workers_from_sched_ctx(int *workerids_in_ctx, int nworkerids_in_ctx, 
+void starpu_remove_workers_from_sched_ctx(int *workerids_ctx, int nworkerids_ctx, 
 					  unsigned sched_ctx_id)
 {
 	  /* wait for the workers concerned by the change of context
 	   * to finish their work in the previous context */
-	if(!starpu_wait_for_all_tasks_of_workers(workerids_in_ctx, nworkerids_in_ctx))
+	if(!starpu_wait_for_all_tasks_of_workers(workerids_ctx, nworkerids_ctx))
 	  {
 		struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 
 		PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
-		_starpu_remove_workers_from_sched_ctx(workerids_in_ctx, nworkerids_in_ctx, sched_ctx);
+		_starpu_remove_workers_from_sched_ctx(workerids_ctx, nworkerids_ctx, sched_ctx);
 		PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
 	  }
 	return;
@@ -439,10 +439,10 @@ static unsigned _starpu_get_first_free_sched_ctx_in_worker_list(struct starpu_wo
 	return STARPU_NMAX_SCHED_CTXS;
 }
 
-static int _starpu_get_first_free_worker_space(int *workerids, int old_nworkerids_in_ctx)
+static int _starpu_get_first_free_worker_space(int *workerids, int old_nworkerids_ctx)
 {
 	int i;
-	for(i = 0; i < old_nworkerids_in_ctx; i++)
+	for(i = 0; i < old_nworkerids_ctx; i++)
 		if(workerids[i] == -1)
 			return i;
 
@@ -453,19 +453,19 @@ static int _starpu_get_first_free_worker_space(int *workerids, int old_nworkerid
    and have instead {5, 7, -1, -1, -1} 
    it is easier afterwards to iterate the array
 */
-static void _starpu_rearange_sched_ctx_workerids(struct starpu_sched_ctx *sched_ctx, int old_nworkerids_in_ctx)
+static void _starpu_rearange_sched_ctx_workerids(struct starpu_sched_ctx *sched_ctx, int old_nworkerids_ctx)
 {
 	int first_free_id = -1;
 	int i;
-	for(i = 0; i < old_nworkerids_in_ctx; i++)
+	for(i = 0; i < old_nworkerids_ctx; i++)
 	  {
-		if(sched_ctx->workerid[i] != -1)
+		if(sched_ctx->workerids[i] != -1)
 		  {
-			first_free_id = _starpu_get_first_free_worker_space(sched_ctx->workerid, old_nworkerids_in_ctx);
+			first_free_id = _starpu_get_first_free_worker_space(sched_ctx->workerids, old_nworkerids_ctx);
 			if(first_free_id != -1)
 			  {
-				sched_ctx->workerid[first_free_id] = sched_ctx->workerid[i];
-				sched_ctx->workerid[i] = -1;
+				sched_ctx->workerids[first_free_id] = sched_ctx->workerids[i];
+				sched_ctx->workerids[i] = -1;
 			  }
 		  }
 	  }
@@ -483,18 +483,18 @@ int starpu_wait_for_all_tasks_of_worker(int workerid)
 	return 0;
 }
 
-int starpu_wait_for_all_tasks_of_workers(int *workerids_in_ctx, int nworkerids_in_ctx){
+int starpu_wait_for_all_tasks_of_workers(int *workerids_ctx, int nworkerids_ctx){
 	int ret_val = 0;
 	
 	struct starpu_machine_config_s *config = _starpu_get_machine_config();
-	int nworkers = nworkerids_in_ctx == -1 ? (int)config->topology.nworkers : nworkerids_in_ctx;
+	int nworkers = nworkerids_ctx == -1 ? (int)config->topology.nworkers : nworkerids_ctx;
 	
 	int workerid = -1;
 	int i, n;
 	
 	for(i = 0; i < nworkers; i++)
 	  {
-		workerid = workerids_in_ctx == NULL ? i : workerids_in_ctx[i];
+		workerid = workerids_ctx == NULL ? i : workerids_ctx[i];
 		n = starpu_wait_for_all_tasks_of_worker(workerid);
 		ret_val = (ret_val && n);
 	  }
@@ -543,15 +543,15 @@ void _starpu_increment_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)
 	_starpu_barrier_counter_increment(&sched_ctx->tasks_barrier);
 }
 
-int _starpu_get_index_in_ctx_of_workerid(unsigned sched_ctx_id, unsigned workerid)
+int _starpu_get_index_ctx_of_workerid(unsigned sched_ctx_id, unsigned workerid)
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 	
-	int nworkers_in_ctx = sched_ctx->nworkers_in_ctx;
+	int nworkers_ctx = sched_ctx->nworkers;
 
 	int i;
-	for(i = 0; i < nworkers_in_ctx; i++)
-		if(sched_ctx->workerid[i] == (int)workerid)
+	for(i = 0; i < nworkers_ctx; i++)
+		if(sched_ctx->workerids[i] == (int)workerid)
 			return i;
 	
 	return -1;
@@ -559,13 +559,13 @@ int _starpu_get_index_in_ctx_of_workerid(unsigned sched_ctx_id, unsigned workeri
 
 pthread_mutex_t *_starpu_get_sched_mutex(struct starpu_sched_ctx *sched_ctx, int worker)
 {
-	int workerid_ctx = _starpu_get_index_in_ctx_of_workerid(sched_ctx->id, worker);
+	int workerid_ctx = _starpu_get_index_ctx_of_workerid(sched_ctx->id, worker);
 	return (workerid_ctx == -1 ? NULL : sched_ctx->sched_mutex[workerid_ctx]);
 }
 
 pthread_cond_t *_starpu_get_sched_cond(struct starpu_sched_ctx *sched_ctx, int worker)
 {
-	int workerid_ctx = _starpu_get_index_in_ctx_of_workerid(sched_ctx->id, worker);
+	int workerid_ctx = _starpu_get_index_ctx_of_workerid(sched_ctx->id, worker);
 	return (workerid_ctx == -1 ? NULL : sched_ctx->sched_cond[workerid_ctx]);
 }
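
The add/remove paths above notify each affected worker by submitting a small task pinned to it, as _starpu_update_workers() does with execute_on_a_specific_worker and workerid. A stripped-down sketch of that pinning pattern follows; the noop_cpu_func and run_on_worker names are hypothetical, and the .cpu_func codelet field is assumed from the task API of that era:

static void noop_cpu_func(void *buffers[] __attribute__ ((unused)), void *arg __attribute__ ((unused)))
{
	/* runs on the targeted worker; update_workers_func() does its
	 * sched_ctx bookkeeping at this point */
}

static struct starpu_codelet_t noop_cl = {
	.where = STARPU_CPU,
	.cpu_func = noop_cpu_func,
	.nbuffers = 0
};

static void run_on_worker(int workerid)
{
	struct starpu_task *task = starpu_task_create();
	task->cl = &noop_cl;
	task->execute_on_a_specific_worker = 1;	/* bypass the scheduler */
	task->workerid = workerid;		/* target exactly this worker */
	task->detach = 0;			/* we will wait for the task */
	task->destroy = 0;			/* and free it ourselves */

	if (!starpu_task_submit(task))
		starpu_task_wait(task);
	starpu_task_destroy(task);
}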
 

+ 3 - 3
src/core/sched_ctx.h

@@ -37,13 +37,13 @@ struct starpu_sched_ctx {
 	void *policy_data;
 	
 	/* list of indices of workers */
-	int workerid[STARPU_NMAXWORKERS]; 
+	int workerids[STARPU_NMAXWORKERS]; 
 	
 	/* number of workers in the context */
-	int nworkers_in_ctx; 
+	int nworkers; 
 
 	/* temporary variable for the number of workers in the context */
-	int temp_nworkers_in_ctx; 
+	int temp_nworkers; 
   
 	/* mutex for temp_nworkers */
 	pthread_mutex_t changing_ctx_mutex;

+ 3 - 3
src/core/sched_policy.c

@@ -314,10 +314,10 @@ int _starpu_push_task(starpu_job_t j, unsigned job_is_already_locked)
 		   the context in order to avoid doing it during the computation of the
 		   best worker */
 		PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
-		if(sched_ctx->temp_nworkers_in_ctx != -1)
+		if(sched_ctx->temp_nworkers != -1)
 		  {
-		    sched_ctx->nworkers_in_ctx = sched_ctx->temp_nworkers_in_ctx;
-		    sched_ctx->temp_nworkers_in_ctx = -1;
+		    sched_ctx->nworkers = sched_ctx->temp_nworkers;
+		    sched_ctx->temp_nworkers = -1;
 		  }
 		/* don't push task on ctx at the same time workers are removed from ctx */
 		ret = sched_ctx->sched_policy->push_task(task, sched_ctx->id);
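
The temp_nworkers field applied here implements a deferred resize: a policy's initialize_*_for_workers() callback publishes the new worker count under changing_ctx_mutex, and _starpu_push_task() picks it up just before invoking the policy, so the size only changes between pushes. A self-contained sketch of both sides of that handshake, using a simplified stand-in for struct starpu_sched_ctx:

#include <pthread.h>

/* simplified stand-in for the relevant fields of struct starpu_sched_ctx */
struct ctx {
	int nworkers;		/* size currently seen by push_task() */
	int temp_nworkers;	/* pending size, or -1 if none */
	pthread_mutex_t changing_ctx_mutex;
};

/* writer side: what a policy's initialize_*_for_workers() callback does */
static void publish_resize(struct ctx *c, int all_workers)
{
	pthread_mutex_lock(&c->changing_ctx_mutex);
	c->temp_nworkers = all_workers;	/* not visible to the scheduler yet */
	pthread_mutex_unlock(&c->changing_ctx_mutex);
}

/* reader side: what _starpu_push_task() does before calling the policy */
static void apply_pending_resize(struct ctx *c)
{
	pthread_mutex_lock(&c->changing_ctx_mutex);
	if (c->temp_nworkers != -1) {
		c->nworkers = c->temp_nworkers;	/* take the new size into account */
		c->temp_nworkers = -1;
	}
	pthread_mutex_unlock(&c->changing_ctx_mutex);
}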

+ 1 - 1
src/core/workers.h

@@ -81,7 +81,7 @@ struct starpu_worker_s {
 	char short_name[10];
 
 	struct starpu_sched_ctx **sched_ctx;
-	unsigned nctxs; /* the no of contexts a worker belongs to*/
+	unsigned nsched_ctxs; /* the number of contexts a worker belongs to */
 
 	struct _starpu_barrier_counter_t tasks_barrier; /* wait for the tasks submitted */
        

+ 52 - 52
src/sched_policies/deque_modeling_policy_data_aware.c

@@ -125,7 +125,7 @@ static struct starpu_task *dmda_pop_ready_task(unsigned sched_ctx_id)
 	struct starpu_task *task;
 
 	int workerid = starpu_worker_get_id();
-	int workerid_ctx =  _starpu_get_index_in_ctx_of_workerid(sched_ctx_id, workerid);
+	int workerid_ctx =  _starpu_get_index_ctx_of_workerid(sched_ctx_id, workerid);
 	struct starpu_fifo_taskq_s *fifo = dt->queue_array[workerid_ctx];
 
 	unsigned node = starpu_worker_get_memory_node(workerid);
@@ -161,7 +161,7 @@ static struct starpu_task *dmda_pop_task(unsigned sched_ctx_id)
 	struct starpu_task *task;
 
 	int workerid = starpu_worker_get_id();
-	int workerid_ctx =  _starpu_get_index_in_ctx_of_workerid(sched_ctx_id, workerid);
+	int workerid_ctx =  _starpu_get_index_ctx_of_workerid(sched_ctx_id, workerid);
 	struct starpu_fifo_taskq_s *fifo = dt->queue_array[workerid_ctx];
 
 	task = _starpu_fifo_pop_task(fifo, -1);
@@ -197,7 +197,7 @@ static struct starpu_task *dmda_pop_every_task(unsigned sched_ctx_id)
 	struct starpu_task *new_list;
 
 	int workerid = starpu_worker_get_id();
-	int workerid_ctx =  _starpu_get_index_in_ctx_of_workerid(sched_ctx_id, workerid);
+	int workerid_ctx =  _starpu_get_index_ctx_of_workerid(sched_ctx_id, workerid);
 	struct starpu_fifo_taskq_s *fifo = dt->queue_array[workerid_ctx];
 
 	new_list = _starpu_fifo_pop_every_task(fifo, sched_ctx->sched_mutex[workerid_ctx], workerid);
@@ -289,7 +289,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	/* make sure someone could execute that task! */
 	STARPU_ASSERT(best_workerid != -1);
 
-	int best_workerid_ctx =  _starpu_get_index_in_ctx_of_workerid(sched_ctx->id, best_workerid);
+	int best_workerid_ctx =  _starpu_get_index_ctx_of_workerid(sched_ctx->id, best_workerid);
 
 	struct starpu_fifo_taskq_s *fifo;
 	fifo = dt->queue_array[best_workerid_ctx];
@@ -317,7 +317,7 @@ static int _dm_push_task(struct starpu_task *task, unsigned prio, struct starpu_
 	dmda_data *dt = (dmda_data*)sched_ctx->policy_data;
 	/* find the queue */
 	struct starpu_fifo_taskq_s *fifo;
-	unsigned worker, worker_in_ctx;
+	unsigned worker, worker_ctx;
 	int best = -1;
 
 	double best_exp_end = 0.0;
@@ -332,15 +332,15 @@ static int _dm_push_task(struct starpu_task *task, unsigned prio, struct starpu_
 
 	unsigned best_impl = 0;
 	unsigned nimpl;
-	unsigned nworkers = sched_ctx->nworkers_in_ctx;
-	for (worker_in_ctx = 0; worker_in_ctx < nworkers; worker_in_ctx++)
+	unsigned nworkers = sched_ctx->nworkers;
+	for (worker_ctx = 0; worker_ctx < nworkers; worker_ctx++)
 	{
 		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
 		{
-        	worker = sched_ctx->workerid[worker_in_ctx];
+        	worker = sched_ctx->workerids[worker_ctx];
 			double exp_end;
 		
-			fifo = dt->queue_array[worker_in_ctx];
+			fifo = dt->queue_array[worker_ctx];
 
 			/* Sometimes workers didn't take the tasks as early as we expected */
 			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
@@ -414,21 +414,21 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, struct starp
 	dmda_data *dt = (dmda_data*)sched_ctx->policy_data;
 	/* find the queue */
 	struct starpu_fifo_taskq_s *fifo;
-	unsigned worker, worker_in_ctx;
-	int best = -1, best_in_ctx = -1;
+	unsigned worker, worker_ctx;
+	int best = -1, best_ctx = -1;
 	
 	/* this flag is set if the corresponding worker is selected because
 	   there is no performance prediction available yet */
 	int forced_best = -1;
 
-	unsigned nworkers_in_ctx = sched_ctx->nworkers_in_ctx;
-	double local_task_length[nworkers_in_ctx];
-	double local_data_penalty[nworkers_in_ctx];
-	double local_power[nworkers_in_ctx];
-	double exp_end[nworkers_in_ctx];
+	unsigned nworkers_ctx = sched_ctx->nworkers;
+	double local_task_length[nworkers_ctx];
+	double local_data_penalty[nworkers_ctx];
+	double local_power[nworkers_ctx];
+	double exp_end[nworkers_ctx];
 	double max_exp_end = 0.0;
 
-	double fitness[nworkers_in_ctx];
+	double fitness[nworkers_ctx];
 
 	double best_exp_end = 10e240;
 	double model_best = 0.0;
@@ -443,12 +443,12 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, struct starp
 
 	unsigned best_impl = 0;
 	unsigned nimpl=0;
-	for (worker_in_ctx = 0; worker_in_ctx < nworkers_in_ctx; worker_in_ctx++)
+	for (worker_ctx = 0; worker_ctx < nworkers_ctx; worker_ctx++)
 	{
-		worker = sched_ctx->workerid[worker_in_ctx];
+		worker = sched_ctx->workerids[worker_ctx];
 		for(nimpl  = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
 	 	{
-			fifo = dt->queue_array[worker_in_ctx];
+			fifo = dt->queue_array[worker_ctx];
 
 			/* Sometimes workers didn't take the tasks as early as we expected */
 			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
@@ -463,12 +463,12 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, struct starp
 			}
 
 			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
-			local_task_length[worker_in_ctx] = starpu_task_expected_length(task, perf_arch, nimpl);
+			local_task_length[worker_ctx] = starpu_task_expected_length(task, perf_arch, nimpl);
 
 			//_STARPU_DEBUG("Scheduler dmda: task length (%lf) worker (%u) kernel (%u) \n", local_task_length[worker],worker,nimpl);
 
 			unsigned memory_node = starpu_worker_get_memory_node(worker);
-			local_data_penalty[worker_in_ctx] = starpu_task_expected_data_transfer_time(memory_node, task);
+			local_data_penalty[worker_ctx] = starpu_task_expected_data_transfer_time(memory_node, task);
 
 			double ntasks_end = fifo->ntasks / starpu_worker_get_relative_speedup(perf_arch);
 
@@ -482,13 +482,13 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, struct starp
 
 			}
 
-			if (local_task_length[worker_in_ctx] == -1.0)
+			if (local_task_length[worker_ctx] == -1.0)
 				/* we are calibrating, we want to speed-up calibration time
 			 	* so we privilege non-calibrated tasks (but still
 			 	* greedily distribute them to avoid dumb schedules) */
 				calibrating = 1;
 
-			if (local_task_length[worker_in_ctx] <= 0.0)
+			if (local_task_length[worker_ctx] <= 0.0)
 				/* there is no prediction available for that task
 			 	* with that arch yet, so switch to a greedy strategy */
 				unknown = 1;
@@ -496,19 +496,19 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, struct starp
 			if (unknown)
 					continue;
 
-			exp_end[worker_in_ctx] = fifo->exp_start + fifo->exp_len + local_task_length[worker_in_ctx];
+			exp_end[worker_ctx] = fifo->exp_start + fifo->exp_len + local_task_length[worker_ctx];
 
-			if (exp_end[worker_in_ctx] < best_exp_end)
+			if (exp_end[worker_ctx] < best_exp_end)
 			{
 				/* a better solution was found */
-				best_exp_end = exp_end[worker_in_ctx];
+				best_exp_end = exp_end[worker_ctx];
 				best_impl = nimpl;
 
 			}
 
-			local_power[worker_in_ctx] = starpu_task_expected_power(task, perf_arch, nimpl);
-			if (local_power[worker_in_ctx] == -1.0)
-				local_power[worker_in_ctx] = 0.;
+			local_power[worker_ctx] = starpu_task_expected_power(task, perf_arch, nimpl);
+			if (local_power[worker_ctx] == -1.0)
+				local_power[worker_ctx] = 0.;
 			}	
 		}
 
@@ -519,11 +519,11 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, struct starp
 	
 		if (forced_best == -1)
 		{
-	        for (worker_in_ctx = 0; worker_in_ctx < nworkers_in_ctx; worker_in_ctx++)
+	        for (worker_ctx = 0; worker_ctx < nworkers_ctx; worker_ctx++)
 	        {
-		        worker = sched_ctx->workerid[worker_in_ctx];
+		        worker = sched_ctx->workerids[worker_ctx];
 
-				fifo = dt->queue_array[worker_in_ctx];
+				fifo = dt->queue_array[worker_ctx];
 	
 			if (!starpu_worker_may_execute_task(worker, task, 0))
 			{
@@ -531,22 +531,22 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, struct starp
 				continue;
 			}
 	
-			fitness[worker_in_ctx] = dt->alpha*(exp_end[worker_in_ctx] - best_exp_end) 
-					+ dt->beta*(local_data_penalty[worker_in_ctx])
-					+ dt->_gamma*(local_power[worker_in_ctx]);
+			fitness[worker_ctx] = dt->alpha*(exp_end[worker_ctx] - best_exp_end) 
+					+ dt->beta*(local_data_penalty[worker_ctx])
+					+ dt->_gamma*(local_power[worker_ctx]);
 
-			if (exp_end[worker_in_ctx] > max_exp_end)
+			if (exp_end[worker_ctx] > max_exp_end)
 				/* This placement will make the computation
 				 * longer, take into account the idle
 				 * consumption of other cpus */
-				fitness[worker_in_ctx] += dt->_gamma * dt->idle_power * (exp_end[worker_in_ctx] - max_exp_end) / 1000000.0;
+				fitness[worker_ctx] += dt->_gamma * dt->idle_power * (exp_end[worker_ctx] - max_exp_end) / 1000000.0;
 
-			if (best == -1 || fitness[worker_in_ctx] < best_fitness)
+			if (best == -1 || fitness[worker_ctx] < best_fitness)
 			{
 				/* we found a better solution */
-				best_fitness = fitness[worker_in_ctx];
+				best_fitness = fitness[worker_ctx];
 				best = worker;
-				best_in_ctx = worker_in_ctx;
+				best_ctx = worker_ctx;
 
 	//			_STARPU_DEBUG("best fitness (worker %d) %e = alpha*(%e) + beta(%e) +gamma(%e)\n", worker, best_fitness, exp_end[worker] - best_exp_end, local_data_penalty[worker], local_power[worker]);
 			}
@@ -599,7 +599,7 @@ static int dmda_push_task(struct starpu_task *task, unsigned sched_ctx_id)
 static void initialize_dmda_policy_for_workers(unsigned sched_ctx_id, unsigned nnew_workers) 
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
-	unsigned nworkers = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers = sched_ctx->nworkers;
 	dmda_data *dt = (dmda_data*)sched_ctx->policy_data;
 
 	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
@@ -620,7 +620,7 @@ static void initialize_dmda_policy_for_workers(unsigned sched_ctx_id, unsigned n
 
 	/* take into account the new number of threads at the next push */
 	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
-	sched_ctx->temp_nworkers_in_ctx = all_workers;
+	sched_ctx->temp_nworkers = all_workers;
 	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
 }
 
@@ -633,7 +633,7 @@ static void initialize_dmda_policy(unsigned sched_ctx_id)
 	dt->idle_power = 0.0;
 
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
-	unsigned nworkers = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers = sched_ctx->nworkers;
 	sched_ctx->policy_data = (void*)dt;
 
 	dt->queue_array = (struct starpu_fifo_taskq_s**)malloc(STARPU_NMAXWORKERS*sizeof(struct starpu_fifo_taskq_s*));
@@ -675,14 +675,14 @@ static void deinitialize_dmda_policy(unsigned sched_ctx_id)
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 	dmda_data *dt = (dmda_data*)sched_ctx->policy_data;
-	int workerid_in_ctx;
-        int nworkers = sched_ctx->nworkers_in_ctx;
-	for (workerid_in_ctx = 0; workerid_in_ctx < nworkers; workerid_in_ctx++){
-		_starpu_destroy_fifo(dt->queue_array[workerid_in_ctx]);
-		PTHREAD_MUTEX_DESTROY(sched_ctx->sched_mutex[workerid_in_ctx]);
-                PTHREAD_COND_DESTROY(sched_ctx->sched_cond[workerid_in_ctx]);
-		free(sched_ctx->sched_mutex[workerid_in_ctx]);
-                free(sched_ctx->sched_cond[workerid_in_ctx]);
+	int workerid_ctx;
+        int nworkers = sched_ctx->nworkers;
+	for (workerid_ctx = 0; workerid_ctx < nworkers; workerid_ctx++){
+		_starpu_destroy_fifo(dt->queue_array[workerid_ctx]);
+		PTHREAD_MUTEX_DESTROY(sched_ctx->sched_mutex[workerid_ctx]);
+                PTHREAD_COND_DESTROY(sched_ctx->sched_cond[workerid_ctx]);
+		free(sched_ctx->sched_mutex[workerid_ctx]);
+                free(sched_ctx->sched_cond[workerid_ctx]);
 	}
 
 	free(dt->queue_array);
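
The quantity minimized by the renamed worker_ctx loop in _dmda_push_task() is unchanged; restated as a stand-alone helper, where alpha, beta, gamma and idle_power correspond to dt->alpha, dt->beta, dt->_gamma and dt->idle_power above:

/* fitness of running the task on one candidate worker: weighted expected
 * termination time, data transfer penalty and power consumption; a
 * placement that lengthens the overall makespan also pays for the idle
 * power of the other cpus (the 1e6 factor converts us to s) */
static double dmda_fitness(double alpha, double beta, double gamma, double idle_power,
			   double exp_end, double best_exp_end, double max_exp_end,
			   double data_penalty, double power)
{
	double fitness = alpha * (exp_end - best_exp_end)
		       + beta * data_penalty
		       + gamma * power;

	if (exp_end > max_exp_end)
		fitness += gamma * idle_power * (exp_end - max_exp_end) / 1000000.0;

	return fitness;
}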

+ 15 - 15
src/sched_policies/eager_central_policy.c

@@ -28,20 +28,20 @@ static void initialize_eager_center_policy_for_workers(unsigned sched_ctx_id, un
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 
-	unsigned nworkers_ctx = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers_ctx = sched_ctx->nworkers;
 	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
 	unsigned ntotal_workers = config->topology.nworkers;
 
 	unsigned all_workers = nnew_workers == ntotal_workers ? ntotal_workers : nworkers_ctx + nnew_workers;
 
-	unsigned workerid_in_ctx;
-	for (workerid_in_ctx = nworkers_ctx; workerid_in_ctx < all_workers; workerid_in_ctx++){
-		sched_ctx->sched_mutex[workerid_in_ctx] = sched_ctx->sched_mutex[0];
-		sched_ctx->sched_cond[workerid_in_ctx] = sched_ctx->sched_cond[0];
+	unsigned workerid_ctx;
+	for (workerid_ctx = nworkers_ctx; workerid_ctx < all_workers; workerid_ctx++){
+		sched_ctx->sched_mutex[workerid_ctx] = sched_ctx->sched_mutex[0];
+		sched_ctx->sched_cond[workerid_ctx] = sched_ctx->sched_cond[0];
 	}
 	/* take into account the new number of threads at the next push */
 	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
-	sched_ctx->temp_nworkers_in_ctx = all_workers;
+	sched_ctx->temp_nworkers = all_workers;
 	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
 }
 
@@ -58,11 +58,11 @@ static void initialize_eager_center_policy(unsigned sched_ctx_id)
 	PTHREAD_MUTEX_INIT(sched_mutex, NULL);
 	PTHREAD_COND_INIT(sched_cond, NULL);
 
-	int workerid_in_ctx;
-	int nworkers = sched_ctx->nworkers_in_ctx;
-	for (workerid_in_ctx = 0; workerid_in_ctx < nworkers; workerid_in_ctx++){
-		sched_ctx->sched_mutex[workerid_in_ctx] = sched_mutex;
-		sched_ctx->sched_cond[workerid_in_ctx] = sched_cond;
+	int workerid_ctx;
+	int nworkers = sched_ctx->nworkers;
+	for (workerid_ctx = 0; workerid_ctx < nworkers; workerid_ctx++){
+		sched_ctx->sched_mutex[workerid_ctx] = sched_mutex;
+		sched_ctx->sched_cond[workerid_ctx] = sched_cond;
 	}
 }
 
@@ -89,8 +89,8 @@ static int push_task_eager_policy(struct starpu_task *task, unsigned sched_ctx_i
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 	int i;
 	int workerid;
-	for(i = 0; i < sched_ctx->nworkers_in_ctx; i++){
-		workerid = sched_ctx->workerid[i]; 
+	for(i = 0; i < sched_ctx->nworkers; i++){
+		workerid = sched_ctx->workerids[i]; 
 		_starpu_increment_nsubmitted_tasks_of_worker(workerid);
 	}
 
@@ -115,9 +115,9 @@ static struct starpu_task *pop_task_eager_policy(unsigned sched_ctx_id)
 	if(task)
 	  {
 		int i;
-		for(i = 0; i <sched_ctx->nworkers_in_ctx; i++)
+		for(i = 0; i <sched_ctx->nworkers; i++)
 		  {
-			workerid = sched_ctx->workerid[i]; 
+			workerid = sched_ctx->workerids[i]; 
 			_starpu_decrement_nsubmitted_tasks_of_worker(workerid);
 		  }
 	  }

+ 3 - 3
src/sched_policies/eager_central_priority_policy.c

@@ -70,7 +70,7 @@ static void _starpu_destroy_priority_taskq(struct starpu_priority_taskq_s *prior
 static void initialize_eager_center_priority_policy_for_workers(unsigned sched_ctx_id, unsigned nnew_workers) 
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
-	unsigned nworkers_ctx = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers_ctx = sched_ctx->nworkers;
 
 	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
 	unsigned ntotal_workers = config->topology.nworkers;
@@ -86,7 +86,7 @@ static void initialize_eager_center_priority_policy_for_workers(unsigned sched_c
 
 	/* take into account the new number of threads at the next push */
 	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
-	sched_ctx->temp_nworkers_in_ctx = all_workers;
+	sched_ctx->temp_nworkers = all_workers;
 	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
 }
 
@@ -108,7 +108,7 @@ static void initialize_eager_center_priority_policy(unsigned sched_ctx_id)
 	PTHREAD_MUTEX_INIT(global_sched_mutex, NULL);
 	PTHREAD_COND_INIT(global_sched_cond, NULL);
 
-	int nworkers = sched_ctx->nworkers_in_ctx;
+	int nworkers = sched_ctx->nworkers;
 	int workerid_ctx;
 	for (workerid_ctx = 0; workerid_ctx < nworkers; workerid_ctx++)
 	{

+ 50 - 50
src/sched_policies/heft.c

@@ -57,7 +57,7 @@ void param_modified(struct starputop_param_t* d){
 static void heft_init_for_workers(unsigned sched_ctx_id, unsigned nnew_workers)
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
-	unsigned nworkers_ctx = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers_ctx = sched_ctx->nworkers;
 
 	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
 	unsigned ntotal_workers = config->topology.nworkers;
@@ -68,10 +68,10 @@ static void heft_init_for_workers(unsigned sched_ctx_id, unsigned nnew_workers)
 	int workerid;
 	for (workerid_ctx = nworkers_ctx; workerid_ctx < all_workers; workerid_ctx++)
 	  {
-	    workerid = sched_ctx->workerid[workerid_ctx];
+	    workerid = sched_ctx->workerids[workerid_ctx];
 	    struct starpu_worker_s *workerarg = _starpu_get_worker_struct(workerid);
 	    /* init these structures only once for each worker */
-	    if(workerarg->nctxs == 1)
+	    if(workerarg->nsched_ctxs == 1)
 	      {
 		exp_start[workerid] = starpu_timing_now();
 		exp_len[workerid] = 0.0;
@@ -88,7 +88,7 @@ static void heft_init_for_workers(unsigned sched_ctx_id, unsigned nnew_workers)
 
 	/* take into account the new number of threads at the next push */
 	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
-	sched_ctx->temp_nworkers_in_ctx = all_workers;
+	sched_ctx->temp_nworkers = all_workers;
 	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
 }
 static void heft_init(unsigned sched_ctx_id)
@@ -101,7 +101,7 @@ static void heft_init(unsigned sched_ctx_id)
 	
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 
-	unsigned nworkers = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers = sched_ctx->nworkers;
 	sched_ctx->policy_data = (void*)hd;
 
 	const char *strval_alpha = getenv("STARPU_SCHED_ALPHA");
@@ -129,10 +129,10 @@ static void heft_init(unsigned sched_ctx_id)
 
 	for (workerid_ctx = 0; workerid_ctx < nworkers; workerid_ctx++)
 	  {
-	    int workerid = sched_ctx->workerid[workerid_ctx];
+	    int workerid = sched_ctx->workerids[workerid_ctx];
 	    struct starpu_worker_s *workerarg = _starpu_get_worker_struct(workerid);
 	    /* init these structures only once for each worker */
-	    if(workerarg->nctxs == 1)
+	    if(workerarg->nsched_ctxs == 1)
 	      {
 		exp_start[workerid] = starpu_timing_now();
 		exp_len[workerid] = 0.0;
@@ -239,21 +239,21 @@ static void compute_all_performance_predictions(struct starpu_task *task,
   /* A priori, we know all estimations */
   int unknown = 0;
 
-  unsigned nworkers = sched_ctx->nworkers_in_ctx;
+  unsigned nworkers = sched_ctx->nworkers;
 
   unsigned nimpl;
   unsigned best_impl = 0;
-  unsigned worker, worker_in_ctx;
-  for (worker_in_ctx = 0; worker_in_ctx < nworkers; worker_in_ctx++)
+  unsigned worker, worker_ctx;
+  for (worker_ctx = 0; worker_ctx < nworkers; worker_ctx++)
     {
-		worker = sched_ctx->workerid[worker_in_ctx];
+		worker = sched_ctx->workerids[worker_ctx];
 		for (nimpl = 0; nimpl <STARPU_MAXIMPLEMENTATIONS; nimpl++) 
 		{
       		/* Sometimes workers didn't take the tasks as early as we expected */
       		exp_start[worker] = STARPU_MAX(exp_start[worker], starpu_timing_now());
-      		exp_end[worker_in_ctx] = exp_start[worker] + exp_len[worker];
-      		if (exp_end[worker_in_ctx] > max_exp_end)
- 				max_exp_end = exp_end[worker_in_ctx];
+      		exp_end[worker_ctx] = exp_start[worker] + exp_len[worker];
+      		if (exp_end[worker_ctx] > max_exp_end)
+ 				max_exp_end = exp_end[worker_ctx];
 
 			if (!starpu_worker_may_execute_task(worker, task, nimpl))
 			{
@@ -266,37 +266,37 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 
       		if (bundle)
       		{
-      			local_task_length[worker_in_ctx] = starpu_task_bundle_expected_length(bundle, perf_arch, nimpl);
-      	  		local_data_penalty[worker_in_ctx] = starpu_task_bundle_expected_data_transfer_time(bundle, memory_node);
-      	  		local_power[worker_in_ctx] = starpu_task_bundle_expected_power(bundle, perf_arch, nimpl);
-				//_STARPU_DEBUG("Scheduler heft bundle: task length (%lf) local power (%lf) worker (%u) kernel (%u) \n", local_task_length[worker_in_ctx],local_power[worker_in_ctx],worker,nimpl);
+      			local_task_length[worker_ctx] = starpu_task_bundle_expected_length(bundle, perf_arch, nimpl);
+      	  		local_data_penalty[worker_ctx] = starpu_task_bundle_expected_data_transfer_time(bundle, memory_node);
+      	  		local_power[worker_ctx] = starpu_task_bundle_expected_power(bundle, perf_arch, nimpl);
+				//_STARPU_DEBUG("Scheduler heft bundle: task length (%lf) local power (%lf) worker (%u) kernel (%u) \n", local_task_length[worker_ctx],local_power[worker_ctx],worker,nimpl);
       		}
       		else 
 			{
-				local_task_length[worker_in_ctx] = starpu_task_expected_length(task, perf_arch, nimpl);
-				local_data_penalty[worker_in_ctx] = starpu_task_expected_data_transfer_time(memory_node, task);
-				local_power[worker_in_ctx] = starpu_task_expected_power(task, perf_arch, nimpl);
-				//_STARPU_DEBUG("Scheduler heft: task length (%lf) local power (%lf) worker (%u) kernel (%u) \n", local_task_length[worker_in_ctx],local_power[worker_in_ctx],worker,nimpl);
+				local_task_length[worker_ctx] = starpu_task_expected_length(task, perf_arch, nimpl);
+				local_data_penalty[worker_ctx] = starpu_task_expected_data_transfer_time(memory_node, task);
+				local_power[worker_ctx] = starpu_task_expected_power(task, perf_arch, nimpl);
+				//_STARPU_DEBUG("Scheduler heft: task length (%lf) local power (%lf) worker (%u) kernel (%u) \n", local_task_length[worker_ctx],local_power[worker_ctx],worker,nimpl);
       		}
 
       		double ntasks_end = ntasks[worker] / starpu_worker_get_relative_speedup(perf_arch);
 
       		if (ntasks_best == -1
 	  			|| (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take better task */
-	  			|| (!calibrating && local_task_length[worker_in_ctx] == -1.0) /* Not calibrating but this worker is being calibrated */
-	  			|| (calibrating && local_task_length[worker_in_ctx] == -1.0 && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
+	  			|| (!calibrating && local_task_length[worker_ctx] == -1.0) /* Not calibrating but this worker is being calibrated */
+	  			|| (calibrating && local_task_length[worker_ctx] == -1.0 && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
 	  		) {
 				ntasks_best_end = ntasks_end;
 				ntasks_best = worker;
       		}
 
-     		if (local_task_length[worker_in_ctx] == -1.0)
+     		if (local_task_length[worker_ctx] == -1.0)
 				/* we are calibrating, we want to speed-up calibration time
 	 			* so we privilege non-calibrated tasks (but still
 	 			* greedily distribute them to avoid dumb schedules) */
 				calibrating = 1;
 
-      		if (local_task_length[worker_in_ctx] <= 0.0)
+      		if (local_task_length[worker_ctx] <= 0.0)
 				/* there is no prediction available for that task
 	 			* with that arch yet, so switch to a greedy strategy */
 				unknown = 1;
@@ -304,17 +304,17 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 			if (unknown)
 				continue;
 
-      		exp_end[worker_in_ctx] = exp_start[worker] + exp_len[worker] + local_task_length[worker_in_ctx];
+      		exp_end[worker_ctx] = exp_start[worker] + exp_len[worker] + local_task_length[worker_ctx];
 
-      		if (exp_end[worker_in_ctx] < best_exp_end)
+      		if (exp_end[worker_ctx] < best_exp_end)
 			{
 	  			/* a better solution was found */
-	  			best_exp_end = exp_end[worker_in_ctx];
+	  			best_exp_end = exp_end[worker_ctx];
 				best_impl = nimpl;
 			}
 
-      		if (local_power[worker_in_ctx] == -1.0)
-				local_power[worker_in_ctx] = 0.;
+      		if (local_power[worker_ctx] == -1.0)
+				local_power[worker_ctx] = 0.;
     	}
 	}
 
@@ -332,18 +332,18 @@ static int _heft_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 	heft_data *hd = (heft_data*)sched_ctx->policy_data;
-	unsigned worker, worker_in_ctx;
-	int best = -1, best_id_in_ctx = -1;
+	unsigned worker, worker_ctx;
+	int best = -1, best_id_ctx = -1;
 	
 	/* this flag is set if the corresponding worker is selected because
 	   there is no performance prediction available yet */
 	int forced_best;
 
-	unsigned nworkers_in_ctx = sched_ctx->nworkers_in_ctx;
-	double local_task_length[nworkers_in_ctx];
-	double local_data_penalty[nworkers_in_ctx];
-	double local_power[nworkers_in_ctx];
-	double exp_end[nworkers_in_ctx];
+	unsigned nworkers_ctx = sched_ctx->nworkers;
+	double local_task_length[nworkers_ctx];
+	double local_data_penalty[nworkers_ctx];
+	double local_power[nworkers_ctx];
+	double exp_end[nworkers_ctx];
 	double max_exp_end = 0.0;
 
 	double best_exp_end;
@@ -373,12 +373,12 @@ static int _heft_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 	 *	consumption.
 	 */
 	
-	double fitness[nworkers_in_ctx];
+	double fitness[nworkers_ctx];
 	double best_fitness = -1;
 
-	for (worker_in_ctx = 0; worker_in_ctx < nworkers_in_ctx; worker_in_ctx++)
+	for (worker_ctx = 0; worker_ctx < nworkers_ctx; worker_ctx++)
 	{
-		worker = sched_ctx->workerid[worker_in_ctx];
+		worker = sched_ctx->workerids[worker_ctx];
 
 		if (!starpu_worker_may_execute_task(worker, task, 0))
 		{
@@ -386,22 +386,22 @@ static int _heft_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 			continue;
 		}
 
-		fitness[worker_in_ctx] = hd->alpha*(exp_end[worker_in_ctx] - best_exp_end) 
-				+ hd->beta*(local_data_penalty[worker_in_ctx])
-				+ hd->_gamma*(local_power[worker_in_ctx]);
+		fitness[worker_ctx] = hd->alpha*(exp_end[worker_ctx] - best_exp_end) 
+				+ hd->beta*(local_data_penalty[worker_ctx])
+				+ hd->_gamma*(local_power[worker_ctx]);
 
-		if (exp_end[worker_in_ctx] > max_exp_end)
+		if (exp_end[worker_ctx] > max_exp_end)
 			/* This placement will make the computation
 			 * longer, take into account the idle
 			 * consumption of other cpus */
-			fitness[worker_in_ctx] += hd->_gamma * hd->idle_power * (exp_end[worker_in_ctx] - max_exp_end) / 1000000.0;
+			fitness[worker_ctx] += hd->_gamma * hd->idle_power * (exp_end[worker_ctx] - max_exp_end) / 1000000.0;
 
-		if (best == -1 || fitness[worker_in_ctx] < best_fitness)
+		if (best == -1 || fitness[worker_ctx] < best_fitness)
 		{
 			/* we found a better solution */
-			best_fitness = fitness[worker_in_ctx];
+			best_fitness = fitness[worker_ctx];
 			best = worker;
-			best_id_in_ctx = worker_in_ctx;
+			best_id_ctx = worker_ctx;
 		}
 	}
 
@@ -432,7 +432,7 @@ static int _heft_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 
 	}
 	else {
-		model_best = local_task_length[best_id_in_ctx];
+		model_best = local_task_length[best_id_ctx];
 	}
 
 	_starpu_increment_nsubmitted_tasks_of_worker(best);
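
The calibration fallback in compute_all_performance_predictions() is likewise untouched by the renaming: while performance models are missing, the scheduler degrades to greedy load balancing. A condensed restatement of that selection test as a stand-alone helper:

/* returns 1 if this candidate should become ntasks_best, the greedy
 * fallback target used while performance models are being calibrated */
static int prefer_for_calibration(int ntasks_best, int calibrating,
				  double ntasks_end, double ntasks_best_end,
				  double local_task_length)
{
	return ntasks_best == -1
		|| (!calibrating && ntasks_end < ntasks_best_end)	/* not calibrating: lighter queue wins */
		|| (!calibrating && local_task_length == -1.0)		/* this worker is still being calibrated */
		|| (calibrating && local_task_length == -1.0
		    && ntasks_end < ntasks_best_end);			/* calibrating: compete among non-calibrated */
}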

+ 10 - 10
src/sched_policies/parallel_greedy.c

@@ -49,19 +49,19 @@ static void initialize_pgreedy_policy(unsigned sched_ctx_id)
 	_starpu_sched_find_worker_combinations(topology);
 
 	unsigned workerid, workerid_ctx;
-	unsigned ncombinedworkers, nworkers, nworkers_in_ctx;
+	unsigned ncombinedworkers, nworkers, nworkers_ctx;
 	
 	nworkers = topology->nworkers;
-	nworkers_in_ctx = sched_ctx->nworkers_in_ctx;
+	nworkers_ctx = sched_ctx->nworkers;
 	ncombinedworkers = starpu_combined_worker_get_count();
 
 	/* Find the master of each worker. We first assign the worker as its
 	 * own master, and then iterate over the different worker combinations
 	 * to find the biggest combination containing this worker. */
 
-	for (workerid_ctx = 0; workerid_ctx < nworkers_in_ctx; workerid_ctx++)
+	for (workerid_ctx = 0; workerid_ctx < nworkers_ctx; workerid_ctx++)
 	  {
-    	        workerid = sched_ctx->workerid[workerid_ctx];
+    	        workerid = sched_ctx->workerids[workerid_ctx];
 
 		int cnt = possible_combinations_cnt[workerid]++;
 		possible_combinations[workerid][cnt] = workerid;
@@ -98,16 +98,16 @@ static void initialize_pgreedy_policy(unsigned sched_ctx_id)
 	PTHREAD_MUTEX_INIT(&sched_mutex, NULL);
 	PTHREAD_COND_INIT(&sched_cond, NULL);
 
-	for (workerid_ctx = 0; workerid_ctx < nworkers_in_ctx; workerid_ctx++)
+	for (workerid_ctx = 0; workerid_ctx < nworkers_ctx; workerid_ctx++)
 	{
-      	workerid = sched_ctx->workerid[workerid_ctx];
+      	workerid = sched_ctx->workerids[workerid_ctx];
 
 		PTHREAD_MUTEX_INIT(&master_sched_mutex[workerid], NULL);
 		PTHREAD_COND_INIT(&master_sched_cond[workerid], NULL);
 	}
-	for (workerid_ctx = 0; workerid_ctx < nworkers_in_ctx; workerid_ctx++)
+	for (workerid_ctx = 0; workerid_ctx < nworkers_ctx; workerid_ctx++)
     {
-		workerid = sched_ctx->workerid[workerid_ctx];
+		workerid = sched_ctx->workerids[workerid_ctx];
 
 		/* slaves pick up tasks from their local queue, their master
 		 * will put tasks directly in that local list when a parallel
@@ -130,9 +130,9 @@ static void initialize_pgreedy_policy(unsigned sched_ctx_id)
 	}
 
 #if 0
-	for (workerid_ctx = 0; workerid_ctx < nworkers_in_ctx; workerid_ctx++)
+	for (workerid_ctx = 0; workerid_ctx < nworkers_ctx; workerid_ctx++)
 	{
-        workerid = sched_ctx->workerid[workerid_ctx];
+        workerid = sched_ctx->workerids[workerid_ctx];
 
 		fprintf(stderr, "MASTER of %d = %d\n", workerid, master_id[workerid]);
 	}

+ 11 - 11
src/sched_policies/random_policy.c

@@ -24,17 +24,17 @@
 static int _random_push_task(struct starpu_task *task, unsigned prio, struct starpu_sched_ctx *sched_ctx)
 {
 	/* find the queue */
-        unsigned worker, worker_in_ctx;
+        unsigned worker, worker_ctx;
 
 	unsigned selected = 0;
 
 	double alpha_sum = 0.0;
 
-	unsigned nworkers = sched_ctx->nworkers_in_ctx;	
+	unsigned nworkers = sched_ctx->nworkers;	
 
-	for (worker_in_ctx = 0; worker_in_ctx < nworkers; worker_in_ctx++)
+	for (worker_ctx = 0; worker_ctx < nworkers; worker_ctx++)
 	{
-                worker = sched_ctx->workerid[worker_in_ctx];
+                worker = sched_ctx->workerids[worker_ctx];
 
 		enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
 		alpha_sum += starpu_worker_get_relative_speedup(perf_arch);
@@ -44,9 +44,9 @@ static int _random_push_task(struct starpu_task *task, unsigned prio, struct sta
 //	_STARPU_DEBUG("my rand is %e\n", random);
 
 	double alpha = 0.0;
-	for (worker_in_ctx = 0; worker_in_ctx < nworkers; worker_in_ctx++)
+	for (worker_ctx = 0; worker_ctx < nworkers; worker_ctx++)
 	{
-        worker = sched_ctx->workerid[worker_in_ctx];
+        worker = sched_ctx->workerids[worker_ctx];
 
 		enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
 		double worker_alpha = starpu_worker_get_relative_speedup(perf_arch);
@@ -78,7 +78,7 @@ static void initialize_random_policy_for_workers(unsigned sched_ctx_id, unsigned
 {
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 
-	unsigned nworkers_ctx = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers_ctx = sched_ctx->nworkers;
 
 	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
 	unsigned ntotal_workers = config->topology.nworkers;
@@ -89,14 +89,14 @@ static void initialize_random_policy_for_workers(unsigned sched_ctx_id, unsigned
 	int workerid;
 	for (workerid_ctx = nworkers_ctx; workerid_ctx < all_workers; workerid_ctx++)
 	{
-		workerid = sched_ctx->workerid[workerid_ctx];
+		workerid = sched_ctx->workerids[workerid_ctx];
 		struct starpu_worker_s *workerarg = _starpu_get_worker_struct(workerid);
 		sched_ctx->sched_mutex[workerid_ctx] = workerarg->sched_mutex;
 		sched_ctx->sched_cond[workerid_ctx] = workerarg->sched_cond;
 	}
 	/* take into account the new number of threads at the next push */
 	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
-	sched_ctx->temp_nworkers_in_ctx = all_workers;
+	sched_ctx->temp_nworkers = all_workers;
 	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
 }
 
@@ -106,13 +106,13 @@ static void initialize_random_policy(unsigned sched_ctx_id)
 
 	starpu_srand48(time(NULL));
 
-	unsigned nworkers = sched_ctx->nworkers_in_ctx;	
+	unsigned nworkers = sched_ctx->nworkers;	
 
 	unsigned workerid_ctx;
 	int workerid;
 	for (workerid_ctx = 0; workerid_ctx < nworkers; workerid_ctx++)
 	{
-		workerid = sched_ctx->workerid[workerid_ctx];
+		workerid = sched_ctx->workerids[workerid_ctx];
 		struct starpu_worker_s *workerarg = _starpu_get_worker_struct(workerid);
 		sched_ctx->sched_mutex[workerid_ctx] = workerarg->sched_mutex;
 		sched_ctx->sched_cond[workerid_ctx] = workerarg->sched_cond;
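
The loops renamed in _random_push_task() implement a speedup-weighted draw: each worker in the context gets a slice proportional to starpu_worker_get_relative_speedup(), and the drawn value selects the slice it lands in. A stand-alone sketch of that selection, with plain drand48() standing in for StarPU's seeded generator and a speedups array standing in for the per-worker lookups:

#include <stdlib.h>

/* pick a worker index with probability proportional to its relative
 * speedup: sum the weights, draw a value in [0, alpha_sum), and return
 * the slice it falls into */
static int pick_weighted_worker(const double *speedups, int nworkers)
{
	double alpha_sum = 0.0;
	int i;
	for (i = 0; i < nworkers; i++)
		alpha_sum += speedups[i];

	double random = drand48() * alpha_sum;

	double alpha = 0.0;
	for (i = 0; i < nworkers; i++) {
		if (random < alpha + speedups[i])
			return i;	/* the draw fell into this worker's slice */
		alpha += speedups[i];
	}
	return nworkers - 1;		/* guard against floating-point rounding */
}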

+ 6 - 6
src/sched_policies/work_stealing_policy.c

@@ -141,7 +141,7 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 	struct starpu_task *task;
 
 	int workerid = starpu_worker_get_id();
-	int workerid_ctx =  _starpu_get_index_in_ctx_of_workerid(sched_ctx_id, workerid);
+	int workerid_ctx =  _starpu_get_index_ctx_of_workerid(sched_ctx_id, workerid);
 
 	struct starpu_deque_jobq_s *q;
 
@@ -159,7 +159,7 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 	
 	/* we need to steal someone's job */
 	struct starpu_deque_jobq_s *victimq;
-	victimq = select_victimq(ws, sched_ctx->nworkers_in_ctx);
+	victimq = select_victimq(ws, sched_ctx->nworkers);
 
 	task = _starpu_deque_pop_task(victimq, workerid);
 	if (task) {
@@ -180,7 +180,7 @@ int ws_push_task(struct starpu_task *task, unsigned sched_ctx_id)
 	work_stealing_data *ws = (work_stealing_data*)sched_ctx->policy_data;
 
 	int workerid = starpu_worker_get_id();
-	int workerid_ctx =  _starpu_get_index_in_ctx_of_workerid(sched_ctx_id, workerid);
+	int workerid_ctx =  _starpu_get_index_ctx_of_workerid(sched_ctx_id, workerid);
 
         struct starpu_deque_jobq_s *deque_queue;
 	deque_queue = ws->queue_array[workerid_ctx];
@@ -205,7 +205,7 @@ static void initialize_ws_policy_for_workers(unsigned sched_ctx_id, unsigned nne
 	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx(sched_ctx_id);
 	work_stealing_data *ws = (work_stealing_data*)sched_ctx->policy_data;
 
-	unsigned nworkers_ctx = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers_ctx = sched_ctx->nworkers;
 
 	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
 	unsigned ntotal_workers = config->topology.nworkers;
@@ -222,7 +222,7 @@ static void initialize_ws_policy_for_workers(unsigned sched_ctx_id, unsigned nne
 	}
 	/* take into account the new number of threads at the next push */
 	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
-	sched_ctx->temp_nworkers_in_ctx = all_workers;
+	sched_ctx->temp_nworkers = all_workers;
 	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
 }
 
@@ -232,7 +232,7 @@ static void initialize_ws_policy(unsigned sched_ctx_id)
 	work_stealing_data *ws = (work_stealing_data*)malloc(sizeof(work_stealing_data));
 	sched_ctx->policy_data = (void*)ws;
 	
-	unsigned nworkers = sched_ctx->nworkers_in_ctx;
+	unsigned nworkers = sched_ctx->nworkers;
 	ws->rr_worker = 0;
 	ws->queue_array = (struct starpu_deque_jobq_s**)malloc(STARPU_NMAXWORKERS*sizeof(struct starpu_deque_jobq_s*));