rollback sched_ctx (forgot that here there are stuff that in the trunk aren't)

Andra Hugo, 10 years ago
Commit 819a37afbd
1 changed file with 252 additions and 58 deletions

src/core/sched_ctx.c (+252 -58)

@@ -34,11 +34,13 @@ static size_t data_size[STARPU_NMAX_SCHED_CTXS][STARPU_NMAXWORKERS];
 static unsigned _starpu_get_first_free_sched_ctx(struct _starpu_machine_config *config);
 static void _starpu_sched_ctx_add_workers_to_master(unsigned sched_ctx_id, int *workerids, int nworkers, int new_master);
 static void _starpu_sched_ctx_wake_these_workers_up(unsigned sched_ctx_id, int *workerids, int nworkers);
+static int _starpu_sched_ctx_find_master(unsigned sched_ctx_id, int *workerids, int nworkers);
+static void _starpu_sched_ctx_set_master(struct _starpu_sched_ctx *sched_ctx, int *workerids, int nworkers, int master);

 static void _starpu_worker_gets_into_ctx(unsigned sched_ctx_id, struct _starpu_worker *worker)
 {
 	unsigned ret_sched_ctx = _starpu_sched_ctx_list_get_sched_ctx(worker->sched_ctx_list, sched_ctx_id);
-	/* the worker was planning to go away in another ctx but finally he changed his mind & 
+	/* the worker was planning to go away in another ctx but finally he changed his mind &
 	   he's staying */
 	if (ret_sched_ctx == STARPU_NMAX_SCHED_CTXS)
 	{
@@ -143,7 +145,10 @@ static void _starpu_add_workers_to_sched_ctx(struct _starpu_sched_ctx *sched_ctx
 	int nworkers_to_add = nworkers == -1 ? (int)config->topology.nworkers : nworkers;
 	int workers_to_add[nworkers_to_add];

-
+	struct starpu_perfmodel_device devices[nworkers_to_add];
+	int ndevices = 0;
+	struct _starpu_worker *str_worker = NULL;
+	int worker;
 	int i = 0;
 	for(i = 0; i < nworkers_to_add; i++)
 	{
@@ -151,7 +156,7 @@ static void _starpu_add_workers_to_sched_ctx(struct _starpu_sched_ctx *sched_ctx
 		/* if the function is called at the creation of the context it's no need to do this verif */
 		if(added_workers)
 		{
-			int worker = workers->add(workers, (workerids == NULL ? i : workerids[i]));
+			worker = workers->add(workers, (workerids == NULL ? i : workerids[i]));
 			if(worker >= 0)
 				added_workers[(*n_added_workers)++] = worker;
 			else
@@ -169,22 +174,104 @@ static void _starpu_add_workers_to_sched_ctx(struct _starpu_sched_ctx *sched_ctx
 		}
 		else
 		{
-			int worker = (workerids == NULL ? i : workerids[i]);
+			worker = (workerids == NULL ? i : workerids[i]);
 			workers->add(workers, worker);
 			workers_to_add[i] = worker;
-			struct _starpu_worker *str_worker = _starpu_get_worker_struct(worker);
+			str_worker = _starpu_get_worker_struct(worker);
 			str_worker->tmp_sched_ctx = (int)sched_ctx->id;
+		}
+	}
+
+	int *wa;
+	int na;
+	if(added_workers)
+	{
+		na = *n_added_workers;
+		wa = added_workers;
+	}
+	else
+	{
+		na = nworkers_to_add;
+		wa = workers_to_add;
+	}
+
+	for(i = 0; i < na; i++)
+	{
+		worker = wa[i];
+		str_worker = _starpu_get_worker_struct(worker);
+		int dev1, dev2;
+		unsigned found = 0;
+		for(dev1 = 0; dev1 < str_worker->perf_arch.ndevices; dev1++)
+		{
+			for(dev2 = 0; dev2 < ndevices; dev2++)
+			{
+				if(devices[dev2].type == str_worker->perf_arch.devices[dev1].type &&
+				   devices[dev2].devid == str_worker->perf_arch.devices[dev1].devid)
+				{
+					devices[dev2].ncores += str_worker->perf_arch.devices[dev1].ncores;
+					found = 1;
+					break;
+				}
+			}
+			if(!found)
+			{
+				devices[ndevices].type = str_worker->perf_arch.devices[dev1].type;
+				devices[ndevices].devid = str_worker->perf_arch.devices[dev1].devid;
+				devices[ndevices].ncores = str_worker->perf_arch.devices[dev1].ncores;
+				ndevices++;
+			}
+			else
+				found = 0;
+		}
+	}
+
+	if(ndevices > 0)
+	{
+		if(sched_ctx->perf_arch.devices == NULL)
+			sched_ctx->perf_arch.devices = (struct starpu_perfmodel_device*)malloc(ndevices*sizeof(struct starpu_perfmodel_device));
+		else
+			sched_ctx->perf_arch.devices = (struct starpu_perfmodel_device*)realloc(sched_ctx->perf_arch.devices, (sched_ctx->perf_arch.ndevices+ndevices)*sizeof(struct starpu_perfmodel_device));
+		int dev1, dev2;
+		unsigned found = 0;
+		for(dev1 = 0; dev1 < ndevices; dev1++)
+		{
+			for(dev2 = 0; dev2 < sched_ctx->perf_arch.ndevices; dev2++)
+			{
+				if(sched_ctx->perf_arch.devices[dev2].type == devices[dev1].type && sched_ctx->perf_arch.devices[dev2].devid == devices[dev1].devid)
+				{
+					sched_ctx->perf_arch.devices[dev2].ncores += devices[dev1].ncores;
+					found = 1;
+				}
+			}
+
+			if(!found)
+			{
+				sched_ctx->perf_arch.devices[sched_ctx->perf_arch.ndevices].type = devices[dev1].type;
+				sched_ctx->perf_arch.devices[sched_ctx->perf_arch.ndevices].devid = devices[dev1].devid;
+				sched_ctx->perf_arch.devices[sched_ctx->perf_arch.ndevices].ncores = devices[dev1].ncores;
+				sched_ctx->perf_arch.ndevices++;
+			}
+			else
+				found = 0;

 		}
 	}

 	if(!sched_ctx->sched_policy)
 	{
-		if(sched_ctx->main_master == -1)
-			sched_ctx->main_master = starpu_sched_ctx_book_workers_for_task(sched_ctx->id, workerids, nworkers);
+		if(!sched_ctx->awake_workers)
+		{
+			if(sched_ctx->main_master == -1)
+				sched_ctx->main_master = starpu_sched_ctx_book_workers_for_task(sched_ctx->id, workerids, nworkers);
+			else
+			{
+				_starpu_sched_ctx_add_workers_to_master(sched_ctx->id, workerids, nworkers, sched_ctx->main_master);
+			}
+		}
 	else
 	{
-			_starpu_sched_ctx_add_workers_to_master(sched_ctx->id, workerids, nworkers, sched_ctx->main_master);
+			sched_ctx->main_master = _starpu_sched_ctx_find_master(sched_ctx->id, workerids, nworkers);
+			_starpu_sched_ctx_set_master(sched_ctx, workerids, nworkers, sched_ctx->main_master);
 		}
 	}
 	else if(sched_ctx->sched_policy->add_workers)
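
The block added above gives each context an aggregated starpu_perfmodel_arch: for every worker that joins, its perf_arch device entries are folded into the context's list, matching on (type, devid) and summing ncores. Below is a minimal sketch of that folding step, assuming only the fields the hunk itself uses; the helper name merge_device is hypothetical and not part of the patch.

/* Illustrative only: fold one perfmodel device entry into an accumulator,
 * the way the loops above merge worker devices into the context's arch. */
static void merge_device(struct starpu_perfmodel_device *acc, int *nacc,
			 const struct starpu_perfmodel_device *d)
{
	int i;
	for (i = 0; i < *nacc; i++)
	{
		if (acc[i].type == d->type && acc[i].devid == d->devid)
		{
			acc[i].ncores += d->ncores;	/* same device: add its cores */
			return;
		}
	}
	acc[*nacc] = *d;			/* unseen device: append it */
	(*nacc)++;
}
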
@@ -196,7 +283,9 @@ static void _starpu_add_workers_to_sched_ctx(struct _starpu_sched_ctx *sched_ctx
 				sched_ctx->sched_policy->add_workers(sched_ctx->id, added_workers, *n_added_workers);
 		}
 		else
+		{
 			sched_ctx->sched_policy->add_workers(sched_ctx->id, workers_to_add, nworkers_to_add);
+		}
 		_STARPU_TRACE_WORKER_SCHEDULING_POP;
 	}
 	return;
@@ -222,8 +311,86 @@ static void _starpu_remove_workers_from_sched_ctx(struct _starpu_sched_ctx *sche
 		}
 	}

+
+	struct starpu_perfmodel_device devices[workers->nworkers];
+	int ndevices = 0;
+
+	for(i = 0; i < *n_removed_workers; i++)
+	{
+		struct _starpu_worker *str_worker = _starpu_get_worker_struct(removed_workers[i]);
+		int dev1, dev2;
+		unsigned found = 0;
+		for(dev1 = 0; dev1 < sched_ctx->perf_arch.ndevices; dev1++)
+		{
+			for(dev2 = 0; dev2 < str_worker->perf_arch.ndevices; dev2++)
+			{
+				if(sched_ctx->perf_arch.devices[dev1].type == str_worker->perf_arch.devices[dev2].type &&
+				   sched_ctx->perf_arch.devices[dev1].devid == str_worker->perf_arch.devices[dev2].devid)
+				{
+					if(sched_ctx->perf_arch.devices[dev1].ncores > str_worker->perf_arch.devices[dev2].ncores)
+					{
+						devices[ndevices].ncores =  sched_ctx->perf_arch.devices[dev1].ncores -
+							str_worker->perf_arch.devices[dev2].ncores;
+						devices[ndevices].type = sched_ctx->perf_arch.devices[dev1].type;
+						devices[ndevices].devid = sched_ctx->perf_arch.devices[dev1].devid;
+						ndevices++;
+						found = 1;
+					}
+				}
+			}
+			if(found)
+				found = 0;
+		}
+
+	}
+
+	int worker;
+	unsigned found = 0;
+	int dev;
+	struct starpu_sched_ctx_iterator it;
+	if(workers->init_iterator)
+		workers->init_iterator(workers, &it);
+
+	while(workers->has_next(workers, &it))
+	{
+		worker = workers->get_next(workers, &it);
+		for(i = 0; i < *n_removed_workers; i++)
+		{
+			if(worker == removed_workers[i])
+			{
+				found = 1;
+				break;
+			}
+		}
+		if(!found)
+		{
+			struct _starpu_worker *str_worker = _starpu_get_worker_struct(worker);
+			for(dev = 0; dev < str_worker->perf_arch.ndevices; dev++)
+			{
+				devices[ndevices].type = str_worker->perf_arch.devices[dev].type;
+				devices[ndevices].devid = str_worker->perf_arch.devices[dev].devid;
+				devices[ndevices].ncores = str_worker->perf_arch.devices[dev].ncores;
+				ndevices++;
+			}
+		}
+		else
+			found = 0;
+	}
+	sched_ctx->perf_arch.ndevices = ndevices;
+	for(dev = 0; dev < ndevices; dev++)
+	{
+		sched_ctx->perf_arch.devices[dev].type = devices[dev].type;
+		sched_ctx->perf_arch.devices[dev].devid = devices[dev].devid;
+		sched_ctx->perf_arch.devices[dev].ncores = devices[dev].ncores;
+	}
+
 	if(!sched_ctx->sched_policy)
-		_starpu_sched_ctx_wake_these_workers_up(sched_ctx->id, removed_workers, *n_removed_workers);
+	{
+		if(!sched_ctx->awake_workers)
+		{
+			_starpu_sched_ctx_wake_these_workers_up(sched_ctx->id, removed_workers, *n_removed_workers);
+		}
+	}

 	return;
 }
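
The removal path works in the opposite direction: it first computes the device counts that survive the removed workers, then rewrites sched_ctx->perf_arch from the workers still present in the collection. The sketch below rebuilds that summary under the same assumptions; merge_device is the hypothetical helper sketched earlier, and unlike this sketch the hunk itself appends each remaining worker's entries without merging duplicates.

/* Illustrative only: recompute a context's device summary from the workers
 * that remain after removal. */
static int rebuild_devices(struct _starpu_sched_ctx *sched_ctx,
			   const int *removed, int nremoved,
			   struct starpu_perfmodel_device *out)
{
	struct starpu_worker_collection *workers = sched_ctx->workers;
	struct starpu_sched_ctx_iterator it;
	struct _starpu_worker *str_worker;
	int ndev = 0, i, dev, worker;

	if (workers->init_iterator)
		workers->init_iterator(workers, &it);
	while (workers->has_next(workers, &it))
	{
		int is_removed = 0;
		worker = workers->get_next(workers, &it);
		for (i = 0; i < nremoved; i++)
		{
			if (worker == removed[i])
			{
				is_removed = 1;
				break;
			}
		}
		if (is_removed)
			continue;
		/* keep the devices contributed by the workers that stay */
		str_worker = _starpu_get_worker_struct(worker);
		for (dev = 0; dev < str_worker->perf_arch.ndevices; dev++)
			merge_device(out, &ndev, &str_worker->perf_arch.devices[dev]);
	}
	return ndev;
}
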
@@ -276,7 +443,7 @@ struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *p
 						   int nworkers_ctx, unsigned is_initial_sched,
 						   const char *sched_ctx_name,
 						   int min_prio_set, int min_prio,
-						   int max_prio_set, int max_prio)
+						   int max_prio_set, int max_prio, unsigned awake_workers)
 {
 	struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();

@@ -315,7 +482,9 @@ struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *p

 	sched_ctx->ready_flops = 0.0;
 	sched_ctx->main_master = -1;
-	
+	sched_ctx->perf_arch.devices = NULL;
+	sched_ctx->perf_arch.ndevices = 0;
+
 	int w;
 	for(w = 0; w < nworkers; w++)
 	{
@@ -324,22 +493,24 @@ struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *p

 		STARPU_PTHREAD_COND_INIT(&sched_ctx->parallel_sect_cond[w], NULL);
 		STARPU_PTHREAD_MUTEX_INIT(&sched_ctx->parallel_sect_mutex[w], NULL);
-		
+
 		sched_ctx->master[w] = -1;
 		sched_ctx->parallel_sect[w] = 0;
 		sched_ctx->sleeping[w] = 0;
 	}

-	
+
         /*init the strategy structs and the worker_collection of the ressources of the context */
 	if(policy)
+	{
 		_starpu_init_sched_policy(config, sched_ctx, policy);
+		sched_ctx->awake_workers = 1;
+	}
 	else
+	{
+		sched_ctx->awake_workers = awake_workers;
 		starpu_sched_ctx_create_worker_collection(sched_ctx->id, STARPU_WORKER_LIST);
-	
-        /* construct the collection of workers(list/tree/etc.) */
-	sched_ctx->workers->init(sched_ctx->workers);
-
+	}

 	/* after having an worker_collection on the ressources add them */
 	_starpu_add_workers_to_sched_ctx(sched_ctx, workerids, nworkers_ctx, NULL, NULL);
@@ -366,7 +537,7 @@ struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *p
 			worker->nsched_ctxs++;
 		}
 	}
-	
+
 	return sched_ctx;
 }

@@ -505,7 +676,7 @@ unsigned starpu_sched_ctx_create_inside_interval(const char *policy_name, const
 	for(i = 0; i < nw; i++)
 		printf("%d ", workers[i]);
 	printf("\n");
-	sched_ctx = _starpu_create_sched_ctx(selected_policy, workers, nw, 0, sched_ctx_name, 0, 0, 0, 0);
+	sched_ctx = _starpu_create_sched_ctx(selected_policy, workers, nw, 0, sched_ctx_name, 0, 0, 0, 0, 1);
 	sched_ctx->min_ncpus = min_ncpus;
 	sched_ctx->max_ncpus = max_ncpus;
 	sched_ctx->min_ngpus = min_ngpus;
@@ -534,6 +705,7 @@ unsigned starpu_sched_ctx_create(int *workerids, int nworkers, const char *sched
 	struct starpu_sched_policy *sched_policy = NULL;
 	unsigned hierarchy_level = 0;
 	unsigned nesting_sched_ctx = STARPU_NMAX_SCHED_CTXS;
+	unsigned awake_workers = 0;

 	va_start(varg_list, sched_ctx_name);
 	while ((arg_type = va_arg(varg_list, int)) != 0)
@@ -566,6 +738,10 @@ unsigned starpu_sched_ctx_create(int *workerids, int nworkers, const char *sched
 		{
 			nesting_sched_ctx = va_arg(varg_list, unsigned);
 		}
+		else if (arg_type == STARPU_SCHED_CTX_AWAKE_WORKERS)
+		{
+			awake_workers = 1;
+		}
 		else
 		{
 			STARPU_ABORT_MSG("Unrecognized argument %d\n", arg_type);
@@ -575,7 +751,7 @@ unsigned starpu_sched_ctx_create(int *workerids, int nworkers, const char *sched
 	va_end(varg_list);

 	struct _starpu_sched_ctx *sched_ctx = NULL;
-	sched_ctx = _starpu_create_sched_ctx(sched_policy, workerids, nworkers, 0, sched_ctx_name, min_prio_set, min_prio, max_prio_set, max_prio);
+	sched_ctx = _starpu_create_sched_ctx(sched_policy, workerids, nworkers, 0, sched_ctx_name, min_prio_set, min_prio, max_prio_set, max_prio, awake_workers);
 	sched_ctx->hierarchy_level = hierarchy_level;
 	sched_ctx->nesting_sched_ctx = nesting_sched_ctx;

@@ -618,7 +794,16 @@ static void _starpu_delete_sched_ctx(struct _starpu_sched_ctx *sched_ctx)
 		free(sched_ctx->sched_policy);
 		sched_ctx->sched_policy = NULL;
 	}
-	
+	else
+	{
+		starpu_sched_ctx_delete_worker_collection(sched_ctx->id);
+	}
+
+	if (sched_ctx->perf_arch.devices)
+	{
+		free(sched_ctx->perf_arch.devices);
+		sched_ctx->perf_arch.devices = NULL;
+	}

 	STARPU_PTHREAD_MUTEX_DESTROY(&sched_ctx->empty_ctx_mutex);
 	sched_ctx->id = STARPU_NMAX_SCHED_CTXS;
@@ -654,13 +839,13 @@ void starpu_sched_ctx_delete(unsigned sched_ctx_id)

 	int *workerids;
 	unsigned nworkers_ctx = starpu_sched_ctx_get_workers_list(sched_ctx->id, &workerids);
-	
+
 	/*if both of them have all the ressources is pointless*/
 	/*trying to transfer ressources from one ctx to the other*/
 	struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();
 	unsigned nworkers = config->topology.nworkers;

-	if(nworkers_ctx > 0 && inheritor_sched_ctx && inheritor_sched_ctx->id != STARPU_NMAX_SCHED_CTXS && 
+	if(nworkers_ctx > 0 && inheritor_sched_ctx && inheritor_sched_ctx->id != STARPU_NMAX_SCHED_CTXS &&
 	   !(nworkers_ctx == nworkers && nworkers_ctx == inheritor_sched_ctx->workers->nworkers))
 	{
 		starpu_sched_ctx_add_workers(workerids, nworkers_ctx, inheritor_sched_ctx_id);
@@ -677,8 +862,8 @@ void starpu_sched_ctx_delete(unsigned sched_ctx_id)
 		_starpu_update_workers_without_ctx(workerids, nworkers_ctx, sched_ctx_id, 1);
 //		_starpu_sched_ctx_free_scheduling_data(sched_ctx);
 		_starpu_delete_sched_ctx(sched_ctx);
-
 	}
+
 	/* workerids is malloc-ed in starpu_sched_ctx_get_workers_list, don't forget to free it when
 	   you don't use it anymore */
 	free(workerids);
@@ -735,7 +920,7 @@ void _starpu_fetch_tasks_from_empty_ctx_list(struct _starpu_sched_ctx *sched_ctx
                 /* you're not suppose to get here if you deleted the context
 		   so no point in having the mutex locked */
 		STARPU_PTHREAD_RWLOCK_UNLOCK(&changing_ctx_mutex[sched_ctx->id]);
-	
+
 	while(!starpu_task_list_empty(&sched_ctx->empty_ctx_tasks))
 	{
 		if(unlocked)
@@ -815,7 +1000,7 @@ void starpu_sched_ctx_add_workers(int *workers_to_add, int nworkers_to_add, unsi
 	if(sched_ctx->id != STARPU_NMAX_SCHED_CTXS)
 	{
 		_starpu_add_workers_to_sched_ctx(sched_ctx, workers_to_add, nworkers_to_add, added_workers, &n_added_workers);
-		
+
 		if(n_added_workers > 0)
 		{
 			_starpu_update_workers_with_ctx(added_workers, n_added_workers, sched_ctx->id);
@@ -875,7 +1060,7 @@ int _starpu_nworkers_able_to_execute_task(struct starpu_task *task, struct _star

 	STARPU_PTHREAD_RWLOCK_WRLOCK(&changing_ctx_mutex[sched_ctx->id]);
 	struct starpu_worker_collection *workers = sched_ctx->workers;
-	
+
 	struct starpu_sched_ctx_iterator it;
 	if(workers->init_iterator)
 		workers->init_iterator(workers, &it);
@@ -953,7 +1138,7 @@ void _starpu_decrement_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)

 				int *workerids = NULL;
 				unsigned nworkers = starpu_sched_ctx_get_workers_list(sched_ctx->id, &workerids);
-				
+
 				if(nworkers > 0)
 				{
 					starpu_sched_ctx_add_workers(workerids, nworkers, sched_ctx->inheritor);
@@ -1174,6 +1359,9 @@ struct starpu_worker_collection* starpu_sched_ctx_create_worker_collection(unsig

 	}

+        /* construct the collection of workers(list/tree/etc.) */
+	sched_ctx->workers->init(sched_ctx->workers);
+
 	return sched_ctx->workers;
 }

@@ -1219,6 +1407,7 @@ void starpu_sched_ctx_delete_worker_collection(unsigned sched_ctx_id)
 	sched_ctx->workers->deinit(sched_ctx->workers);

 	free(sched_ctx->workers);
+	sched_ctx->workers = NULL;
 }

 struct starpu_worker_collection* starpu_sched_ctx_get_worker_collection(unsigned sched_ctx_id)
@@ -1361,7 +1550,7 @@ unsigned starpu_sched_ctx_worker_get_id(unsigned sched_ctx_id)
 			return workerid;
 	return -1;
 }
-		 
+
 unsigned starpu_sched_ctx_overlapping_ctxs_on_worker(int workerid)
 {
 	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
@@ -1407,7 +1596,7 @@ void starpu_sched_ctx_finished_submit(unsigned sched_ctx_id)
 void _starpu_sched_ctx_post_exec_task_cb(int workerid, struct starpu_task *task, size_t data_size2, uint32_t footprint)
 {
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
-	if(sched_ctx != NULL && task->sched_ctx != _starpu_get_initial_sched_ctx()->id && 
+	if(sched_ctx != NULL && task->sched_ctx != _starpu_get_initial_sched_ctx()->id &&
 	   task->sched_ctx != STARPU_NMAX_SCHED_CTXS  && sched_ctx->perf_counters != NULL)
 	{
 		flops[task->sched_ctx][workerid] += task->flops;
@@ -1542,7 +1731,7 @@ unsigned _starpu_sched_ctx_last_worker_awake(struct _starpu_worker *worker)
         for (l = worker->sched_ctx_list; l; l = l->next)
         {
 		struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(l->sched_ctx);
-		
+
 		unsigned last_worker_awake = 1;
 		struct starpu_worker_collection *workers = sched_ctx->workers;
 		struct starpu_sched_ctx_iterator it;
@@ -1550,7 +1739,7 @@ unsigned _starpu_sched_ctx_last_worker_awake(struct _starpu_worker *worker)
 		int workerid;
 		if(workers->init_iterator)
 			workers->init_iterator(workers, &it);
-		
+
 		while(workers->has_next(workers, &it))
 		{
 			workerid = workers->get_next(workers, &it);
@@ -1586,7 +1775,7 @@ void starpu_sched_ctx_bind_current_thread_to_cpuid(unsigned cpuid STARPU_ATTRIBU
 							  config->pu_depth, cpuid);
 		hwloc_bitmap_t set = obj->cpuset;
 		int ret;
-		
+
 		hwloc_bitmap_singlify(set);
 		ret = hwloc_set_cpubind (config->topology.hwtopology, set,
 					 HWLOC_CPUBIND_THREAD);
@@ -1632,7 +1821,7 @@ unsigned starpu_sched_ctx_worker_is_master_for_child_ctx(int workerid, unsigned
 	struct _starpu_sched_ctx_list *l = NULL;
 	struct _starpu_sched_ctx *sched_ctx = NULL;
 	for (l = worker->sched_ctx_list; l; l = l->next)
-	{ 
+	{
 		 sched_ctx = _starpu_get_sched_ctx_struct(l->sched_ctx);
 		if(sched_ctx-> main_master == workerid && sched_ctx->nesting_sched_ctx == sched_ctx_id)
 			return sched_ctx->id;
@@ -1641,6 +1830,18 @@ unsigned starpu_sched_ctx_worker_is_master_for_child_ctx(int workerid, unsigned

 }

+struct _starpu_sched_ctx *_starpu_sched_ctx_get_sched_ctx_for_worker_and_job(struct _starpu_worker *worker, struct _starpu_job *j)
+{
+	struct _starpu_sched_ctx_list *l = NULL;
+	for (l = worker->sched_ctx_list; l; l = l->next)
+	{
+		struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(l->sched_ctx);
+		if (j->task->sched_ctx == sched_ctx->id)
+			return sched_ctx;
+	}
+	return NULL;
+}
+
 void starpu_sched_ctx_revert_task_counters(unsigned sched_ctx_id, double ready_flops)
 {
         _starpu_decrement_nsubmitted_tasks_of_sched_ctx(sched_ctx_id);
@@ -1866,7 +2067,7 @@ static int _starpu_sched_ctx_find_master(unsigned sched_ctx_id, int *workerids,
                 if (master > -1)
 		{
                         int already_seen = 0;
-                        //Could create a function for this. Basically searching an element in an array.                                                                                                             
+                        //Could create a function for this. Basically searching an element in an array.
                         for (i = 0 ; i < npotential_masters; i++)
                         {
                                 if (potential_masters[i] == master)
@@ -1884,7 +2085,7 @@ static int _starpu_sched_ctx_find_master(unsigned sched_ctx_id, int *workerids,

         for (i = 0 ; i < npotential_masters ; i++) {
 		int master_is_in_section = 0;
-		//Could create a function for this. Basically searching an element in an array.                                                                                                                     
+		//Could create a function for this. Basically searching an element in an array.
 		for (w = 0 ; w < nworkers ; w++)
 		{
 			if (workerids[w] == potential_masters[i])
@@ -1920,7 +2121,7 @@ static void _starpu_sched_ctx_add_workers_to_master(unsigned sched_ctx_id, int *
 	int nwake_up = 0;
 	int put_to_sleep[nworkers];
 	int wake_up[nworkers];
-	
+
 	for(w = 0 ; w < nworkers ; w++)
 	{
 		int master = sched_ctx->master[workerids[w]];
@@ -1937,9 +2138,19 @@ static void _starpu_sched_ctx_add_workers_to_master(unsigned sched_ctx_id, int *

 }

+static void _starpu_sched_ctx_set_master(struct _starpu_sched_ctx *sched_ctx, int *workerids, int nworkers, int master)
+{
+	int i;
+	for(i = 0; i < nworkers; i++)
+	{
+		if(workerids[i] != master)
+			sched_ctx->master[workerids[i]] = master;
+	}
+}
+
 int starpu_sched_ctx_book_workers_for_task(unsigned sched_ctx_id, int *workerids, int nworkers)
-{ 
-	int new_master = _starpu_sched_ctx_find_master(sched_ctx_id, workerids, nworkers);	
+{
+	int new_master = _starpu_sched_ctx_find_master(sched_ctx_id, workerids, nworkers);
 	_starpu_sched_ctx_add_workers_to_master(sched_ctx_id, workerids, nworkers, new_master);
 	return new_master;
 }
@@ -1950,25 +2161,8 @@ void starpu_sched_ctx_unbook_workers_for_task(unsigned sched_ctx_id, int master)
 	_starpu_sched_ctx_wake_up_workers(sched_ctx_id, master);
 }

-int starpu_sched_ctx_get_worker_rank(unsigned sched_ctx_id)
+struct starpu_perfmodel_arch * _starpu_sched_ctx_get_perf_archtype(unsigned sched_ctx_id)
 {
-	int idx = 0;
-	int curr_workerid = starpu_worker_get_id();
-	int worker;
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
-
-	struct starpu_sched_ctx_iterator it;
-	if(workers->init_iterator)
-		workers->init_iterator(workers, &it);
-
-	while(workers->has_next(workers, &it))
-	{
-		worker = workers->get_next(workers, &it);
-		if(worker == curr_workerid)
-			return idx;
-		idx++;
-	}
-
-	return -1;
+	return &sched_ctx->perf_arch;
 }
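
Here starpu_sched_ctx_get_worker_rank is replaced by _starpu_sched_ctx_get_perf_archtype, which exposes the per-context perf_arch aggregated by the hunks above. An illustrative read of the returned structure, using only fields this patch maintains (STARPU_CPU_WORKER is the usual CPU entry of enum starpu_worker_archtype):

struct starpu_perfmodel_arch *arch = _starpu_sched_ctx_get_perf_archtype(sched_ctx_id);
int i, ncpu_cores = 0;
for (i = 0; i < arch->ndevices; i++)
{
	/* count the CPU cores currently owned by the context */
	if (arch->devices[i].type == STARPU_CPU_WORKER)
		ncpu_cores += arch->devices[i].ncores;
}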