
Bug fixing + redim conf

Andra Hugo · 13 years ago · commit da627ff571

+ 1 - 0
include/starpu_scheduler.h

@@ -129,6 +129,7 @@ struct worker_collection {
 
 struct starpu_sched_ctx_hypervisor_criteria {
 	void (*idle_time_cb)(unsigned sched_ctx, int worker, double idle_time);
+	void (*reset_idle_time_cb)(unsigned sched_ctx, int worker);
 	void (*working_time_cb)(unsigned sched_ctx, double working_time);
 	void (*pushed_task_cb)(unsigned sched_ctx, int worker);
 	void (*poped_task_cb)(unsigned sched_ctx, int worker);
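
Note: the new reset_idle_time_cb completes the idle-accounting protocol: idle_time_cb accumulates idle time while a worker finds no task, and the new callback clears that count as soon as the worker gets work again. A minimal, self-contained sketch of the protocol (struct and values are illustrative; the real call sites are in src/core/sched_policy.c further down):

    #include <stdio.h>

    /* Mirrors starpu_sched_ctx_hypervisor_criteria; only the two
       idle-related callbacks are sketched here. */
    struct criteria {
    	void (*idle_time_cb)(unsigned sched_ctx, int worker, double idle_time);
    	void (*reset_idle_time_cb)(unsigned sched_ctx, int worker);
    };

    static double idle[16];

    static void on_idle(unsigned sched_ctx, int worker, double t) { idle[worker] += t; }
    static void on_reset(unsigned sched_ctx, int worker) { idle[worker] = 0.0; }

    int main(void)
    {
    	struct criteria c = { on_idle, on_reset };
    	int got_task = 0;                  /* pretend pop returned NULL this round */
    	if (!got_task)
    		c.idle_time_cb(0, 3, 1.0);     /* worker 3 idled one round */
    	else
    		c.reset_idle_time_cb(0, 3);    /* worker 3 got work: clear its counter */
    	printf("idle[3] = %.1f\n", idle[3]);
    	return 0;
    }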

+ 2 - 2
sched_ctx_hypervisor/examples/cholesky/cholesky_implicit.c

@@ -119,8 +119,8 @@ static void _cholesky(starpu_data_handle dataA, unsigned nblocks)
 					starpu_data_handle sdataki = starpu_data_get_sub_data(dataA, 2, k, i);
 					starpu_data_handle sdataij = starpu_data_get_sub_data(dataA, 2, i, j);
 
-					if(k == (nblocks-1) && j == (nblocks-1) &&
-					   i == (nblocks-1) && with_ctxs)
+					if(k == (nblocks-2) && j == (nblocks-1) &&
+					   i == (k + 1) && with_ctxs)
 					{
 						starpu_insert_task(&cl22,
 								   STARPU_PRIORITY, ((i == k+1) && (j == k+1))?prio_level:STARPU_DEFAULT_PRIO,

+ 75 - 72
sched_ctx_hypervisor/examples/sched_ctx_utils/sched_ctx_utils.c

@@ -34,6 +34,7 @@ int first = 1;
 pthread_mutex_t mut;
 retvals rv[2];
 params p1, p2;
+int it = 0;
 
 struct sched_ctx_hypervisor_reply reply1[NSAMPLES*2*2];
 struct sched_ctx_hypervisor_reply reply2[NSAMPLES*2*2];
@@ -83,22 +84,21 @@ void* start_bench(void *val){
 
 	for(i = 0; i < NSAMPLES; i++)
 		p->bench(p->mat[i], p->size, p->nblocks);
-
-	if(p->ctx != 0)
-	{
-		pthread_mutex_lock(&mut);
-		if(first){
-		 	sched_ctx_hypervisor_ignore_ctx(p->ctx);
-			starpu_delete_sched_ctx(p->ctx, p->the_other_ctx);
-		}
-
-		first = 0;
-		pthread_mutex_unlock(&mut);
-	}
-
+	
+	/* if(p->ctx != 0) */
+	/* { */
+	/* 	pthread_mutex_lock(&mut); */
+	/* 	if(first){ */
+	/* 		sched_ctx_hypervisor_ignore_ctx(p->ctx); */
+	/* 		starpu_delete_sched_ctx(p->ctx, p->the_other_ctx); */
+	/* 	} */
+		
+	/* 	first = 0; */
+	/* 	pthread_mutex_unlock(&mut); */
+	/* } */
+	sched_ctx_hypervisor_stop_resize(p->the_other_ctx);
 	rv[p->id].flops /= NSAMPLES;
 	rv[p->id].avg_timing /= NSAMPLES;
-	
 }
 
 float* construct_matrix(unsigned size)
@@ -151,8 +151,6 @@ void start_2benchs(void (*bench)(float*, unsigned, unsigned))
 	gettimeofday(&end, NULL);
 
 	pthread_mutex_destroy(&mut);
-//	sched_ctx_hypervisor_ignore_ctx(p1.ctx);
-//	sched_ctx_hypervisor_ignore_ctx(p2.ctx);
 
 	double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
 	timing /= 1000000;
@@ -251,17 +249,11 @@ void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 	sched_ctx_hypervisor_handle_ctx(p1.ctx);
 	
 	sched_ctx_hypervisor_ioctl(p1.ctx,
-				   HYPERVISOR_MAX_IDLE, p1.procs, p1.nprocs, 1000000.0,
-				   HYPERVISOR_MAX_IDLE, p1.procs, gpu+gpu1, 100000000.0,
-				   HYPERVISOR_MIN_WORKING, p1.procs, p1.nprocs, 200.0,
-//				   HYPERVISOR_PRIORITY, p1.procs, p1.nprocs, 1,
-				   HYPERVISOR_PRIORITY, p1.procs, gpu+gpu1, 2,
-				   HYPERVISOR_MIN_PROCS, 0,
-				   HYPERVISOR_MAX_PROCS, 11,
-				   HYPERVISOR_GRANULARITY, 2,
-//				   HYPERVISOR_FIXED_PROCS, p1.procs, gpu,
-				   HYPERVISOR_MIN_TASKS, 10000,
-				   HYPERVISOR_NEW_WORKERS_MAX_IDLE, 1000000.0,
+				   HYPERVISOR_MAX_IDLE, p1.procs, p1.nprocs, 5000.0,
+				   HYPERVISOR_MAX_IDLE, p1.procs, gpu+gpu1, 100000.0,
+				   HYPERVISOR_GRANULARITY, 4,
+				   HYPERVISOR_MIN_TASKS, 1000,
+				   HYPERVISOR_NEW_WORKERS_MAX_IDLE, 100000.0,
 				   NULL);
 
 	k = 0;
@@ -282,66 +274,77 @@ void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 	sched_ctx_hypervisor_handle_ctx(p2.ctx);
 	
 	sched_ctx_hypervisor_ioctl(p2.ctx,
-				   HYPERVISOR_MAX_IDLE, p2.procs, p2.nprocs, 100000.0,
-				   HYPERVISOR_MAX_IDLE, p2.procs, gpu+gpu2, 10000000000.0,
-				   HYPERVISOR_MIN_WORKING, p2.procs, p2.nprocs, 200.0,
-//				   HYPERVISOR_PRIORITY, p2.procs, p2.nprocs, 1,
-				   HYPERVISOR_PRIORITY, p2.procs, gpu+gpu2, 2,
-				   HYPERVISOR_MIN_PROCS, 0,
-				   HYPERVISOR_MAX_PROCS, 11,
+				   HYPERVISOR_MAX_IDLE, p2.procs, p2.nprocs, 2000.0,
+				   HYPERVISOR_MAX_IDLE, p2.procs, gpu+gpu2, 5000.0,
 				   HYPERVISOR_GRANULARITY, 4,
-				   HYPERVISOR_FIXED_PROCS, p2.procs, gpu,
-				   HYPERVISOR_MIN_TASKS, 10000,
-				   HYPERVISOR_NEW_WORKERS_MAX_IDLE, 100000.0,
+				   HYPERVISOR_MIN_TASKS, 500,
+				   HYPERVISOR_NEW_WORKERS_MAX_IDLE, 1000.0,
 				   NULL);
 }
 
 void set_hypervisor_conf(int event, int task_tag)
 {
 	unsigned *id = pthread_getspecific(key);
-	int reset_conf = 1;
-	pthread_mutex_lock(&mut);
-	reset_conf = first;
-	pthread_mutex_unlock(&mut);
-
 
-	if(reset_conf)
+	if(*id == 1)
 	{
-		if(*id == 1)
-		{			
-			if(event == START_BENCH)
-			{
-				//sched_ctx_hypervisor_request(p2.ctx, p2.procs, p2.nprocs, task_tag);
-				/* sched_ctx_hypervisor_ioctl(p2.ctx, */
-				/* 			   HYPERVISOR_MAX_IDLE, p2.procs, p2.nprocs, 10000000.0, */
-				/* 			   HYPERVISOR_TIME_TO_APPLY, task_tag, */
-				/* 			   HYPERVISOR_GRANULARITY, 1, */
-				/* 			   NULL); */
-			}
-		} else {
-
-			if(event == START_BENCH)
+		if(event == START_BENCH)
+		{
+			int procs[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+			sched_ctx_hypervisor_ioctl(p1.ctx,
+						   HYPERVISOR_MAX_IDLE, procs, 12, 500.0,
+						   HYPERVISOR_MAX_IDLE, procs, 3, 100.0,
+						   HYPERVISOR_TIME_TO_APPLY, task_tag,
+						   NULL);
+		}
+		else
+		{
+			/* int procs[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
+			/* sched_ctx_hypervisor_ioctl(p2.ctx, */
+			/* 			   HYPERVISOR_MAX_IDLE, procs, 12, 800.0, */
+			/* 			   HYPERVISOR_MAX_IDLE, procs, 3, 100.0, */
+			/* 			   HYPERVISOR_TIME_TO_APPLY, task_tag, */
+			/* 			   NULL); */
+		}
+		
+	} else {
+		if(event == START_BENCH)
+		{
+			int procs[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+			sched_ctx_hypervisor_ioctl(p1.ctx,
+						   HYPERVISOR_MAX_IDLE, procs, 12, 500.0,
+//						   HYPERVISOR_MAX_IDLE, procs, 3, 1000.0,
+						   HYPERVISOR_TIME_TO_APPLY, task_tag,
+						   HYPERVISOR_GRANULARITY, 2,
+						   NULL);
+		}
+		if(event == END_BENCH)
+		{
+			if(it < 3)
 			{
 				int procs[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-				sched_ctx_hypervisor_request(p1.ctx, procs, 12, task_tag);
-				/* sched_ctx_hypervisor_ioctl(p1.ctx, */
-				/* 			   HYPERVISOR_MAX_IDLE, procs, 12, 1000.0, */
-				/* 			   HYPERVISOR_MAX_IDLE, procs, 3, 1000000.0, */
-				/* 			   HYPERVISOR_TIME_TO_APPLY, task_tag, */
-				/* 			   HYPERVISOR_GRANULARITY, 4, */
-				/* 			   NULL); */
+				sched_ctx_hypervisor_ioctl(p1.ctx,
+							   HYPERVISOR_MAX_IDLE, procs, 12, 300.0,
+							   HYPERVISOR_MAX_IDLE, procs, 3, 800.0,
+							   HYPERVISOR_TIME_TO_APPLY, task_tag,
+							   HYPERVISOR_GRANULARITY, 4,
+							   NULL);
 			}
-			else
+			if(it == 4)
 			{
-				/* int procs[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
-				/* sched_ctx_hypervisor_ioctl(p1.ctx, */
-				/* 			   HYPERVISOR_MAX_IDLE, procs, 12, 1000.0, */
-				/* 			   HYPERVISOR_TIME_TO_APPLY, task_tag, */
-				/* 			   HYPERVISOR_GRANULARITY, 4, */
-				/* 			   NULL); */
-			}
+				int procs[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+				sched_ctx_hypervisor_ioctl(p1.ctx,
+							   HYPERVISOR_MAX_IDLE, procs, 12, 200.0,
+							   HYPERVISOR_MAX_IDLE, procs, 3, 10000.0,
+							   HYPERVISOR_TIME_TO_APPLY, task_tag,
+							   HYPERVISOR_GRANULARITY, 2,
+							   NULL);
 
+			}
+			
+			it++;
 		}
+
 	}
 }
 

+ 16 - 11
sched_ctx_hypervisor/include/sched_ctx_hypervisor.h

@@ -4,16 +4,16 @@
 #include <pthread.h>
 
 /* ioctl properties*/
-#define HYPERVISOR_MAX_IDLE 1
-#define HYPERVISOR_MIN_WORKING 2
-#define HYPERVISOR_PRIORITY 3
-#define HYPERVISOR_MIN_PROCS 4
-#define HYPERVISOR_MAX_PROCS 5
-#define HYPERVISOR_GRANULARITY 6
-#define HYPERVISOR_FIXED_PROCS 7
-#define HYPERVISOR_MIN_TASKS 8
-#define HYPERVISOR_NEW_WORKERS_MAX_IDLE 9
-#define HYPERVISOR_TIME_TO_APPLY 10
+#define HYPERVISOR_MAX_IDLE (1<<1)
+#define HYPERVISOR_MIN_WORKING (1<<2)
+#define HYPERVISOR_PRIORITY (1<<3)
+#define HYPERVISOR_MIN_PROCS (1<<4)
+#define HYPERVISOR_MAX_PROCS (1<<5)
+#define HYPERVISOR_GRANULARITY (1<<6)
+#define HYPERVISOR_FIXED_PROCS (1<<7)
+#define HYPERVISOR_MIN_TASKS (1<<8)
+#define HYPERVISOR_NEW_WORKERS_MAX_IDLE (1<<9)
+#define HYPERVISOR_TIME_TO_APPLY (1<<10)
 
 struct sched_ctx_hypervisor_reply{
 	int procs[STARPU_NMAXWORKERS];
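
Note: simple_ioctl() reads these properties back as integer tags from the varargs list, so each only needs a distinct nonzero value; moving to (1<<n) keeps them distinct even if they are ever combined as a bitmask. A hedged usage sketch (values purely illustrative; ctx assumed already registered with sched_ctx_hypervisor_handle_ctx()):

    #include <sched_ctx_hypervisor.h>

    void configure_ctx(unsigned ctx, int *procs, int nprocs)
    {
    	sched_ctx_hypervisor_ioctl(ctx,
    				   HYPERVISOR_MAX_IDLE, procs, nprocs, 5000.0,
    				   HYPERVISOR_GRANULARITY, 2,
    				   HYPERVISOR_MIN_TASKS, 1000,
    				   NULL);	/* the property list is NULL/0-terminated */
    }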
@@ -31,6 +31,10 @@ void sched_ctx_hypervisor_ignore_ctx(unsigned sched_ctx);
 
 void sched_ctx_hypervisor_resize(unsigned sender_sched_ctx, unsigned receier_sched_ctx, int *workers_to_move, unsigned nworkers_to_movex);
 
+void sched_ctx_hypervisor_stop_resize(unsigned sched_ctx);
+
+void sched_ctx_hypervisor_start_resize(unsigned sched_ctx);
+
 void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config);
 
 void* sched_ctx_hypervisor_get_config(unsigned sched_ctx);
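
Note: the new stop/start pair lets an application pin a context's worker assignment during a phase, as start_bench() now does above by calling sched_ctx_hypervisor_stop_resize() on the peer context. A hedged sketch (run_phase() is a hypothetical benchmark body; ctx assumed handled by the hypervisor):

    #include <sched_ctx_hypervisor.h>

    extern void run_phase(void);	/* hypothetical measured phase */

    void measured_phase(unsigned ctx)
    {
    	sched_ctx_hypervisor_stop_resize(ctx);	/* workers stay put */
    	run_phase();
    	sched_ctx_hypervisor_start_resize(ctx);	/* dynamic resizing resumes */
    }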
@@ -50,5 +54,6 @@ struct hypervisor_policy {
 	void (*add_sched_ctx)(unsigned sched_ctx);
 	void(*remove_sched_ctx)(unsigned sched_ctx);
 	void* (*ioctl)(unsigned sched_ctx, va_list varg_list, unsigned later);
-	void (*manage_idle_time)(unsigned req_sched_ctx, int *sched_ctxs, unsigned nsched_ctxs, int worker, double idle_time);
+	unsigned (*manage_idle_time)(unsigned req_sched_ctx, int *sched_ctxs, unsigned nsched_ctxs, int worker, double idle_time);
+	void (*update_config)(void* old_config, void* new_config);
 };

+ 32 - 9
sched_ctx_hypervisor/src/hypervisor_policies/simple_policy.c

@@ -155,11 +155,11 @@ int* _get_first_workers(unsigned sched_ctx, int nworkers)
 	return curr_workers;
 }
 
-static unsigned _get_potential_nworkers(struct simple_policy_config *config, unsigned sched_ctx)
+static int _get_potential_nworkers(struct simple_policy_config *config, unsigned sched_ctx)
 {
 	struct worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx);
 
-	unsigned potential_workers = 0;
+	int potential_workers = 0;
 	int worker;
 
 	if(workers->init_cursor)
@@ -176,11 +176,11 @@ static unsigned _get_potential_nworkers(struct simple_policy_config *config, uns
 	return potential_workers;
 }
 
-static void simple_manage_idle_time(unsigned req_sched_ctx, int *sched_ctxs, int nsched_ctxs, int worker, double idle_time)
+static unsigned simple_manage_idle_time(unsigned req_sched_ctx, int *sched_ctxs, int nsched_ctxs, int worker, double idle_time)
 {
        	struct simple_policy_config *config = (struct simple_policy_config*)sched_ctx_hypervisor_get_config(req_sched_ctx);
 
-	if(config && idle_time > config->max_idle[worker])
+	if(config != NULL && idle_time > config->max_idle[worker])
 	{
 		int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
 		if(ret != EBUSY)
@@ -190,7 +190,7 @@ static void simple_manage_idle_time(unsigned req_sched_ctx, int *sched_ctxs, int
 			unsigned nworkers_to_move = 0;
 			
 			/* leave at least one */
-			int potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx) - 1;
+			int potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx);
 			if(potential_moving_workers > 0)
 			{
 				if(potential_moving_workers > config->granularity)
@@ -204,9 +204,10 @@ static void simple_manage_idle_time(unsigned req_sched_ctx, int *sched_ctxs, int
 					if(nfixed_workers >= config->min_nprocs)
 						nworkers_to_move = potential_moving_workers;
 					else
-						nworkers_to_move = potential_moving_workers - (config->min_nprocs - nfixed_workers);			
+						nworkers_to_move = potential_moving_workers - (config->min_nprocs - nfixed_workers);	
 				}
 			}
+
 			if(nworkers_to_move > 0)
 			{
 				unsigned prio_sched_ctx = _get_highest_priority_sched_ctx(req_sched_ctx, sched_ctxs, nsched_ctxs);
@@ -218,14 +219,16 @@ static void simple_manage_idle_time(unsigned req_sched_ctx, int *sched_ctxs, int
 					struct simple_policy_config *prio_config = (struct simple_policy_config*)sched_ctx_hypervisor_get_config(prio_sched_ctx);
 					int i;
 					for(i = 0; i < nworkers_to_move; i++)
-						prio_config->max_idle[workers_to_move[i]] = prio_config->new_workers_max_idle;
+						prio_config->max_idle[workers_to_move[i]] = prio_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? prio_config->max_idle[workers_to_move[i]] :  prio_config->new_workers_max_idle;
 					
 					free(workers_to_move);
 				}
 			}	
 			pthread_mutex_unlock(&act_hypervisor_mutex);
+			return 0;
 		}
 	}
+	return 1;
 }
 
 static void* simple_ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
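
Note: simple_manage_idle_time() now reports failure back to the hypervisor core. Judging from idle_time_cb() in sched_ctx_hypervisor.c below, the contract appears to be: return 0 when workers were actually moved, 1 when the resize could not happen (no config, trylock busy, or nothing to move); the caller accumulates the 1s per worker. A self-contained sketch of that contract (the stub stands in for the policy callback; names are illustrative):

    #include <stdio.h>

    static int unsucceded_resizes[12];	/* per-worker failure count */

    /* stand-in for simple_manage_idle_time(): pretend the trylock was busy */
    static unsigned manage_idle_time_stub(int worker, double idle_time)
    {
    	(void)worker; (void)idle_time;
    	return 1;	/* 1 == no resize happened */
    }

    int main(void)
    {
    	int worker = 3, round;
    	for (round = 0; round < 7; round++) {
    		unsucceded_resizes[worker] += manage_idle_time_stub(worker, 1.0);
    		if (unsucceded_resizes[worker] > 5)
    			unsucceded_resizes[worker] = 0;	/* reset, as in idle_time_cb() */
    	}
    	printf("failures pending for worker %d: %d\n", worker, unsucceded_resizes[worker]);
    	return 0;
    }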
@@ -243,7 +246,6 @@ static void* simple_ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
 	int i;
 	int *workerids;
 	int nworkers;
-	int it = 0;
 
 	while ((arg_type = va_arg(varg_list, int)) != 0) 
 	{
@@ -319,6 +321,26 @@ static void* simple_ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
 	return later ? (void*)config : NULL;
 }
 
+static void simple_update_config(void *old_config, void* config)
+{
+	struct simple_policy_config *old = (struct simple_policy_config*)old_config;
+	struct simple_policy_config *new = (struct simple_policy_config*)config;
+
+	old->min_nprocs = new->min_nprocs != 0 ? new->min_nprocs : old->min_nprocs ;
+	old->max_nprocs = new->max_nprocs != 0 ? new->max_nprocs : old->max_nprocs ;
+	old->new_workers_max_idle = new->new_workers_max_idle != MAX_IDLE_TIME ? new->new_workers_max_idle : old->new_workers_max_idle;
+	old->granularity = new->min_nprocs != 1 ? new->granularity : old->granularity;
+
+	int i;
+	for(i = 0; i < STARPU_NMAXWORKERS; i++)
+	{
+		old->priority[i] = new->priority[i] != 0 ? new->priority[i] : old->priority[i];
+		old->fixed_procs[i] = new->fixed_procs[i] != 0 ? new->fixed_procs[i] : old->fixed_procs[i];
+		old->max_idle[i] = new->max_idle[i] != MAX_IDLE_TIME ? new->max_idle[i] : old->max_idle[i];;
+		old->min_working[i] = new->min_working[i] != MIN_WORKING_TIME ? new->min_working[i] : old->min_working[i];
+	}
+}
+
 static void simple_remove_sched_ctx(unsigned sched_ctx)
 {
 	sched_ctx_hypervisor_set_config(sched_ctx, NULL);
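
Note: simple_update_config() merges rather than replaces: a field still at its default ("unset") sentinel in the incoming config leaves the old value alone, so an ioctl that only sets, say, the granularity does not clobber a previously configured max_idle. (The committed guard for granularity tests min_nprocs rather than granularity itself.) A simplified, self-contained sketch of the merge rule (types and sentinels are illustrative, not the real struct):

    #include <stdio.h>

    #define IDLE_UNSET 0.0	/* illustrative sentinel, like MAX_IDLE_TIME */

    struct conf { int granularity; double max_idle; };

    static void merge(struct conf *old, const struct conf *new_conf)
    {
    	if (new_conf->granularity != 0)		/* 0 == "not given" here */
    		old->granularity = new_conf->granularity;
    	if (new_conf->max_idle != IDLE_UNSET)
    		old->max_idle = new_conf->max_idle;
    }

    int main(void)
    {
    	struct conf old = { 2, 5000.0 };
    	struct conf upd = { 4, IDLE_UNSET };	/* only granularity given */
    	merge(&old, &upd);
    	printf("granularity=%d max_idle=%.1f\n", old.granularity, old.max_idle);
    	return 0;	/* prints granularity=4 max_idle=5000.0 */
    }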
@@ -330,5 +352,6 @@ struct hypervisor_policy simple_policy = {
 	.add_sched_ctx = simple_add_sched_ctx,
 	.remove_sched_ctx = simple_remove_sched_ctx,
 	.ioctl = simple_ioctl,
-	.manage_idle_time = simple_manage_idle_time
+	.manage_idle_time = simple_manage_idle_time,
+	.update_config = simple_update_config
 };

+ 66 - 23
sched_ctx_hypervisor/src/sched_ctx_hypervisor.c

@@ -1,5 +1,6 @@
 #include <sched_ctx_hypervisor_intern.h>
 
+unsigned imposed_resize = 0;
 struct starpu_sched_ctx_hypervisor_criteria* criteria = NULL;
 
 extern struct hypervisor_policy simple_policy;
@@ -8,6 +9,7 @@ static void idle_time_cb(unsigned sched_ctx, int worker, double idle_time);
 static void pushed_task_cb(unsigned sched_ctx, int worker);
 static void poped_task_cb(unsigned sched_ctx, int worker);
 static void post_exec_hook_cb(unsigned sched_ctx, int taskid);
+static void reset_idle_time_cb(unsigned sched_ctx, int  worker);
 
 static void _load_hypervisor_policy(int type)
 {
@@ -20,18 +22,20 @@ static void _load_hypervisor_policy(int type)
 		hypervisor.policy.remove_sched_ctx = simple_policy.remove_sched_ctx;
 		hypervisor.policy.ioctl = simple_policy.ioctl;
 		hypervisor.policy.manage_idle_time = simple_policy.manage_idle_time;
+		hypervisor.policy.update_config = simple_policy.update_config;
 		break;
 	}
 }
 
 struct starpu_sched_ctx_hypervisor_criteria* sched_ctx_hypervisor_init(int type)
 {
+	hypervisor.min_tasks = 0;
 	hypervisor.nsched_ctxs = 0;
-	hypervisor.resize = 0;
 	pthread_mutex_init(&act_hypervisor_mutex, NULL);
 	int i;
 	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
 	{
+		hypervisor.resize[i] = 0;
 		hypervisor.configurations[i] = NULL;
 		hypervisor.sched_ctxs[i] = STARPU_NMAX_SCHED_CTXS;
 		hypervisor.sched_ctx_w[i].sched_ctx = STARPU_NMAX_SCHED_CTXS;
@@ -42,6 +46,7 @@ struct starpu_sched_ctx_hypervisor_criteria* sched_ctx_hypervisor_init(int type)
 			hypervisor.sched_ctx_w[i].current_idle_time[j] = 0.0;
 			hypervisor.sched_ctx_w[i].tasks[j] = 0;
 			hypervisor.sched_ctx_w[i].poped_tasks[j] = 0;
+			hypervisor.sched_ctx_w[i].unsucceded_resizes[j] = 0;
 		}
 	}
 
@@ -52,12 +57,31 @@ struct starpu_sched_ctx_hypervisor_criteria* sched_ctx_hypervisor_init(int type)
 	criteria->pushed_task_cb = pushed_task_cb;
 	criteria->poped_task_cb = poped_task_cb;
 	criteria->post_exec_hook_cb = post_exec_hook_cb;
+	criteria->reset_idle_time_cb = reset_idle_time_cb;
 	return criteria;
 }
 
+void sched_ctx_hypervisor_stop_resize(unsigned sched_ctx)
+{
+	imposed_resize = 1;
+	hypervisor.resize[sched_ctx] = 0;
+}
+
+void sched_ctx_hypervisor_start_resize(unsigned sched_ctx)
+{
+	imposed_resize = 1;
+	hypervisor.resize[sched_ctx] = 1;
+}
+
 void sched_ctx_hypervisor_shutdown(void)
 {
-	hypervisor.resize = 0;
+	int i;
+	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
+	{
+		hypervisor.resize[i] = 0;
+                if(hypervisor.sched_ctxs[i] != STARPU_NMAX_SCHED_CTXS && hypervisor.nsched_ctxs > 0)
+			sched_ctx_hypervisor_ignore_ctx(i);
+	}
 	free(criteria);
 	pthread_mutex_destroy(&act_hypervisor_mutex);
 }
@@ -71,7 +95,6 @@ void sched_ctx_hypervisor_handle_ctx(unsigned sched_ctx)
 	hypervisor.policy.add_sched_ctx(sched_ctx);
 	hypervisor.sched_ctx_w[sched_ctx].sched_ctx = sched_ctx;
 	hypervisor.sched_ctxs[hypervisor.nsched_ctxs++] = sched_ctx;
-
 }
 
 static int _get_first_free_sched_ctx(int *sched_ctxs, unsigned nsched_ctxs)
@@ -108,6 +131,7 @@ static void _rearange_sched_ctxs(int *sched_ctxs, int old_nsched_ctxs)
 
 void sched_ctx_hypervisor_ignore_ctx(unsigned sched_ctx)
 {
+	pthread_mutex_lock(&act_hypervisor_mutex);
         unsigned i;
        for(i = 0; i < hypervisor.nsched_ctxs; i++)
        {
@@ -125,20 +149,16 @@ void sched_ctx_hypervisor_ignore_ctx(unsigned sched_ctx)
 	free(hypervisor.configurations[sched_ctx]);
 	free(hypervisor.advices[sched_ctx]);
 	free(hypervisor.requests[sched_ctx]);
+	pthread_mutex_unlock(&act_hypervisor_mutex);
 }
 
 void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config)
 {
-	pthread_mutex_lock(&act_hypervisor_mutex);
-
-	if(hypervisor.sched_ctx_w[sched_ctx].config != NULL)
-	{
-		free(hypervisor.sched_ctx_w[sched_ctx].config);
-		hypervisor.sched_ctx_w[sched_ctx].config = NULL;
-	}
+	if(hypervisor.sched_ctx_w[sched_ctx].config != NULL && config != NULL)
+		hypervisor.policy.update_config(hypervisor.sched_ctx_w[sched_ctx].config, config);
+	else
+		hypervisor.sched_ctx_w[sched_ctx].config = config;
 
-	hypervisor.sched_ctx_w[sched_ctx].config = config;
-	pthread_mutex_unlock(&act_hypervisor_mutex);
 	return;
 }
 
@@ -176,7 +196,7 @@ void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...)
 	va_end(varg_list);
 	va_start(varg_list, sched_ctx);
 
-	/* hypervisor configuration to be considered later */
+	/* if config not null => save hypervisor configuration and consider it later */
 	void *config = hypervisor.policy.ioctl(sched_ctx, varg_list, (task_tag > 0));
 	if(config != NULL)
 		_starpu_htbl_insert_32(&hypervisor.configurations[sched_ctx], (uint32_t)task_tag, config);
@@ -186,11 +206,11 @@ void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...)
 
 static void _sched_ctx_hypervisor_resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, int* workers_to_move, unsigned nworkers_to_move)
 {
-	int i;
-	printf("resize ctx %d with", sender_sched_ctx);
-	for(i = 0; i < nworkers_to_move; i++)
-		printf(" %d", workers_to_move[i]);
-	printf("\n");
+	/* int i; */
+	/* printf("resize ctx %d with", sender_sched_ctx); */
+	/* for(i = 0; i < nworkers_to_move; i++) */
+	/* 	printf(" %d", workers_to_move[i]); */
+	/* printf("\n"); */
 
 	starpu_remove_workers_from_sched_ctx(workers_to_move, nworkers_to_move, sender_sched_ctx);
 	starpu_add_workers_to_sched_ctx(workers_to_move, nworkers_to_move, receiver_sched_ctx);
@@ -218,13 +238,16 @@ static unsigned check_tasks_of_sched_ctx(unsigned sched_ctx)
 
 void sched_ctx_hypervisor_resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, int* workers_to_move, unsigned nworkers_to_move)
 {
-	if(hypervisor.resize)
+	if(hypervisor.resize[sender_sched_ctx])
 	{
 		_sched_ctx_hypervisor_resize(sender_sched_ctx, receiver_sched_ctx, workers_to_move, nworkers_to_move);
 
 		int i;
 		for(i = 0; i < nworkers_to_move; i++)
+		{
 			hypervisor.sched_ctx_w[sender_sched_ctx].current_idle_time[workers_to_move[i]] = 0.0;
+			hypervisor.sched_ctx_w[sender_sched_ctx].unsucceded_resizes[workers_to_move[i]] = 0;
+		}
 	}
 
 	return;
@@ -305,6 +328,10 @@ void sched_ctx_hypervisor_request(unsigned sched_ctx, int *workerids, int nworke
 			get_overage_workers(sched_ctx, workerids, nworkers, overage_workers, &noverage_workers);
 			starpu_add_workers_to_sched_ctx(workerids, nworkers, sched_ctx);
 
+			sched_ctx_hypervisor_ioctl(sched_ctx, 
+						   HYPERVISOR_PRIORITY, workerids, nworkers, 1,
+						   NULL);		
+
 			if(noverage_workers > 0)
 				starpu_remove_workers_from_sched_ctx(overage_workers, noverage_workers, sched_ctx);
 			
@@ -330,12 +357,24 @@ void sched_ctx_hypervisor_request(unsigned sched_ctx, int *workerids, int nworke
 	return ;
 }
 
+static void reset_idle_time_cb(unsigned sched_ctx, int worker)
+{
+	if(hypervisor.resize[sched_ctx])
+		hypervisor.sched_ctx_w[sched_ctx].current_idle_time[worker] = 0.0;
+}
+
 static void idle_time_cb(unsigned sched_ctx, int worker, double idle_time)
 {
-	if(hypervisor.resize && hypervisor.nsched_ctxs > 1 && hypervisor.policy.manage_idle_time)
+	if(hypervisor.resize[sched_ctx] && hypervisor.nsched_ctxs > 1 && hypervisor.policy.manage_idle_time)
 	{
 		hypervisor.sched_ctx_w[sched_ctx].current_idle_time[worker] += idle_time;
-		hypervisor.policy.manage_idle_time(sched_ctx, hypervisor.sched_ctxs, hypervisor.nsched_ctxs, worker, hypervisor.sched_ctx_w[sched_ctx].current_idle_time[worker]);
+		hypervisor.sched_ctx_w[sched_ctx].unsucceded_resizes[worker] += hypervisor.policy.manage_idle_time(sched_ctx, hypervisor.sched_ctxs, hypervisor.nsched_ctxs, worker, hypervisor.sched_ctx_w[sched_ctx].current_idle_time[worker]);
+		if(hypervisor.sched_ctx_w[sched_ctx].unsucceded_resizes[worker] > 5)
+		{
+//			hypervisor.sched_ctx_w[sched_ctx].current_idle_time[worker] -= idle_time;
+			hypervisor.sched_ctx_w[sched_ctx].unsucceded_resizes[worker] = 0;
+//			printf("%d: reseted idle time\n", worker);
+		}
 	}
 	return;
 }
@@ -352,7 +391,8 @@ static void pushed_task_cb(unsigned sched_ctx, int worker)
        
 	int ntasks = get_ntasks(hypervisor.sched_ctx_w[sched_ctx].tasks);
 	
-	hypervisor.resize = (ntasks > hypervisor.min_tasks);
+	if(!imposed_resize)
+		hypervisor.resize[sched_ctx] = (ntasks > hypervisor.min_tasks);
 }
 
 static void poped_task_cb(unsigned sched_ctx, int worker)
@@ -370,8 +410,11 @@ static void post_exec_hook_cb(unsigned sched_ctx, int task_tag)
 	if(hypervisor.nsched_ctxs > 1)
 	{
 		void *config = _starpu_htbl_search_32(hypervisor.configurations[sched_ctx], (uint32_t)task_tag);
-		if(config != NULL)	
+		if(config)
+		{	
 			sched_ctx_hypervisor_set_config(sched_ctx, config);
+			free(config);
+		}
 		
 		struct sched_ctx_hypervisor_adjustment *adjustment = (struct sched_ctx_hypervisor_adjustment*) _starpu_htbl_search_32(hypervisor.advices[sched_ctx], (uint32_t)task_tag);
 		if(adjustment)

+ 2 - 1
sched_ctx_hypervisor/src/sched_ctx_hypervisor_intern.h

@@ -6,13 +6,14 @@ struct sched_ctx_wrapper {
 	double current_idle_time[STARPU_NMAXWORKERS];
 	int tasks[STARPU_NMAXWORKERS];
 	int poped_tasks[STARPU_NMAXWORKERS];
+	int unsucceded_resizes[STARPU_NMAXWORKERS];
 };
 
 struct sched_ctx_hypervisor {
 	struct sched_ctx_wrapper sched_ctx_w[STARPU_NMAX_SCHED_CTXS];
 	int sched_ctxs[STARPU_NMAX_SCHED_CTXS];
 	unsigned nsched_ctxs;
-	unsigned resize;
+	unsigned resize[STARPU_NMAX_SCHED_CTXS];
 	int min_tasks;
 	struct hypervisor_policy policy;
 	struct starpu_htbl32_node_s *configurations[STARPU_NMAX_SCHED_CTXS];

+ 4 - 1
src/core/sched_ctx.c

@@ -521,7 +521,10 @@ void _starpu_increment_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)
 
 pthread_mutex_t *_starpu_get_sched_mutex(struct starpu_sched_ctx *sched_ctx, int workerid)
 {
-	return sched_ctx->sched_mutex[workerid];
+	if(sched_ctx->sched_mutex)
+		return sched_ctx->sched_mutex[workerid];
+	else 
+		return NULL;
 }
 
 pthread_cond_t *_starpu_get_sched_cond(struct starpu_sched_ctx *sched_ctx, int workerid)
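
Note: _starpu_get_sched_mutex() can now return NULL when a context has no per-worker scheduler mutexes, so call sites must check the result, as the pop path in src/core/sched_policy.c below already does ("if(sched_ctx_mutex != NULL)"). A hedged call-site sketch:

    #include <pthread.h>

    struct starpu_sched_ctx;	/* opaque for this sketch */
    extern pthread_mutex_t *_starpu_get_sched_mutex(struct starpu_sched_ctx *sched_ctx, int workerid);

    static void with_sched_mutex(struct starpu_sched_ctx *ctx, int workerid)
    {
    	pthread_mutex_t *m = _starpu_get_sched_mutex(ctx, workerid);
    	if (m == NULL)
    		return;		/* context currently exposes no mutexes */
    	pthread_mutex_lock(m);
    	/* ... scheduler work ... */
    	pthread_mutex_unlock(m);
    }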

+ 26 - 24
src/core/sched_policy.c

@@ -292,26 +292,26 @@ int _starpu_push_task(starpu_job_t j, unsigned job_is_already_locked)
 	int workerid = starpu_worker_get_id();
 	unsigned no_workers = 0;
 	unsigned nworkers; 
-	
+       
 	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
 	nworkers = sched_ctx->workers->nworkers;
 	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
 
-	PTHREAD_MUTEX_LOCK(&sched_ctx->no_workers_mutex);
 	if(nworkers == 0)
 	{
-		no_workers = 1;
 		if(workerid == -1)
+		{
+			PTHREAD_MUTEX_LOCK(&sched_ctx->no_workers_mutex);
 			PTHREAD_COND_WAIT(&sched_ctx->no_workers_cond, &sched_ctx->no_workers_mutex);
-	}
-	PTHREAD_MUTEX_UNLOCK(&sched_ctx->no_workers_mutex);
-
-	if(workerid >= 0 && no_workers)
-	{
-		PTHREAD_MUTEX_LOCK(&sched_ctx->empty_ctx_mutex);
-		starpu_task_list_push_front(&sched_ctx->empty_ctx_tasks, task);
-		PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
-		return 0;
+			PTHREAD_MUTEX_UNLOCK(&sched_ctx->no_workers_mutex);
+		}
+		else
+		{
+			PTHREAD_MUTEX_LOCK(&sched_ctx->empty_ctx_mutex);
+			starpu_task_list_push_front(&sched_ctx->empty_ctx_tasks, task);
+			PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
+			return 0;
+		}
 	}
 
         _STARPU_LOG_IN();
@@ -339,6 +339,11 @@ int _starpu_push_task(starpu_job_t j, unsigned job_is_already_locked)
 		STARPU_ASSERT(sched_ctx->sched_policy->push_task);
 
 		ret = sched_ctx->sched_policy->push_task(task);
+		if(ret == -1)
+		{
+			printf("repush task \n");
+			_starpu_push_task(j, job_is_already_locked);
+		}
 	}
 
 	_starpu_profiling_set_task_push_end_time(task);
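
Note: the -1 path ties in with the heft change below: heft_push_task() now returns -1 when the context momentarily has zero workers, and _starpu_push_task() responds by pushing the task again. A self-contained sketch of that retry shape (a production retry would want a bound or backoff; the commit itself retries unconditionally):

    #include <stdio.h>

    static int attempts = 0;

    /* stand-in for sched_policy->push_task(): fail twice, then succeed */
    static int policy_push(void)
    {
    	return ++attempts < 3 ? -1 : 0;
    }

    static int push_task(void)
    {
    	int ret = policy_push();
    	if (ret == -1)
    		return push_task();	/* repush, as in the commit */
    	return ret;
    }

    int main(void)
    {
    	push_task();
    	printf("pushed after %d attempts\n", attempts);
    	return 0;
    }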
@@ -381,7 +386,7 @@ struct starpu_task *_starpu_pop_task(struct starpu_worker_s *worker)
 				if(sched_ctx_mutex != NULL)
 				{
 					PTHREAD_MUTEX_LOCK(sched_ctx_mutex);
-					if (sched_ctx->sched_policy->pop_task)
+					if (sched_ctx->sched_policy && sched_ctx->sched_policy->pop_task)
 					{
 						task = sched_ctx->sched_policy->pop_task();
 						PTHREAD_MUTEX_UNLOCK(sched_ctx_mutex);
@@ -412,19 +417,16 @@ struct starpu_task *_starpu_pop_task(struct starpu_worker_s *worker)
 	}
 
 #ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
-	/* if task is NULL, the work is idle for this round
-	   therefore we let the sched_ctx_manager know in order 
-	   to decide a possible resize */
-	if(!task)
+	unsigned i;
+	struct starpu_sched_ctx *sched_ctx = NULL;
+	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
 	{
-		unsigned i;
-		struct starpu_sched_ctx *sched_ctx = NULL;
-		for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
-		{
-			sched_ctx = worker->sched_ctx[i];
-			if(sched_ctx != NULL && sched_ctx->id != 0 && sched_ctx->criteria != NULL)
+		sched_ctx = worker->sched_ctx[i];
+		if(sched_ctx != NULL && sched_ctx->id != 0 && sched_ctx->criteria != NULL)
+			if(!task)
 				sched_ctx->criteria->idle_time_cb(sched_ctx->id, worker->workerid, 1.0);
-		}
+			else
+				sched_ctx->criteria->reset_idle_time_cb(sched_ctx->id, worker->workerid);
 	}
 #endif //STARPU_USE_SCHED_CTX_HYPERVISOR
 

+ 17 - 0
src/sched_policies/heft.c

@@ -305,6 +305,7 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 				local_data_penalty[worker_ctx] = starpu_task_expected_data_transfer_time(memory_node, task);
 				local_power[worker_ctx] = starpu_task_expected_power(task, perf_arch, nimpl);
 				//_STARPU_DEBUG("Scheduler heft bundle: task length (%lf) local power (%lf) worker (%u) kernel (%u) \n", local_task_length[worker_ctx],local_power[worker_ctx],worker,nimpl);
+				//			printf("%d: task_len = %lf task_pen = %lf\n", worker, local_task_length[worker_ctx], local_data_penalty[worker_ctx]);
 			}
 			
 			double ntasks_end = ntasks[worker] / starpu_worker_get_relative_speedup(perf_arch);
@@ -478,15 +479,31 @@ static int heft_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;;
 	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+	unsigned nworkers; 
 	int ret_val = -1;
 	if (task->priority > 0)
 	{
 		PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
+		nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+		if(nworkers == 0)
+		{
+			PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
+			return ret_val;
+		}
+			
 		ret_val = _heft_push_task(task, 1, sched_ctx_id);
 		PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		return ret_val;
 	}
+
 	PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
+	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	if(nworkers == 0)
+	{
+		PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
+		return ret_val;
+	}
+
 	ret_val = _heft_push_task(task, 0, sched_ctx_id);
 	PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 	return ret_val;