Browse Source

starpu_sched_ctx_hypervisor_stuff -> sched_ctx_hypervisor_stuff (any ideas for a better name for the hypervisor are welcome)
fix bug in delete_ctx (the workerids table was freed too early)

Andra Hugo 12 years ago
parent
commit
1206404a66

+ 1 - 1
sched_ctx_hypervisor/examples/app_driven_test/app_driven_test.c

@@ -110,7 +110,7 @@ int main()
 	unsigned sched_ctx1 = starpu_sched_ctx_create("heft", ressources1, nres1, "sched_ctx1");
 	unsigned sched_ctx2 = starpu_sched_ctx_create("heft", ressources2, nres2, "sched_ctx2");
 
-	struct starpu_sched_ctx_hypervisor_policy policy;
+	struct sched_ctx_hypervisor_policy policy;
 	policy.custom = 0;
 	policy.name = "app_driven";
 	void *perf_counters = sched_ctx_hypervisor_init(&policy);

+ 1 - 1
sched_ctx_hypervisor/examples/sched_ctx_utils/sched_ctx_utils.c

@@ -238,7 +238,7 @@ void start_2ndbench(void (*bench)(float*, unsigned, unsigned))
 
 void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 {
-	struct starpu_sched_ctx_hypervisor_policy policy;
+	struct sched_ctx_hypervisor_policy policy;
 	policy.custom = 0;
 	policy.name = "idle";
 	struct starpu_performance_counters *perf_counters = sched_ctx_hypervisor_init(&policy);

+ 14 - 14
sched_ctx_hypervisor/include/sched_ctx_hypervisor.h

@@ -14,14 +14,14 @@
  * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  */
 
-#ifndef STARPU_SCHED_CTX_HYPERVISOR_H
-#define STARPU_SCHED_CTX_HYPERVISOR_H
+#ifndef SCHED_CTX_HYPERVISOR_H
+#define SCHED_CTX_HYPERVISOR_H
 
 #include <starpu.h>
 #include <pthread.h>
 
 #ifdef STARPU_DEVEL
-#  warning rename all objects to start with starpu_sched_ctx_hypervisor
+#  warning rename all objects to start with sched_ctx_hypervisor
 #endif
 
 /* ioctl properties*/
@@ -43,7 +43,7 @@ pthread_mutex_t act_hypervisor_mutex;
 #define MAX_IDLE_TIME 5000000000
 #define MIN_WORKING_TIME 500
 
-struct starpu_sched_ctx_hypervisor_policy_config
+struct sched_ctx_hypervisor_policy_config
 {
 	/* underneath this limit we cannot resize */
 	int min_nworkers;
@@ -75,7 +75,7 @@ struct starpu_sched_ctx_hypervisor_policy_config
 	double empty_ctx_max_idle[STARPU_NMAXWORKERS];
 };
 
-struct starpu_sched_ctx_hypervisor_resize_ack
+struct sched_ctx_hypervisor_resize_ack
 {
 	int receiver_sched_ctx;
 	int *moved_workers;
@@ -83,10 +83,10 @@ struct starpu_sched_ctx_hypervisor_resize_ack
 	int *acked_workers;
 };
 
-struct starpu_sched_ctx_hypervisor_wrapper
+struct sched_ctx_hypervisor_wrapper
 {
 	unsigned sched_ctx;
-	struct starpu_sched_ctx_hypervisor_policy_config *config;
+	struct sched_ctx_hypervisor_policy_config *config;
 	double current_idle_time[STARPU_NMAXWORKERS];
 	int worker_to_be_removed[STARPU_NMAXWORKERS];
 	int pushed_tasks[STARPU_NMAXWORKERS];
@@ -97,7 +97,7 @@ struct starpu_sched_ctx_hypervisor_wrapper
 	double submitted_flops;
 	double remaining_flops;
 	double start_time;
-	struct starpu_sched_ctx_hypervisor_resize_ack resize_ack;
+	struct sched_ctx_hypervisor_resize_ack resize_ack;
 	pthread_mutex_t mutex;
 };
 
@@ -105,7 +105,7 @@ struct starpu_sched_ctx_hypervisor_wrapper
  * FIXME: Remove when no longer exposed.  */
 struct resize_request_entry;
 
-struct starpu_sched_ctx_hypervisor_policy
+struct sched_ctx_hypervisor_policy
 {
 	const char* name;
 	unsigned custom;
@@ -120,7 +120,7 @@ struct starpu_sched_ctx_hypervisor_policy
 	void (*handle_submitted_job)(struct starpu_task *task, unsigned footprint);
 };
 
-struct starpu_performance_counters *sched_ctx_hypervisor_init(struct starpu_sched_ctx_hypervisor_policy *policy);
+struct starpu_performance_counters *sched_ctx_hypervisor_init(struct sched_ctx_hypervisor_policy *policy);
 
 void sched_ctx_hypervisor_shutdown(void);
 
@@ -140,7 +140,7 @@ void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...);
 
 void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config);
 
-struct starpu_sched_ctx_hypervisor_policy_config *sched_ctx_hypervisor_get_config(unsigned sched_ctx);
+struct sched_ctx_hypervisor_policy_config *sched_ctx_hypervisor_get_config(unsigned sched_ctx);
 
 int *sched_ctx_hypervisor_get_sched_ctxs();
 
@@ -148,11 +148,11 @@ int sched_ctx_hypervisor_get_nsched_ctxs();
 
 int get_nworkers_ctx(unsigned sched_ctx, enum starpu_archtype arch);
 
-struct starpu_sched_ctx_hypervisor_wrapper *sched_ctx_hypervisor_get_wrapper(unsigned sched_ctx);
+struct sched_ctx_hypervisor_wrapper *sched_ctx_hypervisor_get_wrapper(unsigned sched_ctx);
 
-double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct starpu_sched_ctx_hypervisor_wrapper *sc_w);
+double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct sched_ctx_hypervisor_wrapper *sc_w);
 
-double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct starpu_sched_ctx_hypervisor_wrapper* sc_w);
+double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct sched_ctx_hypervisor_wrapper* sc_w);
 
 const char *sched_ctx_hypervisor_get_policy();
 

+ 1 - 1
sched_ctx_hypervisor/src/hypervisor_policies/app_driven_policy.c

@@ -23,7 +23,7 @@ static void app_driven_handle_post_exec_hook(unsigned sched_ctx, int task_tag)
 	_resize_to_unknown_receiver(sched_ctx, 1);
 }
 
-struct starpu_sched_ctx_hypervisor_policy app_driven_policy =
+struct sched_ctx_hypervisor_policy app_driven_policy =
 {
 	.size_ctxs = NULL,
 	.handle_poped_task = NULL,

+ 11 - 11
sched_ctx_hypervisor/src/hypervisor_policies/gflops_rate_policy.c

@@ -18,7 +18,7 @@
 
 static double _get_total_elapsed_flops_per_sched_ctx(unsigned sched_ctx)
 {
-	struct starpu_sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 	double ret_val = 0.0;
 	int i;
 	for(i = 0; i < STARPU_NMAXWORKERS; i++)
@@ -28,7 +28,7 @@ static double _get_total_elapsed_flops_per_sched_ctx(unsigned sched_ctx)
 
 double _get_exp_end(unsigned sched_ctx)
 {
-	struct starpu_sched_ctx_hypervisor_wrapper *sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct sched_ctx_hypervisor_wrapper *sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 	double elapsed_flops = sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
 
 	if( elapsed_flops >= 1.0)
@@ -44,7 +44,7 @@ double _get_exp_end(unsigned sched_ctx)
 /* computes the instructions left to be executed out of the total instructions to execute */
 double _get_flops_left_pct(unsigned sched_ctx)
 {
-	struct starpu_sched_ctx_hypervisor_wrapper *wrapper = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct sched_ctx_hypervisor_wrapper *wrapper = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 	double total_elapsed_flops = _get_total_elapsed_flops_per_sched_ctx(sched_ctx);
 	if(wrapper->total_flops == total_elapsed_flops || total_elapsed_flops > wrapper->total_flops)
 		return 0.0;
@@ -55,8 +55,8 @@ double _get_flops_left_pct(unsigned sched_ctx)
 /* select the workers needed to be moved in order to force the sender and the receiver context to finish simultaneously */
 static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, int *nworkers)
 {
-	struct starpu_sched_ctx_hypervisor_wrapper* sender_sc_w = sched_ctx_hypervisor_get_wrapper(sender_sched_ctx);
-	struct starpu_sched_ctx_hypervisor_wrapper* receiver_sc_w = sched_ctx_hypervisor_get_wrapper(receiver_sched_ctx);
+	struct sched_ctx_hypervisor_wrapper* sender_sc_w = sched_ctx_hypervisor_get_wrapper(sender_sched_ctx);
+	struct sched_ctx_hypervisor_wrapper* receiver_sc_w = sched_ctx_hypervisor_get_wrapper(receiver_sched_ctx);
         int *workers = NULL;
         double v_receiver = _get_ctx_velocity(receiver_sc_w);
         double receiver_remainig_flops = receiver_sc_w->remaining_flops;
@@ -69,11 +69,11 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
 /*             v_receiver, v_for_rctx, sender_v_cpu, nworkers_needed); */
         if(nworkers_needed > 0)
         {
-                struct starpu_sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
+                struct sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
                 unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
                 unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
                 unsigned sender_nworkers = starpu_sched_ctx_get_nworkers(sender_sched_ctx);
-                struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+                struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                 unsigned nworkers_ctx = starpu_sched_ctx_get_nworkers(receiver_sched_ctx);
 
                 if(nworkers_needed < (potential_moving_cpus + 5 * potential_moving_gpus))
@@ -149,7 +149,7 @@ static unsigned _gflops_rate_resize(unsigned sender_sched_ctx, unsigned receiver
                 {
                         sched_ctx_hypervisor_move_workers(sender_sched_ctx, receiver_sched_ctx, workers_to_move, nworkers_to_move, 0);
 
-                        struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+                        struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                         int i;
                         for(i = 0; i < nworkers_to_move; i++)
                                 new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -256,7 +256,7 @@ static void gflops_rate_resize(unsigned sched_ctx)
 			double slowest_flops_left_pct = _get_flops_left_pct(slowest_sched_ctx);
 			if(slowest_flops_left_pct != 0.0f)
 			{
-				struct starpu_sched_ctx_hypervisor_policy_config* config = sched_ctx_hypervisor_get_config(sched_ctx);
+				struct sched_ctx_hypervisor_policy_config* config = sched_ctx_hypervisor_get_config(sched_ctx);
 				config->min_nworkers = 0;
 				config->max_nworkers = 0;
 				printf("ctx %d finished & gives away the res to %d; slow_left %lf\n", sched_ctx, slowest_sched_ctx, slowest_flops_left_pct);
@@ -280,7 +280,7 @@ static void gflops_rate_resize(unsigned sched_ctx)
 			if(fast_flops_left_pct < 0.8)
 			{
 
-				struct starpu_sched_ctx_hypervisor_wrapper *sc_w = sched_ctx_hypervisor_get_wrapper(slowest_sched_ctx);
+				struct sched_ctx_hypervisor_wrapper *sc_w = sched_ctx_hypervisor_get_wrapper(slowest_sched_ctx);
 				double elapsed_flops = sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
 				if((elapsed_flops/sc_w->total_flops) > 0.1)
 					_gflops_rate_resize(fastest_sched_ctx, slowest_sched_ctx, 0);
@@ -294,7 +294,7 @@ void gflops_rate_handle_poped_task(unsigned sched_ctx, int worker)
 	gflops_rate_resize(sched_ctx);
 }
 
-struct starpu_sched_ctx_hypervisor_policy gflops_rate_policy = {
+struct sched_ctx_hypervisor_policy gflops_rate_policy = {
 	.size_ctxs = NULL,
 	.handle_poped_task = gflops_rate_handle_poped_task,
 	.handle_pushed_task = NULL,

+ 3 - 3
sched_ctx_hypervisor/src/hypervisor_policies/idle_policy.c

@@ -30,8 +30,8 @@ unsigned worker_belong_to_other_sched_ctx(unsigned sched_ctx, int worker)
 
 void idle_handle_idle_cycle(unsigned sched_ctx, int worker)
 {
-	struct starpu_sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
-	struct starpu_sched_ctx_hypervisor_policy_config *config = sc_w->config;
+	struct sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct sched_ctx_hypervisor_policy_config *config = sc_w->config;
 	if(config != NULL &&  sc_w->current_idle_time[worker] > config->max_idle[worker])
 	{
 		if(worker_belong_to_other_sched_ctx(sched_ctx, worker))
@@ -41,7 +41,7 @@ void idle_handle_idle_cycle(unsigned sched_ctx, int worker)
 	}
 }
 
-struct starpu_sched_ctx_hypervisor_policy idle_policy =
+struct sched_ctx_hypervisor_policy idle_policy =
 {
 	.size_ctxs = NULL,
 	.handle_poped_task = NULL,

+ 4 - 4
sched_ctx_hypervisor/src/hypervisor_policies/lp2_policy.c

@@ -179,7 +179,7 @@ static void _redistribute_resources_in_ctxs(int ns, int nw, int nt, double w_in_
 		}
 
 		sched_ctx_hypervisor_add_workers_to_sched_ctx(workers_to_add, nadd, sched_ctxs[s]);
-		struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctxs[s]);
+		struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctxs[s]);
 		int i;
 		for(i = 0; i < nadd; i++)
 			new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ? new_config->max_idle[workers_to_add[i]] :  new_config->new_workers_max_idle;
@@ -246,7 +246,7 @@ static void size_if_required()
 
 	if(has_req)
 	{
-		struct starpu_sched_ctx_hypervisor_wrapper* sc_w = NULL;
+		struct sched_ctx_hypervisor_wrapper* sc_w = NULL;
 		unsigned ready_to_size = 1;
 		int s;
 		pthread_mutex_lock(&act_hypervisor_mutex);
@@ -503,7 +503,7 @@ static double _find_tmax(double t1, double t2)
 
 static void lp2_handle_poped_task(unsigned sched_ctx, int worker)
 {
-	struct starpu_sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 
 	int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
@@ -580,7 +580,7 @@ static void lp2_size_ctxs(int *sched_ctxs, int nsched_ctxs , int *workers, int n
 	sched_ctx_hypervisor_save_size_req(sched_ctxs, nsched_ctxs, workers, nworkers);
 }
 
-struct starpu_sched_ctx_hypervisor_policy lp2_policy = {
+struct sched_ctx_hypervisor_policy lp2_policy = {
 	.size_ctxs = lp2_size_ctxs,
 	.handle_poped_task = lp2_handle_poped_task,
 	.handle_pushed_task = NULL,

+ 1 - 1
sched_ctx_hypervisor/src/hypervisor_policies/lp_policy.c

@@ -86,7 +86,7 @@ static void lp_size_ctxs(int *sched_ctxs, int ns, int *workers, int nworkers)
 }
 
 #ifdef STARPU_HAVE_GLPK_H
-struct starpu_sched_ctx_hypervisor_policy lp_policy = {
+struct sched_ctx_hypervisor_policy lp_policy = {
 	.size_ctxs = lp_size_ctxs,
 	.handle_poped_task = lp_handle_poped_task,
 	.handle_pushed_task = NULL,

+ 2 - 2
sched_ctx_hypervisor/src/hypervisor_policies/lp_tools.c

@@ -182,7 +182,7 @@ double _lp_get_nworkers_per_ctx(int nsched_ctxs, int ntypes_of_workers, double r
 	double flops[nsched_ctxs];
 #endif
 	int i = 0;
-	struct starpu_sched_ctx_hypervisor_wrapper* sc_w;
+	struct sched_ctx_hypervisor_wrapper* sc_w;
 	for(i = 0; i < nsched_ctxs; i++)
 	{
 		sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctxs[i]);
@@ -392,7 +392,7 @@ void _lp_distribute_resources_in_ctxs(int* sched_ctxs, int ns, int nw, int res_r
 				{
 					sched_ctx_hypervisor_add_workers_to_sched_ctx(workers_to_add, nworkers_to_add, current_sched_ctxs[s]);
 					sched_ctx_hypervisor_start_resize(current_sched_ctxs[s]);
-					struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(current_sched_ctxs[s]);
+					struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(current_sched_ctxs[s]);
 					int i;
 					for(i = 0; i < nworkers_to_add; i++)
 						new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ? new_config->max_idle[workers_to_add[i]] :  new_config->new_workers_max_idle;

+ 13 - 13
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.c

@@ -21,7 +21,7 @@
 
 static int _compute_priority(unsigned sched_ctx)
 {
-	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
+	struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
 
 	int total_priority = 0;
 
@@ -52,7 +52,7 @@ unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move)
 	int nsched_ctxs = sched_ctx_hypervisor_get_nsched_ctxs();
 
 
-	struct starpu_sched_ctx_hypervisor_policy_config *config = NULL;
+	struct sched_ctx_hypervisor_policy_config *config = NULL;
 
 	for(i = 0; i < nsched_ctxs; i++)
 	{
@@ -100,8 +100,8 @@ int* _get_first_workers_in_list(int *workers, int nall_workers,  unsigned *nwork
 /* get first nworkers with the highest idle time in the context */
 int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype arch)
 {
-	struct starpu_sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
-	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
+	struct sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
 
 	int *curr_workers = (int*)malloc((*nworkers) * sizeof(int));
 	int i;
@@ -174,7 +174,7 @@ int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype
 }
 
 /* get the number of workers in the context that are allowed to be moved (that are not fixed) */
-unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
+unsigned _get_potential_nworkers(struct sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
 {
 	struct starpu_sched_ctx_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx);
 
@@ -203,7 +203,7 @@ unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_policy_confi
    - on the resource granularity imposed by the user for the resizing process*/
 int _get_nworkers_to_move(unsigned req_sched_ctx)
 {
-       	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
+       	struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
 	unsigned nworkers = starpu_sched_ctx_get_nworkers(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
 
@@ -265,7 +265,7 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 			else
 			{
 				poor_sched_ctx = receiver_sched_ctx;
-				struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
+				struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				unsigned nworkers = starpu_sched_ctx_get_nworkers(poor_sched_ctx);
 				unsigned nshared_workers = starpu_sched_ctx_get_nshared_workers(sender_sched_ctx, poor_sched_ctx);
 				if((nworkers+nworkers_to_move-nshared_workers) > config->max_nworkers)
@@ -277,7 +277,7 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 				int *workers_to_move = _get_first_workers(sender_sched_ctx, &nworkers_to_move, STARPU_ANY_WORKER);
 				sched_ctx_hypervisor_move_workers(sender_sched_ctx, poor_sched_ctx, workers_to_move, nworkers_to_move, now);
 
-				struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
+				struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				int i;
 				for(i = 0; i < nworkers_to_move; i++)
 					new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -298,7 +298,7 @@ unsigned _resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now)
 	return _resize(sender_sched_ctx, STARPU_NMAX_SCHED_CTXS, 0, now);
 }
 
-static double _get_elapsed_flops(struct starpu_sched_ctx_hypervisor_wrapper* sc_w, int *npus, enum starpu_archtype req_arch)
+static double _get_elapsed_flops(struct sched_ctx_hypervisor_wrapper* sc_w, int *npus, enum starpu_archtype req_arch)
 {
 	double ret_val = 0.0;
 	struct starpu_sched_ctx_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sc_w->sched_ctx);
@@ -322,7 +322,7 @@ static double _get_elapsed_flops(struct starpu_sched_ctx_hypervisor_wrapper* sc_
 	return ret_val;
 }
 
-double _get_ctx_velocity(struct starpu_sched_ctx_hypervisor_wrapper* sc_w)
+double _get_ctx_velocity(struct sched_ctx_hypervisor_wrapper* sc_w)
 {
         double elapsed_flops = sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
 	double total_elapsed_flops = sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(sc_w);
@@ -339,7 +339,7 @@ double _get_ctx_velocity(struct starpu_sched_ctx_hypervisor_wrapper* sc_w)
 }
 
 /* compute an average value of the cpu velocity */
-double _get_velocity_per_worker_type(struct starpu_sched_ctx_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
+double _get_velocity_per_worker_type(struct sched_ctx_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
 {
         int npus = 0;
         double elapsed_flops = _get_elapsed_flops(sc_w, &npus, arch);
@@ -361,8 +361,8 @@ int _velocity_gap_btw_ctxs()
 	int *sched_ctxs = sched_ctx_hypervisor_get_sched_ctxs();
 	int nsched_ctxs = sched_ctx_hypervisor_get_nsched_ctxs();
 	int i = 0, j = 0;
-	struct starpu_sched_ctx_hypervisor_wrapper* sc_w;
-	struct starpu_sched_ctx_hypervisor_wrapper* other_sc_w;
+	struct sched_ctx_hypervisor_wrapper* sc_w;
+	struct sched_ctx_hypervisor_wrapper* other_sc_w;
 
 	for(i = 0; i < nsched_ctxs; i++)
 	{

+ 3 - 3
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.h

@@ -40,7 +40,7 @@ int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype
 
 int* _get_first_workers_in_list(int *workers, int nall_workers,  unsigned *nworkers, enum starpu_archtype arch);
 
-unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch);
+unsigned _get_potential_nworkers(struct sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch);
 
 int _get_nworkers_to_move(unsigned req_sched_ctx);
 
@@ -48,9 +48,9 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 
 unsigned _resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now);
 
-double _get_ctx_velocity(struct starpu_sched_ctx_hypervisor_wrapper* sc_w);
+double _get_ctx_velocity(struct sched_ctx_hypervisor_wrapper* sc_w);
 
-double _get_velocity_per_worker_type(struct starpu_sched_ctx_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
+double _get_velocity_per_worker_type(struct sched_ctx_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
 
 int _velocity_gap_btw_ctxs(void);
 

+ 15 - 15
sched_ctx_hypervisor/src/hypervisor_policies/simple_policy.c

@@ -19,7 +19,7 @@
 
 static int _compute_priority(unsigned sched_ctx)
 {
-	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
+	struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
 
 	int total_priority = 0;
 
@@ -48,7 +48,7 @@ static unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_mov
 	int nsched_ctxs = sched_ctx_hypervisor_get_nsched_ctxs();
 
 
-	struct starpu_sched_ctx_hypervisor_policy_config *config = NULL;
+	struct sched_ctx_hypervisor_policy_config *config = NULL;
 
 	for(i = 0; i < nsched_ctxs; i++)
 	{
@@ -73,7 +73,7 @@ static unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_mov
 
 int* _get_first_workers(unsigned sched_ctx, unsigned *nworkers, enum starpu_archtype arch)
 {
-	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
+	struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
 
 	int *curr_workers = (int*)malloc((*nworkers) * sizeof(int));
 	int i;
@@ -144,7 +144,7 @@ int* _get_first_workers(unsigned sched_ctx, unsigned *nworkers, enum starpu_arch
 	return curr_workers;
 }
 
-static unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
+static unsigned _get_potential_nworkers(struct sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
 {
 	struct starpu_sched_ctx_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx);
 
@@ -170,7 +170,7 @@ static unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_polic
 
 static unsigned _get_nworkers_to_move(unsigned req_sched_ctx)
 {
-       	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
+       	struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
 	unsigned nworkers = starpu_sched_ctx_get_nworkers(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
 
@@ -231,7 +231,7 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 			else
 			{
 				poor_sched_ctx = receiver_sched_ctx;
-				struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
+				struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				unsigned nworkers = starpu_sched_ctx_get_nworkers(poor_sched_ctx);
 				unsigned nshared_workers = starpu_sched_ctx_get_nshared_workers(sender_sched_ctx, poor_sched_ctx);
 				if((nworkers+nworkers_to_move-nshared_workers) > config->max_nworkers)
@@ -245,7 +245,7 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 				int *workers_to_move = _get_first_workers(sender_sched_ctx, &nworkers_to_move, 0);
 				sched_ctx_hypervisor_move_workers(sender_sched_ctx, poor_sched_ctx, workers_to_move, nworkers_to_move);
 
-				struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
+				struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				int i;
 				for(i = 0; i < nworkers_to_move; i++)
 					new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -277,11 +277,11 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
 /*             v_receiver, v_for_rctx, sender_v_cpu, nworkers_needed); */
         if(nworkers_needed > 0)
         {
-                struct starpu_sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
+                struct sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
                 unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
                 unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
                 unsigned sender_nworkers = starpu_sched_ctx_get_nworkers(sender_sched_ctx);
-                struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+                struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                 unsigned nworkers_ctx = starpu_sched_ctx_get_nworkers(receiver_sched_ctx);
 
                 if(nworkers_needed < (potential_moving_cpus + 5 * potential_moving_gpus))
@@ -348,7 +348,7 @@ static unsigned _simple_resize2(unsigned sender_sched_ctx, unsigned receiver_sch
                 {
                         sched_ctx_hypervisor_move_workers(sender_sched_ctx, receiver_sched_ctx, workers_to_move, nworkers_to_move);
 
-                        struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+                        struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                         int i;
                         for(i = 0; i < nworkers_to_move; i++)
                                 new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -369,7 +369,7 @@ static unsigned simple_resize(unsigned sender_sched_ctx)
 
 static void simple_manage_idle_time(unsigned req_sched_ctx, int worker, double idle_time)
 {
-       	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
+       	struct sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
 
 	if(config != NULL && idle_time > config->max_idle[worker])
 		simple_resize(req_sched_ctx);
@@ -468,7 +468,7 @@ static void simple_manage_gflops_rate(unsigned sched_ctx)
 			printf("ctx %d finished & gives away the res to %d; slow_left %lf\n", sched_ctx, slowest_sched_ctx, slowest_flops_left_pct);
 			if(slowest_flops_left_pct != 0.0f)
 			{
-				struct starpu_sched_ctx_hypervisor_policy_config* config = sched_ctx_hypervisor_get_config(sched_ctx);
+				struct sched_ctx_hypervisor_policy_config* config = sched_ctx_hypervisor_get_config(sched_ctx);
 				config->min_nworkers = 0;
 				config->max_nworkers = 0;
 				_simple_resize(sched_ctx, slowest_sched_ctx, 1);
@@ -498,21 +498,21 @@ static void simple_manage_gflops_rate(unsigned sched_ctx)
 }
 
 
-struct starpu_sched_ctx_hypervisor_policy idle_policy =
+struct sched_ctx_hypervisor_policy idle_policy =
 {
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,
 	.resize = simple_resize,
 };
 
-struct starpu_sched_ctx_hypervisor_policy app_driven_policy =
+struct sched_ctx_hypervisor_policy app_driven_policy =
 {
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,
 	.resize = simple_resize,
 };
 
-struct starpu_sched_ctx_hypervisor_policy gflops_rate_policy =
+struct sched_ctx_hypervisor_policy gflops_rate_policy =
 {
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,

+ 8 - 8
sched_ctx_hypervisor/src/sched_ctx_config.c

@@ -16,9 +16,9 @@
 
 #include <sched_ctx_hypervisor_intern.h>
 
-static struct starpu_sched_ctx_hypervisor_policy_config* _create_config(void)
+static struct sched_ctx_hypervisor_policy_config* _create_config(void)
 {
-	struct starpu_sched_ctx_hypervisor_policy_config *config = (struct starpu_sched_ctx_hypervisor_policy_config *)malloc(sizeof(struct starpu_sched_ctx_hypervisor_policy_config));
+	struct sched_ctx_hypervisor_policy_config *config = (struct sched_ctx_hypervisor_policy_config *)malloc(sizeof(struct sched_ctx_hypervisor_policy_config));
 	config->min_nworkers = -1;
 	config->max_nworkers = -1;
 	config->new_workers_max_idle = -1.0;
@@ -37,7 +37,7 @@ static struct starpu_sched_ctx_hypervisor_policy_config* _create_config(void)
 	return config;
 }
 
-static void _update_config(struct starpu_sched_ctx_hypervisor_policy_config *old, struct starpu_sched_ctx_hypervisor_policy_config* new)
+static void _update_config(struct sched_ctx_hypervisor_policy_config *old, struct sched_ctx_hypervisor_policy_config* new)
 {
 	old->min_nworkers = new->min_nworkers != -1 ? new->min_nworkers : old->min_nworkers ;
 	old->max_nworkers = new->max_nworkers != -1 ? new->max_nworkers : old->max_nworkers ;
@@ -69,7 +69,7 @@ void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config)
 
 void _add_config(unsigned sched_ctx)
 {
-	struct starpu_sched_ctx_hypervisor_policy_config *config = _create_config();
+	struct sched_ctx_hypervisor_policy_config *config = _create_config();
 	config->min_nworkers = 0;
 	config->max_nworkers = STARPU_NMAXWORKERS;
 	config->new_workers_max_idle = MAX_IDLE_TIME;
@@ -93,14 +93,14 @@ void _remove_config(unsigned sched_ctx)
 	sched_ctx_hypervisor_set_config(sched_ctx, NULL);
 }
 
-struct starpu_sched_ctx_hypervisor_policy_config* sched_ctx_hypervisor_get_config(unsigned sched_ctx)
+struct sched_ctx_hypervisor_policy_config* sched_ctx_hypervisor_get_config(unsigned sched_ctx)
 {
 	return hypervisor.sched_ctx_w[sched_ctx].config;
 }
 
-static struct starpu_sched_ctx_hypervisor_policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
+static struct sched_ctx_hypervisor_policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
 {
-	struct starpu_sched_ctx_hypervisor_policy_config *config = NULL;
+	struct sched_ctx_hypervisor_policy_config *config = NULL;
 
 	if(later)
 		config = _create_config();
@@ -229,7 +229,7 @@ void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...)
 	va_start(varg_list, sched_ctx);
 
 	/* if config not null => save hypervisor configuration and consider it later */
-	struct starpu_sched_ctx_hypervisor_policy_config *config = _ioctl(sched_ctx, varg_list, (task_tag > 0));
+	struct sched_ctx_hypervisor_policy_config *config = _ioctl(sched_ctx, varg_list, (task_tag > 0));
 	if(config != NULL)
 	{
 		struct configuration_entry *entry;

+ 24 - 24
sched_ctx_hypervisor/src/sched_ctx_hypervisor.c

@@ -28,16 +28,16 @@ static void notify_post_exec_hook(unsigned sched_ctx, int taskid);
 static void notify_idle_end(unsigned sched_ctx, int  worker);
 static void notify_submitted_job(struct starpu_task *task, unsigned footprint);
 
-extern struct starpu_sched_ctx_hypervisor_policy idle_policy;
-extern struct starpu_sched_ctx_hypervisor_policy app_driven_policy;
-extern struct starpu_sched_ctx_hypervisor_policy gflops_rate_policy;
+extern struct sched_ctx_hypervisor_policy idle_policy;
+extern struct sched_ctx_hypervisor_policy app_driven_policy;
+extern struct sched_ctx_hypervisor_policy gflops_rate_policy;
 #ifdef STARPU_HAVE_GLPK_H
-extern struct starpu_sched_ctx_hypervisor_policy lp_policy;
-extern struct starpu_sched_ctx_hypervisor_policy lp2_policy;
+extern struct sched_ctx_hypervisor_policy lp_policy;
+extern struct sched_ctx_hypervisor_policy lp2_policy;
 #endif // STARPU_HAVE_GLPK_H
 
 
-static struct starpu_sched_ctx_hypervisor_policy *predefined_policies[] =
+static struct sched_ctx_hypervisor_policy *predefined_policies[] =
 {
         &idle_policy,
 	&app_driven_policy,
@@ -48,7 +48,7 @@ static struct starpu_sched_ctx_hypervisor_policy *predefined_policies[] =
 	&gflops_rate_policy
 };
 
-static void _load_hypervisor_policy(struct starpu_sched_ctx_hypervisor_policy *policy)
+static void _load_hypervisor_policy(struct sched_ctx_hypervisor_policy *policy)
 {
 	STARPU_ASSERT(policy);
 
@@ -63,7 +63,7 @@ static void _load_hypervisor_policy(struct starpu_sched_ctx_hypervisor_policy *p
 }
 
 
-static struct starpu_sched_ctx_hypervisor_policy *_find_hypervisor_policy_from_name(const char *policy_name)
+static struct sched_ctx_hypervisor_policy *_find_hypervisor_policy_from_name(const char *policy_name)
 {
 
 	if (!policy_name)
@@ -72,7 +72,7 @@ static struct starpu_sched_ctx_hypervisor_policy *_find_hypervisor_policy_from_n
 	unsigned i;
 	for (i = 0; i < sizeof(predefined_policies)/sizeof(predefined_policies[0]); i++)
 	{
-		struct starpu_sched_ctx_hypervisor_policy *p;
+		struct sched_ctx_hypervisor_policy *p;
 		p = predefined_policies[i];
 		if (p->name)
 		{
@@ -88,9 +88,9 @@ static struct starpu_sched_ctx_hypervisor_policy *_find_hypervisor_policy_from_n
 	return NULL;
 }
 
-static struct starpu_sched_ctx_hypervisor_policy *_select_hypervisor_policy(struct starpu_sched_ctx_hypervisor_policy* hypervisor_policy)
+static struct sched_ctx_hypervisor_policy *_select_hypervisor_policy(struct sched_ctx_hypervisor_policy* hypervisor_policy)
 {
-	struct starpu_sched_ctx_hypervisor_policy *selected_policy = NULL;
+	struct sched_ctx_hypervisor_policy *selected_policy = NULL;
 
 	if(hypervisor_policy && hypervisor_policy->custom)
 		return hypervisor_policy;
@@ -120,7 +120,7 @@ static struct starpu_sched_ctx_hypervisor_policy *_select_hypervisor_policy(stru
 
 
 /* initializes the performance counters that starpu will use to retrieve hints for resizing */
-struct starpu_performance_counters* sched_ctx_hypervisor_init(struct starpu_sched_ctx_hypervisor_policy *hypervisor_policy)
+struct starpu_performance_counters* sched_ctx_hypervisor_init(struct sched_ctx_hypervisor_policy *hypervisor_policy)
 {
 	hypervisor.min_tasks = 0;
 	hypervisor.nsched_ctxs = 0;
@@ -157,7 +157,7 @@ struct starpu_performance_counters* sched_ctx_hypervisor_init(struct starpu_sche
 		}
 	}
 
-	struct starpu_sched_ctx_hypervisor_policy *selected_hypervisor_policy = _select_hypervisor_policy(hypervisor_policy);
+	struct sched_ctx_hypervisor_policy *selected_hypervisor_policy = _select_hypervisor_policy(hypervisor_policy);
 	_load_hypervisor_policy(selected_hypervisor_policy);
 
 	perf_counters = (struct starpu_performance_counters*)malloc(sizeof(struct starpu_performance_counters));
@@ -400,7 +400,7 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 				pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 			}
 		}
-		struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+		struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
 		int i;
 		for(i = 0; i < nworkers_to_move; i++)
 			new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -419,7 +419,7 @@ void sched_ctx_hypervisor_add_workers_to_sched_ctx(int* workers_to_add, unsigned
 /* 			printf(" %d", workers_to_add[j]); */
 /* 		printf("\n"); */
 		starpu_sched_ctx_add_workers(workers_to_add, nworkers_to_add, sched_ctx);
-		struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctx);
+		struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctx);
 		int i;
 		for(i = 0; i < nworkers_to_add; i++)
 			new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ? new_config->max_idle[workers_to_add[i]] :  new_config->new_workers_max_idle;
@@ -489,7 +489,7 @@ static void _set_elapsed_flops_per_sched_ctx(unsigned sched_ctx, double val)
 		hypervisor.sched_ctx_w[sched_ctx].elapsed_flops[i] = val;
 }
 
-double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct starpu_sched_ctx_hypervisor_wrapper* sc_w)
+double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct sched_ctx_hypervisor_wrapper* sc_w)
 {
 	double ret_val = 0.0;
 	int i;
@@ -498,7 +498,7 @@ double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct starpu_sched_
 	return ret_val;
 }
 
-double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct starpu_sched_ctx_hypervisor_wrapper* sc_w)
+double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct sched_ctx_hypervisor_wrapper* sc_w)
 {
 	double ret_val = 0.0;
 	int i;
@@ -512,7 +512,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 	if(worker != -1 && !starpu_sched_ctx_contains_worker(worker, sched_ctx))
 		return 0;
 
-	struct starpu_sched_ctx_hypervisor_resize_ack *resize_ack = NULL;
+	struct sched_ctx_hypervisor_resize_ack *resize_ack = NULL;
 	unsigned sender_sched_ctx = STARPU_NMAX_SCHED_CTXS;
 
 	int i;
@@ -520,7 +520,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 	{
 		if(hypervisor.sched_ctxs[i] != STARPU_NMAX_SCHED_CTXS)
 		{
-			struct starpu_sched_ctx_hypervisor_wrapper *sc_w = &hypervisor.sched_ctx_w[hypervisor.sched_ctxs[i]];
+			struct sched_ctx_hypervisor_wrapper *sc_w = &hypervisor.sched_ctx_w[hypervisor.sched_ctxs[i]];
 			pthread_mutex_lock(&sc_w->mutex);
 			unsigned only_remove = 0;
 			if(sc_w->resize_ack.receiver_sched_ctx == -1 && hypervisor.sched_ctxs[i] != sched_ctx &&
@@ -592,8 +592,8 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 				starpu_sched_ctx_remove_workers(moved_workers, nmoved_workers, sender_sched_ctx);
 
 				/* info concerning only the gflops_rate strategy */
-				struct starpu_sched_ctx_hypervisor_wrapper *sender_sc_w = &hypervisor.sched_ctx_w[sender_sched_ctx];
-				struct starpu_sched_ctx_hypervisor_wrapper *receiver_sc_w = &hypervisor.sched_ctx_w[receiver_sched_ctx];
+				struct sched_ctx_hypervisor_wrapper *sender_sc_w = &hypervisor.sched_ctx_w[sender_sched_ctx];
+				struct sched_ctx_hypervisor_wrapper *receiver_sc_w = &hypervisor.sched_ctx_w[receiver_sched_ctx];
 
 				double start_time =  starpu_timing_now();
 				sender_sc_w->start_time = start_time;
@@ -656,7 +656,7 @@ static void notify_idle_cycle(unsigned sched_ctx, int worker, double idle_time)
 {
 	if(hypervisor.resize[sched_ctx])
 	{
-		struct starpu_sched_ctx_hypervisor_wrapper *sc_w = &hypervisor.sched_ctx_w[sched_ctx];
+		struct sched_ctx_hypervisor_wrapper *sc_w = &hypervisor.sched_ctx_w[sched_ctx];
 		sc_w->current_idle_time[worker] += idle_time;
 		if(hypervisor.policy.handle_idle_cycle)
 		{
@@ -724,7 +724,7 @@ static void notify_post_exec_hook(unsigned sched_ctx, int task_tag)
 
 		if (entry != NULL)
 		{
-			struct starpu_sched_ctx_hypervisor_policy_config *config = entry->configuration;
+			struct sched_ctx_hypervisor_policy_config *config = entry->configuration;
 
 			sched_ctx_hypervisor_set_config(conf_sched_ctx, config);
 			HASH_DEL(hypervisor.configurations[conf_sched_ctx], entry);
@@ -780,7 +780,7 @@ void sched_ctx_hypervisor_size_ctxs(int *sched_ctxs, int nsched_ctxs, int *worke
 		hypervisor.policy.size_ctxs(curr_sched_ctxs, curr_nsched_ctxs, workers, nworkers);
 }
 
-struct starpu_sched_ctx_hypervisor_wrapper* sched_ctx_hypervisor_get_wrapper(unsigned sched_ctx)
+struct sched_ctx_hypervisor_wrapper* sched_ctx_hypervisor_get_wrapper(unsigned sched_ctx)
 {
 	return &hypervisor.sched_ctx_w[sched_ctx];
 }

+ 3 - 3
sched_ctx_hypervisor/src/sched_ctx_hypervisor_intern.h

@@ -46,7 +46,7 @@ struct configuration_entry
 	uint32_t task_tag;
 
 	/* Value: configuration of the scheduling context.  */
-	struct starpu_sched_ctx_hypervisor_policy_config *configuration;
+	struct sched_ctx_hypervisor_policy_config *configuration;
 
 	/* Bookkeeping.  */
 	UT_hash_handle hh;
@@ -54,12 +54,12 @@ struct configuration_entry
 
 struct sched_ctx_hypervisor
 {
-	struct starpu_sched_ctx_hypervisor_wrapper sched_ctx_w[STARPU_NMAX_SCHED_CTXS];
+	struct sched_ctx_hypervisor_wrapper sched_ctx_w[STARPU_NMAX_SCHED_CTXS];
 	int sched_ctxs[STARPU_NMAX_SCHED_CTXS];
 	unsigned nsched_ctxs;
 	unsigned resize[STARPU_NMAX_SCHED_CTXS];
 	int min_tasks;
-	struct starpu_sched_ctx_hypervisor_policy policy;
+	struct sched_ctx_hypervisor_policy policy;
 
 	struct configuration_entry *configurations[STARPU_NMAX_SCHED_CTXS];
 

+ 20 - 18
src/core/sched_ctx.c

@@ -31,7 +31,7 @@ static unsigned _starpu_worker_get_first_free_sched_ctx(struct _starpu_worker *w
 
 static unsigned _starpu_worker_get_sched_ctx_id(struct _starpu_worker *worker, unsigned sched_ctx_id);
 
-static unsigned	_get_workers_list(struct starpu_sched_ctx_worker_collection *workers, int **workerids);
+static void _get_workers_list(struct starpu_sched_ctx_worker_collection *workers, int **workerids);
 
 static void change_worker_sched_ctx(unsigned sched_ctx_id)
 {
@@ -187,12 +187,12 @@ static void _starpu_sched_ctx_free_scheduling_data(struct _starpu_sched_ctx *sch
 	unsigned nworkers_ctx = sched_ctx->workers->nworkers;
 	int *workerids = NULL;
 
-	int is_list =_get_workers_list(sched_ctx->workers, &workerids);
+	_get_workers_list(sched_ctx->workers, &workerids);
 
 	if(nworkers_ctx > 0 && sched_ctx->sched_policy->remove_workers)
 		sched_ctx->sched_policy->remove_workers(sched_ctx->id, workerids, nworkers_ctx);
-	if(!is_list)
-		free(workerids);
+
+	free(workerids);
 	return;
 
 }
@@ -459,12 +459,8 @@ void starpu_sched_ctx_delete(unsigned sched_ctx_id)
 	STARPU_ASSERT(sched_ctx->id != STARPU_NMAX_SCHED_CTXS);
 	unsigned nworkers_ctx = sched_ctx->workers->nworkers;
 	int *workerids;
-	unsigned is_list = _get_workers_list(sched_ctx->workers, &workerids);
-	_starpu_update_workers(workerids, nworkers_ctx, sched_ctx->id);
-
-	if(!is_list)
-		free(workerids);
-
+	_get_workers_list(sched_ctx->workers, &workerids);
+	
 	/* if both contexts hold all the resources, transferring is pointless */
 	/* try to transfer resources from one ctx to the other */
 	struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();
@@ -485,11 +481,17 @@ void starpu_sched_ctx_delete(unsigned sched_ctx_id)
 		_STARPU_PTHREAD_MUTEX_LOCK(&changing_ctx_mutex[sched_ctx_id]);
 		/* if, between the mutex release and the mutex lock, the context has changed,
 		   take care to free all scheduling data before deleting the context */
+		_starpu_update_workers(workerids, nworkers_ctx, sched_ctx_id);
 		_starpu_sched_ctx_free_scheduling_data(sched_ctx);
 		_starpu_delete_sched_ctx(sched_ctx);
 
 		_STARPU_PTHREAD_MUTEX_UNLOCK(&changing_ctx_mutex[sched_ctx_id]);
 	}
+
+	/* workerids is malloc'ed in _get_workers_list; free it here, once it is
+	   no longer needed */
+	free(workerids);
+
 	return;
 }
 
@@ -523,7 +525,7 @@ static void _starpu_check_workers(int *workerids, int nworkers)
 	for(i = 0; i < nworkers; i++)
 	{
 		/* take care the user does not ask for a resource that does not exist */
-		STARPU_ASSERT(workerids[i] >= 0 &&  workerids[i] <= nworkers_conf);
+		STARPU_ASSERT_MSG(workerids[i] >= 0 &&  workerids[i] <= nworkers_conf, "workerid = %d", workerids[i]);
 	}
 }
 
@@ -557,10 +559,11 @@ void starpu_sched_ctx_add_workers(int *workers_to_add, int nworkers_to_add, unsi
 	int added_workers[nworkers_to_add];
 	int n_added_workers = 0;
 
+	_STARPU_PTHREAD_MUTEX_LOCK(&changing_ctx_mutex[sched_ctx_id]);
+
 	STARPU_ASSERT(workers_to_add != NULL && nworkers_to_add > 0);
 	_starpu_check_workers(workers_to_add, nworkers_to_add);
 
-	_STARPU_PTHREAD_MUTEX_LOCK(&changing_ctx_mutex[sched_ctx_id]);
 	/* if the context has not already been deleted */
 	if(sched_ctx->id != STARPU_NMAX_SCHED_CTXS)
 	{
@@ -693,13 +696,12 @@ void _starpu_decrement_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)
 			_STARPU_PTHREAD_MUTEX_UNLOCK(&finished_submit_mutex);
 			unsigned nworkers = sched_ctx->workers->nworkers;
 			int *workerids = NULL;
-			unsigned is_list = _get_workers_list(sched_ctx->workers, &workerids);
+			_get_workers_list(sched_ctx->workers, &workerids);
 
 			starpu_sched_ctx_add_workers(workerids, nworkers, sched_ctx->inheritor);
 			starpu_sched_ctx_remove_workers(workerids, nworkers, sched_ctx_id);
-
-			if(!is_list)
-				free(workerids);
+			
+			free(workerids);
 
 			return;
 		}
@@ -778,7 +780,7 @@ struct starpu_sched_ctx_worker_collection* starpu_sched_ctx_create_worker_collec
 	return sched_ctx->workers;
 }
 
-static unsigned _get_workers_list(struct starpu_sched_ctx_worker_collection *workers, int **workerids)
+static void _get_workers_list(struct starpu_sched_ctx_worker_collection *workers, int **workerids)
 {
 	*workerids = (int*)malloc(workers->nworkers*sizeof(int));
 	int worker;
@@ -793,7 +795,7 @@ static unsigned _get_workers_list(struct starpu_sched_ctx_worker_collection *wor
 		(*workerids)[i++] = worker;
 	}
 
-	return 0;
+	return;
 }
 void starpu_sched_ctx_delete_worker_collection(unsigned sched_ctx_id)
 {