
rename functions to be clearer about what they do

Andra Hugo 12 years ago
parent commit 911f574418

+ 9 - 9
sc_hypervisor/include/sc_hypervisor_policy.h

@@ -28,7 +28,7 @@ extern "C"
 #define HYPERVISOR_REDIM_SAMPLE 0.02
 #define HYPERVISOR_START_REDIM_SAMPLE 0.1
 
-struct bound_task_pool
+struct sc_hypervisor_policy_task_pool
 {
 	/* Which codelet has been executed */
 	struct starpu_codelet *cl;
@@ -39,22 +39,22 @@ struct bound_task_pool
 	/* Number of tasks of this kind */
 	unsigned long n;
 	/* Other task kinds */
-	struct bound_task_pool *next;
+	struct sc_hypervisor_policy_task_pool *next;
 };
 
-unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move);
+unsigned sc_hypervisor_find_lowest_prio_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move);
 
-int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype arch);
+int* sc_hypervisor_get_idlest_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype arch);
 
-int* _get_first_workers_in_list(int *start, int *workers, int nall_workers,  int *nworkers, enum starpu_archtype arch);
+int* sc_hypervisor_get_idlest_workers_in_list(int *start, int *workers, int nall_workers,  int *nworkers, enum starpu_archtype arch);
 
-unsigned _get_potential_nworkers(struct sc_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch);
+unsigned sc_hypervisor_get_movable_nworkers(struct sc_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch);
 
-int _get_nworkers_to_move(unsigned req_sched_ctx);
+int sc_hypervisor_compute_nworkers_to_move(unsigned req_sched_ctx);
 
-unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigned force_resize, unsigned now);
+unsigned sc_hypervisor_policy_resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigned force_resize, unsigned now);
 
-unsigned _resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now);
+unsigned sc_hypervisor_policy_resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now);
 
 double _get_ctx_velocity(struct sc_hypervisor_wrapper* sc_w);
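
For context, a minimal sketch (not part of this commit) of how a resizing policy might call the helpers renamed above. The handler name example_handle_idle_cycle is hypothetical and the include assumes sc_hypervisor_policy.h is on the include path; the two calls mirror what the policies touched further down in this commit actually do.

#include <sc_hypervisor_policy.h>

/* Hypothetical idle-cycle handler: when a worker of sched_ctx has been idle
   for too long, ask the hypervisor to hand some of its workers to another
   context. */
static void example_handle_idle_cycle(unsigned sched_ctx, int worker)
{
	(void)worker;
	/* how many workers the user-imposed bounds and granularity let us give away */
	int nworkers_to_move = sc_hypervisor_compute_nworkers_to_move(sched_ctx);
	if (nworkers_to_move > 0)
		/* receiver unknown: the hypervisor picks the lowest-priority context */
		sc_hypervisor_policy_resize_to_unknown_receiver(sched_ctx, 0);
}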
 
 

+ 1 - 1
sc_hypervisor/src/hypervisor_policies/app_driven_policy.c

@@ -17,7 +17,7 @@
 
 static void app_driven_handle_post_exec_hook(unsigned sched_ctx, int task_tag)
 {
-	_resize_to_unknown_receiver(sched_ctx, 1);
+	sc_hypervisor_policy_resize_to_unknown_receiver(sched_ctx, 1);
 }
 
 struct sc_hypervisor_policy app_driven_policy =

+ 7 - 7
sc_hypervisor/src/hypervisor_policies/gflops_rate_policy.c

@@ -70,8 +70,8 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
         if(nworkers_needed > 0)
         {
                 struct sc_hypervisor_policy_config *sender_config = sc_hypervisor_get_config(sender_sched_ctx);
-                unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
-                unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
+                unsigned potential_moving_cpus = sc_hypervisor_get_movable_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
+                unsigned potential_moving_gpus = sc_hypervisor_get_movable_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
                 unsigned sender_nworkers = starpu_sched_ctx_get_nworkers(sender_sched_ctx);
                 struct sc_hypervisor_policy_config *config = sc_hypervisor_get_config(receiver_sched_ctx);
                 unsigned nworkers_ctx = starpu_sched_ctx_get_nworkers(receiver_sched_ctx);
@@ -87,10 +87,10 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
                                 {
                                         int ngpus = nworkers_needed / 5;
                                         int *gpus;
-                                        gpus = _get_first_workers(sender_sched_ctx, &ngpus, STARPU_CUDA_WORKER);
+                                        gpus = sc_hypervisor_get_idlest_workers(sender_sched_ctx, &ngpus, STARPU_CUDA_WORKER);
                                         int ncpus = nworkers_needed - ngpus;
                                         int *cpus;
-                                        cpus = _get_first_workers(sender_sched_ctx, &ncpus, STARPU_CPU_WORKER);
+                                        cpus = sc_hypervisor_get_idlest_workers(sender_sched_ctx, &ncpus, STARPU_CPU_WORKER);
                                         workers = (int*)malloc(nworkers_needed*sizeof(int));
                                         int i;
 					printf("%d: gpus: ", nworkers_needed);
@@ -115,7 +115,7 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
                 {
 			/*if the needed number of workers is to big we only move the number of workers
 			  corresponding to the granularity set by the user */
-                        int nworkers_to_move = _get_nworkers_to_move(sender_sched_ctx);
+                        int nworkers_to_move = sc_hypervisor_compute_nworkers_to_move(sender_sched_ctx);
 
                         if(sender_nworkers - nworkers_to_move >= sender_config->min_nworkers)
                         {
@@ -125,7 +125,7 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
 
                                 if(nworkers_to_move > 0)
                                 {
-                                        workers = _get_first_workers(sender_sched_ctx, &nworkers_to_move, STARPU_ANY_WORKER);
+                                        workers = sc_hypervisor_get_idlest_workers(sender_sched_ctx, &nworkers_to_move, STARPU_ANY_WORKER);
                                         *nworkers = nworkers_to_move;
                                 }
                         }
@@ -260,7 +260,7 @@ static void gflops_rate_resize(unsigned sched_ctx)
 				config->min_nworkers = 0;
 				config->max_nworkers = 0;
 				printf("ctx %d finished & gives away the res to %d; slow_left %lf\n", sched_ctx, slowest_sched_ctx, slowest_flops_left_pct);
-				_resize(sched_ctx, slowest_sched_ctx, 1, 1);
+				sc_hypervisor_policy_resize(sched_ctx, slowest_sched_ctx, 1, 1);
 				sc_hypervisor_stop_resize(slowest_sched_ctx);
 			}
 		}
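
As a worked illustration of the GPU/CPU split in the second hunk above (not part of the commit): for a request of 7 workers the policy asks the sender for 7 / 5 = 1 GPU, and since sc_hypervisor_get_idlest_workers may lower the count it was passed (it updates it through the pointer argument), the CPU request is computed from the number of GPUs actually obtained. The wrapper function below is hypothetical.

#include <stdlib.h>
#include <sc_hypervisor_policy.h>

/* Illustrative only: split a request of 7 workers into idle GPUs and CPUs,
   mirroring the hunk above. */
static void example_split_request(unsigned sender_sched_ctx)
{
	int nworkers_needed = 7;
	int ngpus = nworkers_needed / 5;      /* 7 / 5 = 1 GPU requested */
	int *gpus = sc_hypervisor_get_idlest_workers(sender_sched_ctx, &ngpus, STARPU_CUDA_WORKER);
	int ncpus = nworkers_needed - ngpus;  /* 6 CPUs if the GPU was granted */
	int *cpus = sc_hypervisor_get_idlest_workers(sender_sched_ctx, &ncpus, STARPU_CPU_WORKER);
	free(gpus);
	free(cpus);
}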

+ 1 - 1
sc_hypervisor/src/hypervisor_policies/idle_policy.c

@@ -37,7 +37,7 @@ void idle_handle_idle_cycle(unsigned sched_ctx, int worker)
 		if(worker_belong_to_other_sched_ctx(sched_ctx, worker))
 			sc_hypervisor_remove_workers_from_sched_ctx(&worker, 1, sched_ctx, 1);
 		else
-			_resize_to_unknown_receiver(sched_ctx, 0);
+			sc_hypervisor_policy_resize_to_unknown_receiver(sched_ctx, 0);
 	}
 }
 

+ 1 - 1
sc_hypervisor/src/hypervisor_policies/ispeed_policy.c

@@ -152,7 +152,7 @@ static void ispeed_handle_poped_task(unsigned sched_ctx, int worker, struct star
 			unsigned slowest_sched_ctx = _get_slowest_sched_ctx();
 			if(fastest_sched_ctx != STARPU_NMAX_SCHED_CTXS && slowest_sched_ctx != STARPU_NMAX_SCHED_CTXS && fastest_sched_ctx != slowest_sched_ctx)
 			{
-				int nworkers_to_move = _get_nworkers_to_move(fastest_sched_ctx);
+				int nworkers_to_move = sc_hypervisor_compute_nworkers_to_move(fastest_sched_ctx);
 				if(nworkers_to_move > 0)
 				{
 					int *workers_to_move = _get_slowest_workers(fastest_sched_ctx, &nworkers_to_move, STARPU_ANY_WORKER);

+ 19 - 19
sc_hypervisor/src/hypervisor_policies/teft_lp_policy.c

@@ -20,13 +20,13 @@
 #include <math.h>
 #include <sys/time.h>
 
-static struct bound_task_pool *task_pools = NULL;
+static struct sc_hypervisor_policy_task_pool *task_pools = NULL;
 
 static starpu_pthread_mutex_t mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
 static double _glp_resolve(int ns, int nw, int nt, double tasks[nw][nt], double tmax, double w_in_s[ns][nw], int *in_sched_ctxs, int *workers, unsigned interger,
-			   struct bound_task_pool *tmp_task_pools, unsigned size_ctxs);
+			   struct sc_hypervisor_policy_task_pool *tmp_task_pools, unsigned size_ctxs);
 static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, double w_in_s[ns][nw], double tasks[nw][nt], 
-						     int *sched_ctxs, int *workers, struct bound_task_pool *tmp_task_pools, unsigned size_ctxs)
+						     int *sched_ctxs, int *workers, struct sc_hypervisor_policy_task_pool *tmp_task_pools, unsigned size_ctxs)
 {
 	double draft_tasks[nw][nt];
 	double draft_w_in_s[ns][nw];
@@ -128,7 +128,7 @@ static void _size_ctxs(int *sched_ctxs, int nsched_ctxs , int *workers, int nwor
 	int nw = workers == NULL ? (int)starpu_worker_get_count() : nworkers; /* Number of different workers */
 	int nt = 0; /* Number of different kinds of tasks */
 	starpu_pthread_mutex_lock(&mutex);
-	struct bound_task_pool * tp;
+	struct sc_hypervisor_policy_task_pool * tp;
 	for (tp = task_pools; tp; tp = tp->next)
 		nt++;
 
@@ -170,7 +170,7 @@ static void teft_lp_handle_submitted_job(struct starpu_codelet *cl, unsigned sch
 {
 	/* count the tasks of the same type */
 	starpu_pthread_mutex_lock(&mutex);
-	struct bound_task_pool *tp = NULL;
+	struct sc_hypervisor_policy_task_pool *tp = NULL;
 
 	for (tp = task_pools; tp; tp = tp->next)
 	{
@@ -180,7 +180,7 @@ static void teft_lp_handle_submitted_job(struct starpu_codelet *cl, unsigned sch
 
 	if (!tp)
 	{
-		tp = (struct bound_task_pool *) malloc(sizeof(struct bound_task_pool));
+		tp = (struct sc_hypervisor_policy_task_pool *) malloc(sizeof(struct sc_hypervisor_policy_task_pool));
 		tp->cl = cl;
 		tp->footprint = footprint;
 		tp->sched_ctx_id = sched_ctx;
@@ -199,7 +199,7 @@ static void teft_lp_handle_submitted_job(struct starpu_codelet *cl, unsigned sch
 static void _remove_task_from_pool(struct starpu_task *task, uint32_t footprint)
 {
 	/* count the tasks of the same type */
-	struct bound_task_pool *tp = NULL;
+	struct sc_hypervisor_policy_task_pool *tp = NULL;
 
 	for (tp = task_pools; tp; tp = tp->next)
 	{
@@ -215,7 +215,7 @@ static void _remove_task_from_pool(struct starpu_task *task, uint32_t footprint)
 		{
 			if(tp == task_pools)
 			{
-				struct bound_task_pool *next_tp = NULL;
+				struct sc_hypervisor_policy_task_pool *next_tp = NULL;
 				if(task_pools->next)
 					next_tp = task_pools->next;
 
@@ -228,7 +228,7 @@ static void _remove_task_from_pool(struct starpu_task *task, uint32_t footprint)
 			}
 			else
 			{
-				struct bound_task_pool *prev_tp = NULL;
+				struct sc_hypervisor_policy_task_pool *prev_tp = NULL;
 				for (prev_tp = task_pools; prev_tp; prev_tp = prev_tp->next)
 				{
 					if (prev_tp->next == tp)
@@ -242,19 +242,19 @@ static void _remove_task_from_pool(struct starpu_task *task, uint32_t footprint)
 	}
 }
 
-static struct bound_task_pool* _clone_linked_list(struct bound_task_pool *tp)
+static struct sc_hypervisor_policy_task_pool* _clone_linked_list(struct sc_hypervisor_policy_task_pool *tp)
 {
 	if(tp == NULL) return NULL;
 
-	struct bound_task_pool *tmp_tp = (struct bound_task_pool*)malloc(sizeof(struct bound_task_pool));
-	memcpy(tmp_tp, tp, sizeof(struct bound_task_pool));
+	struct sc_hypervisor_policy_task_pool *tmp_tp = (struct sc_hypervisor_policy_task_pool*)malloc(sizeof(struct sc_hypervisor_policy_task_pool));
+	memcpy(tmp_tp, tp, sizeof(struct sc_hypervisor_policy_task_pool));
 	tmp_tp->next = _clone_linked_list(tp->next);
 	return tmp_tp;
 }
 
 static void _get_tasks_times(int nw, int nt, double times[nw][nt], int *workers, unsigned size_ctxs)
 {
-        struct bound_task_pool *tp;
+        struct sc_hypervisor_policy_task_pool *tp;
         int w, t;
         for (w = 0; w < nw; w++)
         {
@@ -297,11 +297,11 @@ static void _get_tasks_times(int nw, int nt, double times[nw][nt], int *workers,
 #ifdef STARPU_HAVE_GLPK_H
 #include <glpk.h>
 static double _glp_resolve(int ns, int nw, int nt, double tasks[nw][nt], double tmax, double w_in_s[ns][nw], int *in_sched_ctxs, int *workers, unsigned integer,
-			   struct bound_task_pool *tmp_task_pools, unsigned size_ctxs)
+			   struct sc_hypervisor_policy_task_pool *tmp_task_pools, unsigned size_ctxs)
 {
 	if(tmp_task_pools == NULL)
 		return 0.0;
-	struct bound_task_pool * tp;
+	struct sc_hypervisor_policy_task_pool * tp;
 	int t, w, s;
 	glp_prob *lp;
 
@@ -553,8 +553,8 @@ static void teft_lp_handle_poped_task(unsigned sched_ctx, int worker, struct sta
 			   that the linear progr won't segfault if the list of 
 			   submitted task will change during the exec */
 
-			struct bound_task_pool *tp = NULL;
-			struct bound_task_pool *tmp_task_pools = _clone_linked_list(task_pools);
+			struct sc_hypervisor_policy_task_pool *tp = NULL;
+			struct sc_hypervisor_policy_task_pool *tmp_task_pools = _clone_linked_list(task_pools);
 
 			for (tp = task_pools; tp; tp = tp->next)
 				nt++;
@@ -570,8 +570,8 @@ static void teft_lp_handle_poped_task(unsigned sched_ctx, int worker, struct sta
 			if(found_sol)
 				sc_hypervisor_lp_place_resources_in_ctx(ns, nw, w_in_s, NULL, NULL, 0);
 
-			struct bound_task_pool *next = NULL;
-			struct bound_task_pool *tmp_tp = tmp_task_pools;
+			struct sc_hypervisor_policy_task_pool *next = NULL;
+			struct sc_hypervisor_policy_task_pool *tmp_tp = tmp_task_pools;
 			while(tmp_task_pools)
 			{
 				next = tmp_tp->next;
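
For readers unfamiliar with the renamed structure, a small sketch (assuming only the fields declared in sc_hypervisor_policy.h above; the counting helper itself is hypothetical) of the list walk the teft_lp policy performs whenever it needs the number of task kinds before building its linear program.

#include <sc_hypervisor_policy.h>

/* Hypothetical helper: count the task kinds in a (possibly cloned) pool, the
   same traversal teft_lp_policy.c does before sizing its LP matrices. */
static int example_count_task_kinds(struct sc_hypervisor_policy_task_pool *pool)
{
	int nt = 0;
	struct sc_hypervisor_policy_task_pool *tp;
	for (tp = pool; tp; tp = tp->next)
		nt++;
	return nt;
}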

+ 6 - 6
sc_hypervisor/src/policies_utils/lp_tools.c

@@ -344,7 +344,7 @@ void _lp_find_workers_to_give_away(int nw, int ns, unsigned sched_ctx, int sched
 			if(nworkers_ctx > res_rounded[sched_ctx_idx][w])
 			{
 				int nworkers_to_move = nworkers_ctx - res_rounded[sched_ctx_idx][w];
-				int *workers_to_move = _get_first_workers(sched_ctx, &nworkers_to_move, arch);
+				int *workers_to_move = sc_hypervisor_get_idlest_workers(sched_ctx, &nworkers_to_move, arch);
 				int i;
 				for(i = 0; i < nworkers_to_move; i++)
 					tmp_workers_move[w][tmp_nw_move[w]++] = workers_to_move[i];
@@ -362,7 +362,7 @@ void _lp_find_workers_to_give_away(int nw, int ns, unsigned sched_ctx, int sched
 				double diff = nworkers_to_move - x_double;
 				if(diff == 0.0)
 				{
-					int *workers_to_move = _get_first_workers(sched_ctx, &x, arch);
+					int *workers_to_move = sc_hypervisor_get_idlest_workers(sched_ctx, &x, arch);
 					if(x > 0)
 					{
 						int i;
@@ -375,7 +375,7 @@ void _lp_find_workers_to_give_away(int nw, int ns, unsigned sched_ctx, int sched
 				else
 				{
 					x+=1;
-					int *workers_to_move = _get_first_workers(sched_ctx, &x, arch);
+					int *workers_to_move = sc_hypervisor_get_idlest_workers(sched_ctx, &x, arch);
 					if(x > 0)
 					{
 						int i;
@@ -587,7 +587,7 @@ void sc_hypervisor_lp_distribute_resources_in_ctxs(int* sched_ctxs, int ns, int
 			if(w == 1)
 			{
 				int nworkers_to_add = res_rounded[s][w];
-				int *workers_to_add = _get_first_workers_in_list(&start[w], workers, current_nworkers, &nworkers_to_add, arch);
+				int *workers_to_add = sc_hypervisor_get_idlest_workers_in_list(&start[w], workers, current_nworkers, &nworkers_to_add, arch);
 				int i;
 				for(i = 0; i < nworkers_to_add; i++)
 					workers_add[nw_add++] = workers_to_add[i];
@@ -602,7 +602,7 @@ void sc_hypervisor_lp_distribute_resources_in_ctxs(int* sched_ctxs, int ns, int
 				double diff = nworkers_to_add - x_double;
 				if(diff == 0.0)
 				{
-					int *workers_to_add = _get_first_workers_in_list(&start[w], workers, current_nworkers, &x, arch);
+					int *workers_to_add = sc_hypervisor_get_idlest_workers_in_list(&start[w], workers, current_nworkers, &x, arch);
 					int i;
 					for(i = 0; i < x; i++)
 						workers_add[nw_add++] = workers_to_add[i];
@@ -611,7 +611,7 @@ void sc_hypervisor_lp_distribute_resources_in_ctxs(int* sched_ctxs, int ns, int
 				else
 				{
 					x+=1;
-					int *workers_to_add = _get_first_workers_in_list(&start[w], workers, current_nworkers, &x, arch);
+					int *workers_to_add = sc_hypervisor_get_idlest_workers_in_list(&start[w], workers, current_nworkers, &x, arch);
 					int i;
 					if(diff >= 0.3)
 						for(i = 0; i < x; i++)

+ 12 - 12
sc_hypervisor/src/policies_utils/policy_tools.c

@@ -40,7 +40,7 @@ static int _compute_priority(unsigned sched_ctx)
 }
 
 /* find the context with the slowest priority */
-unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move)
+unsigned sc_hypervisor_find_lowest_prio_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move)
 {
 	int i;
 	int highest_priority = -1;
@@ -73,7 +73,7 @@ unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move)
 	return sched_ctx;
 }
 
-int* _get_first_workers_in_list(int *start, int *workers, int nall_workers,  int *nworkers, enum starpu_archtype arch)
+int* sc_hypervisor_get_idlest_workers_in_list(int *start, int *workers, int nall_workers,  int *nworkers, enum starpu_archtype arch)
 {
 	int *curr_workers = (int*)malloc((*nworkers)*sizeof(int));
 
@@ -101,7 +101,7 @@ int* _get_first_workers_in_list(int *start, int *workers, int nall_workers,  int
 }
 
 /* get first nworkers with the highest idle time in the context */
-int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype arch)
+int* sc_hypervisor_get_idlest_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype arch)
 {
 	struct sc_hypervisor_wrapper* sc_w = sc_hypervisor_get_wrapper(sched_ctx);
 	struct sc_hypervisor_policy_config *config = sc_hypervisor_get_config(sched_ctx);
@@ -176,7 +176,7 @@ int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype
 }
 
 /* get the number of workers in the context that are allowed to be moved (that are not fixed) */
-unsigned _get_potential_nworkers(struct sc_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
+unsigned sc_hypervisor_get_movable_nworkers(struct sc_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
 {
 	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx);
 
@@ -203,13 +203,13 @@ unsigned _get_potential_nworkers(struct sc_hypervisor_policy_config *config, uns
 /* compute the number of workers that should be moved depending:
    - on the min/max number of workers in a context imposed by the user,
   - on the resource granularity imposed by the user for the resizing process*/
-int _get_nworkers_to_move(unsigned req_sched_ctx)
+int sc_hypervisor_compute_nworkers_to_move(unsigned req_sched_ctx)
 {
        	struct sc_hypervisor_policy_config *config = sc_hypervisor_get_config(req_sched_ctx);
 	unsigned nworkers = starpu_sched_ctx_get_nworkers(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
 
-	unsigned potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx, STARPU_ANY_WORKER);
+	unsigned potential_moving_workers = sc_hypervisor_get_movable_nworkers(config, req_sched_ctx, STARPU_ANY_WORKER);
 	if(potential_moving_workers > 0)
 	{
 		if(potential_moving_workers <= config->min_nworkers)
@@ -247,7 +247,7 @@ int _get_nworkers_to_move(unsigned req_sched_ctx)
 	return nworkers_to_move;
 }
 
-unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigned force_resize, unsigned now)
+unsigned sc_hypervisor_policy_resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigned force_resize, unsigned now)
 {
 	int ret = 1;
 	if(force_resize)
@@ -256,13 +256,13 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 		ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
-		int nworkers_to_move = _get_nworkers_to_move(sender_sched_ctx);
+		int nworkers_to_move = sc_hypervisor_compute_nworkers_to_move(sender_sched_ctx);
 		if(nworkers_to_move > 0)
 		{
 			unsigned poor_sched_ctx = STARPU_NMAX_SCHED_CTXS;
 			if(receiver_sched_ctx == STARPU_NMAX_SCHED_CTXS)
 			{
-				poor_sched_ctx = _find_poor_sched_ctx(sender_sched_ctx, nworkers_to_move);
+				poor_sched_ctx = sc_hypervisor_find_lowest_prio_sched_ctx(sender_sched_ctx, nworkers_to_move);
 			}
 			else
 			{
@@ -276,7 +276,7 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 			}
 			if(poor_sched_ctx != STARPU_NMAX_SCHED_CTXS)
 			{
-				int *workers_to_move = _get_first_workers(sender_sched_ctx, &nworkers_to_move, STARPU_ANY_WORKER);
+				int *workers_to_move = sc_hypervisor_get_idlest_workers(sender_sched_ctx, &nworkers_to_move, STARPU_ANY_WORKER);
 				sc_hypervisor_move_workers(sender_sched_ctx, poor_sched_ctx, workers_to_move, nworkers_to_move, now);
 
 				struct sc_hypervisor_policy_config *new_config = sc_hypervisor_get_config(poor_sched_ctx);
@@ -295,9 +295,9 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 }
 
 
-unsigned _resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now)
+unsigned sc_hypervisor_policy_resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now)
 {
-	return _resize(sender_sched_ctx, STARPU_NMAX_SCHED_CTXS, 0, now);
+	return sc_hypervisor_policy_resize(sender_sched_ctx, STARPU_NMAX_SCHED_CTXS, 0, now);
 }
 
 static double _get_ispeed_sample_for_type_of_worker(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype req_arch)
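
Finally, a rough sketch of the clamping described by the comment above sc_hypervisor_compute_nworkers_to_move, written as it might appear inside policy_tools.c where the config types are already visible. This is illustrative only: the real function is more involved, and the granularity field name is an assumption; only min_nworkers and max_nworkers appear explicitly in this diff.

/* Illustrative sketch only: the kind of bounds the helper enforces. "movable"
   is what sc_hypervisor_get_movable_nworkers returned for the context, and
   "granularity" is assumed to be the config field holding the user-imposed
   resizing step. */
static int example_nworkers_to_move(struct sc_hypervisor_policy_config *config,
				    unsigned movable)
{
	if (movable == 0 || movable <= config->min_nworkers)
		return 0;                        /* the context cannot spare anyone */
	unsigned spare = movable - config->min_nworkers;
	if (spare > config->granularity)
		spare = config->granularity;     /* never move more than one resizing step */
	return (int)spare;
}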