
rename + comments in the public interface

Andra Hugo 12 years ago
parent
commit
6bcd0d3c6e

+ 1 - 1
sc_hypervisor/Makefile.am

@@ -17,7 +17,7 @@ SUBDIRS = src examples
 
 versincludedir = $(includedir)/starpu/$(STARPU_EFFECTIVE_VERSION)
 
-versinclude_HEADERS = 	include/sc_hypervisor.h 			\
+versinclude_HEADERS = 	include/sc_hypervisor.h			\
 			include/sc_hypervisor_config.h 		\
 			include/sc_hypervisor_monitoring.h 	\
 			include/sc_hypervisor_policy.h 		\

+ 1 - 1
sc_hypervisor/include/sc_hypervisor_monitoring.h

@@ -116,7 +116,7 @@ double sc_hypervisor_get_elapsed_flops_per_sched_ctx(struct sc_hypervisor_wrappe
 double sc_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct sc_hypervisor_wrapper* sc_w);
 
 /* compute an average value of the cpu/cuda velocity */
-double sc_hypervisor_get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
+double sc_hypervisorsc_hypervisor_get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
 
 /* compute the actual velocity of all workers of a specific type of worker */
 double sc_hypervisor_get_velocity(struct sc_hypervisor_wrapper *sc_w, enum starpu_archtype arch);

+ 23 - 8
sc_hypervisor/include/sc_hypervisor_policy.h

@@ -42,35 +42,50 @@ struct sc_hypervisor_policy_task_pool
 	struct sc_hypervisor_policy_task_pool *next;
 };
 
+/* find the context with the lowest priority in order to move some workers */
 unsigned sc_hypervisor_find_lowest_prio_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move);
 
+/* find the idlest workers of a context */
 int* sc_hypervisor_get_idlest_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype arch);
 
+/* find the idlest workers in a list of workers */
 int* sc_hypervisor_get_idlest_workers_in_list(int *start, int *workers, int nall_workers,  int *nworkers, enum starpu_archtype arch);
 
+/* find the workers that can be moved from a context (if the min, max, etc. constraints allow it) */
 unsigned sc_hypervisor_get_movable_nworkers(struct sc_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch);
 
+/* compute how many workers should be moved from this context */
 int sc_hypervisor_compute_nworkers_to_move(unsigned req_sched_ctx);
 
+/* check the policy's constraints in order to resize */
 unsigned sc_hypervisor_policy_resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigned force_resize, unsigned now);
 
+/* check the policy's constraints in order to resize and find a context willing to accept the resources */
 unsigned sc_hypervisor_policy_resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now);
 
-double _get_ctx_velocity(struct sc_hypervisor_wrapper* sc_w);
+/* compute the velocity of a context */
+double sc_hypervisor_get_ctx_velocity(struct sc_hypervisor_wrapper* sc_w);
 
-double _get_slowest_ctx_exec_time(void);
+/* get the execution time of the slowest context */
+double sc_hypervisor_get_slowest_ctx_exec_time(void);
 
-double _get_fastest_ctx_exec_time(void);
+/* get the execution time of the fastest context */
+double sc_hypervisor_get_fastest_ctx_exec_time(void);
 
-double _get_velocity_per_worker(struct sc_hypervisor_wrapper *sc_w, unsigned worker); 
+/* compute the velocity of a worker in a context */
+double sc_hypervisor_get_velocity_per_worker(struct sc_hypervisor_wrapper *sc_w, unsigned worker); 
 
-double _get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
+/* compute the velocity of a type of worker in a context */
+double sc_hypervisor_get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
 
-double _get_ref_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
+/* compute the velocity of a type of worker in a context based on its history */
+double sc_hypervisor_get_ref_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
 
-int _velocity_gap_btw_ctxs(void);
+/* check if there is a big velocity gap between the contexts */
+int sc_hypervisor_has_velocity_gap_btw_ctxs(void);
 
-void _get_total_nw(int *workers, int nworkers, int ntypes_of_workers, int total_nw[ntypes_of_workers]);
+/* count the workers of each type in a list of workers */
+void sc_hypervisor_group_workers_by_type(int *workers, int nworkers, int ntypes_of_workers, int total_nw[ntypes_of_workers]);
 
 #ifdef __cplusplus
 }

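As a usage sketch only (not part of this commit), the hypothetical policy callback below shows how the renamed helpers above are meant to be combined; it mirrors the handle_poped_task pattern of the ispeed/debit policies further down in this diff. The function name my_policy_handle_poped_task is invented, the include paths assume the installed sc_hypervisor headers, and the assumption that sc_hypervisor_find_lowest_prio_sched_ctx returns STARPU_NMAX_SCHED_CTXS when no suitable context exists is not confirmed by these hunks.

#include <starpu.h>
#include <sc_hypervisor_policy.h>
#include <sc_hypervisor_monitoring.h>

/* hypothetical policy callback (illustration only, not part of this commit) */
static void my_policy_handle_poped_task(unsigned sched_ctx, int worker,
					struct starpu_task *task, uint32_t footprint)
{
	(void)task; (void)footprint;
	struct sc_hypervisor_wrapper *sc_w = sc_hypervisor_get_wrapper(sched_ctx);

	/* refresh the velocity measurement of the worker that just finished a task */
	sc_hypervisor_get_velocity_per_worker(sc_w, worker);

	/* only consider resizing when the contexts have drifted apart */
	if (sc_hypervisor_has_velocity_gap_btw_ctxs())
	{
		int nworkers_to_move = sc_hypervisor_compute_nworkers_to_move(sched_ctx);
		if (nworkers_to_move > 0)
		{
			/* assumption: STARPU_NMAX_SCHED_CTXS is returned when no
			   lower-priority context is available */
			unsigned receiver = sc_hypervisor_find_lowest_prio_sched_ctx(sched_ctx, nworkers_to_move);
			if (receiver != STARPU_NMAX_SCHED_CTXS)
				sc_hypervisor_policy_resize(sched_ctx, receiver, 0, 0);
		}
	}
}

Note that the real policies in this commit additionally guard this path with a trylock on act_hypervisor_mutex before checking the velocity gap, as the ispeed/debit hunks below show.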
+ 2 - 2
sc_hypervisor/src/hypervisor_policies/debit_lp_policy.c

@@ -228,11 +228,11 @@ static double _glp_resolve(int ns, int nw, double velocity[ns][nw], double w_in_
 static void debit_lp_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_task *task, uint32_t footprint)
 {
 	struct sc_hypervisor_wrapper* sc_w = sc_hypervisor_get_wrapper(sched_ctx);
-	_get_velocity_per_worker(sc_w, worker);
+	sc_hypervisor_get_velocity_per_worker(sc_w, worker);
 	int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
-		if(_velocity_gap_btw_ctxs())
+		if(sc_hypervisor_has_velocity_gap_btw_ctxs())
 		{
 			int ns = sc_hypervisor_get_nsched_ctxs();
 			int nw = starpu_worker_get_count(); /* Number of different workers */

+ 3 - 3
sc_hypervisor/src/hypervisor_policies/feft_lp_policy.c

@@ -21,7 +21,7 @@
 #ifdef STARPU_HAVE_GLPK_H
 static void feft_lp_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_task *task, uint32_t footprint)
 {
-	if(_velocity_gap_btw_ctxs())
+	if(sc_hypervisor_has_velocity_gap_btw_ctxs())
 	{
 		int nsched_ctxs = sc_hypervisor_get_nsched_ctxs();
 
@@ -36,7 +36,7 @@ static void feft_lp_handle_poped_task(unsigned sched_ctx, int worker, struct sta
 			nw = ncuda != 0 ? 2 : 1;
 #endif
 			int total_nw[nw];
-			_get_total_nw(NULL, -1, nw, total_nw);
+			sc_hypervisor_group_workers_by_type(NULL, -1, nw, total_nw);
 
 
 			struct timeval start_time;
@@ -71,7 +71,7 @@ static void feft_lp_size_ctxs(int *sched_ctxs, int ns, int *workers, int nworker
 #endif
 	double nworkers_per_type[nsched_ctxs][nw];
 	int total_nw[nw];
-	_get_total_nw(workers, nworkers, nw, total_nw);
+	sc_hypervisor_group_workers_by_type(workers, nworkers, nw, total_nw);
 
 	starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	double vmax = sc_hypervisor_lp_get_nworkers_per_ctx(nsched_ctxs, nw, nworkers_per_type, total_nw);

+ 2 - 2
sc_hypervisor/src/hypervisor_policies/gflops_rate_policy.c

@@ -58,10 +58,10 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
 	struct sc_hypervisor_wrapper* sender_sc_w = sc_hypervisor_get_wrapper(sender_sched_ctx);
 	struct sc_hypervisor_wrapper* receiver_sc_w = sc_hypervisor_get_wrapper(receiver_sched_ctx);
         int *workers = NULL;
-        double v_receiver = _get_ctx_velocity(receiver_sc_w);
+        double v_receiver = sc_hypervisor_get_ctx_velocity(receiver_sc_w);
         double receiver_remainig_flops = receiver_sc_w->remaining_flops;
         double sender_exp_end = _get_exp_end(sender_sched_ctx);
-        double sender_v_cpu = _get_velocity_per_worker_type(sender_sc_w, STARPU_CPU_WORKER);
+        double sender_v_cpu = sc_hypervisor_get_velocity_per_worker_type(sender_sc_w, STARPU_CPU_WORKER);
         double v_for_rctx = (receiver_remainig_flops/(sender_exp_end - starpu_timing_now())) - v_receiver;
 
         int nworkers_needed = v_for_rctx/sender_v_cpu;

+ 5 - 5
sc_hypervisor/src/hypervisor_policies/ispeed_lp_policy.c

@@ -46,7 +46,7 @@ static unsigned _compute_flops_distribution_over_ctxs(int ns, int nw, double w_i
 			draft_flops_on_w[s][w] = 0.0;
 			int worker = workers == NULL ? w : workers[w];
 
-			velocity[s][w] = _get_velocity_per_worker(sc_w, worker);
+			velocity[s][w] = sc_hypervisor_get_velocity_per_worker(sc_w, worker);
 			if(velocity[s][w] == -1.0)
 			{
 				enum starpu_archtype arch = starpu_worker_get_type(worker);
@@ -72,8 +72,8 @@ static unsigned _compute_flops_distribution_over_ctxs(int ns, int nw, double w_i
 	/* take the exec time of the slowest ctx
 	   as a starting point and then try to minimize it
 	   while increasing it a little for the faster ctxs */
-	double tmax = _get_slowest_ctx_exec_time();
- 	double smallest_tmax = _get_fastest_ctx_exec_time(); //tmax - 0.5*tmax; 
+	double tmax = sc_hypervisor_get_slowest_ctx_exec_time();
+ 	double smallest_tmax = sc_hypervisor_get_fastest_ctx_exec_time(); //tmax - 0.5*tmax; 
 //	printf("tmax %lf smallest %lf\n", tmax, smallest_tmax);
 
 	double res = 1.0;
@@ -357,11 +357,11 @@ static double _glp_resolve(int ns, int nw, double velocity[ns][nw], double flops
 static void ispeed_lp_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_task *task, uint32_t footprint)
 {
 	struct sc_hypervisor_wrapper* sc_w = sc_hypervisor_get_wrapper(sched_ctx);
-	_get_velocity_per_worker(sc_w, worker);
+	sc_hypervisor_get_velocity_per_worker(sc_w, worker);
 	int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
-		if(_velocity_gap_btw_ctxs())
+		if(sc_hypervisor_has_velocity_gap_btw_ctxs())
 		{
 			int ns = sc_hypervisor_get_nsched_ctxs();
 			int nw = starpu_worker_get_count(); /* Number of different workers */

+ 9 - 9
sc_hypervisor/src/hypervisor_policies/ispeed_policy.c

@@ -27,7 +27,7 @@ static unsigned _get_fastest_sched_ctx(void)
 	int i;
 	for(i = 0; i < nsched_ctxs; i++)
 	{
-		curr_velocity = _get_ctx_velocity(sc_hypervisor_get_wrapper(sched_ctxs[i]));
+		curr_velocity = sc_hypervisor_get_ctx_velocity(sc_hypervisor_get_wrapper(sched_ctxs[i]));
 		if( curr_velocity > biggest_velocity)
 		{
 			fastest_sched_ctx = sched_ctxs[i];
@@ -43,13 +43,13 @@ static unsigned _get_slowest_sched_ctx(void)
 	int *sched_ctxs = sc_hypervisor_get_sched_ctxs();
 	int nsched_ctxs = sc_hypervisor_get_nsched_ctxs();
 
-	double smallest_velocity = _get_ctx_velocity(sc_hypervisor_get_wrapper(sched_ctxs[0]));
+	double smallest_velocity = sc_hypervisor_get_ctx_velocity(sc_hypervisor_get_wrapper(sched_ctxs[0]));
 	unsigned slowest_sched_ctx = smallest_velocity == -1.0  ? STARPU_NMAX_SCHED_CTXS : sched_ctxs[0];
 	double curr_velocity = 0.0;
 	int i;
 	for(i = 1; i < nsched_ctxs; i++)
 	{
-		curr_velocity = _get_ctx_velocity(sc_hypervisor_get_wrapper(sched_ctxs[i]));
+		curr_velocity = sc_hypervisor_get_ctx_velocity(sc_hypervisor_get_wrapper(sched_ctxs[i]));
 		if((curr_velocity < smallest_velocity || smallest_velocity == 0.0) && curr_velocity != -1.0)
 		{
 			smallest_velocity = curr_velocity;
@@ -104,7 +104,7 @@ static int* _get_slowest_workers(unsigned sched_ctx, int *nworkers, enum starpu_
 
 					if(!considered)
 					{
-						double worker_velocity = _get_velocity_per_worker(sc_w, worker);
+						double worker_velocity = sc_hypervisor_get_velocity_per_worker(sc_w, worker);
 						if(worker_velocity != -1.0)
 						{
 							/* the first iteration*/
@@ -119,7 +119,7 @@ static int* _get_slowest_workers(unsigned sched_ctx, int *nworkers, enum starpu_
 							else if(config->priority[worker] ==
 								config->priority[curr_workers[index]])
 							{
-								double curr_worker_velocity = _get_velocity_per_worker(sc_w, curr_workers[index]);
+								double curr_worker_velocity = sc_hypervisor_get_velocity_per_worker(sc_w, curr_workers[index]);
 //								printf("speed[%d] = %lf speed[%d] = %lf\n", worker, worker_velocity, curr_workers[index], curr_worker_velocity);
 								if(worker_velocity < curr_worker_velocity && curr_worker_velocity != -1.0)
 								{
@@ -146,7 +146,7 @@ static void ispeed_handle_poped_task(unsigned sched_ctx, int worker, struct star
 	int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
-		if(_velocity_gap_btw_ctxs())
+		if(sc_hypervisor_has_velocity_gap_btw_ctxs())
 		{
 			unsigned fastest_sched_ctx = _get_fastest_sched_ctx();
 			unsigned slowest_sched_ctx = _get_slowest_sched_ctx();
@@ -161,9 +161,9 @@ static void ispeed_handle_poped_task(unsigned sched_ctx, int worker, struct star
 						double new_speed = 0.0;
 						int i;
 						for(i = 0; i < nworkers_to_move; i++)
-							new_speed += _get_velocity_per_worker(sc_hypervisor_get_wrapper(fastest_sched_ctx), workers_to_move[i]);
-						double fastest_speed = _get_ctx_velocity(sc_hypervisor_get_wrapper(fastest_sched_ctx));
-						double slowest_speed = _get_ctx_velocity(sc_hypervisor_get_wrapper(slowest_sched_ctx));
+							new_speed += sc_hypervisor_get_velocity_per_worker(sc_hypervisor_get_wrapper(fastest_sched_ctx), workers_to_move[i]);
+						double fastest_speed = sc_hypervisor_get_ctx_velocity(sc_hypervisor_get_wrapper(fastest_sched_ctx));
+						double slowest_speed = sc_hypervisor_get_ctx_velocity(sc_hypervisor_get_wrapper(slowest_sched_ctx));
 //						printf("fast_speed(%d) %lf slow_speed(%d) %lf new speed(%d) %lf \n", fastest_sched_ctx, fastest_speed, slowest_sched_ctx, 
 //						       slowest_speed, workers_to_move[0], new_speed);
 						if(fastest_speed != -1.0 && slowest_speed != -1.0 && (slowest_speed + new_speed) <= (fastest_speed - new_speed))

+ 1 - 1
sc_hypervisor/src/hypervisor_policies/teft_lp_policy.c

@@ -540,7 +540,7 @@ static void teft_lp_handle_poped_task(unsigned sched_ctx, int worker, struct sta
 			return;
 		}
 
-		if(_velocity_gap_btw_ctxs())
+		if(sc_hypervisor_has_velocity_gap_btw_ctxs())
 		{
 			int ns = sc_hypervisor_get_nsched_ctxs();
 			int nw = starpu_worker_get_count(); /* Number of different workers */

+ 1 - 1
sc_hypervisor/src/policies_utils/lp_tools.c

@@ -259,7 +259,7 @@ double sc_hypervisor_lp_get_tmax(int nw, int *workers)
 {
 	int ntypes_of_workers = 2;
 	int total_nw[ntypes_of_workers];
-	_get_total_nw(workers, nw, 2, total_nw);
+	sc_hypervisor_group_workers_by_type(workers, nw, 2, total_nw);
 
 	int nsched_ctxs = sc_hypervisor_get_nsched_ctxs();
 

+ 11 - 11
sc_hypervisor/src/policies_utils/policy_tools.c

@@ -347,7 +347,7 @@ static double _get_ispeed_sample_for_sched_ctx(unsigned sched_ctx)
 	return ispeed_sample;
 }
 
-double _get_ctx_velocity(struct sc_hypervisor_wrapper* sc_w)
+double sc_hypervisor_get_ctx_velocity(struct sc_hypervisor_wrapper* sc_w)
 {
 	struct sc_hypervisor_policy_config *config = sc_hypervisor_get_config(sc_w->sched_ctx);
         double elapsed_flops = sc_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
@@ -373,7 +373,7 @@ double _get_ctx_velocity(struct sc_hypervisor_wrapper* sc_w)
 	return -1.0;
 }
 
-double _get_slowest_ctx_exec_time(void)
+double sc_hypervisor_get_slowest_ctx_exec_time(void)
 {
 	int *sched_ctxs = sc_hypervisor_get_sched_ctxs();
 	int nsched_ctxs = sc_hypervisor_get_nsched_ctxs();
@@ -389,7 +389,7 @@ double _get_slowest_ctx_exec_time(void)
 
 //		double elapsed_time  = (curr_time - sc_w->start_time)/1000000;
 		struct sc_hypervisor_policy_config *config = sc_hypervisor_get_config(sc_w->sched_ctx);
-		double elapsed_time = (config->ispeed_ctx_sample/1000000000.0)/_get_ctx_velocity(sc_w);
+		double elapsed_time = (config->ispeed_ctx_sample/1000000000.0)/sc_hypervisor_get_ctx_velocity(sc_w);
 		if(elapsed_time > slowest_time)
 			slowest_time = elapsed_time;
 
@@ -397,7 +397,7 @@ double _get_slowest_ctx_exec_time(void)
 	return slowest_time;
 }
 
-double _get_fastest_ctx_exec_time(void)
+double sc_hypervisor_get_fastest_ctx_exec_time(void)
 {
 	int *sched_ctxs = sc_hypervisor_get_sched_ctxs();
 	int nsched_ctxs = sc_hypervisor_get_nsched_ctxs();
@@ -412,7 +412,7 @@ double _get_fastest_ctx_exec_time(void)
 		sc_w = sc_hypervisor_get_wrapper(sched_ctxs[s]);
 
 		struct sc_hypervisor_policy_config *config = sc_hypervisor_get_config(sc_w->sched_ctx);
-		double elapsed_time = (config->ispeed_ctx_sample/1000000000.0)/_get_ctx_velocity(sc_w);
+		double elapsed_time = (config->ispeed_ctx_sample/1000000000.0)/sc_hypervisor_get_ctx_velocity(sc_w);
 		
 		if(elapsed_time < fastest_time)
 			fastest_time = elapsed_time;
@@ -423,7 +423,7 @@ double _get_fastest_ctx_exec_time(void)
 }
 
 
-double _get_velocity_per_worker(struct sc_hypervisor_wrapper *sc_w, unsigned worker)
+double sc_hypervisor_get_velocity_per_worker(struct sc_hypervisor_wrapper *sc_w, unsigned worker)
 {
 	if(!starpu_sched_ctx_contains_worker(worker, sc_w->sched_ctx))
 		return -1.0;
@@ -506,7 +506,7 @@ static double _get_best_elapsed_flops(struct sc_hypervisor_wrapper* sc_w, int *n
 }
 
 /* compute an average value of the cpu/cuda velocity */
-double _get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
+double sc_hypervisor_get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
 {
         int npus = 0;
         double elapsed_flops = _get_best_elapsed_flops(sc_w, &npus, arch) / 1000000000.0 ; /* in gflops */
@@ -526,7 +526,7 @@ double _get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum st
 
 
 /* check if there is a big velocity gap between the contexts */
-int _velocity_gap_btw_ctxs()
+int sc_hypervisor_has_velocity_gap_btw_ctxs()
 {
 	int *sched_ctxs = sc_hypervisor_get_sched_ctxs();
 	int nsched_ctxs = sc_hypervisor_get_nsched_ctxs();
@@ -537,7 +537,7 @@ int _velocity_gap_btw_ctxs()
 	for(i = 0; i < nsched_ctxs; i++)
 	{
 		sc_w = sc_hypervisor_get_wrapper(sched_ctxs[i]);
-		double ctx_v = _get_ctx_velocity(sc_w);
+		double ctx_v = sc_hypervisor_get_ctx_velocity(sc_w);
 		if(ctx_v != -1.0)
 		{
 			for(j = 0; j < nsched_ctxs; j++)
@@ -549,7 +549,7 @@ int _velocity_gap_btw_ctxs()
 						return 1;
 
 					other_sc_w = sc_hypervisor_get_wrapper(sched_ctxs[j]);
-					double other_ctx_v = _get_ctx_velocity(other_sc_w);
+					double other_ctx_v = sc_hypervisor_get_ctx_velocity(other_sc_w);
 					if(other_ctx_v != -1.0)
 					{
 						double gap = ctx_v < other_ctx_v ? other_ctx_v / ctx_v : ctx_v / other_ctx_v ;
@@ -566,7 +566,7 @@ int _velocity_gap_btw_ctxs()
 }
 
 
-void _get_total_nw(int *workers, int nworkers, int ntypes_of_workers, int total_nw[ntypes_of_workers])
+void sc_hypervisor_group_workers_by_type(int *workers, int nworkers, int ntypes_of_workers, int total_nw[ntypes_of_workers])
 {
 	int current_nworkers = workers == NULL ? starpu_worker_get_count() : nworkers;
 	int w;

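The renamed sc_hypervisor_group_workers_by_type keeps the behaviour of the old _get_total_nw: it fills total_nw with one count per worker type, and passing workers == NULL with nworkers == -1 means "consider every StarPU worker", as in the feft_lp calls above. Below is a minimal call-site sketch (illustration only, not part of this commit); which slot of total_nw holds the CPU count and which holds the CUDA count is not shown in these hunks.

#include <stdio.h>
#include <sc_hypervisor_policy.h>

/* illustration only: count the workers of each type, as the lp policies do */
static void print_worker_counts(void)
{
	int ntypes = 2;           /* e.g. CPU and CUDA workers are both present */
	int total_nw[ntypes];

	/* NULL / -1 means "consider every StarPU worker" */
	sc_hypervisor_group_workers_by_type(NULL, -1, ntypes, total_nw);

	int t;
	for (t = 0; t < ntypes; t++)
		printf("worker type %d: %d workers\n", t, total_nw[t]);
}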
+ 4 - 4
sc_hypervisor/src/sc_hypervisor.c

@@ -365,7 +365,7 @@ static double _get_best_total_elapsed_flops(struct sc_hypervisor_wrapper* sc_w,
 }
 
 /* compute an average value of the cpu/cuda velocity */
-double sc_hypervisor_get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
+double sc_hypervisorsc_hypervisor_get_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
 {
         int npus = 0;
         double elapsed_flops = _get_best_total_elapsed_flops(sc_w, &npus, arch) / 1000000000.0 ; /* in gflops */
@@ -384,7 +384,7 @@ double sc_hypervisor_get_velocity_per_worker_type(struct sc_hypervisor_wrapper*
 }
 
 /* compute an average value of the cpu/cuda old velocity */
-double _get_ref_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
+double sc_hypervisor_get_ref_velocity_per_worker_type(struct sc_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
 {
 	double ref_velocity = 0.0;
 	unsigned nw = 0;
@@ -978,9 +978,9 @@ void sc_hypervisor_free_size_req(void)
 double sc_hypervisor_get_velocity(struct sc_hypervisor_wrapper *sc_w, enum starpu_archtype arch)
 {
 
-	double velocity = sc_hypervisor_get_velocity_per_worker_type(sc_w, arch);
+	double velocity = sc_hypervisorsc_hypervisor_get_velocity_per_worker_type(sc_w, arch);
 	if(velocity == -1.0)
-		velocity = _get_ref_velocity_per_worker_type(sc_w, arch);
+		velocity = sc_hypervisor_get_ref_velocity_per_worker_type(sc_w, arch);
 	if(velocity == -1.0)
 		velocity = arch == STARPU_CPU_WORKER ? 5.0 : 100.0;