Nathalie Furmento 12 éve
szülő
commit
22ce72ad31

+ 11 - 11
doc/chapters/sched_ctx_hypervisor.texi

@@ -27,7 +27,7 @@ Basic strategies of resizing scheduling contexts already exist but a platform fo
 @section Managing the hypervisor
 There is a single hypervisor that is in charge of resizing contexts and the resizing strategy is chosen at the initialization of the hypervisor. A single resize can be done at a time.
 
-@deftypefun struct starpu_performance_counters* sched_ctx_hypervisor_init ({struct hypervisor_policy*} @var{policy})
+@deftypefun struct starpu_performance_counters* sched_ctx_hypervisor_init ({struct starpu_sched_ctx_hypervisor_policy*} @var{policy})
 Initializes the hypervisor to use the strategy provided as a parameter and creates the performance counters (@pxref{Performance Counters}).
 These performance counters represent actually some callbacks that will be used by the contexts to notify the information needed by the hypervisor.
 @end deftypefun
@@ -248,7 +248,7 @@ it may sometimes be desirable to implement custom
 policies to address specific problems.  The API described below allows
 users to write their own resizing policy.
 
-@deftp {Data Type} {struct hypervisor_policy}
+@deftp {Data Type} {struct starpu_sched_ctx_hypervisor_policy}
 This structure contains all the methods that implement a hypervisor resizing policy. 
 
 @table @asis
@@ -271,7 +271,7 @@ It is called whenever a tag task has just been executed. The table of resize req
 
 The Hypervisor provides also a structure with configuration information of each context, which can be used to construct new resize strategies.
 
-@deftp {Data Type} {struct policy_config }
+@deftp {Data Type} {struct starpu_sched_ctx_hypervisor_policy_config }
 This structure contains all configuration information of a context
 
 @table @asis
@@ -295,14 +295,14 @@ Indicates the maximum idle time accepted before a resize is triggered for the wo
 Additionally, the hypervisor provides a structure with information obtained from StarPU by means of the performance counters
 
 
-@deftp {Data Type} {struct sched_ctx_wrapper}
+@deftp {Data Type} {struct starpu_sched_ctx_hypervisor_wrapper}
 This structure is a wrapper of the contexts available in StarPU
 and contains all information about a context obtained by means of the performance counters
 
 @table @asis
 @item @code{unsigned sched_ctx}
 The context wrapped
-@item @code{struct policy_config *config}
+@item @code{struct starpu_sched_ctx_hypervisor_policy_config *config}
 The corresponding resize configuration
 @item @code{double current_idle_time[STARPU_NMAXWORKERS]}
 The idle time counter of each worker of the context
@@ -320,12 +320,12 @@ The number of flops executed by each worker of the context from last resize
 The number of flops that still have to be executed by the workers in the context
 @item @code{double start_time}
 The time when it started executing
-@item @code{struct resize_ack resize_ack}
+@item @code{struct starpu_sched_ctx_hypervisor_resize_ack resize_ack}
 The structure confirming the last resize finished and a new one can be done
 @end table
 @end deftp
 
-@deftp {Data Type} {struct resize_ack}
+@deftp {Data Type} {struct starpu_sched_ctx_hypervisor_resize_ack}
 This structure checks whether the workers moved to another context are actually taken into account in that context
 @table @asis
 @item @code{int receiver_sched_ctx}
@@ -345,7 +345,7 @@ The following functions can be used in the resizing strategies.
 Moves workers from one context to another
 @end deftypefun
 
-@deftypefun {struct policy_config*} sched_ctx_hypervisor_get_config (unsigned @var{sched_ctx});
+@deftypefun {struct starpu_sched_ctx_hypervisor_policy_config*} sched_ctx_hypervisor_get_config (unsigned @var{sched_ctx});
 Returns the configuration structure of a context
 @end deftypefun
 
@@ -357,11 +357,11 @@ Gets the contexts managed by the hypervisor
 Gets the number of contexts managed by the hypervisor
 @end deftypefun
 
-@deftypefun {struct sched_ctx_wrapper*} sched_ctx_hypervisor_get_wrapper (unsigned @var{sched_ctx});
+@deftypefun {struct starpu_sched_ctx_hypervisor_wrapper*} sched_ctx_hypervisor_get_wrapper (unsigned @var{sched_ctx});
 Returns the wrapper corresponding to the context @code{sched_ctx}
 @end deftypefun
 
-@deftypefun double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx ({struct sched_ctx_wrapper*} @var{sc_w});
+@deftypefun double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx ({struct starpu_sched_ctx_hypervisor_wrapper*} @var{sc_w});
 Returns the flops of a context elapsed since the last resize
 @end deftypefun
 
@@ -375,7 +375,7 @@ Returns the name of the resizing policy the hypervisor uses
 @cartouche
 @smallexample
 
-struct hypervisor_policy dummy_policy = 
+struct starpu_sched_ctx_hypervisor_policy dummy_policy = 
 @{
        .handle_poped_task = dummy_handle_poped_task,
        .handle_pushed_task = dummy_handle_pushed_task,

+ 1 - 1
sched_ctx_hypervisor/examples/app_driven_test/app_driven_test.c

@@ -110,7 +110,7 @@ int main()
 	unsigned sched_ctx1 = starpu_create_sched_ctx("heft", ressources1, nres1, "sched_ctx1");
 	unsigned sched_ctx2 = starpu_create_sched_ctx("heft", ressources2, nres2, "sched_ctx2");
 
-	struct hypervisor_policy policy;
+	struct starpu_sched_ctx_hypervisor_policy policy;
 	policy.custom = 0;
 	policy.name = "app_driven";
 	void *perf_counters = sched_ctx_hypervisor_init(&policy);

+ 1 - 1
sched_ctx_hypervisor/examples/sched_ctx_utils/sched_ctx_utils.c

@@ -238,7 +238,7 @@ void start_2ndbench(void (*bench)(float*, unsigned, unsigned))
 
 void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 {
-	struct hypervisor_policy policy;
+	struct starpu_sched_ctx_hypervisor_policy policy;
 	policy.custom = 0;
 	policy.name = "idle";
 	struct starpu_performance_counters *perf_counters = sched_ctx_hypervisor_init(&policy);

+ 13 - 11
sched_ctx_hypervisor/include/sched_ctx_hypervisor.h

@@ -20,6 +20,8 @@
 #include <starpu.h>
 #include <pthread.h>
 
+#warning rename all objects to start with starpu_sched_ctx_hypervisor
+
 /* ioctl properties*/
 #define HYPERVISOR_MAX_IDLE -1
 #define HYPERVISOR_MIN_WORKING -2
@@ -39,7 +41,7 @@ pthread_mutex_t act_hypervisor_mutex;
 #define MAX_IDLE_TIME 5000000000
 #define MIN_WORKING_TIME 500
 
-struct policy_config
+struct starpu_sched_ctx_hypervisor_policy_config
 {
 	/* underneath this limit we cannot resize */
 	int min_nworkers;
@@ -71,7 +73,7 @@ struct policy_config
 	double empty_ctx_max_idle[STARPU_NMAXWORKERS];
 };
 
-struct resize_ack
+struct starpu_sched_ctx_hypervisor_resize_ack
 {
 	int receiver_sched_ctx;
 	int *moved_workers;
@@ -79,10 +81,10 @@ struct resize_ack
 	int *acked_workers;
 };
 
-struct sched_ctx_wrapper
+struct starpu_sched_ctx_hypervisor_wrapper
 {
 	unsigned sched_ctx;
-	struct policy_config *config;
+	struct starpu_sched_ctx_hypervisor_policy_config *config;
 	double current_idle_time[STARPU_NMAXWORKERS];
 	int worker_to_be_removed[STARPU_NMAXWORKERS];
 	int pushed_tasks[STARPU_NMAXWORKERS];
@@ -93,7 +95,7 @@ struct sched_ctx_wrapper
 	double submitted_flops;
 	double remaining_flops;
 	double start_time;
-	struct resize_ack resize_ack;
+	struct starpu_sched_ctx_hypervisor_resize_ack resize_ack;
 	pthread_mutex_t mutex;
 };
 
@@ -101,7 +103,7 @@ struct sched_ctx_wrapper
  * FIXME: Remove when no longer exposed.  */
 struct resize_request_entry;
 
-struct hypervisor_policy
+struct starpu_sched_ctx_hypervisor_policy
 {
 	const char* name;
 	unsigned custom;
@@ -116,7 +118,7 @@ struct hypervisor_policy
 	void (*handle_submitted_job)(struct starpu_task *task, unsigned footprint);
 };
 
-struct starpu_performance_counters* sched_ctx_hypervisor_init(struct hypervisor_policy* policy);
+struct starpu_performance_counters* sched_ctx_hypervisor_init(struct starpu_sched_ctx_hypervisor_policy* policy);
 
 void sched_ctx_hypervisor_shutdown(void);
 
@@ -136,7 +138,7 @@ void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...);
 
 void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config);
 
-struct policy_config* sched_ctx_hypervisor_get_config(unsigned sched_ctx);
+struct starpu_sched_ctx_hypervisor_policy_config* sched_ctx_hypervisor_get_config(unsigned sched_ctx);
 
 int* sched_ctx_hypervisor_get_sched_ctxs();
 
@@ -144,11 +146,11 @@ int sched_ctx_hypervisor_get_nsched_ctxs();
 
 int get_nworkers_ctx(unsigned sched_ctx, enum starpu_archtype arch);
 
-struct sched_ctx_wrapper* sched_ctx_hypervisor_get_wrapper(unsigned sched_ctx);
+struct starpu_sched_ctx_hypervisor_wrapper* sched_ctx_hypervisor_get_wrapper(unsigned sched_ctx);
 
-double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct sched_ctx_wrapper* sc_w);
+double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct starpu_sched_ctx_hypervisor_wrapper* sc_w);
 
-double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct sched_ctx_wrapper* sc_w);
+double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct starpu_sched_ctx_hypervisor_wrapper* sc_w);
 
 const char* sched_ctx_hypervisor_get_policy();
 

+ 1 - 1
sched_ctx_hypervisor/src/hypervisor_policies/app_driven_policy.c

@@ -23,7 +23,7 @@ static void app_driven_handle_post_exec_hook(unsigned sched_ctx, int task_tag)
 	_resize_to_unknown_receiver(sched_ctx, 1);
 }
 
-struct hypervisor_policy app_driven_policy =
+struct starpu_sched_ctx_hypervisor_policy app_driven_policy =
 {
 	.size_ctxs = NULL,
 	.handle_poped_task = NULL,

+ 11 - 11
sched_ctx_hypervisor/src/hypervisor_policies/gflops_rate_policy.c

@@ -18,7 +18,7 @@
 
 static double _get_total_elapsed_flops_per_sched_ctx(unsigned sched_ctx)
 {
-	struct sched_ctx_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 	double ret_val = 0.0;
 	int i;
 	for(i = 0; i < STARPU_NMAXWORKERS; i++)
@@ -28,7 +28,7 @@ static double _get_total_elapsed_flops_per_sched_ctx(unsigned sched_ctx)
 
 double _get_exp_end(unsigned sched_ctx)
 {
-	struct sched_ctx_wrapper *sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_wrapper *sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 	double elapsed_flops = sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
 
 	if( elapsed_flops >= 1.0)
@@ -44,7 +44,7 @@ double _get_exp_end(unsigned sched_ctx)
 /* computes the instructions left to be executed out of the total instructions to execute */
 double _get_flops_left_pct(unsigned sched_ctx)
 {
-	struct sched_ctx_wrapper *wrapper = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_wrapper *wrapper = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 	double total_elapsed_flops = _get_total_elapsed_flops_per_sched_ctx(sched_ctx);
 	if(wrapper->total_flops == total_elapsed_flops || total_elapsed_flops > wrapper->total_flops)
 		return 0.0;
@@ -55,8 +55,8 @@ double _get_flops_left_pct(unsigned sched_ctx)
 /* select the workers needed to be moved in order to force the sender and the receiver context to finish simultaneously */
 static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, int *nworkers)
 {
-	struct sched_ctx_wrapper* sender_sc_w = sched_ctx_hypervisor_get_wrapper(sender_sched_ctx);
-	struct sched_ctx_wrapper* receiver_sc_w = sched_ctx_hypervisor_get_wrapper(receiver_sched_ctx);
+	struct starpu_sched_ctx_hypervisor_wrapper* sender_sc_w = sched_ctx_hypervisor_get_wrapper(sender_sched_ctx);
+	struct starpu_sched_ctx_hypervisor_wrapper* receiver_sc_w = sched_ctx_hypervisor_get_wrapper(receiver_sched_ctx);
         int *workers = NULL;
         double v_receiver = _get_ctx_velocity(receiver_sc_w);
         double receiver_remainig_flops = receiver_sc_w->remaining_flops;
@@ -69,11 +69,11 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
 /*             v_receiver, v_for_rctx, sender_v_cpu, nworkers_needed); */
         if(nworkers_needed > 0)
         {
-                struct policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
+                struct starpu_sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
                 unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
                 unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
                 unsigned sender_nworkers = starpu_get_nworkers_of_sched_ctx(sender_sched_ctx);
-                struct policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+                struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                 unsigned nworkers_ctx = starpu_get_nworkers_of_sched_ctx(receiver_sched_ctx);
 
                 if(nworkers_needed < (potential_moving_cpus + 5 * potential_moving_gpus))
@@ -149,7 +149,7 @@ static unsigned _gflops_rate_resize(unsigned sender_sched_ctx, unsigned receiver
                 {
                         sched_ctx_hypervisor_move_workers(sender_sched_ctx, receiver_sched_ctx, workers_to_move, nworkers_to_move, 0);
 
-                        struct policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+                        struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                         int i;
                         for(i = 0; i < nworkers_to_move; i++)
                                 new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -256,7 +256,7 @@ static void gflops_rate_resize(unsigned sched_ctx)
 			double slowest_flops_left_pct = _get_flops_left_pct(slowest_sched_ctx);
 			if(slowest_flops_left_pct != 0.0f)
 			{
-				struct policy_config* config = sched_ctx_hypervisor_get_config(sched_ctx);
+				struct starpu_sched_ctx_hypervisor_policy_config* config = sched_ctx_hypervisor_get_config(sched_ctx);
 				config->min_nworkers = 0;
 				config->max_nworkers = 0;
 				printf("ctx %d finished & gives away the res to %d; slow_left %lf\n", sched_ctx, slowest_sched_ctx, slowest_flops_left_pct);
@@ -280,7 +280,7 @@ static void gflops_rate_resize(unsigned sched_ctx)
 			if(fast_flops_left_pct < 0.8)
 			{
 
-				struct sched_ctx_wrapper *sc_w = sched_ctx_hypervisor_get_wrapper(slowest_sched_ctx);
+				struct starpu_sched_ctx_hypervisor_wrapper *sc_w = sched_ctx_hypervisor_get_wrapper(slowest_sched_ctx);
 				double elapsed_flops = sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
 				if((elapsed_flops/sc_w->total_flops) > 0.1)
 					_gflops_rate_resize(fastest_sched_ctx, slowest_sched_ctx, 0);
@@ -294,7 +294,7 @@ void gflops_rate_handle_poped_task(unsigned sched_ctx, int worker)
 	gflops_rate_resize(sched_ctx);
 }
 
-struct hypervisor_policy gflops_rate_policy = {
+struct starpu_sched_ctx_hypervisor_policy gflops_rate_policy = {
 	.size_ctxs = NULL,
 	.handle_poped_task = gflops_rate_handle_poped_task,
 	.handle_pushed_task = NULL,

+ 3 - 3
sched_ctx_hypervisor/src/hypervisor_policies/idle_policy.c

@@ -30,8 +30,8 @@ unsigned worker_belong_to_other_sched_ctx(unsigned sched_ctx, int worker)
 
 void idle_handle_idle_cycle(unsigned sched_ctx, int worker)
 {
-	struct sched_ctx_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
-	struct policy_config *config = sc_w->config;
+	struct starpu_sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_policy_config *config = sc_w->config;
 	if(config != NULL &&  sc_w->current_idle_time[worker] > config->max_idle[worker])
 	{
 		if(worker_belong_to_other_sched_ctx(sched_ctx, worker))
@@ -41,7 +41,7 @@ void idle_handle_idle_cycle(unsigned sched_ctx, int worker)
 	}
 }
 
-struct hypervisor_policy idle_policy =
+struct starpu_sched_ctx_hypervisor_policy idle_policy =
 {
 	.size_ctxs = NULL,
 	.handle_poped_task = NULL,

+ 4 - 4
sched_ctx_hypervisor/src/hypervisor_policies/lp2_policy.c

@@ -179,7 +179,7 @@ static void _redistribute_resources_in_ctxs(int ns, int nw, int nt, double w_in_
 		}
 
 		sched_ctx_hypervisor_add_workers_to_sched_ctx(workers_to_add, nadd, sched_ctxs[s]);
-		struct policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctxs[s]);
+		struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctxs[s]);
 		int i;
 		for(i = 0; i < nadd; i++)
 			new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ? new_config->max_idle[workers_to_add[i]] :  new_config->new_workers_max_idle;
@@ -246,7 +246,7 @@ static void size_if_required()
 
 	if(has_req)
 	{
-		struct sched_ctx_wrapper* sc_w = NULL;
+		struct starpu_sched_ctx_hypervisor_wrapper* sc_w = NULL;
 		unsigned ready_to_size = 1;
 		int s;
 		pthread_mutex_lock(&act_hypervisor_mutex);
@@ -503,7 +503,7 @@ static double _find_tmax(double t1, double t2)
 
 static void lp2_handle_poped_task(unsigned sched_ctx, int worker)
 {
-	struct sched_ctx_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 
 	int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
@@ -580,7 +580,7 @@ static void lp2_size_ctxs(int *sched_ctxs, int nsched_ctxs , int *workers, int n
 	sched_ctx_hypervisor_save_size_req(sched_ctxs, nsched_ctxs, workers, nworkers);
 }
 
-struct hypervisor_policy lp2_policy = {
+struct starpu_sched_ctx_hypervisor_policy lp2_policy = {
 	.size_ctxs = lp2_size_ctxs,
 	.handle_poped_task = lp2_handle_poped_task,
 	.handle_pushed_task = NULL,

+ 1 - 1
sched_ctx_hypervisor/src/hypervisor_policies/lp_policy.c

@@ -86,7 +86,7 @@ static void lp_size_ctxs(int *sched_ctxs, int ns, int *workers, int nworkers)
 }
 
 #ifdef STARPU_HAVE_GLPK_H
-struct hypervisor_policy lp_policy = {
+struct starpu_sched_ctx_hypervisor_policy lp_policy = {
 	.size_ctxs = lp_size_ctxs,
 	.handle_poped_task = lp_handle_poped_task,
 	.handle_pushed_task = NULL,

+ 2 - 2
sched_ctx_hypervisor/src/hypervisor_policies/lp_tools.c

@@ -182,7 +182,7 @@ double _lp_get_nworkers_per_ctx(int nsched_ctxs, int ntypes_of_workers, double r
 	double flops[nsched_ctxs];
 #endif
 	int i = 0;
-	struct sched_ctx_wrapper* sc_w;
+	struct starpu_sched_ctx_hypervisor_wrapper* sc_w;
 	for(i = 0; i < nsched_ctxs; i++)
 	{
 		sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctxs[i]);
@@ -392,7 +392,7 @@ void _lp_distribute_resources_in_ctxs(int* sched_ctxs, int ns, int nw, int res_r
 				{
 					sched_ctx_hypervisor_add_workers_to_sched_ctx(workers_to_add, nworkers_to_add, current_sched_ctxs[s]);
 					sched_ctx_hypervisor_start_resize(current_sched_ctxs[s]);
-					struct policy_config *new_config = sched_ctx_hypervisor_get_config(current_sched_ctxs[s]);
+					struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(current_sched_ctxs[s]);
 					int i;
 					for(i = 0; i < nworkers_to_add; i++)
 						new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ? new_config->max_idle[workers_to_add[i]] :  new_config->new_workers_max_idle;

+ 13 - 13
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.c

@@ -21,7 +21,7 @@
 
 static int _compute_priority(unsigned sched_ctx)
 {
-	struct policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
 
 	int total_priority = 0;
 
@@ -53,7 +53,7 @@ unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move)
 	int nsched_ctxs = sched_ctx_hypervisor_get_nsched_ctxs();
 
 
-	struct policy_config *config = NULL;
+	struct starpu_sched_ctx_hypervisor_policy_config *config = NULL;
 
 	for(i = 0; i < nsched_ctxs; i++)
 	{
@@ -101,8 +101,8 @@ int* _get_first_workers_in_list(int *workers, int nall_workers,  unsigned *nwork
 /* get first nworkers with the highest idle time in the context */
 int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype arch)
 {
-	struct sched_ctx_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
-	struct policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
 
 	int *curr_workers = (int*)malloc((*nworkers) * sizeof(int));
 	int i;
@@ -176,7 +176,7 @@ int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype
 }
 
 /* get the number of workers in the context that are allowed to be moved (that are not fixed) */
-unsigned _get_potential_nworkers(struct policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
+unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
 {
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx);
 
@@ -206,7 +206,7 @@ unsigned _get_potential_nworkers(struct policy_config *config, unsigned sched_ct
    - on the resource granularity imposed by the user for the resizing process*/
 int _get_nworkers_to_move(unsigned req_sched_ctx)
 {
-       	struct policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
+       	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
 
@@ -268,7 +268,7 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 			else
 			{
 				poor_sched_ctx = receiver_sched_ctx;
-				struct policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
+				struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				unsigned nworkers = starpu_get_nworkers_of_sched_ctx(poor_sched_ctx);
 				unsigned nshared_workers = starpu_get_nshared_workers(sender_sched_ctx, poor_sched_ctx);
 				if((nworkers+nworkers_to_move-nshared_workers) > config->max_nworkers)
@@ -280,7 +280,7 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 				int *workers_to_move = _get_first_workers(sender_sched_ctx, &nworkers_to_move, STARPU_ANY_WORKER);
 				sched_ctx_hypervisor_move_workers(sender_sched_ctx, poor_sched_ctx, workers_to_move, nworkers_to_move, now);
 
-				struct policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
+				struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				int i;
 				for(i = 0; i < nworkers_to_move; i++)
 					new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -301,7 +301,7 @@ unsigned _resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now)
 	return _resize(sender_sched_ctx, STARPU_NMAX_SCHED_CTXS, 0, now);
 }
 
-static double _get_elapsed_flops(struct sched_ctx_wrapper* sc_w, int *npus, enum starpu_archtype req_arch)
+static double _get_elapsed_flops(struct starpu_sched_ctx_hypervisor_wrapper* sc_w, int *npus, enum starpu_archtype req_arch)
 {
 	double ret_val = 0.0;
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sc_w->sched_ctx);
@@ -327,7 +327,7 @@ static double _get_elapsed_flops(struct sched_ctx_wrapper* sc_w, int *npus, enum
 	return ret_val;
 }
 
-double _get_ctx_velocity(struct sched_ctx_wrapper* sc_w)
+double _get_ctx_velocity(struct starpu_sched_ctx_hypervisor_wrapper* sc_w)
 {
         double elapsed_flops = sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
 	double total_elapsed_flops = sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(sc_w);
@@ -344,7 +344,7 @@ double _get_ctx_velocity(struct sched_ctx_wrapper* sc_w)
 }
 
 /* compute an average value of the cpu velocity */
-double _get_velocity_per_worker_type(struct sched_ctx_wrapper* sc_w, enum starpu_archtype arch)
+double _get_velocity_per_worker_type(struct starpu_sched_ctx_hypervisor_wrapper* sc_w, enum starpu_archtype arch)
 {
         int npus = 0;
         double elapsed_flops = _get_elapsed_flops(sc_w, &npus, arch);
@@ -366,8 +366,8 @@ int _velocity_gap_btw_ctxs()
 	int *sched_ctxs = sched_ctx_hypervisor_get_sched_ctxs();
 	int nsched_ctxs = sched_ctx_hypervisor_get_nsched_ctxs();
 	int i = 0, j = 0;
-	struct sched_ctx_wrapper* sc_w;
-	struct sched_ctx_wrapper* other_sc_w;
+	struct starpu_sched_ctx_hypervisor_wrapper* sc_w;
+	struct starpu_sched_ctx_hypervisor_wrapper* other_sc_w;
 
 	for(i = 0; i < nsched_ctxs; i++)
 	{

+ 3 - 3
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.h

@@ -40,7 +40,7 @@ int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype
 
 int* _get_first_workers_in_list(int *workers, int nall_workers,  unsigned *nworkers, enum starpu_archtype arch);
 
-unsigned _get_potential_nworkers(struct policy_config *config, unsigned sched_ctx, enum starpu_archtype arch);
+unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch);
 
 int _get_nworkers_to_move(unsigned req_sched_ctx);
 
@@ -48,9 +48,9 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 
 unsigned _resize_to_unknown_receiver(unsigned sender_sched_ctx, unsigned now);
 
-double _get_ctx_velocity(struct sched_ctx_wrapper* sc_w);
+double _get_ctx_velocity(struct starpu_sched_ctx_hypervisor_wrapper* sc_w);
 
-double _get_velocity_per_worker_type(struct sched_ctx_wrapper* sc_w, enum starpu_archtype arch);
+double _get_velocity_per_worker_type(struct starpu_sched_ctx_hypervisor_wrapper* sc_w, enum starpu_archtype arch);
 
 int _velocity_gap_btw_ctxs(void);
 

+ 15 - 15
sched_ctx_hypervisor/src/hypervisor_policies/simple_policy.c

@@ -19,7 +19,7 @@
 
 static int _compute_priority(unsigned sched_ctx)
 {
-	struct policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
 
 	int total_priority = 0;
 
@@ -50,7 +50,7 @@ static unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_mov
 	int nsched_ctxs = sched_ctx_hypervisor_get_nsched_ctxs();
 
 
-	struct policy_config *config = NULL;
+	struct starpu_sched_ctx_hypervisor_policy_config *config = NULL;
 
 	for(i = 0; i < nsched_ctxs; i++)
 	{
@@ -75,7 +75,7 @@ static unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_mov
 
 int* _get_first_workers(unsigned sched_ctx, unsigned *nworkers, enum starpu_archtype arch)
 {
-	struct policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
+	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(sched_ctx);
 
 	int *curr_workers = (int*)malloc((*nworkers) * sizeof(int));
 	int i;
@@ -148,7 +148,7 @@ int* _get_first_workers(unsigned sched_ctx, unsigned *nworkers, enum starpu_arch
 	return curr_workers;
 }
 
-static unsigned _get_potential_nworkers(struct policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
+static unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_policy_config *config, unsigned sched_ctx, enum starpu_archtype arch)
 {
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx);
 
@@ -175,7 +175,7 @@ static unsigned _get_potential_nworkers(struct policy_config *config, unsigned s
 
 static unsigned _get_nworkers_to_move(unsigned req_sched_ctx)
 {
-       	struct policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
+       	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
 
@@ -236,7 +236,7 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 			else
 			{
 				poor_sched_ctx = receiver_sched_ctx;
-				struct policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
+				struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				unsigned nworkers = starpu_get_nworkers_of_sched_ctx(poor_sched_ctx);
 				unsigned nshared_workers = starpu_get_nshared_workers(sender_sched_ctx, poor_sched_ctx);
 				if((nworkers+nworkers_to_move-nshared_workers) > config->max_nworkers)
@@ -250,7 +250,7 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 				int *workers_to_move = _get_first_workers(sender_sched_ctx, &nworkers_to_move, 0);
 				sched_ctx_hypervisor_move_workers(sender_sched_ctx, poor_sched_ctx, workers_to_move, nworkers_to_move);
 
-				struct policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
+				struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				int i;
 				for(i = 0; i < nworkers_to_move; i++)
 					new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -282,11 +282,11 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
 /*             v_receiver, v_for_rctx, sender_v_cpu, nworkers_needed); */
         if(nworkers_needed > 0)
         {
-                struct policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
+                struct starpu_sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
                 unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
                 unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
                 unsigned sender_nworkers = starpu_get_nworkers_of_sched_ctx(sender_sched_ctx);
-                struct policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+                struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                 unsigned nworkers_ctx = starpu_get_nworkers_of_sched_ctx(receiver_sched_ctx);
 
                 if(nworkers_needed < (potential_moving_cpus + 5 * potential_moving_gpus))
@@ -353,7 +353,7 @@ static unsigned _simple_resize2(unsigned sender_sched_ctx, unsigned receiver_sch
                 {
                         sched_ctx_hypervisor_move_workers(sender_sched_ctx, receiver_sched_ctx, workers_to_move, nworkers_to_move);
 
-                        struct policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+                        struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                         int i;
                         for(i = 0; i < nworkers_to_move; i++)
                                 new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -374,7 +374,7 @@ static unsigned simple_resize(unsigned sender_sched_ctx)
 
 static void simple_manage_idle_time(unsigned req_sched_ctx, int worker, double idle_time)
 {
-       	struct policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
+       	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
 
 	if(config != NULL && idle_time > config->max_idle[worker])
 		simple_resize(req_sched_ctx);
@@ -473,7 +473,7 @@ static void simple_manage_gflops_rate(unsigned sched_ctx)
 			printf("ctx %d finished & gives away the res to %d; slow_left %lf\n", sched_ctx, slowest_sched_ctx, slowest_flops_left_pct);
 			if(slowest_flops_left_pct != 0.0f)
 			{
-				struct policy_config* config = sched_ctx_hypervisor_get_config(sched_ctx);
+				struct starpu_sched_ctx_hypervisor_policy_config* config = sched_ctx_hypervisor_get_config(sched_ctx);
 				config->min_nworkers = 0;
 				config->max_nworkers = 0;
 				_simple_resize(sched_ctx, slowest_sched_ctx, 1);
@@ -503,21 +503,21 @@ static void simple_manage_gflops_rate(unsigned sched_ctx)
 }
 
 
-struct hypervisor_policy idle_policy =
+struct starpu_sched_ctx_hypervisor_policy idle_policy =
 {
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,
 	.resize = simple_resize,
 };
 
-struct hypervisor_policy app_driven_policy =
+struct starpu_sched_ctx_hypervisor_policy app_driven_policy =
 {
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,
 	.resize = simple_resize,
 };
 
-struct hypervisor_policy gflops_rate_policy =
+struct starpu_sched_ctx_hypervisor_policy gflops_rate_policy =
 {
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,

+ 8 - 8
sched_ctx_hypervisor/src/sched_ctx_config.c

@@ -16,9 +16,9 @@
 
 #include <sched_ctx_hypervisor_intern.h>
 
-static struct policy_config* _create_config(void)
+static struct starpu_sched_ctx_hypervisor_policy_config* _create_config(void)
 {
-	struct policy_config *config = (struct policy_config *)malloc(sizeof(struct policy_config));
+	struct starpu_sched_ctx_hypervisor_policy_config *config = (struct starpu_sched_ctx_hypervisor_policy_config *)malloc(sizeof(struct starpu_sched_ctx_hypervisor_policy_config));
 	config->min_nworkers = -1;
 	config->max_nworkers = -1;
 	config->new_workers_max_idle = -1.0;
@@ -37,7 +37,7 @@ static struct policy_config* _create_config(void)
 	return config;
 }
 
-static void _update_config(struct policy_config *old, struct policy_config* new)
+static void _update_config(struct starpu_sched_ctx_hypervisor_policy_config *old, struct starpu_sched_ctx_hypervisor_policy_config* new)
 {
 	old->min_nworkers = new->min_nworkers != -1 ? new->min_nworkers : old->min_nworkers ;
 	old->max_nworkers = new->max_nworkers != -1 ? new->max_nworkers : old->max_nworkers ;
@@ -69,7 +69,7 @@ void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config)
 
 void _add_config(unsigned sched_ctx)
 {
-	struct policy_config *config = _create_config();
+	struct starpu_sched_ctx_hypervisor_policy_config *config = _create_config();
 	config->min_nworkers = 0;
 	config->max_nworkers = STARPU_NMAXWORKERS;
 	config->new_workers_max_idle = MAX_IDLE_TIME;
@@ -93,14 +93,14 @@ void _remove_config(unsigned sched_ctx)
 	sched_ctx_hypervisor_set_config(sched_ctx, NULL);
 }
 
-struct policy_config* sched_ctx_hypervisor_get_config(unsigned sched_ctx)
+struct starpu_sched_ctx_hypervisor_policy_config* sched_ctx_hypervisor_get_config(unsigned sched_ctx)
 {
 	return hypervisor.sched_ctx_w[sched_ctx].config;
 }
 
-static struct policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
+static struct starpu_sched_ctx_hypervisor_policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
 {
-	struct policy_config *config = NULL;
+	struct starpu_sched_ctx_hypervisor_policy_config *config = NULL;
 
 	if(later)
 		config = _create_config();
@@ -229,7 +229,7 @@ void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...)
 	va_start(varg_list, sched_ctx);
 
 	/* if config not null => save hypervisor configuration and consider it later */
-	struct policy_config *config = _ioctl(sched_ctx, varg_list, (task_tag > 0));
+	struct starpu_sched_ctx_hypervisor_policy_config *config = _ioctl(sched_ctx, varg_list, (task_tag > 0));
 	if(config != NULL)
 	{
 		struct configuration_entry *entry;

+ 24 - 24
sched_ctx_hypervisor/src/sched_ctx_hypervisor.c

@@ -28,16 +28,16 @@ static void notify_post_exec_hook(unsigned sched_ctx, int taskid);
 static void notify_idle_end(unsigned sched_ctx, int  worker);
 static void notify_submitted_job(struct starpu_task *task, unsigned footprint);
 
-extern struct hypervisor_policy idle_policy;
-extern struct hypervisor_policy app_driven_policy;
-extern struct hypervisor_policy gflops_rate_policy;
+extern struct starpu_sched_ctx_hypervisor_policy idle_policy;
+extern struct starpu_sched_ctx_hypervisor_policy app_driven_policy;
+extern struct starpu_sched_ctx_hypervisor_policy gflops_rate_policy;
 #ifdef STARPU_HAVE_GLPK_H
-extern struct hypervisor_policy lp_policy;
-extern struct hypervisor_policy lp2_policy;
+extern struct starpu_sched_ctx_hypervisor_policy lp_policy;
+extern struct starpu_sched_ctx_hypervisor_policy lp2_policy;
 #endif // STARPU_HAVE_GLPK_H
 
 
-static struct hypervisor_policy *predefined_policies[] =
+static struct starpu_sched_ctx_hypervisor_policy *predefined_policies[] =
 {
         &idle_policy,
 	&app_driven_policy,
@@ -48,7 +48,7 @@ static struct hypervisor_policy *predefined_policies[] =
 	&gflops_rate_policy
 };
 
-static void _load_hypervisor_policy(struct hypervisor_policy *policy)
+static void _load_hypervisor_policy(struct starpu_sched_ctx_hypervisor_policy *policy)
 {
 	STARPU_ASSERT(policy);
 
@@ -63,7 +63,7 @@ static void _load_hypervisor_policy(struct hypervisor_policy *policy)
 }
 
 
-static struct hypervisor_policy *_find_hypervisor_policy_from_name(const char *policy_name)
+static struct starpu_sched_ctx_hypervisor_policy *_find_hypervisor_policy_from_name(const char *policy_name)
 {
 
 	if (!policy_name)
@@ -72,7 +72,7 @@ static struct hypervisor_policy *_find_hypervisor_policy_from_name(const char *p
 	unsigned i;
 	for (i = 0; i < sizeof(predefined_policies)/sizeof(predefined_policies[0]); i++)
 	{
-		struct hypervisor_policy *p;
+		struct starpu_sched_ctx_hypervisor_policy *p;
 		p = predefined_policies[i];
 		if (p->name)
 		{
@@ -88,9 +88,9 @@ static struct hypervisor_policy *_find_hypervisor_policy_from_name(const char *p
 	return NULL;
 }
 
-static struct hypervisor_policy *_select_hypervisor_policy(struct hypervisor_policy* hypervisor_policy)
+static struct starpu_sched_ctx_hypervisor_policy *_select_hypervisor_policy(struct starpu_sched_ctx_hypervisor_policy* hypervisor_policy)
 {
-	struct hypervisor_policy *selected_policy = NULL;
+	struct starpu_sched_ctx_hypervisor_policy *selected_policy = NULL;
 
 	if(hypervisor_policy && hypervisor_policy->custom)
 		return hypervisor_policy;
@@ -120,7 +120,7 @@ static struct hypervisor_policy *_select_hypervisor_policy(struct hypervisor_pol
 
 
 /* initializez the performance counters that starpu will use to retrive hints for resizing */
-struct starpu_performance_counters* sched_ctx_hypervisor_init(struct hypervisor_policy *hypervisor_policy)
+struct starpu_performance_counters* sched_ctx_hypervisor_init(struct starpu_sched_ctx_hypervisor_policy *hypervisor_policy)
 {
 	hypervisor.min_tasks = 0;
 	hypervisor.nsched_ctxs = 0;
@@ -157,7 +157,7 @@ struct starpu_performance_counters* sched_ctx_hypervisor_init(struct hypervisor_
 		}
 	}
 
-	struct hypervisor_policy *selected_hypervisor_policy = _select_hypervisor_policy(hypervisor_policy);
+	struct starpu_sched_ctx_hypervisor_policy *selected_hypervisor_policy = _select_hypervisor_policy(hypervisor_policy);
 	_load_hypervisor_policy(selected_hypervisor_policy);
 
 	perf_counters = (struct starpu_performance_counters*)malloc(sizeof(struct starpu_performance_counters));
@@ -399,7 +399,7 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 				pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 			}
 		}
-		struct policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
+		struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
 		int i;
 		for(i = 0; i < nworkers_to_move; i++)
 			new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
@@ -418,7 +418,7 @@ void sched_ctx_hypervisor_add_workers_to_sched_ctx(int* workers_to_add, unsigned
 /* 			printf(" %d", workers_to_add[j]); */
 /* 		printf("\n"); */
 		starpu_add_workers_to_sched_ctx(workers_to_add, nworkers_to_add, sched_ctx);
-		struct policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctx);
+		struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctx);
 		int i;
 		for(i = 0; i < nworkers_to_add; i++)
 			new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ? new_config->max_idle[workers_to_add[i]] :  new_config->new_workers_max_idle;
@@ -488,7 +488,7 @@ static void _set_elapsed_flops_per_sched_ctx(unsigned sched_ctx, double val)
 		hypervisor.sched_ctx_w[sched_ctx].elapsed_flops[i] = val;
 }
 
-double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct sched_ctx_wrapper* sc_w)
+double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct starpu_sched_ctx_hypervisor_wrapper* sc_w)
 {
 	double ret_val = 0.0;
 	int i;
@@ -497,7 +497,7 @@ double sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(struct sched_ctx_wra
 	return ret_val;
 }
 
-double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct sched_ctx_wrapper* sc_w)
+double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct starpu_sched_ctx_hypervisor_wrapper* sc_w)
 {
 	double ret_val = 0.0;
 	int i;
@@ -511,7 +511,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 	if(worker != -1 && !starpu_worker_belongs_to_sched_ctx(worker, sched_ctx))
 		return 0;
 
-	struct resize_ack *resize_ack = NULL;
+	struct starpu_sched_ctx_hypervisor_resize_ack *resize_ack = NULL;
 	unsigned sender_sched_ctx = STARPU_NMAX_SCHED_CTXS;
 
 	int i;
@@ -519,7 +519,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 	{
 		if(hypervisor.sched_ctxs[i] != STARPU_NMAX_SCHED_CTXS)
 		{
-			struct sched_ctx_wrapper *sc_w = &hypervisor.sched_ctx_w[hypervisor.sched_ctxs[i]];
+			struct starpu_sched_ctx_hypervisor_wrapper *sc_w = &hypervisor.sched_ctx_w[hypervisor.sched_ctxs[i]];
 			pthread_mutex_lock(&sc_w->mutex);
 			unsigned only_remove = 0;
 			if(sc_w->resize_ack.receiver_sched_ctx == -1 && hypervisor.sched_ctxs[i] != sched_ctx &&
@@ -591,8 +591,8 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 				starpu_remove_workers_from_sched_ctx(moved_workers, nmoved_workers, sender_sched_ctx);
 
 				/* info concerning only the gflops_rate strateg */
-				struct sched_ctx_wrapper *sender_sc_w = &hypervisor.sched_ctx_w[sender_sched_ctx];
-				struct sched_ctx_wrapper *receiver_sc_w = &hypervisor.sched_ctx_w[receiver_sched_ctx];
+				struct starpu_sched_ctx_hypervisor_wrapper *sender_sc_w = &hypervisor.sched_ctx_w[sender_sched_ctx];
+				struct starpu_sched_ctx_hypervisor_wrapper *receiver_sc_w = &hypervisor.sched_ctx_w[receiver_sched_ctx];
 
 				double start_time =  starpu_timing_now();
 				sender_sc_w->start_time = start_time;
@@ -655,7 +655,7 @@ static void notify_idle_cycle(unsigned sched_ctx, int worker, double idle_time)
 {
 	if(hypervisor.resize[sched_ctx])
 	{
-		struct sched_ctx_wrapper *sc_w = &hypervisor.sched_ctx_w[sched_ctx];
+		struct starpu_sched_ctx_hypervisor_wrapper *sc_w = &hypervisor.sched_ctx_w[sched_ctx];
 		sc_w->current_idle_time[worker] += idle_time;
 		if(hypervisor.policy.handle_idle_cycle)
 		{
@@ -723,7 +723,7 @@ static void notify_post_exec_hook(unsigned sched_ctx, int task_tag)
 
 		if (entry != NULL)
 		{
-			struct policy_config *config = entry->configuration;
+			struct starpu_sched_ctx_hypervisor_policy_config *config = entry->configuration;
 
 			sched_ctx_hypervisor_set_config(conf_sched_ctx, config);
 			HASH_DEL(hypervisor.configurations[conf_sched_ctx], entry);
@@ -779,7 +779,7 @@ void sched_ctx_hypervisor_size_ctxs(int *sched_ctxs, int nsched_ctxs, int *worke
 		hypervisor.policy.size_ctxs(curr_sched_ctxs, curr_nsched_ctxs, workers, nworkers);
 }
 
-struct sched_ctx_wrapper* sched_ctx_hypervisor_get_wrapper(unsigned sched_ctx)
+struct starpu_sched_ctx_hypervisor_wrapper* sched_ctx_hypervisor_get_wrapper(unsigned sched_ctx)
 {
 	return &hypervisor.sched_ctx_w[sched_ctx];
 }

+ 3 - 3
sched_ctx_hypervisor/src/sched_ctx_hypervisor_intern.h

@@ -46,7 +46,7 @@ struct configuration_entry
 	uint32_t task_tag;
 
 	/* Value: configuration of the scheduling context.  */
-	struct policy_config *configuration;
+	struct starpu_sched_ctx_hypervisor_policy_config *configuration;
 
 	/* Bookkeeping.  */
 	UT_hash_handle hh;
@@ -54,12 +54,12 @@ struct configuration_entry
 
 struct sched_ctx_hypervisor
 {
-	struct sched_ctx_wrapper sched_ctx_w[STARPU_NMAX_SCHED_CTXS];
+	struct starpu_sched_ctx_hypervisor_wrapper sched_ctx_w[STARPU_NMAX_SCHED_CTXS];
 	int sched_ctxs[STARPU_NMAX_SCHED_CTXS];
 	unsigned nsched_ctxs;
 	unsigned resize[STARPU_NMAX_SCHED_CTXS];
 	int min_tasks;
-	struct hypervisor_policy policy;
+	struct starpu_sched_ctx_hypervisor_policy policy;
 
 	struct configuration_entry *configurations[STARPU_NMAX_SCHED_CTXS];