소스 검색

sched_ctx_hypervisor/: add missing copyrights and fix code style

Nathalie Furmento 12 년 전
부모
커밋
75984def69

+ 3 - 3
sched_ctx_hypervisor/examples/Makefile.am

@@ -23,7 +23,7 @@ noinst_HEADERS = 				\
 	sched_ctx_utils/sched_ctx_utils.h
 endif
 
-AM_LDFLAGS = $(top_builddir)/src/libstarpu-1.0.la 
+AM_LDFLAGS = $(top_builddir)/src/libstarpu-1.0.la
 
 LIBS = $(top_builddir)/sched_ctx_hypervisor/src/libsched_ctx_hypervisor.la
 
@@ -49,9 +49,9 @@ cholesky_cholesky_implicit_LDADD =		\
 	$(STARPU_BLAS_LDFLAGS)
 
 app_driven_test_app_driven_test_SOURCES =		\
-	app_driven_test/app_driven_test.c		
+	app_driven_test/app_driven_test.c
 
 app_driven_test_app_driven_test_LDADD =		\
-	$(top_builddir)/sched_ctx_hypervisor/src/libsched_ctx_hypervisor.la 
+	$(top_builddir)/sched_ctx_hypervisor/src/libsched_ctx_hypervisor.la
 
 endif

+ 32 - 18
sched_ctx_hypervisor/examples/app_driven_test/app_driven_test.c

@@ -1,3 +1,19 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2010-2012  INRIA
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
 #include <stdio.h>
 #include <stdint.h>
 #include <starpu.h>
@@ -7,10 +23,9 @@
 
 #define FPRINTF(ofile, fmt, args ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ##args); }} while(0)
 
-
-/* Every implementation of a codelet must have this prototype, the first                                                                                                                                             * argument (buffers) describes the buffers/streams that are managed by the                                                                                                                                       
- * DSM; the second arguments references read-only data that is passed as an                                                                                                                                        
- * argument of the codelet (task->cl_arg). Here, "buffers" is unused as there                                                                                                                                      
+/* Every implementation of a codelet must have this prototype, the first argument (buffers) describes the buffers/streams that are managed by the
+ * DSM; the second arguments references read-only data that is passed as an
+ * argument of the codelet (task->cl_arg). Here, "buffers" is unused as there
  * are no data input/output managed by the DSM (cl.nbuffers = 0) */
 struct params
 {
@@ -20,11 +35,11 @@ struct params
 
 void cpu_func(void *buffers[], void *cl_arg)
 {
-    struct params *params = (struct params *) cl_arg;
+	struct params *params = (struct params *) cl_arg;
 
 	int i;
-	for(i = 0; i < 1000; i++); 
-    FPRINTF(stdout, "Hello world sched_ctx = %d task_tag = %d \n", params->sched_ctx, params->task_tag);
+	for(i = 0; i < 1000; i++);
+	FPRINTF(stdout, "Hello world sched_ctx = %d task_tag = %d \n", params->sched_ctx, params->task_tag);
 }
 
 struct starpu_codelet cl = {};
@@ -36,28 +51,28 @@ void* start_thread(void *arg)
 	starpu_set_sched_ctx(&sched_ctx);
 
 	struct starpu_task *task[10];
-    struct params params[10];
+	struct params params[10];
 	int i;
 	for(i = 0; i < 10; i++)
 	{
 		int j;
 		for(j = 0; j < 1000; j++);
 		task[i] = starpu_task_create();
-		
+
 		cl.where = STARPU_CPU;
 		cl.cpu_funcs[0] = cpu_func;
 		cl.nbuffers = 0;
-		
+
 		task[i]->cl = &cl;
-		
+
 		if(sched_ctx == 1 && i == 5)
 		{
 			task[i]->hypervisor_tag = tag;
 			sched_ctx_hypervisor_ioctl(sched_ctx,
-									   HYPERVISOR_TIME_TO_APPLY, tag,
-									   HYPERVISOR_MIN_WORKERS, 2,
-									   HYPERVISOR_MAX_WORKERS, 12,
-									   HYPERVISOR_NULL);
+						   HYPERVISOR_TIME_TO_APPLY, tag,
+						   HYPERVISOR_MIN_WORKERS, 2,
+						   HYPERVISOR_MAX_WORKERS, 12,
+						   HYPERVISOR_NULL);
 			printf("require resize for sched_ctx %d at tag %d\n", sched_ctx, tag);
 			sched_ctx_hypervisor_resize(sched_ctx, tag);
 		}
@@ -67,10 +82,10 @@ void* start_thread(void *arg)
 
 		task[i]->cl_arg = &params[i];
 		task[i]->cl_arg_size = sizeof(params);
-		
+
 		starpu_task_submit(task[i]);
 	}
-	
+
 	starpu_task_wait_for_all();
 }
 
@@ -95,7 +110,6 @@ int main()
 	unsigned sched_ctx1 = starpu_create_sched_ctx("heft", ressources1, nres1, "sched_ctx1");
 	unsigned sched_ctx2 = starpu_create_sched_ctx("heft", ressources2, nres2, "sched_ctx2");
 
-
 	struct hypervisor_policy policy;
 	policy.custom = 0;
 	policy.name = "app_driven";

+ 23 - 21
sched_ctx_hypervisor/examples/sched_ctx_utils/sched_ctx_utils.c

@@ -29,7 +29,8 @@ unsigned gpu;
 unsigned gpu1;
 unsigned gpu2;
 
-typedef struct {
+typedef struct
+{
 	unsigned id;
 	unsigned ctx;
 	int the_other_ctx;
@@ -41,7 +42,8 @@ typedef struct {
 	float *mat[NSAMPLES];
 } params;
 
-typedef struct {
+typedef struct
+{
 	double flops;
 	double avg_timing;
 } retvals;
@@ -87,7 +89,8 @@ void update_sched_ctx_timing_results(double flops, double avg_timing)
 	rv[*id].avg_timing += avg_timing;
 }
 
-void* start_bench(void *val){
+void* start_bench(void *val)
+{
 	params *p = (params*)val;
 	int i;
 
@@ -98,7 +101,7 @@ void* start_bench(void *val){
 
 	for(i = 0; i < NSAMPLES; i++)
 		p->bench(p->mat[i], p->size, p->nblocks);
-	
+
 	/* if(p->ctx != 0) */
 	/* { */
 	/* 	pthread_mutex_lock(&mut); */
@@ -106,7 +109,7 @@ void* start_bench(void *val){
 	/* 		sched_ctx_hypervisor_unregiser_ctx(p->ctx); */
 	/* 		starpu_delete_sched_ctx(p->ctx, p->the_other_ctx); */
 	/* 	} */
-		
+
 	/* 	first = 0; */
 	/* 	pthread_mutex_unlock(&mut); */
 	/* } */
@@ -136,7 +139,7 @@ void start_2benchs(void (*bench)(float*, unsigned, unsigned))
 	p1.bench = bench;
 	p1.size = size1;
 	p1.nblocks = nblocks1;
-	
+
 	p2.bench = bench;
 	p2.size = size2;
 	p2.nblocks = nblocks2;
@@ -158,7 +161,7 @@ void start_2benchs(void (*bench)(float*, unsigned, unsigned))
 
 	pthread_create(&tid[0], NULL, (void*)start_bench, (void*)&p1);
 	pthread_create(&tid[1], NULL, (void*)start_bench, (void*)&p2);
- 
+
 	pthread_join(tid[0], NULL);
 	pthread_join(tid[1], NULL);
 
@@ -214,7 +217,7 @@ void start_2ndbench(void (*bench)(float*, unsigned, unsigned))
 	{
 		p2.mat[i] = construct_matrix(p2.size);
 	}
-	
+
 	struct timeval start;
 	struct timeval end;
 
@@ -261,14 +264,14 @@ void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 
 
 	for(i = 0; i < 12; i++)
-		p1.workers[i] = i; 
+		p1.workers[i] = i;
 
 	p1.ctx = starpu_create_sched_ctx("heft", p1.workers, nworkers1, "sched_ctx1");
 	starpu_set_perf_counters(p1.ctx, perf_counters);
 	p2.the_other_ctx = (int)p1.ctx;
 	p1.nworkers = nworkers1;
 	sched_ctx_hypervisor_register_ctx(p1.ctx, 0.0);
-	
+
 	/* sched_ctx_hypervisor_ioctl(p1.ctx, */
 	/* 			   HYPERVISOR_MAX_IDLE, p1.workers, p1.nworkers, 5000.0, */
 	/* 			   HYPERVISOR_MAX_IDLE, p1.workers, gpu+gpu1, 100000.0, */
@@ -304,7 +307,7 @@ void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 	p1.the_other_ctx = (int)p2.ctx;
 	p2.nworkers = 0;
 	sched_ctx_hypervisor_register_ctx(p2.ctx, 0.0);
-	
+
 	/* sched_ctx_hypervisor_ioctl(p2.ctx, */
 	/* 			   HYPERVISOR_MAX_IDLE, p2.workers, p2.nworkers, 2000.0, */
 	/* 			   HYPERVISOR_MAX_IDLE, p2.workers, gpu+gpu2, 5000.0, */
@@ -366,7 +369,7 @@ void set_hypervisor_conf(int event, int task_tag)
 /* 				sched_ctx_hypervisor_resize(p1.ctx, task_tag); */
 /* 			} */
 /* 			it++; */
-				
+
 /* 		} */
 /* 	} */
 /* 	else */
@@ -426,7 +429,7 @@ void set_hypervisor_conf(int event, int task_tag)
 	/* 		} */
 	/* 		it2++; */
 	/* 	} */
-		
+
 	/* } else { */
 	/* 	if(event == START_BENCH) */
 	/* 	{ */
@@ -457,7 +460,7 @@ void set_hypervisor_conf(int event, int task_tag)
 	/* 						   HYPERVISOR_TIME_TO_APPLY, task_tag, */
 	/* 						   NULL); */
 	/* 		} */
-			
+
 	/* 		it++; */
 	/* 	} */
 
@@ -485,7 +488,7 @@ void parse_args_ctx(int argc, char **argv)
 			char *argptr;
 			nblocks1 = strtol(argv[++i], &argptr, 10);
 		}
-		
+
 		if (strcmp(argv[i], "-size2") == 0) {
 			char *argptr;
 			size2 = strtol(argv[++i], &argptr, 10);
@@ -499,27 +502,26 @@ void parse_args_ctx(int argc, char **argv)
 		if (strcmp(argv[i], "-cpu1") == 0) {
 			char *argptr;
 			cpu1 = strtol(argv[++i], &argptr, 10);
-		}    
+		}
 
 		if (strcmp(argv[i], "-cpu2") == 0) {
 			char *argptr;
 			cpu2 = strtol(argv[++i], &argptr, 10);
-		}    
+		}
 
 		if (strcmp(argv[i], "-gpu") == 0) {
 			char *argptr;
 			gpu = strtol(argv[++i], &argptr, 10);
-		}    
+		}
 
 		if (strcmp(argv[i], "-gpu1") == 0) {
 			char *argptr;
 			gpu1 = strtol(argv[++i], &argptr, 10);
-		}    
+		}
 
 		if (strcmp(argv[i], "-gpu2") == 0) {
 			char *argptr;
 			gpu2 = strtol(argv[++i], &argptr, 10);
-		}    
+		}
 	}
 }
-

+ 11 - 9
sched_ctx_hypervisor/include/sched_ctx_hypervisor.h

@@ -39,13 +39,14 @@ pthread_mutex_t act_hypervisor_mutex;
 #define MAX_IDLE_TIME 5000000000
 #define MIN_WORKING_TIME 500
 
-struct policy_config {
+struct policy_config
+{
 	/* underneath this limit we cannot resize */
 	int min_nworkers;
 
 	/* above this limit we cannot resize */
 	int max_nworkers;
-	
+
 	/*resize granularity */
 	int granularity;
 
@@ -70,15 +71,16 @@ struct policy_config {
 	double empty_ctx_max_idle[STARPU_NMAXWORKERS];
 };
 
-
-struct resize_ack{
+struct resize_ack
+{
 	int receiver_sched_ctx;
 	int *moved_workers;
 	int nmoved_workers;
 	int *acked_workers;
 };
 
-struct sched_ctx_wrapper {
+struct sched_ctx_wrapper
+{
 	unsigned sched_ctx;
 	struct policy_config *config;
 	double current_idle_time[STARPU_NMAXWORKERS];
@@ -99,7 +101,8 @@ struct sched_ctx_wrapper {
  * FIXME: Remove when no longer exposed.  */
 struct resize_request_entry;
 
-struct hypervisor_policy {
+struct hypervisor_policy
+{
 	const char* name;
 	unsigned custom;
 	void (*size_ctxs)(int *sched_ctxs, int nsched_ctxs , int *workers, int nworkers);
@@ -113,7 +116,6 @@ struct hypervisor_policy {
 	void (*handle_submitted_job)(struct starpu_task *task, unsigned footprint);
 };
 
-
 struct starpu_performance_counters* sched_ctx_hypervisor_init(struct hypervisor_policy* policy);
 
 void sched_ctx_hypervisor_shutdown(void);
@@ -156,9 +158,9 @@ void sched_ctx_hypervisor_remove_workers_from_sched_ctx(int* workers_to_remove,
 
 void sched_ctx_hypervisor_size_ctxs(int *sched_ctxs, int nsched_ctxs, int *workers, int nworkers);
 
-unsigned sched_ctx_hypervisor_get_size_req(int **sched_ctxs, int* nsched_ctxs, int **workers, int *nworkers);	
+unsigned sched_ctx_hypervisor_get_size_req(int **sched_ctxs, int* nsched_ctxs, int **workers, int *nworkers);
 
-void sched_ctx_hypervisor_save_size_req(int *sched_ctxs, int nsched_ctxs, int *workers, int nworkers);	
+void sched_ctx_hypervisor_save_size_req(int *sched_ctxs, int nsched_ctxs, int *workers, int nworkers);
 
 void sched_ctx_hypervisor_free_size_req(void);
 

+ 1 - 1
sched_ctx_hypervisor/src/Makefile.am

@@ -32,7 +32,7 @@ libsched_ctx_hypervisor_la_SOURCES = 			\
 	hypervisor_policies/app_driven_policy.c		\
 	hypervisor_policies/gflops_rate_policy.c	\
 	hypervisor_policies/lp_policy.c			\
-	hypervisor_policies/lp2_policy.c		
+	hypervisor_policies/lp2_policy.c
 
 noinst_HEADERS = sched_ctx_hypervisor_intern.h		\
 	hypervisor_policies/policy_tools.h		\

+ 2 - 1
sched_ctx_hypervisor/src/hypervisor_policies/app_driven_policy.c

@@ -23,7 +23,8 @@ static void app_driven_handle_post_exec_hook(unsigned sched_ctx, int task_tag)
 	_resize_to_unknown_receiver(sched_ctx, 1);
 }
 
-struct hypervisor_policy app_driven_policy = {
+struct hypervisor_policy app_driven_policy =
+{
 	.size_ctxs = NULL,
 	.handle_poped_task = NULL,
 	.handle_pushed_task = NULL,

+ 4 - 4
sched_ctx_hypervisor/src/hypervisor_policies/gflops_rate_policy.c

@@ -48,7 +48,7 @@ double _get_flops_left_pct(unsigned sched_ctx)
 	double total_elapsed_flops = _get_total_elapsed_flops_per_sched_ctx(sched_ctx);
 	if(wrapper->total_flops == total_elapsed_flops || total_elapsed_flops > wrapper->total_flops)
 		return 0.0;
-       
+
 	return (wrapper->total_flops - total_elapsed_flops)/wrapper->total_flops;
 }
 
@@ -113,10 +113,10 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
                 }
 		else
                 {
-			/*if the needed number of workers is to big we only move the number of workers 
+			/*if the needed number of workers is too big we only move the number of workers
 			  corresponding to the granularity set by the user */
                         int nworkers_to_move = _get_nworkers_to_move(sender_sched_ctx);
-			
+
                         if(sender_nworkers - nworkers_to_move >= sender_config->min_nworkers)
                         {
                                 unsigned nshared_workers = starpu_get_nshared_workers(sender_sched_ctx, receiver_sched_ctx);
@@ -246,7 +246,7 @@ static void gflops_rate_resize(unsigned sched_ctx)
 	_get_exp_end(sched_ctx);
 	double flops_left_pct = _get_flops_left_pct(sched_ctx);
 
-	/* if the context finished all the instructions it had to execute 
+	/* if the context finished all the instructions it had to execute
 	 we move all the resources to the slowest context */
 	if(flops_left_pct == 0.0f)
 	{

+ 2 - 1
sched_ctx_hypervisor/src/hypervisor_policies/idle_policy.c

@@ -41,7 +41,8 @@ void idle_handle_idle_cycle(unsigned sched_ctx, int worker)
 	}
 }
 
-struct hypervisor_policy idle_policy = {
+struct hypervisor_policy idle_policy =
+{
 	.size_ctxs = NULL,
 	.handle_poped_task = NULL,
 	.handle_pushed_task = NULL,

+ 23 - 24
sched_ctx_hypervisor/src/hypervisor_policies/lp2_policy.c

@@ -23,10 +23,10 @@ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
 static double _glp_resolve(int ns, int nw, int nt, double tasks[nw][nt], double tmax, double w_in_s[ns][nw], int *in_sched_ctxs, int *workers);
 static double _find_tmax(double t1, double t2);
 static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, double w_in_s[ns][nw], double tasks[nw][nt], int *sched_ctxs, int *workers)
-{	
+{
 	double draft_tasks[nw][nt];
 	double draft_w_in_s[ns][nw];
-	
+
 	int w,t, s;
 	for(w = 0; w < nw; w++)
 		for(t = 0; t < nt; t++)
@@ -34,7 +34,7 @@ static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, dou
 			tasks[w][t] = 0.0;
 			draft_tasks[w][t] = 0.0;
 		}
-	
+
 	for(s = 0; s < ns; s++)
 		for(w = 0; w < nw; w++)
 		{
@@ -42,11 +42,11 @@ static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, dou
 			draft_w_in_s[s][w] = 0.0;
 		}
 
-	/* smallest possible tmax, difficult to obtain as we 
+	/* smallest possible tmax, difficult to obtain as we
 	   compute the nr of flops and not the tasks */
 	double smallest_tmax = _lp_get_tmax(nw, workers);
 	double tmax = smallest_tmax * ns;
-	
+
 	double res = 1.0;
 	unsigned has_sol = 0;
 	double tmin = 0.0;
@@ -79,7 +79,7 @@ static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, dou
 		}
 		else
 			has_sol = 0;
-		
+
 		/* if we have a solution with this tmax try a smaller value
 		   bigger than the old min */
 		if(has_sol)
@@ -96,7 +96,7 @@ static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, dou
 		}
 		if(tmin == tmax) break;
 		tmax = _find_tmax(tmin, tmax);
-		
+
 		if(tmax < smallest_tmax)
 		{
 			tmax = old_tmax;
@@ -109,7 +109,7 @@ static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, dou
 
 	long diff_s = end_time.tv_sec  - start_time.tv_sec;
 	long diff_us = end_time.tv_usec  - start_time.tv_usec;
-	
+
 	float timing = (float)(diff_s*1000000 + diff_us)/1000;
 
 //        fprintf(stdout, "nd = %d total time: %f ms \n", nd, timing);
@@ -155,7 +155,7 @@ static void _redistribute_resources_in_ctxs(int ns, int nw, int nt, double w_in_
 						if(s2 != s && w_in_s[s2][w] >= 0.5)
 							destination_ctx[w][s2] = 1;
 						else
-							destination_ctx[w][s2] = 0;	
+							destination_ctx[w][s2] = 0;
 				}
 			}
 			else
@@ -174,7 +174,7 @@ static void _redistribute_resources_in_ctxs(int ns, int nw, int nt, double w_in_
 							destination_ctx[w][s2] = 0;
 				}
 			}
-	
+
 		}
 
 		sched_ctx_hypervisor_add_workers_to_sched_ctx(workers_to_add, nadd, sched_ctxs[s]);
@@ -182,7 +182,7 @@ static void _redistribute_resources_in_ctxs(int ns, int nw, int nt, double w_in_
 		int i;
 		for(i = 0; i < nadd; i++)
 			new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ? new_config->max_idle[workers_to_add[i]] :  new_config->new_workers_max_idle;
-		
+
 		if(!first_time)
 		{
 			/* do not remove workers if they can't go anywhere */
@@ -228,9 +228,9 @@ static void _size_ctxs(int *sched_ctxs, int nsched_ctxs , int *workers, int nwor
 	struct bound_task_pool * tp;
 	for (tp = task_pools; tp; tp = tp->next)
 		nt++;
-	
+
 	double w_in_s[ns][nw];
-	
+
 	unsigned found_sol = _compute_task_distribution_over_ctxs(ns, nw, nt, w_in_s, NULL, sched_ctxs, workers);
 	/* if we did find at least one solution redistribute the resources */
 	if(found_sol)
@@ -241,7 +241,7 @@ static void size_if_required()
 {
 	int nsched_ctxs, nworkers;
 	int *sched_ctxs, *workers;
-	unsigned has_req = sched_ctx_hypervisor_get_size_req(&sched_ctxs, &nsched_ctxs, &workers, &nworkers);	
+	unsigned has_req = sched_ctx_hypervisor_get_size_req(&sched_ctxs, &nsched_ctxs, &workers, &nworkers);
 
 	if(has_req)
 	{
@@ -307,13 +307,13 @@ static void _starpu_get_tasks_times(int nw, int nt, double times[nw][nt], int *w
                         if (isnan(length))
                                 times[w][t] = NAN;
                        else
-                                times[w][t] = length / 1000.;	
+                                times[w][t] = length / 1000.;
                 }
         }
 }
 
-/*                                                                                                                                                                                                                  
- * GNU Linear Programming Kit backend                                                                                                                                                                               
+/*
+ * GNU Linear Programming Kit backend
  */
 #ifdef HAVE_GLPK_H
 #include <glpk.h>
@@ -360,7 +360,7 @@ static double _glp_resolve(int ns, int nw, int nt, double tasks[nw][nt], double
 			{
 				char name[32];
 				snprintf(name, sizeof(name), "w%ds%dn", w, s);
-				glp_set_col_name(lp, nw*nt+s*nw+w+1, name);	
+				glp_set_col_name(lp, nw*nt+s*nw+w+1, name);
 				glp_set_col_bnds(lp, nw*nt+s*nw+w+1, GLP_DB, 0.0, 1.0);
 			}
 
@@ -503,7 +503,7 @@ static double _find_tmax(double t1, double t2)
 static void lp2_handle_poped_task(unsigned sched_ctx, int worker)
 {
 	struct sched_ctx_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
-	
+
 	int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
@@ -521,7 +521,7 @@ static void lp2_handle_poped_task(unsigned sched_ctx, int worker)
 			struct bound_task_pool * tp;
 			for (tp = task_pools; tp; tp = tp->next)
 				nt++;
-			
+
 			double w_in_s[ns][nw];
 			double tasks_per_worker[nw][nt];
 
@@ -546,7 +546,7 @@ static void lp2_handle_poped_task(unsigned sched_ctx, int worker)
 					for(w = 0; w < nw; w++)
 					{
 						enum starpu_perf_archtype arch = starpu_worker_get_type(w);
-						
+
 						if(arch == STARPU_CUDA_WORKER)
 						{
 							nworkers[s][0] += w_in_s[s][w];
@@ -570,7 +570,7 @@ static void lp2_handle_poped_task(unsigned sched_ctx, int worker)
 			}
 		}
 		pthread_mutex_unlock(&act_hypervisor_mutex);
-	}		
+	}
 }
 
 
@@ -590,6 +590,5 @@ struct hypervisor_policy lp2_policy = {
 	.custom = 0,
 	.name = "lp2"
 };
-	
-#endif /* HAVE_GLPK_H */
 
+#endif /* HAVE_GLPK_H */

+ 7 - 8
sched_ctx_hypervisor/src/hypervisor_policies/lp_policy.c

@@ -22,12 +22,12 @@ static void lp_handle_poped_task(unsigned sched_ctx, int worker)
 	if(_velocity_gap_btw_ctxs())
 	{
 		int nsched_ctxs = sched_ctx_hypervisor_get_nsched_ctxs();
-		
+
 		double nworkers[nsched_ctxs][2];
 
 		int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
 		if(ret != EBUSY)
-		{ 
+		{
 			int total_nw[2];
 			_get_total_nw(NULL, -1, 2, total_nw);
 
@@ -47,15 +47,15 @@ static void lp_handle_poped_task(unsigned sched_ctx, int worker)
 			if(vmax != 0.0)
 			{
 				int nworkers_rounded[nsched_ctxs][2];
-				_lp_round_double_to_int(nsched_ctxs, 2, nworkers, nworkers_rounded);				
+				_lp_round_double_to_int(nsched_ctxs, 2, nworkers, nworkers_rounded);
 				_lp_redistribute_resources_in_ctxs(nsched_ctxs, 2, nworkers_rounded, nworkers);
 			}
 			pthread_mutex_unlock(&act_hypervisor_mutex);
 		}
-	}		
+	}
 }
 static void lp_size_ctxs(int *sched_ctxs, int ns, int *workers, int nworkers)
-{	
+{
 	int nsched_ctxs = sched_ctxs == NULL ? sched_ctx_hypervisor_get_nsched_ctxs() : ns;
 	double nworkers_per_type[nsched_ctxs][2];
 	int total_nw[2];
@@ -78,7 +78,7 @@ static void lp_size_ctxs(int *sched_ctxs, int ns, int *workers, int nworkers)
 /* 			printf("ctx %d/worker type %d: n = %d \n", i, 0, res_rounded[i][0]); */
 /* 			printf("ctx %d/worker type %d: n = %d \n", i, 1, res_rounded[i][1]); */
 /* 		} */
-		
+
 		_lp_distribute_resources_in_ctxs(sched_ctxs, nsched_ctxs, 2, nworkers_per_type_rounded, nworkers_per_type, workers, nworkers);
 	}
 	pthread_mutex_unlock(&act_hypervisor_mutex);
@@ -96,6 +96,5 @@ struct hypervisor_policy lp_policy = {
 	.custom = 0,
 	.name = "lp"
 };
-	
-#endif /* HAVE_GLPK_H */
 
+#endif /* HAVE_GLPK_H */

+ 32 - 16
sched_ctx_hypervisor/src/hypervisor_policies/lp_tools.c

@@ -1,3 +1,19 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2010-2012  INRIA
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
 #include <math.h>
 #include "lp_tools.h"
 
@@ -7,7 +23,7 @@ static double _glp_get_nworkers_per_ctx(int ns, int nw, double v[ns][nw], double
 {
 	int s, w;
 	glp_prob *lp;
-	
+
 	int ne =
 		(ns*nw+1)*(ns+nw)
 		+ 1; /* glp dumbness */
@@ -21,7 +37,7 @@ static double _glp_get_nworkers_per_ctx(int ns, int nw, double v[ns][nw], double
 	glp_set_obj_dir(lp, GLP_MAX);
         glp_set_obj_name(lp, "max speed");
 
-	/* we add nw*ns columns one for each type of worker in each context 
+	/* we add nw*ns columns one for each type of worker in each context
 	   and another column corresponding to the 1/tmax bound (bc 1/tmax is a variable too)*/
 	glp_add_cols(lp, nw*ns+1);
 
@@ -83,7 +99,7 @@ static double _glp_get_nworkers_per_ctx(int ns, int nw, double v[ns][nw], double
 //		printf("ia[%d]=%d ja[%d]=%d ar[%d]=%lf\n", n, ia[n], n, ja[n], n, ar[n]);
 		n++;
 	}
-	
+
 	/*we add another linear constraint : sum(all cpus) = 9 and sum(all gpus) = 3 */
 	glp_add_rows(lp, nw);
 
@@ -126,7 +142,7 @@ static double _glp_get_nworkers_per_ctx(int ns, int nw, double v[ns][nw], double
 			glp_set_row_bnds(lp, ns+w+1, GLP_FX, total_nw[0], total_nw[0]);
 
 		/*sum(all cpus) = 9*/
-		if(w == 1) 
+		if(w == 1)
 			glp_set_row_bnds(lp, ns+w+1, GLP_FX, total_nw[1], total_nw[1]);
 	}
 
@@ -138,7 +154,7 @@ static double _glp_get_nworkers_per_ctx(int ns, int nw, double v[ns][nw], double
 	glp_init_smcp(&parm);
 	parm.msg_lev = GLP_MSG_OFF;
 	glp_simplex(lp, &parm);
-	
+
 	double vmax = glp_get_obj_val(lp);
 
 	n = 1;
@@ -177,7 +193,7 @@ double _lp_get_nworkers_per_ctx(int nsched_ctxs, int ntypes_of_workers, double r
 #endif
 	}
 
-#ifdef HAVE_GLPK_H	
+#ifdef HAVE_GLPK_H
 	return 1/_glp_get_nworkers_per_ctx(nsched_ctxs, ntypes_of_workers, v, flops, res, total_nw);
 #else
 	return 0.0;
@@ -191,7 +207,7 @@ double _lp_get_tmax(int nw, int *workers)
 	_get_total_nw(workers, nw, 2, total_nw);
 
 	int nsched_ctxs = sched_ctx_hypervisor_get_nsched_ctxs();
-	
+
 	double res[nsched_ctxs][ntypes_of_workers];
 	return _lp_get_nworkers_per_ctx(nsched_ctxs, ntypes_of_workers, res, total_nw) * 1000;
 }
@@ -209,7 +225,7 @@ void _lp_round_double_to_int(int ns, int nw, double res[ns][nw], int res_rounded
 			int x = floor(res[s][w]);
 			double x_double = (double)x;
 			double diff = res[s][w] - x_double;
-			
+
 			if(diff != 0.0)
 			{
 				if(diff > 0.5)
@@ -249,7 +265,7 @@ void _lp_round_double_to_int(int ns, int nw, double res[ns][nw], int res_rounded
 				}
 			}
 		}
-	}		
+	}
 }
 
 void _lp_redistribute_resources_in_ctxs(int ns, int nw, int res_rounded[ns][nw], double res[ns][nw])
@@ -300,7 +316,7 @@ void _lp_redistribute_resources_in_ctxs(int ns, int nw, int res_rounded[ns][nw],
 							int i;
 							for(i = 0; i < x; i++)
 								workers_move[nw_move++] = workers_to_move[i];
-							
+
 						}
 						free(workers_to_move);
 					}
@@ -313,19 +329,19 @@ void _lp_redistribute_resources_in_ctxs(int ns, int nw, int res_rounded[ns][nw],
 							int i;
 							for(i = 0; i < x-1; i++)
 								workers_move[nw_move++] = workers_to_move[i];
-							
+
 							if(diff > 0.8)
 								workers_move[nw_move++] = workers_to_move[x-1];
 							else
 								if(diff > 0.3)
 									workers_add[nw_add++] = workers_to_move[x-1];
-							
+
 						}
 						free(workers_to_move);
 					}
 				}
 			}
-			
+
 			for(s2 = 0; s2 < ns; s2++)
 			{
 				if(sched_ctxs[s2] != sched_ctxs[s])
@@ -394,7 +410,7 @@ void _lp_distribute_resources_in_ctxs(int* sched_ctxs, int ns, int nw, int res_r
 					if(x > 0)
 					{
 						sched_ctx_hypervisor_add_workers_to_sched_ctx(workers_to_add, x, current_sched_ctxs[s]);
-						sched_ctx_hypervisor_start_resize(current_sched_ctxs[s]);						
+						sched_ctx_hypervisor_start_resize(current_sched_ctxs[s]);
 					}
 					free(workers_to_add);
 				}
@@ -410,10 +426,10 @@ void _lp_distribute_resources_in_ctxs(int* sched_ctxs, int ns, int nw, int res_r
 							sched_ctx_hypervisor_add_workers_to_sched_ctx(workers_to_add, x-1, current_sched_ctxs[s]);
 						sched_ctx_hypervisor_start_resize(current_sched_ctxs[s]);
 					}
-					free(workers_to_add);			
+					free(workers_to_add);
 				}
 			}
-			
+
 		}
 		sched_ctx_hypervisor_stop_resize(current_sched_ctxs[s]);
 	}

+ 18 - 2
sched_ctx_hypervisor/src/hypervisor_policies/lp_tools.h

@@ -1,6 +1,22 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2010-2012  INRIA
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
 #include "policy_tools.h"
-/*                                                                                                                                                                                                                  
- * GNU Linear Programming Kit backend                                                                                                                                                                               
+/*
+ * GNU Linear Programming Kit backend
  */
 #ifdef HAVE_GLPK_H
 #include <glpk.h>

+ 37 - 21
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.c

@@ -1,3 +1,19 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2010-2012  INRIA
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
 /* #include <sched_ctx_hypervisor.h> */
 /* #include <pthread.h> */
 
@@ -56,14 +72,14 @@ unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move)
 			}
 		}
 	}
-	
+
 	return sched_ctx;
 }
 
 int* _get_first_workers_in_list(int *workers, int nall_workers,  unsigned *nworkers, enum starpu_archtype arch)
 {
 	int *curr_workers = (int*)malloc((*nworkers)*sizeof(int));
-	
+
 	int w, worker;
 	int nfound_workers = 0;
 	for(w = 0; w < nall_workers; w++)
@@ -121,7 +137,7 @@ int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype
 							break;
 						}
 					}
-					
+
 					if(!considered)
 					{
 						/* the first iteration*/
@@ -145,7 +161,7 @@ int* _get_first_workers(unsigned sched_ctx, int *nworkers, enum starpu_archtype
 				}
 			}
 		}
-			
+
 		if(curr_workers[index] < 0)
 		{
 			*nworkers = index;
@@ -181,26 +197,26 @@ unsigned _get_potential_nworkers(struct policy_config *config, unsigned sched_ct
 	}
 	if(workers->init_cursor)
 		workers->deinit_cursor(workers);
-	
+
 	return potential_workers;
 }
 
 /* compute the number of workers that should be moved depending:
-   - on the min/max number of workers in a context imposed by the user, 
+   - on the min/max number of workers in a context imposed by the user,
    - on the resource granularity imposed by the user for the resizing process*/
 int _get_nworkers_to_move(unsigned req_sched_ctx)
 {
        	struct policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
-	
+
 	unsigned potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx, STARPU_ANY_WORKER);
 	if(potential_moving_workers > 0)
 	{
 		if(potential_moving_workers <= config->min_nworkers)
-			/* if we have to give more than min better give it all */ 
+			/* if we have to give more than min better give it all */
 			/* => empty ctx will block until having the required workers */
-			nworkers_to_move = potential_moving_workers; 
+			nworkers_to_move = potential_moving_workers;
 		else if(potential_moving_workers > config->max_nworkers)
 		{
 			if((potential_moving_workers - config->granularity) > config->max_nworkers)
@@ -208,11 +224,11 @@ int _get_nworkers_to_move(unsigned req_sched_ctx)
 				nworkers_to_move = potential_moving_workers;
 			else
 				nworkers_to_move = potential_moving_workers - config->max_nworkers;
- 
+
 		}
 		else if(potential_moving_workers > config->granularity)
 		{
-			if((nworkers - config->granularity) > config->min_nworkers)	
+			if((nworkers - config->granularity) > config->min_nworkers)
 				nworkers_to_move = config->granularity;
 			else
 				nworkers_to_move = potential_moving_workers - config->min_nworkers;
@@ -223,7 +239,7 @@ int _get_nworkers_to_move(unsigned req_sched_ctx)
 			if(nfixed_workers >= config->min_nworkers)
 				nworkers_to_move = potential_moving_workers;
 			else
-				nworkers_to_move = potential_moving_workers - (config->min_nworkers - nfixed_workers);	
+				nworkers_to_move = potential_moving_workers - (config->min_nworkers - nfixed_workers);
 		}
 
 		if((nworkers - nworkers_to_move) > config->max_nworkers)
@@ -240,7 +256,7 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 	else
 		ret = pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
-	{					
+	{
 		int nworkers_to_move = _get_nworkers_to_move(sender_sched_ctx);
 		if(nworkers_to_move > 0)
 		{
@@ -260,18 +276,18 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 				if(nworkers_to_move == 0) poor_sched_ctx = STARPU_NMAX_SCHED_CTXS;
 			}
 			if(poor_sched_ctx != STARPU_NMAX_SCHED_CTXS)
-			{						
+			{
 				int *workers_to_move = _get_first_workers(sender_sched_ctx, &nworkers_to_move, STARPU_ANY_WORKER);
 				sched_ctx_hypervisor_move_workers(sender_sched_ctx, poor_sched_ctx, workers_to_move, nworkers_to_move, now);
-				
+
 				struct policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				int i;
 				for(i = 0; i < nworkers_to_move; i++)
 					new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
-				
+
 				free(workers_to_move);
 			}
-		}	
+		}
 		pthread_mutex_unlock(&act_hypervisor_mutex);
 		return 1;
 	}
@@ -317,7 +333,7 @@ double _get_ctx_velocity(struct sched_ctx_wrapper* sc_w)
 	double total_elapsed_flops = sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(sc_w);
 	double prc = elapsed_flops/sc_w->total_flops;
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sc_w->sched_ctx);
-	double redim_sample = elapsed_flops == total_elapsed_flops ? HYPERVISOR_START_REDIM_SAMPLE*nworkers : HYPERVISOR_REDIM_SAMPLE*nworkers;  
+	double redim_sample = elapsed_flops == total_elapsed_flops ? HYPERVISOR_START_REDIM_SAMPLE*nworkers : HYPERVISOR_REDIM_SAMPLE*nworkers;
 	if(prc >= redim_sample)
         {
                 double curr_time = starpu_timing_now();
@@ -352,7 +368,7 @@ int _velocity_gap_btw_ctxs()
 	int i = 0, j = 0;
 	struct sched_ctx_wrapper* sc_w;
 	struct sched_ctx_wrapper* other_sc_w;
-	
+
 	for(i = 0; i < nsched_ctxs; i++)
 	{
 		sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctxs[i]);
@@ -370,9 +386,9 @@ int _velocity_gap_btw_ctxs()
 						double gap = ctx_v < other_ctx_v ? other_ctx_v / ctx_v : ctx_v / other_ctx_v ;
 						if(gap > 1.5)
 							return 1;
-					} 
+					}
 					else
-						return 1;						
+						return 1;
 				}
 			}
 		}

+ 16 - 0
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.h

@@ -1,3 +1,19 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2010-2012  INRIA
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
 #include <sched_ctx_hypervisor.h>
 #include <pthread.h>
 

+ 42 - 23
sched_ctx_hypervisor/src/hypervisor_policies/simple_policy.c

@@ -1,3 +1,19 @@
+/* StarPU --- Runtime system for heterogeneous multicore architectures.
+ *
+ * Copyright (C) 2010-2012  INRIA
+ *
+ * StarPU is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * StarPU is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU Lesser General Public License in COPYING.LGPL for more details.
+ */
+
 #include <sched_ctx_hypervisor.h>
 #include <pthread.h>
 
@@ -53,7 +69,7 @@ static unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_mov
 			}
 		}
 	}
-	
+
 	return sched_ctx;
 }
 
@@ -94,7 +110,7 @@ int* _get_first_workers(unsigned sched_ctx, unsigned *nworkers, enum starpu_arch
 							break;
 						}
 					}
-					
+
 					if(!considered)
 					{
 						/* the first iteration*/
@@ -118,7 +134,7 @@ int* _get_first_workers(unsigned sched_ctx, unsigned *nworkers, enum starpu_arch
 				}
 			}
 		}
-			
+
 		if(curr_workers[index] < 0)
 		{
 			*nworkers = index;
@@ -153,7 +169,7 @@ static unsigned _get_potential_nworkers(struct policy_config *config, unsigned s
 	}
 	if(workers->init_cursor)
 		workers->deinit_cursor(workers);
-	
+
 	return potential_workers;
 }
 
@@ -162,26 +178,26 @@ static unsigned _get_nworkers_to_move(unsigned req_sched_ctx)
        	struct policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
-	
+
 	unsigned potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx, 0);
 	if(potential_moving_workers > 0)
 	{
 		if(potential_moving_workers <= config->min_nworkers)
-			/* if we have to give more than min better give it all */ 
+			/* if we have to give more than min better give it all */
 			/* => empty ctx will block until having the required workers */
-			
-			nworkers_to_move = potential_moving_workers; 
+
+			nworkers_to_move = potential_moving_workers;
 		else if(potential_moving_workers > config->max_nworkers)
 		{
 			if((potential_moving_workers - config->granularity) > config->max_nworkers)
 				nworkers_to_move = config->granularity;
 			else
 				nworkers_to_move = potential_moving_workers - config->max_nworkers;
- 
+
 		}
 		else if(potential_moving_workers > config->granularity)
 		{
-			if((nworkers - config->granularity) > config->min_nworkers)	
+			if((nworkers - config->granularity) > config->min_nworkers)
 				nworkers_to_move = config->granularity;
 			else
 				nworkers_to_move = potential_moving_workers - config->min_nworkers;
@@ -192,7 +208,7 @@ static unsigned _get_nworkers_to_move(unsigned req_sched_ctx)
 			if(nfixed_workers >= config->min_nworkers)
 				nworkers_to_move = potential_moving_workers;
 			else
-				nworkers_to_move = potential_moving_workers - (config->min_nworkers - nfixed_workers);	
+				nworkers_to_move = potential_moving_workers - (config->min_nworkers - nfixed_workers);
 		}
 
 		if((nworkers - nworkers_to_move) > config->max_nworkers)
@@ -209,7 +225,7 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 	else
 		ret = pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
-	{					
+	{
 		unsigned nworkers_to_move = _get_nworkers_to_move(sender_sched_ctx);
 
 		if(nworkers_to_move > 0)
@@ -230,18 +246,18 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 
 
 			if(poor_sched_ctx != STARPU_NMAX_SCHED_CTXS)
-			{						
+			{
 				int *workers_to_move = _get_first_workers(sender_sched_ctx, &nworkers_to_move, 0);
 				sched_ctx_hypervisor_move_workers(sender_sched_ctx, poor_sched_ctx, workers_to_move, nworkers_to_move);
-				
+
 				struct policy_config *new_config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				int i;
 				for(i = 0; i < nworkers_to_move; i++)
 					new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
-				
+
 				free(workers_to_move);
 			}
-		}	
+		}
 		pthread_mutex_unlock(&act_hypervisor_mutex);
 		return 1;
 	}
@@ -256,10 +272,10 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
         double receiver_remainig_flops = sched_ctx_hypervisor_get_flops_left(receiver_sched_ctx);
         double sender_exp_end = sched_ctx_hypervisor_get_exp_end(sender_sched_ctx);
         double sender_v_cpu = sched_ctx_hypervisor_get_cpu_velocity(sender_sched_ctx);
-//      double v_gcpu = sched_ctx_hypervisor_get_gpu_velocity(sender_sched_ctx);                                                                                                                                                                                                                                                                                                                                                                                                              
+//      double v_gcpu = sched_ctx_hypervisor_get_gpu_velocity(sender_sched_ctx);
 
         double v_for_rctx = (receiver_remainig_flops/(sender_exp_end - starpu_timing_now())) - v_receiver;
-//      v_for_rctx /= 2;                                                                                                                                                                                                                                                                                                                                                                                                                                                                      
+//      v_for_rctx /= 2;
 
         int nworkers_needed = v_for_rctx/sender_v_cpu;
 /*      printf("%d->%d: v_rec %lf v %lf v_cpu %lf w_needed %d \n", sender_sched_ctx, receiver_sched_ctx, */
@@ -474,9 +490,9 @@ static void simple_manage_gflops_rate(unsigned sched_ctx)
 		double slowest_exp_end = sched_ctx_hypervisor_get_exp_end(slowest_sched_ctx);
 		double fastest_bef_res_exp_end = sched_ctx_hypervisor_get_bef_res_exp_end(fastest_sched_ctx);
 		double slowest_bef_res_exp_end = sched_ctx_hypervisor_get_bef_res_exp_end(slowest_sched_ctx);
-//					       (fastest_bef_res_exp_end < slowest_bef_res_exp_end || 
+//					       (fastest_bef_res_exp_end < slowest_bef_res_exp_end ||
 //						fastest_bef_res_exp_end == 0.0 || slowest_bef_res_exp_end == 0)))
-		
+
 		if((slowest_exp_end == -1.0 && fastest_exp_end != -1.0) || ((fastest_exp_end + (fastest_exp_end*0.5)) < slowest_exp_end ))
 		{
 			double fast_flops_left_pct = sched_ctx_hypervisor_get_flops_left_pct(fastest_sched_ctx);
@@ -487,19 +503,22 @@ static void simple_manage_gflops_rate(unsigned sched_ctx)
 }
 
 
-struct hypervisor_policy idle_policy = {
+struct hypervisor_policy idle_policy =
+{
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,
 	.resize = simple_resize,
 };
 
-struct hypervisor_policy app_driven_policy = {
+struct hypervisor_policy app_driven_policy =
+{
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,
 	.resize = simple_resize,
 };
 
-struct hypervisor_policy gflops_rate_policy = {
+struct hypervisor_policy gflops_rate_policy =
+{
 	.manage_idle_time = simple_manage_idle_time,
 	.manage_gflops_rate = simple_manage_gflops_rate,
 	.resize = simple_resize,

+ 9 - 9
sched_ctx_hypervisor/src/sched_ctx_config.c

@@ -20,7 +20,7 @@ static struct policy_config* _create_config(void)
 {
 	struct policy_config *config = (struct policy_config *)malloc(sizeof(struct policy_config));
 	config->min_nworkers = -1;
-	config->max_nworkers = -1;	
+	config->max_nworkers = -1;
 	config->new_workers_max_idle = -1.0;
 
 	int i;
@@ -33,7 +33,7 @@ static struct policy_config* _create_config(void)
 		config->empty_ctx_max_idle[i] = -1.0;
 		config->min_working[i] = -1.0;
 	}
-	
+
 	return config;
 }
 
@@ -63,7 +63,7 @@ void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config)
 	}
 	else
 		hypervisor.sched_ctx_w[sched_ctx].config = config;
-	
+
 	return;
 }
 
@@ -71,7 +71,7 @@ void _add_config(unsigned sched_ctx)
 {
 	struct policy_config *config = _create_config();
 	config->min_nworkers = 0;
-	config->max_nworkers = STARPU_NMAXWORKERS;	
+	config->max_nworkers = STARPU_NMAXWORKERS;
 	config->new_workers_max_idle = MAX_IDLE_TIME;
 
 	int i;
@@ -114,7 +114,7 @@ static struct policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsig
 	int *workerids;
 	int nworkers;
 
-	while ((arg_type = va_arg(varg_list, int)) != HYPERVISOR_NULL) 
+	while ((arg_type = va_arg(varg_list, int)) != HYPERVISOR_NULL)
 	{
 		switch(arg_type)
 		{
@@ -131,7 +131,7 @@ static struct policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsig
 			workerids = va_arg(varg_list, int*);
 			nworkers = va_arg(varg_list, int);
 			double empty_ctx_max_idle = va_arg(varg_list, double);
-			
+
 			for(i = 0; i < nworkers; i++)
 				config->empty_ctx_max_idle[workerids[i]] = empty_ctx_max_idle;
 
@@ -151,7 +151,7 @@ static struct policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsig
 			workerids = va_arg(varg_list, int*);
 			nworkers = va_arg(varg_list, int);
 			int priority = va_arg(varg_list, int);
-	
+
 			for(i = 0; i < nworkers; i++)
 				config->priority[workerids[i]] = priority;
 			break;
@@ -180,7 +180,7 @@ static struct policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsig
 			config->new_workers_max_idle = va_arg(varg_list, double);
 			break;
 
-/* not important for the strateg, needed just to jump these args in the iteration of the args */			
+/* not important for the strategy, needed just to skip these args in the iteration of the args */
 		case HYPERVISOR_TIME_TO_APPLY:
 			va_arg(varg_list, int);
 			break;
@@ -207,7 +207,7 @@ void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...)
 	int stop = 0;
 	int task_tag = -1;
 
-	while ((arg_type = va_arg(varg_list, int)) != HYPERVISOR_NULL) 
+	while ((arg_type = va_arg(varg_list, int)) != HYPERVISOR_NULL)
 	{
 		switch(arg_type)
 		{

+ 64 - 63
sched_ctx_hypervisor/src/sched_ctx_hypervisor.c

@@ -36,14 +36,15 @@ extern struct hypervisor_policy lp2_policy;
 #endif
 
 
-static struct hypervisor_policy *predefined_policies[] = {
+static struct hypervisor_policy *predefined_policies[] =
+{
         &idle_policy,
-		&app_driven_policy,
+	&app_driven_policy,
 #ifdef HAVE_GLPK_H
-		&lp_policy,
-		&lp2_policy,
+	&lp_policy,
+	&lp2_policy,
 #endif
-		&gflops_rate_policy
+	&gflops_rate_policy
 };
 
 static void _load_hypervisor_policy(struct hypervisor_policy *policy)
@@ -66,7 +67,7 @@ static struct hypervisor_policy *_find_hypervisor_policy_from_name(const char *p
 
 	if (!policy_name)
 		return NULL;
-	
+
 	unsigned i;
 	for (i = 0; i < sizeof(predefined_policies)/sizeof(predefined_policies[0]); i++)
 	{
@@ -81,7 +82,7 @@ static struct hypervisor_policy *_find_hypervisor_policy_from_name(const char *p
 		}
 	}
 	fprintf(stderr, "Warning: hypervisor policy \"%s\" was not found, try \"help\" to get a list\n", policy_name);
-	
+
 	/* nothing was found */
 	return NULL;
 }
@@ -89,30 +90,30 @@ static struct hypervisor_policy *_find_hypervisor_policy_from_name(const char *p
 static struct hypervisor_policy *_select_hypervisor_policy(struct hypervisor_policy* hypervisor_policy)
 {
 	struct hypervisor_policy *selected_policy = NULL;
-	
+
 	if(hypervisor_policy && hypervisor_policy->custom)
 		return hypervisor_policy;
-	
+
 	/* we look if the application specified the name of a policy to load */
 	const char *policy_name;
 	if (hypervisor_policy && hypervisor_policy->name)
 	{
 		policy_name = hypervisor_policy->name;
 	}
-	else 
+	else
 	{
 		policy_name = getenv("HYPERVISOR_POLICY");
 	}
-	
+
 	if (policy_name)
 		selected_policy = _find_hypervisor_policy_from_name(policy_name);
-	
+
 	/* Perhaps there was no policy that matched the name */
 	if (selected_policy)
 		return selected_policy;
-	
+
 	/* If no policy was specified, we use the idle policy as a default */
-	
+
 	return &idle_policy;
 }
 
@@ -123,7 +124,7 @@ struct starpu_performance_counters* sched_ctx_hypervisor_init(struct hypervisor_
 	hypervisor.min_tasks = 0;
 	hypervisor.nsched_ctxs = 0;
 	pthread_mutex_init(&act_hypervisor_mutex, NULL);
-	
+
 	int i;
 	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
 	{
@@ -241,13 +242,13 @@ static int _get_first_free_sched_ctx(int *sched_ctxs, unsigned nsched_ctxs)
 	for(i = 0; i < nsched_ctxs; i++)
 		if(sched_ctxs[i] == STARPU_NMAX_SCHED_CTXS)
 			return i;
-	
+
 	return STARPU_NMAX_SCHED_CTXS;
 }
 
-/* rearange array of sched_ctxs in order not to have {MAXVAL, MAXVAL, 5, MAXVAL, 7}    
-   and have instead {5, 7, MAXVAL, MAXVAL, MAXVAL}                                    
-   it is easier afterwards to iterate the array                           
+/* rearrange array of sched_ctxs in order not to have {MAXVAL, MAXVAL, 5, MAXVAL, 7}
+   and have instead {5, 7, MAXVAL, MAXVAL, MAXVAL}
+   it is easier afterwards to iterate the array
 */
 static void _rearange_sched_ctxs(int *sched_ctxs, int old_nsched_ctxs)
 {
@@ -285,7 +286,7 @@ void sched_ctx_hypervisor_unregister_ctx(unsigned sched_ctx)
 	hypervisor.nsched_ctxs--;
 	hypervisor.sched_ctx_w[sched_ctx].sched_ctx = STARPU_NMAX_SCHED_CTXS;
 	_remove_config(sched_ctx);
-	
+
 /* 	free(hypervisor.configurations[sched_ctx]); */
 /* 	free(hypervisor.resize_requests[sched_ctx]); */
 	pthread_mutex_destroy(&hypervisor.conf_mut[sched_ctx]);
@@ -352,12 +353,12 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 /* 		for(j = 0; j < nworkers_to_move; j++) */
 /* 			printf(" %d", workers_to_move[j]); */
 /* 		printf("\n"); */
-		
+
 		int *cpus = (int*) malloc(nworkers_to_move * sizeof(int));
 		int ncpus;
-		
+
 		_get_cpus(workers_to_move, nworkers_to_move, cpus, &ncpus);
-		
+
 //		if(ncpus != 0)
 //			starpu_remove_workers_from_sched_ctx(cpus, ncpus, sender_sched_ctx);
 
@@ -370,30 +371,30 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 /* 			for(j = 0; j < nworkers_to_move; j++) */
 /* 				printf(" %d", workers_to_move[j]); */
 /* 			printf("\n"); */
-			
+
 			starpu_remove_workers_from_sched_ctx(workers_to_move, nworkers_to_move, sender_sched_ctx);
 		}
 		else
 		{
-			int ret = pthread_mutex_trylock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);	
+			int ret = pthread_mutex_trylock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 			if(ret != EBUSY)
 			{
 				hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.receiver_sched_ctx = receiver_sched_ctx;
 				hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.moved_workers = (int*)malloc(nworkers_to_move * sizeof(int));
 				hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.nmoved_workers = nworkers_to_move;
 				hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.acked_workers = (int*)malloc(nworkers_to_move * sizeof(int));
-				
-				
+
+
 				int i;
 				for(i = 0; i < nworkers_to_move; i++)
 				{
 					hypervisor.sched_ctx_w[sender_sched_ctx].current_idle_time[workers_to_move[i]] = 0.0;
-					hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.moved_workers[i] = workers_to_move[i];	
-					hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.acked_workers[i] = 0;	
+					hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.moved_workers[i] = workers_to_move[i];
+					hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.acked_workers[i] = 0;
 				}
-				
+
 				hypervisor.resize[sender_sched_ctx] = 0;
-				
+
 				pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 			}
 		}
@@ -401,7 +402,7 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 		int i;
 		for(i = 0; i < nworkers_to_move; i++)
 			new_config->max_idle[workers_to_move[i]] = new_config->max_idle[workers_to_move[i]] !=MAX_IDLE_TIME ? new_config->max_idle[workers_to_move[i]] :  new_config->new_workers_max_idle;
-		
+
 	}
 	return;
 }
@@ -420,7 +421,7 @@ void sched_ctx_hypervisor_add_workers_to_sched_ctx(int* workers_to_add, unsigned
 		int i;
 		for(i = 0; i < nworkers_to_add; i++)
 			new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ? new_config->max_idle[workers_to_add[i]] :  new_config->new_workers_max_idle;
-		
+
 	}
 	return;
 }
@@ -436,7 +437,7 @@ void sched_ctx_hypervisor_remove_workers_from_sched_ctx(int* workers_to_remove,
 	{
 		int nworkers=0;
 		int workers[nworkers_to_remove];
-	
+
 		if(now)
 		{
 /* 				int j; */
@@ -444,31 +445,31 @@ void sched_ctx_hypervisor_remove_workers_from_sched_ctx(int* workers_to_remove,
 /* 				for(j = 0; j < nworkers_to_remove; j++) */
 /* 					printf(" %d", workers_to_remove[j]); */
 /* 				printf("\n"); */
-				
+
 				starpu_remove_workers_from_sched_ctx(workers_to_remove, nworkers_to_remove, sched_ctx);
 		}
 		else
 		{
-			int ret = pthread_mutex_trylock(&hypervisor.sched_ctx_w[sched_ctx].mutex);	
+			int ret = pthread_mutex_trylock(&hypervisor.sched_ctx_w[sched_ctx].mutex);
 			if(ret != EBUSY)
 			{
-				
+
 				int i;
 				for(i = 0; i < nworkers_to_remove; i++)
 					if(starpu_worker_belongs_to_sched_ctx(workers_to_remove[i], sched_ctx))
 						workers[nworkers++] = workers_to_remove[i];
-				
+
 				hypervisor.sched_ctx_w[sched_ctx].resize_ack.receiver_sched_ctx = -1;
 				hypervisor.sched_ctx_w[sched_ctx].resize_ack.moved_workers = (int*)malloc(nworkers_to_remove * sizeof(int));
 				hypervisor.sched_ctx_w[sched_ctx].resize_ack.nmoved_workers = nworkers;
 				hypervisor.sched_ctx_w[sched_ctx].resize_ack.acked_workers = (int*)malloc(nworkers_to_remove * sizeof(int));
-				
-				
+
+
 				for(i = 0; i < nworkers; i++)
 				{
 					hypervisor.sched_ctx_w[sched_ctx].current_idle_time[workers[i]] = 0.0;
-					hypervisor.sched_ctx_w[sched_ctx].resize_ack.moved_workers[i] = workers[i];	
-					hypervisor.sched_ctx_w[sched_ctx].resize_ack.acked_workers[i] = 0;	
+					hypervisor.sched_ctx_w[sched_ctx].resize_ack.moved_workers[i] = workers[i];
+					hypervisor.sched_ctx_w[sched_ctx].resize_ack.acked_workers[i] = 0;
 				}
 
 				hypervisor.resize[sched_ctx] = 0;
@@ -520,7 +521,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 			struct sched_ctx_wrapper *sc_w = &hypervisor.sched_ctx_w[hypervisor.sched_ctxs[i]];
 			pthread_mutex_lock(&sc_w->mutex);
 			unsigned only_remove = 0;
-			if(sc_w->resize_ack.receiver_sched_ctx == -1 && hypervisor.sched_ctxs[i] != sched_ctx && 
+			if(sc_w->resize_ack.receiver_sched_ctx == -1 && hypervisor.sched_ctxs[i] != sched_ctx &&
 			   sc_w->resize_ack.nmoved_workers > 0 && starpu_worker_belongs_to_sched_ctx(worker, hypervisor.sched_ctxs[i]))
 			{
 				int j;
@@ -531,7 +532,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 						break;
 					}
 			}
-			if(only_remove || 
+			if(only_remove ||
 			   (sc_w->resize_ack.receiver_sched_ctx != -1 && sc_w->resize_ack.receiver_sched_ctx == sched_ctx))
 			{
 				resize_ack = &sc_w->resize_ack;
@@ -546,7 +547,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 	/* if there is no ctx waiting for its ack return 1*/
 	if(resize_ack == NULL)
 		return 1;
-	
+
 	int ret = pthread_mutex_trylock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 	if(ret != EBUSY)
 	{
@@ -565,13 +566,13 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 				}
 			}
 		}
-			
+
 		int nacked_workers = 0;
 		for(i = 0; i < nmoved_workers; i++)
 		{
 			nacked_workers += (acked_workers[i] == 1);
 		}
-		
+
 		unsigned resize_completed = (nacked_workers == nmoved_workers);
 		int receiver_sched_ctx = sched_ctx;
 		if(resize_completed)
@@ -579,33 +580,33 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 			/* if the permission to resize is not allowed by the user don't do it
 			   whatever the application says */
 			if(!((hypervisor.resize[sender_sched_ctx] == 0 || hypervisor.resize[receiver_sched_ctx] == 0) && imposed_resize))
-			{				
+			{
 /* 				int j; */
 /* 				printf("remove from ctx %d:", sender_sched_ctx); */
 /* 				for(j = 0; j < nmoved_workers; j++) */
 /* 					printf(" %d", moved_workers[j]); */
 /* 				printf("\n"); */
-				
+
 				starpu_remove_workers_from_sched_ctx(moved_workers, nmoved_workers, sender_sched_ctx);
-				
+
 				/* info concerning only the gflops_rate strateg */
 				struct sched_ctx_wrapper *sender_sc_w = &hypervisor.sched_ctx_w[sender_sched_ctx];
 				struct sched_ctx_wrapper *receiver_sc_w = &hypervisor.sched_ctx_w[receiver_sched_ctx];
-				
+
 				double start_time =  starpu_timing_now();
 				sender_sc_w->start_time = start_time;
 				sender_sc_w->remaining_flops = sender_sc_w->remaining_flops - sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sender_sc_w);
 				_set_elapsed_flops_per_sched_ctx(sender_sched_ctx, 0.0);
-				
+
 				receiver_sc_w->start_time = start_time;
 				receiver_sc_w->remaining_flops = receiver_sc_w->remaining_flops - sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(receiver_sc_w);
 				_set_elapsed_flops_per_sched_ctx(receiver_sched_ctx, 0.0);
-				
+
 				hypervisor.resize[sender_sched_ctx] = 1;
 //				hypervisor.resize[receiver_sched_ctx] = 1;
 				/* if the user allowed resizing leave the decisions to the application */
 				if(imposed_resize)  imposed_resize = 0;
-				
+
 				resize_ack->receiver_sched_ctx = -1;
 				resize_ack->nmoved_workers = 0;
 				free(resize_ack->moved_workers);
@@ -642,10 +643,10 @@ static void notify_idle_end(unsigned sched_ctx, int worker)
 {
 	if(hypervisor.resize[sched_ctx])
 		hypervisor.sched_ctx_w[sched_ctx].current_idle_time[worker] = 0.0;
-	
+
 	if(hypervisor.policy.handle_idle_end)
 		hypervisor.policy.handle_idle_end(sched_ctx, worker);
-		
+
 }
 
 /* notifies the hypervisor that the worker spent another cycle in idle time */
@@ -659,19 +660,19 @@ static void notify_idle_cycle(unsigned sched_ctx, int worker, double idle_time)
 		{
 			hypervisor.policy.handle_idle_cycle(sched_ctx, worker);
 		}
-	}		
+	}
 	return;
 }
 
 /* notifies the hypervisor that a new task was pushed on the queue of the worker */
 static void notify_pushed_task(unsigned sched_ctx, int worker)
-{	
+{
 	hypervisor.sched_ctx_w[sched_ctx].pushed_tasks[worker]++;
 	if(hypervisor.sched_ctx_w[sched_ctx].total_flops != 0.0 && hypervisor.sched_ctx_w[sched_ctx].start_time == 0.0)
 		hypervisor.sched_ctx_w[sched_ctx].start_time = starpu_timing_now();
-	
+
 	int ntasks = get_ntasks(hypervisor.sched_ctx_w[sched_ctx].pushed_tasks);
-	
+
 	if((hypervisor.min_tasks == 0 || (!(hypervisor.resize[sched_ctx] == 0 && imposed_resize) && ntasks == hypervisor.min_tasks)) && hypervisor.check_min_tasks[sched_ctx])
 	{
 		hypervisor.resize[sched_ctx] = 1;
@@ -709,7 +710,7 @@ static void notify_post_exec_hook(unsigned sched_ctx, int task_tag)
 	pthread_mutex_lock(&act_hypervisor_mutex);
 	unsigned ns = hypervisor.nsched_ctxs;
 	pthread_mutex_unlock(&act_hypervisor_mutex);
-	
+
 	for(i = 0; i < ns; i++)
 	{
 		struct configuration_entry *entry;
@@ -728,8 +729,8 @@ static void notify_post_exec_hook(unsigned sched_ctx, int task_tag)
 			free(config);
 		}
 		pthread_mutex_unlock(&hypervisor.conf_mut[conf_sched_ctx]);
-	}	
-		
+	}
+
 	if(hypervisor.resize[sched_ctx])
 	{
 		pthread_mutex_lock(&hypervisor.resize_mut[sched_ctx]);

+ 10 - 5
sched_ctx_hypervisor/src/sched_ctx_hypervisor_intern.h

@@ -16,7 +16,8 @@
 
 #include <sched_ctx_hypervisor.h>
 #include <common/uthash.h>
-struct size_request {
+struct size_request
+{
 	int *workers;
 	int nworkers;
 	int *sched_ctxs;
@@ -25,7 +26,8 @@ struct size_request {
 
 
 /* Entry in the resize request hash table.  */
-struct resize_request_entry {
+struct resize_request_entry
+{
 	/* Key: the tag of tasks concerned by this resize request.  */
 	uint32_t task_tag;
 
@@ -38,7 +40,8 @@ struct resize_request_entry {
 	UT_hash_handle hh;
 };
 
-struct configuration_entry {
+struct configuration_entry
+{
 	/* Key: the tag of tasks concerned by this configuration.  */
 	uint32_t task_tag;
 
@@ -49,7 +52,8 @@ struct configuration_entry {
 	UT_hash_handle hh;
 };
 
-struct sched_ctx_hypervisor {
+struct sched_ctx_hypervisor
+{
 	struct sched_ctx_wrapper sched_ctx_w[STARPU_NMAX_SCHED_CTXS];
 	int sched_ctxs[STARPU_NMAX_SCHED_CTXS];
 	unsigned nsched_ctxs;
@@ -68,7 +72,8 @@ struct sched_ctx_hypervisor {
 	int check_min_tasks[STARPU_NMAX_SCHED_CTXS];
 };
 
-struct sched_ctx_hypervisor_adjustment {
+struct sched_ctx_hypervisor_adjustment
+{
 	int workerids[STARPU_NMAXWORKERS];
 	int nworkers;
 };