src/sched_policies: fix coding style

Nathalie Furmento 12 years ago
parent
commit
64222192ad

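The bulk of this commit applies one recurring pattern: anonymous typedef'd policy structs become named structs with the internal _starpu_ prefix, opening braces move onto their own line, indentation is normalised to tabs, and trailing whitespace is stripped. A minimal sketch of the before/after shape (the field is taken from the first hunk below; the fragment compiles on its own):

/* Before: anonymous struct hidden behind a typedef */
typedef struct {
	double alpha;
} dmda_data;

/* After: named struct with the internal _starpu_ prefix, no typedef,
 * opening brace on its own line */
struct _starpu_dmda_data
{
	double alpha;
};

/* Casts at call sites change accordingly, from (dmda_data *) to
 * (struct _starpu_dmda_data *), as in every hunk below. */
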
+ 63 - 61
src/sched_policies/deque_modeling_policy_data_aware.c

@@ -41,7 +41,8 @@
 #define DBL_MAX __DBL_MAX__
 #endif
 
-typedef struct {
+struct _starpu_dmda_data
+{
 	double alpha;
 	double beta;
 	double _gamma;
@@ -51,7 +52,7 @@ typedef struct {
 
 	long int total_task_cnt;
 	long int ready_task_cnt;
-} dmda_data;
+};
 
 static double alpha = _STARPU_DEFAULT_ALPHA;
 static double beta = _STARPU_DEFAULT_BETA;
@@ -153,7 +154,7 @@ static struct starpu_task *_starpu_fifo_pop_first_ready_task(struct _starpu_fifo
 
 static struct starpu_task *dmda_pop_ready_task(unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_task *task;
 
@@ -188,7 +189,7 @@ static struct starpu_task *dmda_pop_ready_task(unsigned sched_ctx_id)
 
 static struct starpu_task *dmda_pop_task(unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_task *task;
 
@@ -221,7 +222,7 @@ static struct starpu_task *dmda_pop_task(unsigned sched_ctx_id)
 
 static struct starpu_task *dmda_pop_every_task(unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_task *new_list;
 
@@ -251,7 +252,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 				    double predicted, double predicted_transfer,
 				    int prio, unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	/* make sure someone could execute that task! */
 	STARPU_ASSERT(best_workerid != -1);
 
@@ -290,9 +291,9 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 
 	fifo->exp_end += predicted_transfer;
 	fifo->exp_len += predicted_transfer;
-	
+
 	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
-	
+
 	task->predicted = predicted;
 	task->predicted_transfer = predicted_transfer;
 
@@ -310,7 +311,8 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	}
 
 #ifdef HAVE_AYUDAME_H
-	if (AYU_event) {
+	if (AYU_event)
+	{
 		int id = best_workerid;
 		AYU_event(AYU_ADDTASKTOQUEUE, _starpu_get_job_associated_to_task(task)->job_id, &id);
 	}
@@ -326,10 +328,10 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 /* TODO: factorize with dmda!! */
 static int _dm_push_task(struct starpu_task *task, unsigned prio, unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned worker, worker_ctx = 0;
 	int best = -1;
-	
+
 	double best_exp_end = 0.0;
 	double model_best = 0.0;
 	double transfer_model_best = 0.0;
@@ -337,24 +339,24 @@ static int _dm_push_task(struct starpu_task *task, unsigned prio, unsigned sched
 	int ntasks_best = -1;
 	double ntasks_best_end = 0.0;
 	int calibrating = 0;
-	
+
 	/* A priori, we know all estimations */
 	int unknown = 0;
-	
+
 	unsigned best_impl = 0;
 	unsigned nimpl;
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
-	
+
 	if(workers->init_cursor)
 		workers->init_cursor(workers);
-	
+
 	while(workers->has_next(workers))
 	{
 		worker = workers->get_next(workers);
 		struct _starpu_fifo_taskq *fifo  = dt->queue_array[worker];
 		unsigned memory_node = starpu_worker_get_memory_node(worker);
 		enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
-		
+
 		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
 		{
 			if (!starpu_worker_can_execute_task(worker, task, nimpl))
@@ -363,25 +365,25 @@ static int _dm_push_task(struct starpu_task *task, unsigned prio, unsigned sched
 				//			worker_ctx++;
 				continue;
 			}
-			
+
 			double exp_end;
 			_starpu_pthread_mutex_t *sched_mutex;
 			_starpu_pthread_cond_t *sched_cond;
 			starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, worker, &sched_mutex, &sched_cond);
-			
+
 			/* Sometimes workers didn't take the tasks as early as we expected */
 			_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
 			fifo->exp_end = fifo->exp_start + fifo->exp_len;
 			_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
-			
-			
+
+
 			double local_length = starpu_task_expected_length(task, perf_arch, nimpl);
 			double local_penalty = starpu_task_expected_data_transfer_time(memory_node, task);
 			double ntasks_end = fifo->ntasks / starpu_worker_get_relative_speedup(perf_arch);
-			
+
 			//_STARPU_DEBUG("Scheduler dm: task length (%lf) worker (%u) kernel (%u) \n", local_length,worker,nimpl);
-			
+
 			if (ntasks_best == -1
 			    || (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take better task */
 			    || (!calibrating && isnan(local_length)) /* Not calibrating but this worker is being calibrated */
@@ -392,23 +394,23 @@ static int _dm_push_task(struct starpu_task *task, unsigned prio, unsigned sched
 				ntasks_best = worker;
 				best_impl = nimpl;
 			}
-			
+
 			if (isnan(local_length))
 				/* we are calibrating, we want to speed-up calibration time
 				 * so we privilege non-calibrated tasks (but still
 				 * greedily distribute them to avoid dumb schedules) */
 				calibrating = 1;
-			
+
 			if (isnan(local_length) || _STARPU_IS_ZERO(local_length))
 				/* there is no prediction available for that task
 				 * with that arch yet, so switch to a greedy strategy */
 				unknown = 1;
-			
+
 			if (unknown)
 				continue;
 
 			exp_end = fifo->exp_start + fifo->exp_len + local_length;
-			
+
 			if (best == -1 || exp_end < best_exp_end)
 			{
 				/* a better solution was found */
@@ -428,27 +430,27 @@ static int _dm_push_task(struct starpu_task *task, unsigned prio, unsigned sched
 		model_best = 0.0;
 		transfer_model_best = 0.0;
 	}
-	
+
 	//_STARPU_DEBUG("Scheduler dm: kernel (%u)\n", best_impl);
-	
+
 	if (workers->deinit_cursor)
 		workers->deinit_cursor(workers);
-	
+
 	_starpu_get_job_associated_to_task(task)->nimpl = best_impl;
-	
+
 	/* we should now have the best worker in variable "best" */
 	return push_task_on_best_worker(task, best,
 									model_best, transfer_model_best, prio, sched_ctx_id);
 }
 
 static void compute_all_performance_predictions(struct starpu_task *task,
-												double local_task_length[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
-												double exp_end[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
-												double *max_exp_endp,
-												double *best_exp_endp,
-												double local_data_penalty[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
-												double local_power[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
-												int *forced_worker, int *forced_impl, unsigned sched_ctx_id)
+						double local_task_length[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
+						double exp_end[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
+						double *max_exp_endp,
+						double *best_exp_endp,
+						double local_data_penalty[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
+						double local_power[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
+						int *forced_worker, int *forced_impl, unsigned sched_ctx_id)
 {
 	int calibrating = 0;
 	double max_exp_end = DBL_MIN;
@@ -464,9 +466,9 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 	unsigned nimpl;
 
 	starpu_task_bundle_t bundle = task->bundle;
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
-		
+
 	while(workers->has_next(workers))
 	{
 		worker = workers->get_next(workers);
@@ -530,7 +532,7 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 				 * so we privilege non-calibrated tasks (but still
 				 * greedily distribute them to avoid dumb schedules) */
 				calibrating = 1;
-			
+
 			if (isnan(local_task_length[worker_ctx][nimpl])
 					|| _STARPU_IS_ZERO(local_task_length[worker_ctx][nimpl]))
 				/* there is no prediction available for that task
@@ -539,19 +541,19 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 
 			if (unknown)
 				continue;
-			
+
 			exp_end[worker_ctx][nimpl] = fifo->exp_start + fifo->exp_len + local_task_length[worker_ctx][nimpl];
-			
+
 			if (exp_end[worker_ctx][nimpl] < best_exp_end)
 			{
 				/* a better solution was found */
 				best_exp_end = exp_end[worker_ctx][nimpl];
 				nimpl_best = nimpl;
 			}
-			
+
 			if (isnan(local_power[worker_ctx][nimpl]))
 				local_power[worker_ctx][nimpl] = 0.;
-			
+
 		}
 		worker_ctx++;
 	}
@@ -577,7 +579,7 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 	int forced_best = -1;
 	int forced_impl = -1;
 
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
 	unsigned nworkers_ctx = workers->nworkers;
 	double local_task_length[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS];
@@ -617,12 +619,12 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 					/* no one on that queue may execute this task */
 					continue;
 				}
-				
-				
-				fitness[worker_ctx][nimpl] = dt->alpha*(exp_end[worker_ctx][nimpl] - best_exp_end) 
+
+
+				fitness[worker_ctx][nimpl] = dt->alpha*(exp_end[worker_ctx][nimpl] - best_exp_end)
 					+ dt->beta*(local_data_penalty[worker_ctx][nimpl])
 					+ dt->_gamma*(local_power[worker_ctx][nimpl]);
-				
+
 				if (exp_end[worker_ctx][nimpl] > max_exp_end)
 				{
 					/* This placement will make the computation
@@ -630,7 +632,7 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 					 * consumption of other cpus */
 					fitness[worker_ctx][nimpl] += dt->_gamma * dt->idle_power * (exp_end[worker_ctx][nimpl] - max_exp_end) / 1000000.0;
 				}
-				
+
 				if (best == -1 || fitness[worker_ctx][nimpl] < best_fitness)
 				{
 					/* we found a better solution */
@@ -669,7 +671,7 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 		model_best = local_task_length[best_in_ctx][selected_impl];
 		transfer_model_best = local_data_penalty[best_in_ctx][selected_impl];
 	}
-	
+
 	if (task->bundle)
 		starpu_task_bundle_remove(task->bundle, task);
         if (workers->deinit_cursor)
@@ -744,9 +746,9 @@ static int dmda_push_task(struct starpu_task *task)
 	return ret_val;
 }
 
-static void dmda_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers) 
+static void dmda_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid;
 	unsigned i;
@@ -760,7 +762,7 @@ static void dmda_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nwo
 
 static void dmda_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid;
 	unsigned i;
@@ -772,11 +774,11 @@ static void dmda_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned
 	}
 }
 
-static void initialize_dmda_policy(unsigned sched_ctx_id) 
+static void initialize_dmda_policy(unsigned sched_ctx_id)
 {
 	starpu_create_worker_collection_for_sched_ctx(sched_ctx_id, WORKER_LIST);
 
-	dmda_data *dt = (dmda_data*)malloc(sizeof(dmda_data));
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)malloc(sizeof(struct _starpu_dmda_data));
 	dt->alpha = _STARPU_DEFAULT_ALPHA;
 	dt->beta = _STARPU_DEFAULT_BETA;
 	dt->_gamma = _STARPU_DEFAULT_GAMMA;
@@ -796,7 +798,7 @@ static void initialize_dmda_policy(unsigned sched_ctx_id)
 
 	const char *strval_gamma = getenv("STARPU_SCHED_GAMMA");
 	if (strval_gamma)
-		dt->_gamma = atof(strval_gamma);	
+		dt->_gamma = atof(strval_gamma);
 
 	const char *strval_idle_power = getenv("STARPU_IDLE_POWER");
 	if (strval_idle_power)
@@ -823,9 +825,9 @@ static void initialize_dmda_sorted_policy(unsigned sched_ctx_id)
 	starpu_sched_set_max_priority(INT_MAX);
 }
 
-static void deinitialize_dmda_policy(unsigned sched_ctx_id) 
+static void deinitialize_dmda_policy(unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	free(dt->queue_array);
 	free(dt);
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
@@ -840,7 +842,7 @@ static void dmda_pre_exec_hook(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
 	int workerid = starpu_worker_get_id();
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = dt->queue_array[workerid];
 	double model = task->predicted;
 	double transfer_model = task->predicted_transfer;
@@ -859,7 +861,7 @@ static void dmda_pre_exec_hook(struct starpu_task *task)
 
 static void dmda_push_task_notify(struct starpu_task *task, int workerid, unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_dmda_data *dt = (struct _starpu_dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = dt->queue_array[workerid];
 	/* Compute the expected penalty */
 	enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(workerid);

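For context while reading the hunks above: the quantity that the dm/dmda push paths minimise is a weighted fitness over expected termination time, data transfer penalty and power. A standalone restatement of that formula, as a sketch (the weights map to dt->alpha, dt->beta and dt->_gamma, configurable through STARPU_SCHED_ALPHA, STARPU_SCHED_BETA and STARPU_SCHED_GAMMA):

/* Lower is better. exp_end and best_exp_end are expected termination times
 * (µs), data_penalty an expected transfer time, power an expected energy
 * cost, mirroring the fitness[worker_ctx][nimpl] expression in
 * _dmda_push_task(). */
static double dmda_fitness(double alpha, double beta, double gamma,
			   double exp_end, double best_exp_end,
			   double data_penalty, double power)
{
	return alpha * (exp_end - best_exp_end)
		+ beta * data_penalty
		+ gamma * power;
}
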
+ 142 - 142
src/sched_policies/detect_combined_workers.c

@@ -27,121 +27,122 @@
 
 static void find_workers(hwloc_obj_t obj, int cpu_workers[STARPU_NMAXWORKERS], unsigned *n)
 {
-		if (!obj->userdata)
-				/* Not something we run something on, don't care */
-				return;
-		if (obj->userdata == (void*) -1)
-		{
-				/* Intra node, recurse */
-				unsigned i;
-				for (i = 0; i < obj->arity; i++)
-						find_workers(obj->children[i], cpu_workers, n);
-				return;
-		}
-		
-		/* Got to a PU leaf */
-		struct _starpu_worker *worker = obj->userdata;
-		/* is it a CPU worker? */
-		if (worker->perf_arch == STARPU_CPU_DEFAULT)
-		{
-				_STARPU_DEBUG("worker %d is part of it\n", worker->workerid);
-				/* Add it to the combined worker */
-				cpu_workers[(*n)++] = worker->workerid;
-		}
+	if (!obj->userdata)
+		/* Not something we run something on, don't care */
+		return;
+	if (obj->userdata == (void*) -1)
+	{
+		/* Intra node, recurse */
+		unsigned i;
+		for (i = 0; i < obj->arity; i++)
+			find_workers(obj->children[i], cpu_workers, n);
+		return;
+	}
+
+	/* Got to a PU leaf */
+	struct _starpu_worker *worker = obj->userdata;
+	/* is it a CPU worker? */
+	if (worker->perf_arch == STARPU_CPU_DEFAULT)
+	{
+		_STARPU_DEBUG("worker %d is part of it\n", worker->workerid);
+		/* Add it to the combined worker */
+		cpu_workers[(*n)++] = worker->workerid;
+	}
 }
 
 static void synthesize_intermediate_workers(hwloc_obj_t *children, unsigned arity, unsigned n, unsigned synthesize_arity)
 {
-		unsigned nworkers, i, j;
-		unsigned chunk_size = (n + synthesize_arity-1) / synthesize_arity;
-		unsigned chunk_start;
-		int cpu_workers[STARPU_NMAXWORKERS];
-		int ret;
-		
-		if (n <= synthesize_arity)
-				/* Not too many children, do not synthesize */
-				return;
-
-		_STARPU_DEBUG("%u children > %u, synthesizing intermediate combined workers of size %u\n", n, synthesize_arity, chunk_size);
-
-		n = 0;
-		j = 0;
-		nworkers = 0;
-		chunk_start = 0;
-		for (i = 0 ; i < arity; i++)
+	unsigned nworkers, i, j;
+	unsigned chunk_size = (n + synthesize_arity-1) / synthesize_arity;
+	unsigned chunk_start;
+	int cpu_workers[STARPU_NMAXWORKERS];
+	int ret;
+
+	if (n <= synthesize_arity)
+		/* Not too many children, do not synthesize */
+		return;
+
+	_STARPU_DEBUG("%u children > %u, synthesizing intermediate combined workers of size %u\n", n, synthesize_arity, chunk_size);
+
+	n = 0;
+	j = 0;
+	nworkers = 0;
+	chunk_start = 0;
+	for (i = 0 ; i < arity; i++)
+	{
+		if (children[i]->userdata)
+		{
+			n++;
+			_STARPU_DEBUG("child %u\n", i);
+			find_workers(children[i], cpu_workers, &nworkers);
+			j++;
+		}
+		/* Completed a chunk, or last bit (but not if it's just 1 subobject) */
+		if (j == chunk_size || (i == arity-1 && j > 1))
 		{
-				if (children[i]->userdata) 
-				{
-						n++;
-						_STARPU_DEBUG("child %u\n", i);
-						find_workers(children[i], cpu_workers, &nworkers);
-						j++;
-				}
-				/* Completed a chunk, or last bit (but not if it's just 1 subobject) */
-				if (j == chunk_size || (i == arity-1 && j > 1)) 
-				{
-						_STARPU_DEBUG("Adding it\n");
-						ret = starpu_combined_worker_assign_workerid(nworkers, cpu_workers);
-						STARPU_ASSERT(ret >= 0);
-						/* Recurse there */
-						synthesize_intermediate_workers(children+chunk_start, i - chunk_start, n, synthesize_arity);
-						/* And restart another one */
-						n = 0;
-						j = 0;
-						nworkers = 0;
-						chunk_start = i+1;
-				}
+			_STARPU_DEBUG("Adding it\n");
+			ret = starpu_combined_worker_assign_workerid(nworkers, cpu_workers);
+			STARPU_ASSERT(ret >= 0);
+			/* Recurse there */
+			synthesize_intermediate_workers(children+chunk_start, i - chunk_start, n, synthesize_arity);
+			/* And restart another one */
+			n = 0;
+			j = 0;
+			nworkers = 0;
+			chunk_start = i+1;
 		}
+	}
 }
 
 static void find_and_assign_combinations(hwloc_obj_t obj, unsigned synthesize_arity)
 {
-    char name[64];
-    unsigned i, n, nworkers;
-    int cpu_workers[STARPU_NMAXWORKERS];
+	char name[64];
+	unsigned i, n, nworkers;
+	int cpu_workers[STARPU_NMAXWORKERS];
 
 	struct _starpu_machine_config *config = _starpu_get_machine_config();
 	struct starpu_machine_topology *topology = &config->topology;
 
-    hwloc_obj_snprintf(name, sizeof(name), topology->hwtopology, obj, "#", 0);
-    _STARPU_DEBUG("Looking at %s\n", name);
-
-    for (n = 0, i = 0; i < obj->arity; i++)
-			if (obj->children[i]->userdata)
-					/* it has a CPU worker */
-					n++;
-	
-    if (n == 1) {
-			/* If there is only one child, we go to the next level right away */
-			find_and_assign_combinations(obj->children[0], synthesize_arity);
-			return;
-    }
-	
-    /* Add this object */
-    nworkers = 0;
-    find_workers(obj, cpu_workers, &nworkers);
-	
-    if (nworkers > 1)
-    {
-			_STARPU_DEBUG("Adding it\n");
-			unsigned sched_ctx_id  = starpu_get_sched_ctx();
-			if(sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
-					sched_ctx_id = 0; 
-			
-			struct starpu_sched_ctx_worker_collection* workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
-
-			int newworkerid = starpu_combined_worker_assign_workerid(nworkers, cpu_workers);
-			STARPU_ASSERT(newworkerid >= 0);
-			workers->add(workers,newworkerid);
-    }
-	
-    /* Add artificial intermediate objects recursively */
-    synthesize_intermediate_workers(obj->children, obj->arity, n, synthesize_arity);
-	
-    /* And recurse */
-    for (i = 0; i < obj->arity; i++)
-			if (obj->children[i]->userdata == (void*) -1)
-					find_and_assign_combinations(obj->children[i], synthesize_arity);
+	hwloc_obj_snprintf(name, sizeof(name), topology->hwtopology, obj, "#", 0);
+	_STARPU_DEBUG("Looking at %s\n", name);
+
+	for (n = 0, i = 0; i < obj->arity; i++)
+		if (obj->children[i]->userdata)
+			/* it has a CPU worker */
+			n++;
+
+	if (n == 1)
+	{
+		/* If there is only one child, we go to the next level right away */
+		find_and_assign_combinations(obj->children[0], synthesize_arity);
+		return;
+	}
+
+	/* Add this object */
+	nworkers = 0;
+	find_workers(obj, cpu_workers, &nworkers);
+
+	if (nworkers > 1)
+	{
+		_STARPU_DEBUG("Adding it\n");
+		unsigned sched_ctx_id  = starpu_get_sched_ctx();
+		if(sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
+			sched_ctx_id = 0;
+
+		struct starpu_sched_ctx_worker_collection* workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
+
+		int newworkerid = starpu_combined_worker_assign_workerid(nworkers, cpu_workers);
+		STARPU_ASSERT(newworkerid >= 0);
+		workers->add(workers,newworkerid);
+	}
+
+	/* Add artificial intermediate objects recursively */
+	synthesize_intermediate_workers(obj->children, obj->arity, n, synthesize_arity);
+
+	/* And recurse */
+	for (i = 0; i < obj->arity; i++)
+		if (obj->children[i]->userdata == (void*) -1)
+			find_and_assign_combinations(obj->children[i], synthesize_arity);
 }
 
 static void find_and_assign_combinations_with_hwloc(int *workerids, int nworkers)
@@ -149,10 +150,10 @@ static void find_and_assign_combinations_with_hwloc(int *workerids, int nworkers
 	struct _starpu_machine_config *config = _starpu_get_machine_config();
 	struct starpu_machine_topology *topology = &config->topology;
 	int synthesize_arity = starpu_get_env_number("STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER");
-	
+
 	if (synthesize_arity == -1)
 		synthesize_arity = 2;
-	
+
 	/* First, mark nodes which contain CPU workers, simply by setting their userdata field */
 	int i;
 	for (i = 0; i < nworkers; i++)
@@ -163,7 +164,8 @@ static void find_and_assign_combinations_with_hwloc(int *workerids, int nworkers
 			hwloc_obj_t obj = hwloc_get_obj_by_depth(topology->hwtopology, config->cpu_depth, worker->bindid);
 			STARPU_ASSERT(obj->userdata == worker);
 			obj = obj->parent;
-			while (obj) {
+			while (obj)
+			{
 				obj->userdata = (void*) -1;
 				obj = obj->parent;
 			}
@@ -176,40 +178,39 @@ static void find_and_assign_combinations_with_hwloc(int *workerids, int nworkers
 
 static void find_and_assign_combinations_without_hwloc(int *workerids, int nworkers)
 {
-    unsigned sched_ctx_id  = starpu_get_sched_ctx();
-    if(sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
-	    sched_ctx_id = 0; 
-	
-    struct starpu_sched_ctx_worker_collection* workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
-	
-	
-    /* We put the id of all CPU workers in this array */
-    int cpu_workers[STARPU_NMAXWORKERS];
-    unsigned ncpus = 0;
-	
-    struct _starpu_worker *worker;
-    unsigned i;
-    for (i = 0; i < nworkers; i++)
-    {
-	    worker = _starpu_get_worker_struct(workerids[i]);
-		
-	    if (worker->perf_arch == STARPU_CPU_DEFAULT)
-		    cpu_workers[ncpus++] = i;
-    }
-	
-    unsigned size;
-    for (size = 2; size <= ncpus; size *= 2)
-    {
+	unsigned sched_ctx_id  = starpu_get_sched_ctx();
+	if(sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
+		sched_ctx_id = 0;
+
+	struct starpu_sched_ctx_worker_collection* workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
+
+	/* We put the id of all CPU workers in this array */
+	int cpu_workers[STARPU_NMAXWORKERS];
+	unsigned ncpus = 0;
+
+	struct _starpu_worker *worker;
+	unsigned i;
+	for (i = 0; i < nworkers; i++)
+	{
+		worker = _starpu_get_worker_struct(workerids[i]);
+
+		if (worker->perf_arch == STARPU_CPU_DEFAULT)
+			cpu_workers[ncpus++] = i;
+	}
+
+	unsigned size;
+	for (size = 2; size <= ncpus; size *= 2)
+	{
 		unsigned first_cpu;
 		for (first_cpu = 0; first_cpu < ncpus; first_cpu += size)
 		{
 			if (first_cpu + size <= ncpus)
 			{
 				int found_workerids[size];
-				
+
 				for (i = 0; i < size; i++)
 					found_workerids[i] = cpu_workers[first_cpu + i];
-				
+
 				/* We register this combination */
 				int newworkerid;
 				newworkerid = starpu_combined_worker_assign_workerid(size, found_workerids);
@@ -217,12 +218,11 @@ static void find_and_assign_combinations_without_hwloc(int *workerids, int nwork
 				workers->add(workers, newworkerid);
 			}
 		}
-    }
+	}
 }
 
 #endif /* STARPU_HAVE_HWLOC */
 
-
 static void combine_all_cpu_workers(int *workerids, int nworkers)
 {
 	unsigned sched_ctx_id  = starpu_get_sched_ctx();
@@ -236,11 +236,11 @@ static void combine_all_cpu_workers(int *workerids, int nworkers)
 	for (i = 0; i < nworkers; i++)
 	{
 		worker = _starpu_get_worker_struct(workerids[i]);
-		
+
 		if (worker->perf_arch == STARPU_CPU_DEFAULT)
 			cpu_workers[ncpus++] = workerids[i];
 	}
-	
+
 	for (i = 1; i <= ncpus; i++)
 	{
 		int newworkerid;
@@ -252,16 +252,16 @@ static void combine_all_cpu_workers(int *workerids, int nworkers)
 
 void _starpu_sched_find_worker_combinations(int *workerids, int nworkers)
 {
-    struct _starpu_machine_config *config = _starpu_get_machine_config();
+	struct _starpu_machine_config *config = _starpu_get_machine_config();
 
-    if (config->conf->single_combined_worker > 0)
-	    combine_all_cpu_workers(workerids, nworkers);
-    else
-    {
+	if (config->conf->single_combined_worker > 0)
+		combine_all_cpu_workers(workerids, nworkers);
+	else
+	{
 #ifdef STARPU_HAVE_HWLOC
-	    find_and_assign_combinations_with_hwloc(workerids, nworkers);
+		find_and_assign_combinations_with_hwloc(workerids, nworkers);
 #else
-	    find_and_assign_combinations_without_hwloc(workerids, nworkers);
+		find_and_assign_combinations_without_hwloc(workerids, nworkers);
 #endif
-    }
+	}
 }

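The non-hwloc fallback above builds combined workers out of contiguous, power-of-two sized groups of CPU workers. A toy, self-contained rendering of that loop structure (printf stands in for the real starpu_combined_worker_assign_workerid() registration):

#include <stdio.h>

int main(void)
{
	unsigned ncpus = 8;	/* assume 8 CPU workers for the example */
	unsigned size, first_cpu, i;

	/* same loop shape as find_and_assign_combinations_without_hwloc() */
	for (size = 2; size <= ncpus; size *= 2)
		for (first_cpu = 0; first_cpu < ncpus; first_cpu += size)
			if (first_cpu + size <= ncpus)
			{
				printf("combined worker of size %u:", size);
				for (i = 0; i < size; i++)
					printf(" %u", first_cpu + i);
				printf("\n");
			}
	return 0;
}

With ncpus = 8 this prints the groups {0,1} {2,3} {4,5} {6,7} {0..3} {4..7} {0..7}, i.e. one combined worker per node of a binary tree over the CPUs.
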
+ 16 - 15
src/sched_policies/eager_central_policy.c

@@ -24,15 +24,16 @@
 #include <core/workers.h>
 #include <sched_policies/fifo_queues.h>
 
-typedef struct {
+struct _starpu_eager_center_policy_data
+{
 	struct _starpu_fifo_taskq *fifo;
 	_starpu_pthread_mutex_t sched_mutex;
 	_starpu_pthread_cond_t sched_cond;
-} eager_center_policy_data;
+};
 
-static void eager_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers) 
+static void eager_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_eager_center_policy_data *data = (struct _starpu_eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned i;
 	int workerid;
 	for (i = 0; i < nworkers; i++)
@@ -53,11 +54,11 @@ static void eager_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned
 	}
 }
 
-static void initialize_eager_center_policy(unsigned sched_ctx_id) 
+static void initialize_eager_center_policy(unsigned sched_ctx_id)
 {
 	starpu_create_worker_collection_for_sched_ctx(sched_ctx_id, WORKER_LIST);
 
-	eager_center_policy_data *data = (eager_center_policy_data*)malloc(sizeof(eager_center_policy_data));
+	struct _starpu_eager_center_policy_data *data = (struct _starpu_eager_center_policy_data*)malloc(sizeof(struct _starpu_eager_center_policy_data));
 
 	_STARPU_DISP("Warning: you are running the default eager scheduler, which is not very smart. Make sure to read the StarPU documentation about adding performance models in order to be able to use the dmda scheduler instead.\n");
 
@@ -70,31 +71,31 @@ static void initialize_eager_center_policy(unsigned sched_ctx_id)
 	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)data);
 }
 
-static void deinitialize_eager_center_policy(unsigned sched_ctx_id) 
+static void deinitialize_eager_center_policy(unsigned sched_ctx_id)
 {
 	/* TODO check that there is no task left in the queue */
 
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_eager_center_policy_data *data = (struct _starpu_eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	/* deallocate the job queue */
 	_starpu_destroy_fifo(data->fifo);
 
 	_STARPU_PTHREAD_MUTEX_DESTROY(&data->sched_mutex);
 	_STARPU_PTHREAD_COND_DESTROY(&data->sched_cond);
-	
+
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
 
-	free(data);	
+	free(data);
 }
 
 static int push_task_eager_policy(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_eager_center_policy_data *data = (struct _starpu_eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
 	unsigned nworkers;
 	int ret_val = -1;
-	
+
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
 	if(nworkers == 0)
@@ -110,15 +111,15 @@ static int push_task_eager_policy(struct starpu_task *task)
 
 static struct starpu_task *pop_every_task_eager_policy(unsigned sched_ctx_id)
 {
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_eager_center_policy_data *data = (struct _starpu_eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	return _starpu_fifo_pop_every_task(data->fifo, &data->sched_mutex, starpu_worker_get_id());
 }
 
 static struct starpu_task *pop_task_eager_policy(unsigned sched_ctx_id)
 {
 	unsigned workerid = starpu_worker_get_id();
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
-	
+	struct _starpu_eager_center_policy_data *data = (struct _starpu_eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+
 	return _starpu_fifo_pop_task(data->fifo, workerid);
 }
 

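The eager policy above is structurally just one shared FIFO: push_task_eager_policy() appends under a single mutex and signals, and each idle worker pops from the same queue. A minimal, policy-free sketch of that shape in plain pthreads (struct task is a hypothetical stand-in for struct starpu_task):

#include <pthread.h>

struct task { struct task *next; };

static struct task *head, *tail;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void push(struct task *t)
{
	pthread_mutex_lock(&mutex);
	t->next = NULL;
	if (tail)
		tail->next = t;
	else
		head = t;
	tail = t;
	pthread_cond_signal(&cond);	/* wake one waiting worker */
	pthread_mutex_unlock(&mutex);
}

static struct task *pop(void)
{
	struct task *t;
	pthread_mutex_lock(&mutex);
	while (!head)
		pthread_cond_wait(&cond, &mutex);	/* block until work arrives */
	t = head;
	head = t->next;
	if (!head)
		tail = NULL;
	pthread_mutex_unlock(&mutex);
	return t;
}
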
+ 13 - 14
src/sched_policies/eager_central_priority_policy.c

@@ -42,11 +42,12 @@ struct _starpu_priority_taskq
 	unsigned total_ntasks;
 };
 
-typedef struct eager_central_prio_data{
+struct _starpu_eager_central_prio_data
+{
 	struct _starpu_priority_taskq *taskq;
 	_starpu_pthread_mutex_t sched_mutex;
 	_starpu_pthread_cond_t sched_cond;
-} eager_central_prio_data;
+};
 
 /*
  * Centralized queue with priorities
@@ -74,9 +75,9 @@ static void _starpu_destroy_priority_taskq(struct _starpu_priority_taskq *priori
 	free(priority_queue);
 }
 
-static void eager_priority_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers) 
+static void eager_priority_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	eager_central_prio_data *data = (eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_eager_central_prio_data *data = (struct _starpu_eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	unsigned i;
 	int workerid;
@@ -95,13 +96,13 @@ static void eager_priority_remove_workers(unsigned sched_ctx_id, int *workerids,
 	{
 		workerid = workerids[i];
 		starpu_sched_ctx_set_worker_mutex_and_cond(sched_ctx_id, workerid, NULL, NULL);
-	}	
+	}
 }
 
-static void initialize_eager_center_priority_policy(unsigned sched_ctx_id) 
+static void initialize_eager_center_priority_policy(unsigned sched_ctx_id)
 {
 	starpu_create_worker_collection_for_sched_ctx(sched_ctx_id, WORKER_LIST);
-	eager_central_prio_data *data = (eager_central_prio_data*)malloc(sizeof(eager_central_prio_data));
+	struct _starpu_eager_central_prio_data *data = (struct _starpu_eager_central_prio_data*)malloc(sizeof(struct _starpu_eager_central_prio_data));
 
 	/* In this policy, we support more than two levels of priority. */
 	starpu_sched_set_min_priority(MIN_LEVEL);
@@ -113,13 +114,12 @@ static void initialize_eager_center_priority_policy(unsigned sched_ctx_id)
 
 	_STARPU_PTHREAD_MUTEX_INIT(&data->sched_mutex, NULL);
 	_STARPU_PTHREAD_COND_INIT(&data->sched_cond, NULL);
-
 }
 
-static void deinitialize_eager_center_priority_policy(unsigned sched_ctx_id) 
+static void deinitialize_eager_center_priority_policy(unsigned sched_ctx_id)
 {
 	/* TODO check that there is no task left in the queue */
-	eager_central_prio_data *data = (eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_eager_central_prio_data *data = (struct _starpu_eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	/* deallocate the task queue */
 	_starpu_destroy_priority_taskq(data->taskq);
@@ -129,13 +129,12 @@ static void deinitialize_eager_center_priority_policy(unsigned sched_ctx_id)
 
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
         free(data);
-	
 }
 
 static int _starpu_priority_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	eager_central_prio_data *data = (eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_eager_central_prio_data *data = (struct _starpu_eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct _starpu_priority_taskq *taskq = data->taskq;
 
@@ -178,8 +177,8 @@ static struct starpu_task *_starpu_priority_pop_task(unsigned sched_ctx_id)
 	unsigned workerid = starpu_worker_get_id();
 	int skipped = 0;
 
-	eager_central_prio_data *data = (eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
-	
+	struct _starpu_eager_central_prio_data *data = (struct _starpu_eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+
 	struct _starpu_priority_taskq *taskq = data->taskq;
 
 	/* block until some event happens */

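As the initialisation hunk above notes, this policy supports more than two levels of priority. A sketch of the pop-side selection, under the assumption that the queue keeps a per-level task counter (the level count and field names here are hypothetical stand-ins for the real _starpu_priority_taskq layout):

#define NPRIO_LEVELS 5	/* assumed; the real range is MIN_LEVEL..MAX_LEVEL */

struct prio_taskq
{
	unsigned ntasks[NPRIO_LEVELS];	/* tasks queued per priority level */
	unsigned total_ntasks;
};

/* Return the highest non-empty level, or -1 if the queue is empty; the real
 * pop then dequeues from that level while holding data->sched_mutex. */
static int highest_nonempty_level(const struct prio_taskq *q)
{
	int prio;
	for (prio = NPRIO_LEVELS - 1; prio >= 0; prio--)
		if (q->ntasks[prio] > 0)
			return prio;
	return -1;
}
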
+ 20 - 19
src/sched_policies/parallel_greedy.c

@@ -21,7 +21,8 @@
 #include <common/barrier.h>
 #include <sched_policies/detect_combined_workers.h>
 
-typedef struct pgreedy_data {
+struct _starpu_pgreedy_data
+{
 	struct _starpu_fifo_taskq *fifo;
 	struct _starpu_fifo_taskq *local_fifo[STARPU_NMAXWORKERS];
 
@@ -32,7 +33,7 @@ typedef struct pgreedy_data {
 
 	_starpu_pthread_cond_t master_sched_cond[STARPU_NMAXWORKERS];
 	_starpu_pthread_mutex_t master_sched_mutex[STARPU_NMAXWORKERS];
-} pgreedy_data;
+};
 
 /* XXX instead of 10, we should use some "MAX combination .."*/
 static int possible_combinations_cnt[STARPU_NMAXWORKERS];
@@ -46,13 +47,13 @@ static int possible_combinations_size[STARPU_NMAXWORKERS][10];
 
 static void pgreedy_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_pgreedy_data *data = (struct _starpu_pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	_starpu_sched_find_worker_combinations(workerids, nworkers);
 
 	unsigned workerid, i;
 	unsigned ncombinedworkers;
-	
+
 	ncombinedworkers = starpu_combined_worker_get_count();
 
 	/* Find the master of each worker. We first assign the worker as its
@@ -61,15 +62,15 @@ static void pgreedy_add_workers(unsigned sched_ctx_id, int *workerids, unsigned
 	for(i = 0; i < nworkers; i++)
 	{
 		workerid = workerids[i];
-		
+
 		int cnt = possible_combinations_cnt[workerid]++;
 		possible_combinations[workerid][cnt] = workerid;
 		possible_combinations_size[workerid][cnt] = 1;
-		
+
 		data->master_id[workerid] = workerid;
 	}
-	
-	
+
+
 	for (i = 0; i < ncombinedworkers; i++)
 	{
 		workerid = nworkers + i;
@@ -130,7 +131,7 @@ static void pgreedy_add_workers(unsigned sched_ctx_id, int *workerids, unsigned
 
 static void pgreedy_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_pgreedy_data *data = (struct _starpu_pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	int workerid;
 	unsigned i;
 	for(i = 0; i < nworkers; i++)
@@ -143,11 +144,11 @@ static void pgreedy_remove_workers(unsigned sched_ctx_id, int *workerids, unsign
 	}
 }
 
-static void initialize_pgreedy_policy(unsigned sched_ctx_id) 
+static void initialize_pgreedy_policy(unsigned sched_ctx_id)
 {
 	starpu_create_worker_collection_for_sched_ctx(sched_ctx_id, WORKER_LIST);
 
-	struct pgreedy_data *data = (struct pgreedy_data*)malloc(sizeof(pgreedy_data));
+	struct _starpu_pgreedy_data *data = (struct _starpu_pgreedy_data*)malloc(sizeof(struct _starpu_pgreedy_data));
 	/* masters pick tasks from that queue */
 	data->fifo = _starpu_create_fifo();
 
@@ -157,10 +158,10 @@ static void initialize_pgreedy_policy(unsigned sched_ctx_id)
 	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)data);
 }
 
-static void deinitialize_pgreedy_policy(unsigned sched_ctx_id) 
+static void deinitialize_pgreedy_policy(unsigned sched_ctx_id)
 {
 	/* TODO check that there is no task left in the queue */
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_pgreedy_data *data = (struct _starpu_pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	/* deallocate the job queue */
 	_starpu_destroy_fifo(data->fifo);
@@ -170,7 +171,7 @@ static void deinitialize_pgreedy_policy(unsigned sched_ctx_id)
 
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
 
-	free(data);	
+	free(data);
 }
 
 static int push_task_pgreedy_policy(struct starpu_task *task)
@@ -189,22 +190,22 @@ static int push_task_pgreedy_policy(struct starpu_task *task)
    		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		return ret_val;
 	}
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_pgreedy_data *data = (struct _starpu_pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	ret_val = _starpu_fifo_push_task(data->fifo, &data->sched_mutex, &data->sched_cond, task);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
-	
+
 	return ret_val;
 }
 
 static struct starpu_task *pop_task_pgreedy_policy(unsigned sched_ctx_id)
 {
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_pgreedy_data *data = (struct _starpu_pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid = starpu_worker_get_id();
 
 	/* If this is not a CPU, then the worker simply grabs tasks from the fifo */
 	if (starpu_worker_get_type(workerid) != STARPU_CPU_WORKER)
-		return  _starpu_fifo_pop_task(data->fifo, workerid);
+		return _starpu_fifo_pop_task(data->fifo, workerid);
 
 	int master = data->master_id[workerid];
 
@@ -274,7 +275,7 @@ static struct starpu_task *pop_task_pgreedy_policy(unsigned sched_ctx_id)
 				int local_worker = combined_workerid[i];
 
 				_starpu_fifo_push_task(data->local_fifo[local_worker],
-						       &data->master_sched_mutex[master], 
+						       &data->master_sched_mutex[master],
 						       &data->master_sched_cond[master], alias);
 			}
 

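In the parallel greedy hunks above, every worker first registers itself as a size-1 combination and becomes its own master; the combined workers discovered afterwards then override master_id for their members. A standalone sketch of that first step (array bounds are shrunk stand-ins for the STARPU_NMAXWORKERS tables in the diff):

#define NMAXWORKERS 16	/* stand-in for STARPU_NMAXWORKERS */
#define MAXCOMBOS 10	/* matches the "instead of 10" table width above */

static int master_id[NMAXWORKERS];
static int possible_combinations_cnt[NMAXWORKERS];
static int possible_combinations[NMAXWORKERS][MAXCOMBOS];
static int possible_combinations_size[NMAXWORKERS][MAXCOMBOS];

static void init_masters(const int *workerids, unsigned nworkers)
{
	unsigned i;
	for (i = 0; i < nworkers; i++)
	{
		int workerid = workerids[i];
		int cnt = possible_combinations_cnt[workerid]++;
		/* each worker is a size-1 combination of itself... */
		possible_combinations[workerid][cnt] = workerid;
		possible_combinations_size[workerid][cnt] = 1;
		/* ...and starts out as its own master */
		master_id[workerid] = workerid;
	}
}
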
+ 25 - 23
src/sched_policies/parallel_heft.c

@@ -38,7 +38,8 @@
 //static enum starpu_perf_archtype applicable_perf_archtypes[STARPU_NARCH_VARIATIONS];
 //static unsigned napplicable_perf_archtypes = 0;
 
-typedef struct {
+struct _starpu_pheft_data
+{
 	double alpha;
 	double beta;
 	double _gamma;
@@ -46,7 +47,7 @@ typedef struct {
 /* When we push a task on a combined worker we need all the cpu workers it contains
  * to be locked at once */
 	_starpu_pthread_mutex_t global_push_mutex;
-} pheft_data;
+};
 
 static double worker_exp_start[STARPU_NMAXWORKERS];
 static double worker_exp_end[STARPU_NMAXWORKERS];
@@ -54,8 +55,8 @@ static double worker_exp_len[STARPU_NMAXWORKERS];
 static int ntasks[STARPU_NMAXWORKERS];
 
 
-/*!!!!!!! It doesn't work with several contexts because the combined workers are constructed         
-  from the workers available to the program, and not to the context !!!!!!!!!!!!!!!!!!!!!!!          
+/*!!!!!!! It doesn't work with several contexts because the combined workers are constructed
+  from the workers available to the program, and not to the context !!!!!!!!!!!!!!!!!!!!!!!
 */
 
 static void parallel_heft_pre_exec_hook(struct starpu_task *task)
@@ -89,8 +90,8 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 {
 	/* make sure someone could execute that task! */
 	STARPU_ASSERT(best_workerid != -1);
-	
-	pheft_data *hd = (pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+
+	struct _starpu_pheft_data *hd = (struct _starpu_pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	/* Is this a basic worker or a combined worker ? */
 	unsigned memory_node;
@@ -242,14 +243,14 @@ static double compute_ntasks_end(int workerid)
 
 static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, unsigned sched_ctx_id)
 {
-	pheft_data *hd = (pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_pheft_data *hd = (struct _starpu_pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
 	unsigned nworkers_ctx = workers->nworkers;
 
 	unsigned worker, worker_ctx = 0;
 	int best = -1, best_id_ctx = -1;
-	
+
 	/* this flag is set if the corresponding worker is selected because
 	   there is no performance prediction available yet */
 	int forced_best = -1, forced_best_ctx = -1, forced_nimpl = -1;
@@ -314,7 +315,7 @@ static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, uns
 				skip_worker[worker_ctx][nimpl] = 0;
 			}
 
-       
+
 			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
 
 			local_task_length[worker_ctx][nimpl] = starpu_task_expected_length(task, perf_arch,nimpl);
@@ -373,7 +374,8 @@ static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, uns
 		worker_ctx++;
 	}
 
-	if (unknown) {
+	if (unknown)
+	{
 		forced_best = ntasks_best;
 		forced_best_ctx = ntasks_best_ctx;
 		forced_nimpl = nimpl_best;
@@ -387,7 +389,7 @@ static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, uns
 		while(workers->has_next(workers))
 		{
 			worker = workers->get_next(workers);
-			
+
 			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
 			{
 				if (skip_worker[worker_ctx][nimpl])
@@ -396,7 +398,7 @@ static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, uns
 					continue;
 				}
 
-				fitness[worker_ctx][nimpl] = hd->alpha*(local_exp_end[worker_ctx][nimpl] - best_exp_end) 
+				fitness[worker_ctx][nimpl] = hd->alpha*(local_exp_end[worker_ctx][nimpl] - best_exp_end)
 						+ hd->beta*(local_data_penalty[worker_ctx][nimpl])
 						+ hd->_gamma*(local_power[worker_ctx][nimpl]);
 
@@ -457,7 +459,7 @@ static int parallel_heft_push_task(struct starpu_task *task)
 	int ret_val = -1;
 
 	if (task->priority == STARPU_MAX_PRIO)
-	{  
+	{
 		_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
                 nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
                 if(nworkers == 0)
@@ -465,7 +467,7 @@ static int parallel_heft_push_task(struct starpu_task *task)
                         _STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
                         return ret_val;
                 }
-		
+
 		ret_val = _parallel_heft_push_task(task, 1, sched_ctx_id);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
                 return ret_val;
@@ -498,7 +500,7 @@ static void parallel_heft_add_workers(unsigned sched_ctx_id, int *workerids, uns
 		{
 			worker_exp_start[workerid] = starpu_timing_now();
 			worker_exp_len[workerid] = 0.0;
-			worker_exp_end[workerid] = worker_exp_start[workerid]; 
+			worker_exp_end[workerid] = worker_exp_start[workerid];
 			ntasks[workerid] = 0;
 			workerarg->has_prev_init = 1;
 		}
@@ -546,15 +548,15 @@ static void parallel_heft_remove_workers(unsigned sched_ctx_id, int *workerids,
 		starpu_sched_ctx_deinit_worker_mutex_and_cond(sched_ctx_id, worker);
 	}
 }
-static void initialize_parallel_heft_policy(unsigned sched_ctx_id) 
-{	
+static void initialize_parallel_heft_policy(unsigned sched_ctx_id)
+{
 	starpu_create_worker_collection_for_sched_ctx(sched_ctx_id, WORKER_LIST);
-	pheft_data *hd = (pheft_data*)malloc(sizeof(pheft_data));
+	struct _starpu_pheft_data *hd = (struct _starpu_pheft_data*)malloc(sizeof(struct _starpu_pheft_data));
 	hd->alpha = _STARPU_DEFAULT_ALPHA;
 	hd->beta = _STARPU_DEFAULT_BETA;
 	hd->_gamma = _STARPU_DEFAULT_GAMMA;
 	hd->idle_power = 0.0;
-	
+
 	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)hd);
 
 	const char *strval_alpha = getenv("STARPU_SCHED_ALPHA");
@@ -572,14 +574,14 @@ static void initialize_parallel_heft_policy(unsigned sched_ctx_id)
 	const char *strval_idle_power = getenv("STARPU_IDLE_POWER");
 	if (strval_idle_power)
 		hd->idle_power = atof(strval_idle_power);
-	
+
 	_STARPU_PTHREAD_MUTEX_INIT(&hd->global_push_mutex, NULL);
 
 }
 
-static void parallel_heft_deinit(unsigned sched_ctx_id) 
+static void parallel_heft_deinit(unsigned sched_ctx_id)
 {
-	pheft_data *hd = (pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_pheft_data *hd = (struct _starpu_pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
 	_STARPU_PTHREAD_MUTEX_DESTROY(&hd->global_push_mutex);
 	free(hd);
@@ -592,7 +594,7 @@ struct starpu_sched_policy _starpu_sched_parallel_heft_policy =
 	.deinit_sched = parallel_heft_deinit,
 	.add_workers = parallel_heft_add_workers,
 	.remove_workers = parallel_heft_remove_workers,
-	.push_task = parallel_heft_push_task, 
+	.push_task = parallel_heft_push_task,
 	.pop_task = NULL,
 	.pre_exec_hook = parallel_heft_pre_exec_hook,
 	.post_exec_hook = NULL,

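One detail worth calling out in both the dmda and parallel-heft hunks: when a candidate placement would extend the overall makespan (exp_end beyond max_exp_end), the fitness is additionally charged for the energy the other processing units would burn while idling. A standalone restatement of that term (a hypothetical helper; times are in µs, hence the 1e6 divisor):

/* gamma weights energy in the fitness, idle_power is the platform's idle
 * consumption, and (exp_end - max_exp_end) is the extra time during which
 * the other units would sit idle. */
static double add_idle_power_penalty(double fitness, double gamma,
				     double idle_power,
				     double exp_end, double max_exp_end)
{
	if (exp_end > max_exp_end)
		fitness += gamma * idle_power * (exp_end - max_exp_end) / 1000000.0;
	return fitness;
}
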
+ 5 - 4
src/sched_policies/random_policy.c

@@ -69,7 +69,8 @@ static int _random_push_task(struct starpu_task *task, unsigned prio)
 	}
 
 #ifdef HAVE_AYUDAME_H
-	if (AYU_event) {
+	if (AYU_event)
+	{
 		int id = selected;
 		AYU_event(AYU_ADDTASKTOQUEUE, _starpu_get_job_associated_to_task(task)->job_id, &id);
 	}
@@ -102,7 +103,7 @@ static int random_push_task(struct starpu_task *task)
         return ret_val;
 }
 
-static void random_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers) 
+static void random_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
 	unsigned i;
 	int workerid;
@@ -126,13 +127,13 @@ static void random_remove_workers(unsigned sched_ctx_id, int *workerids, unsigne
 
 }
 
-static void initialize_random_policy(unsigned sched_ctx_id) 
+static void initialize_random_policy(unsigned sched_ctx_id)
 {
 	starpu_create_worker_collection_for_sched_ctx(sched_ctx_id, WORKER_LIST);
 	starpu_srand48(time(NULL));
 }
 
-static void deinitialize_random_policy(unsigned sched_ctx_id) 
+static void deinitialize_random_policy(unsigned sched_ctx_id)
 {
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
 }

+ 22 - 20
src/sched_policies/work_stealing_policy.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2010-2012  Université de Bordeaux 1
- * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011, 2012  Centre National de la Recherche Scientifique
  * Copyright (C) 2011, 2012  INRIA
  *
  * StarPU is free software; you can redistribute it and/or modify
@@ -24,7 +24,8 @@
 #include <sched_policies/deque_queues.h>
 #include <core/debug.h>
 
-typedef struct{
+struct _starpu_work_stealing_data
+{
 	struct _starpu_deque_jobq **queue_array;
 	unsigned rr_worker;
 	/* keep track of the work performed from the beginning of the algorithm to make
@@ -35,7 +36,7 @@ typedef struct{
 	_starpu_pthread_cond_t sched_cond;
 	unsigned last_pop_worker;
 	unsigned last_push_worker;
-} work_stealing_data;
+};
 
 #ifdef USE_OVERLOAD
 
@@ -56,7 +57,7 @@ static int calibration_value = 0;
  */
 static unsigned select_victim_round_robin(unsigned sched_ctx_id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned worker = ws->last_pop_worker;
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
 
@@ -84,7 +85,7 @@ static unsigned select_victim_round_robin(unsigned sched_ctx_id)
  */
 static unsigned select_worker_round_robin(unsigned sched_ctx_id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned worker = ws->last_push_worker;
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
 
@@ -105,7 +106,7 @@ static unsigned select_worker_round_robin(unsigned sched_ctx_id)
  */
 static float overload_metric(unsigned sched_ctx_id, unsigned id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	float execution_ratio = 0.0f;
 	float current_ratio = 0.0f;
 
@@ -140,7 +141,7 @@ static unsigned select_victim_overload(unsigned sched_ctx_id)
 	unsigned worker;
 	float  worker_ratio;
 	unsigned best_worker = 0;
-	float best_ratio = FLT_MIN;	
+	float best_ratio = FLT_MIN;
 
 	/* Don't try to play smart until we get
 	 * enough information. */
@@ -250,7 +251,7 @@ static inline unsigned select_worker(unsigned sched_ctx_id)
 #endif
 static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_task *task;
 	struct _starpu_deque_jobq *q;
@@ -293,10 +294,10 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 int ws_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct _starpu_deque_jobq *deque_queue;
-	struct _starpu_job *j = _starpu_get_job_associated_to_task(task); 
+	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 	int workerid = starpu_worker_get_id();
 
 	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
@@ -324,7 +325,8 @@ int ws_push_task(struct starpu_task *task)
 
 	_STARPU_TRACE_JOB_PUSH(task, 0);
 #ifdef HAVE_AYUDAME_H
-	if (AYU_event) {
+	if (AYU_event)
+	{
 		int id = workerid;
 		AYU_event(AYU_ADDTASKTOQUEUE, j->job_id, &id);
 	}
@@ -340,13 +342,13 @@ int ws_push_task(struct starpu_task *task)
 	return 0;
 }
 
-static void ws_add_workers(unsigned sched_ctx_id, int *workerids,unsigned nworkers) 
+static void ws_add_workers(unsigned sched_ctx_id, int *workerids,unsigned nworkers)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	unsigned i;
 	int workerid;
-	
+
 	for (i = 0; i < nworkers; i++)
 	{
 		workerid = workerids[i];
@@ -364,11 +366,11 @@ static void ws_add_workers(unsigned sched_ctx_id, int *workerids,unsigned nworke
 
 static void ws_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	unsigned i;
 	int workerid;
-	
+
 	for (i = 0; i < nworkers; i++)
 	{
 		workerid = workerids[i];
@@ -377,13 +379,13 @@ static void ws_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nw
 	}
 }
 
-static void initialize_ws_policy(unsigned sched_ctx_id) 
+static void initialize_ws_policy(unsigned sched_ctx_id)
 {
 	starpu_create_worker_collection_for_sched_ctx(sched_ctx_id, WORKER_LIST);
 
-	work_stealing_data *ws = (work_stealing_data*)malloc(sizeof(work_stealing_data));
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)malloc(sizeof(struct _starpu_work_stealing_data));
 	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)ws);
-	
+
 	ws->last_pop_worker = 0;
 	ws->last_push_worker = 0;
 
@@ -401,7 +403,7 @@ static void initialize_ws_policy(unsigned sched_ctx_id)
 
 static void deinit_ws_policy(unsigned sched_ctx_id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
+	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	free(ws->queue_array);
 	_STARPU_PTHREAD_MUTEX_DESTROY(&ws->sched_mutex);
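
Finally, the work-stealing hunks above maintain two cursors, last_pop_worker and last_push_worker, so that both stealing and pushing rotate round-robin over the context's workers. A simplified sketch of the victim selection, with the state passed explicitly instead of living in the policy data:

/* Return the current victim candidate and advance the cursor, wrapping at
 * nworkers, so successive steal attempts probe the workers in turn. */
static unsigned pick_victim_round_robin(unsigned *last_pop_worker,
					unsigned nworkers)
{
	unsigned worker = *last_pop_worker;

	/* remember which worker to probe first on the next attempt */
	*last_pop_worker = (worker + 1) % nworkers;

	return worker;
}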