Browse Source

sched_ctx_hypervisor: use starpu-pthread API

Nathalie Furmento 12 years ago
parent
commit
d87217abab

+ 5 - 7
sched_ctx_hypervisor/examples/app_driven_test/app_driven_test.c

@@ -19,8 +19,6 @@
 #include <starpu.h>
 #include <sched_ctx_hypervisor.h>
 
-#include <pthread.h>
-
 #define FPRINTF(ofile, fmt, args ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ##args); }} while(0)
 
 /* Every implementation of a codelet must have this prototype, the first
  * argument (buffers) describes the buffers/streams that are managed by the
@@ -119,13 +117,13 @@ int main()
 	sched_ctx_hypervisor_register_ctx(sched_ctx1, 0.0);
 	sched_ctx_hypervisor_register_ctx(sched_ctx2, 0.0);
 
-	pthread_t tid[2];
+	starpu_pthread_t tid[2];
 
-	pthread_create(&tid[0], NULL, start_thread, (void*)&sched_ctx1);
-	pthread_create(&tid[1], NULL, start_thread, (void*)&sched_ctx2);
+	starpu_pthread_create(&tid[0], NULL, start_thread, (void*)&sched_ctx1);
+	starpu_pthread_create(&tid[1], NULL, start_thread, (void*)&sched_ctx2);
 
-	pthread_join(tid[0], NULL);
-	pthread_join(tid[1], NULL);
+	starpu_pthread_join(tid[0], NULL);
+	starpu_pthread_join(tid[1], NULL);
 
 	starpu_shutdown();
 	sched_ctx_hypervisor_shutdown();

+ 17 - 17
sched_ctx_hypervisor/examples/sched_ctx_utils/sched_ctx_utils.c

@@ -49,13 +49,13 @@ typedef struct
 } retvals;
 
 int first = 1;
-pthread_mutex_t mut;
+starpu_pthread_mutex_t mut;
 retvals rv[2];
 params p1, p2;
 int it = 0;
 int it2 = 0;
 
-pthread_key_t key;
+starpu_pthread_key_t key;
 
 void init()
 {
@@ -79,12 +79,12 @@ void init()
 
 	p1.id = 0;
 	p2.id = 1;
-	pthread_key_create(&key, NULL);
+	starpu_pthread_key_create(&key, NULL);
 }
 
 void update_sched_ctx_timing_results(double flops, double avg_timing)
 {
-	unsigned *id = pthread_getspecific(key);
+	unsigned *id = starpu_pthread_getspecific(key);
 	rv[*id].flops += flops;
 	rv[*id].avg_timing += avg_timing;
 }
@@ -94,7 +94,7 @@ void* start_bench(void *val)
 	params *p = (params*)val;
 	int i;
 
-	pthread_setspecific(key, &p->id);
+	starpu_pthread_setspecific(key, &p->id);
 
 	if(p->ctx != 0)
 		starpu_sched_ctx_set_context(&p->ctx);
@@ -104,14 +104,14 @@ void* start_bench(void *val)
 
 	/* if(p->ctx != 0) */
 	/* { */
-	/* 	pthread_mutex_lock(&mut); */
+	/* 	starpu_pthread_mutex_lock(&mut); */
 	/* 	if(first){ */
 	/* 		sched_ctx_hypervisor_unregiser_ctx(p->ctx); */
 	/* 		starpu_sched_ctx_delete(p->ctx, p->the_other_ctx); */
 	/* 	} */
 
 	/* 	first = 0; */
-	/* 	pthread_mutex_unlock(&mut); */
+	/* 	starpu_pthread_mutex_unlock(&mut); */
 	/* } */
 	sched_ctx_hypervisor_stop_resize(p->the_other_ctx);
 	rv[p->id].flops /= NSAMPLES;
@@ -151,23 +151,23 @@ void start_2benchs(void (*bench)(float*, unsigned, unsigned))
 		p2.mat[i] = construct_matrix(p2.size);
 	}
 
-	pthread_t tid[2];
-	pthread_mutex_init(&mut, NULL);
+	starpu_pthread_t tid[2];
+	starpu_pthread_mutex_init(&mut, NULL);
 
 	struct timeval start;
 	struct timeval end;
 
 	gettimeofday(&start, NULL);
 
-	pthread_create(&tid[0], NULL, (void*)start_bench, (void*)&p1);
-	pthread_create(&tid[1], NULL, (void*)start_bench, (void*)&p2);
+	starpu_pthread_create(&tid[0], NULL, (void*)start_bench, (void*)&p1);
+	starpu_pthread_create(&tid[1], NULL, (void*)start_bench, (void*)&p2);
 
-	pthread_join(tid[0], NULL);
-	pthread_join(tid[1], NULL);
+	starpu_pthread_join(tid[0], NULL);
+	starpu_pthread_join(tid[1], NULL);
 
 	gettimeofday(&end, NULL);
 
-	pthread_mutex_destroy(&mut);
+	starpu_pthread_mutex_destroy(&mut);
 
 	double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
 	timing /= 1000000;
@@ -198,7 +198,7 @@ void start_1stbench(void (*bench)(float*, unsigned, unsigned))
 
 	gettimeofday(&end, NULL);
 
-	pthread_mutex_destroy(&mut);
+	starpu_pthread_mutex_destroy(&mut);
 
 	double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
 	timing /= 1000000;
@@ -227,7 +227,7 @@ void start_2ndbench(void (*bench)(float*, unsigned, unsigned))
 
 	gettimeofday(&end, NULL);
 
-	pthread_mutex_destroy(&mut);
+	starpu_pthread_mutex_destroy(&mut);
 
 	double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
 	timing /= 1000000;
@@ -330,7 +330,7 @@ void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 
 void set_hypervisor_conf(int event, int task_tag)
 {
-/* 	unsigned *id = pthread_getspecific(key); */
+/* 	unsigned *id = starpu_pthread_getspecific(key); */
 /* 	if(*id == 0) */
 /* 	{ */
 /* 		if(event == END_BENCH) */

+ 2 - 3
sched_ctx_hypervisor/include/sched_ctx_hypervisor.h

@@ -18,7 +18,6 @@
 #define SCHED_CTX_HYPERVISOR_H
 
 #include <starpu.h>
-#include <pthread.h>
 
 #ifdef __cplusplus
 extern "C"
@@ -45,7 +44,7 @@ extern "C"
 #define	HYPERVISOR_ISPEED_W_SAMPLE -13
 #define HYPERVISOR_ISPEED_CTX_SAMPLE -14
 
-pthread_mutex_t act_hypervisor_mutex;
+starpu_pthread_mutex_t act_hypervisor_mutex;
 
 #define MAX_IDLE_TIME 5000000000
 #define MIN_WORKING_TIME 500
@@ -153,7 +152,7 @@ struct sched_ctx_hypervisor_wrapper
 	struct sched_ctx_hypervisor_resize_ack resize_ack;
 
 	/* mutex to protect the ack of workers */
-	pthread_mutex_t mutex;
+	starpu_pthread_mutex_t mutex;
 };
 
 /* Forward declaration of an internal data structure

+ 2 - 2
sched_ctx_hypervisor/src/hypervisor_policies/debit_lp_policy.c

@@ -227,7 +227,7 @@ static void debit_lp_handle_poped_task(unsigned sched_ctx, int worker, struct st
 {
 	struct sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 	_get_velocity_per_worker(sc_w, worker);
-	int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+	int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
 		if(_velocity_gap_btw_ctxs())
@@ -280,7 +280,7 @@ static void debit_lp_handle_poped_task(unsigned sched_ctx, int worker, struct st
 
 			}
 		}
-		pthread_mutex_unlock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 	}
 }
 

+ 3 - 3
sched_ctx_hypervisor/src/hypervisor_policies/gflops_rate_policy.c

@@ -138,9 +138,9 @@ static unsigned _gflops_rate_resize(unsigned sender_sched_ctx, unsigned receiver
 {
         int ret = 1;
         if(force_resize)
-                pthread_mutex_lock(&act_hypervisor_mutex);
+                starpu_pthread_mutex_lock(&act_hypervisor_mutex);
         else
-                ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+                ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
         if(ret != EBUSY)
         {
                 int nworkers_to_move = 0;
@@ -156,7 +156,7 @@ static unsigned _gflops_rate_resize(unsigned sender_sched_ctx, unsigned receiver
 
                         free(workers_to_move);
                 }
-                pthread_mutex_unlock(&act_hypervisor_mutex);
+                starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
                 return 1;
         }
         return 0;

+ 2 - 2
sched_ctx_hypervisor/src/hypervisor_policies/ispeed_lp_policy.c

@@ -356,7 +356,7 @@ static void ispeed_lp_handle_poped_task(unsigned sched_ctx, int worker, struct s
 {
 	struct sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 	_get_velocity_per_worker(sc_w, worker);
-	int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+	int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
 		if(_velocity_gap_btw_ctxs())
@@ -411,7 +411,7 @@ static void ispeed_lp_handle_poped_task(unsigned sched_ctx, int worker, struct s
 
 			}
 		}
-		pthread_mutex_unlock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 	}
 }
 

+ 2 - 2
sched_ctx_hypervisor/src/hypervisor_policies/ispeed_policy.c

@@ -143,7 +143,7 @@ static int* _get_slowest_workers(unsigned sched_ctx, int *nworkers, enum starpu_
 
 static void ispeed_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_task *task, uint32_t footprint)
 {
-	int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+	int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
 		if(_velocity_gap_btw_ctxs())
@@ -177,7 +177,7 @@ static void ispeed_handle_poped_task(unsigned sched_ctx, int worker, struct star
 
 			}
 		}
-		pthread_mutex_unlock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 	}
 }
 

+ 14 - 14
sched_ctx_hypervisor/src/hypervisor_policies/lp2_policy.c

@@ -20,7 +20,7 @@
 
 static struct bound_task_pool *task_pools = NULL;
 
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static starpu_pthread_mutex_t mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
 static double _glp_resolve(int ns, int nw, int nt, double tasks[nw][nt], double tmax, double w_in_s[ns][nw], int *in_sched_ctxs, int *workers, unsigned interger,
 			   struct bound_task_pool *tmp_task_pools, unsigned size_ctxs);
 static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, double w_in_s[ns][nw], double tasks[nw][nt], 
@@ -125,7 +125,7 @@ static void _size_ctxs(int *sched_ctxs, int nsched_ctxs , int *workers, int nwor
 	int ns = sched_ctxs == NULL ? sched_ctx_hypervisor_get_nsched_ctxs() : nsched_ctxs;
 	int nw = workers == NULL ? (int)starpu_worker_get_count() : nworkers; /* Number of different workers */
 	int nt = 0; /* Number of different kinds of tasks */
-	pthread_mutex_lock(&mutex);
+	starpu_pthread_mutex_lock(&mutex);
 	struct bound_task_pool * tp;
 	for (tp = task_pools; tp; tp = tp->next)
 		nt++;
@@ -133,7 +133,7 @@ static void _size_ctxs(int *sched_ctxs, int nsched_ctxs , int *workers, int nwor
 	double w_in_s[ns][nw];
 	double tasks[nw][nt];
 	unsigned found_sol = _compute_task_distribution_over_ctxs(ns, nw, nt, w_in_s, tasks, sched_ctxs, workers, task_pools, 1);
-	pthread_mutex_unlock(&mutex);
+	starpu_pthread_mutex_unlock(&mutex);
 	/* if we did find at least one solution redistribute the resources */
 	if(found_sol)
 		_lp_place_resources_in_ctx(ns, nw, w_in_s, sched_ctxs, workers, 1);
@@ -150,7 +150,7 @@ static void size_if_required()
 		struct sched_ctx_hypervisor_wrapper* sc_w = NULL;
 		unsigned ready_to_size = 1;
 		int s;
-		pthread_mutex_lock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 		for(s = 0; s < nsched_ctxs; s++)
 		{
 			sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctxs[s]);
@@ -160,14 +160,14 @@ static void size_if_required()
 
 		if(ready_to_size)
 			_size_ctxs(sched_ctxs, nsched_ctxs, workers, nworkers);
-		pthread_mutex_unlock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 	}
 }
 
 static void lp2_handle_submitted_job(struct starpu_task *task, uint32_t footprint)
 {
 	/* count the tasks of the same type */
-	pthread_mutex_lock(&mutex);
+	starpu_pthread_mutex_lock(&mutex);
 	struct bound_task_pool *tp = NULL;
 
 	for (tp = task_pools; tp; tp = tp->next)
@@ -189,7 +189,7 @@ static void lp2_handle_submitted_job(struct starpu_task *task, uint32_t footprin
 
 	/* One more task of this kind */
 	tp->n++;
-	pthread_mutex_unlock(&mutex);
+	starpu_pthread_mutex_unlock(&mutex);
 
 	size_if_required();
 }
@@ -529,12 +529,12 @@ static void lp2_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_
 {
 	struct sched_ctx_hypervisor_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);
 
-	int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+	int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
 		if(sc_w->submitted_flops < sc_w->total_flops)
 		{
-			pthread_mutex_unlock(&act_hypervisor_mutex);
+			starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 			return;
 		}
 
@@ -544,7 +544,7 @@ static void lp2_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_
 			int nw = starpu_worker_get_count(); /* Number of different workers */
 			int nt = 0; /* Number of different kinds of tasks */
 
-//			pthread_mutex_lock(&mutex);
+//			starpu_pthread_mutex_lock(&mutex);
 
 			/* we don't take the mutex bc a correct value of the number of tasks is
 			   not required but we do a copy in order to be sure
@@ -562,7 +562,7 @@ static void lp2_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_
 			double tasks_per_worker[nw][nt];
 
 			unsigned found_sol = _compute_task_distribution_over_ctxs(ns, nw, nt, w_in_s, tasks_per_worker, NULL, NULL, tmp_task_pools, 0);
-//			pthread_mutex_unlock(&mutex);
+//			starpu_pthread_mutex_unlock(&mutex);
 
 			/* if we did find at least one solution redistribute the resources */
 			if(found_sol)
@@ -580,12 +580,12 @@ static void lp2_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_
 			
 
 		}
-		pthread_mutex_unlock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 	}
 	/* too expensive to take this mutex and correct value of the number of tasks is not compulsory */
-//	pthread_mutex_lock(&mutex);
+//	starpu_pthread_mutex_lock(&mutex);
 	_remove_task_from_pool(task, footprint);
-//	pthread_mutex_unlock(&mutex);
+//	starpu_pthread_mutex_unlock(&mutex);
 
 }
 

+ 4 - 4
sched_ctx_hypervisor/src/hypervisor_policies/lp_policy.c

@@ -27,7 +27,7 @@ static void lp_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_t
 
 		double nworkers[nsched_ctxs][2];
 
-		int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+		int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 		if(ret != EBUSY)
 		{
 			int total_nw[2];
@@ -52,7 +52,7 @@ static void lp_handle_poped_task(unsigned sched_ctx, int worker, struct starpu_t
 				_lp_round_double_to_int(nsched_ctxs, 2, nworkers, nworkers_rounded);
 				_lp_redistribute_resources_in_ctxs(nsched_ctxs, 2, nworkers_rounded, nworkers);
 			}
-			pthread_mutex_unlock(&act_hypervisor_mutex);
+			starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 		}
 	}
 }
@@ -63,7 +63,7 @@ static void lp_size_ctxs(int *sched_ctxs, int ns, int *workers, int nworkers)
 	int total_nw[2];
 	_get_total_nw(workers, nworkers, 2, total_nw);
 
-	pthread_mutex_lock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	double vmax = _lp_get_nworkers_per_ctx(nsched_ctxs, 2, nworkers_per_type, total_nw);
 	if(vmax != 0.0)
 	{
@@ -101,7 +101,7 @@ static void lp_size_ctxs(int *sched_ctxs, int ns, int *workers, int nworkers)
 		else
 			_lp_distribute_resources_in_ctxs(sched_ctxs, nsched_ctxs, 2, nworkers_per_type_rounded, nworkers_per_type, workers, nworkers);
 	}
-	pthread_mutex_unlock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 }
 
 struct sched_ctx_hypervisor_policy lp_policy = {

+ 3 - 4
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.c

@@ -15,7 +15,6 @@
  */
 
 /* #include <sched_ctx_hypervisor.h> */
-/* #include <pthread.h> */
 
 #include "policy_tools.h"
 
@@ -253,9 +252,9 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 {
 	int ret = 1;
 	if(force_resize)
-		pthread_mutex_lock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	else
-		ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+		ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
 		int nworkers_to_move = _get_nworkers_to_move(sender_sched_ctx);
@@ -289,7 +288,7 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 				free(workers_to_move);
 			}
 		}
-		pthread_mutex_unlock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 		return 1;
 	}
 	return 0;

+ 0 - 1
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.h

@@ -15,7 +15,6 @@
  */
 
 #include <sched_ctx_hypervisor.h>
-#include <pthread.h>
 
 #define HYPERVISOR_REDIM_SAMPLE 0.02
 #define HYPERVISOR_START_REDIM_SAMPLE 0.1

+ 6 - 7
sched_ctx_hypervisor/src/hypervisor_policies/simple_policy.c

@@ -15,7 +15,6 @@
  */
 
 #include <sched_ctx_hypervisor.h>
-#include <pthread.h>
 
 static int _compute_priority(unsigned sched_ctx)
 {
@@ -216,9 +215,9 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 {
 	int ret = 1;
 	if(force_resize)
-		pthread_mutex_lock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	else
-		ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+		ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
 	if(ret != EBUSY)
 	{
 		unsigned nworkers_to_move = _get_nworkers_to_move(sender_sched_ctx);
@@ -253,7 +252,7 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 				free(workers_to_move);
 			}
 		}
-		pthread_mutex_unlock(&act_hypervisor_mutex);
+		starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 		return 1;
 	}
 	return 0;
@@ -337,9 +336,9 @@ static unsigned _simple_resize2(unsigned sender_sched_ctx, unsigned receiver_sch
 {
         int ret = 1;
         if(force_resize)
-                pthread_mutex_lock(&act_hypervisor_mutex);
+                starpu_pthread_mutex_lock(&act_hypervisor_mutex);
         else
-                ret = pthread_mutex_trylock(&act_hypervisor_mutex);
+                ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
         if(ret != EBUSY)
         {
                 int nworkers_to_move = 0;
@@ -355,7 +354,7 @@ static unsigned _simple_resize2(unsigned sender_sched_ctx, unsigned receiver_sch
 
                         free(workers_to_move);
                 }
-                pthread_mutex_unlock(&act_hypervisor_mutex);
+                starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
                 return 1;
         }
         return 0;

+ 2 - 2
sched_ctx_hypervisor/src/sched_ctx_config.c

@@ -257,9 +257,9 @@ void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...)
 		entry->task_tag = task_tag;
 		entry->configuration = config;
 
-		pthread_mutex_lock(&hypervisor.conf_mut[sched_ctx]);
+		starpu_pthread_mutex_lock(&hypervisor.conf_mut[sched_ctx]);
 		HASH_ADD_INT(hypervisor.configurations[sched_ctx], task_tag, entry);
-		pthread_mutex_unlock(&hypervisor.conf_mut[sched_ctx]);
+		starpu_pthread_mutex_unlock(&hypervisor.conf_mut[sched_ctx]);
 	}
 
 	return;

+ 34 - 34
sched_ctx_hypervisor/src/sched_ctx_hypervisor.c

@@ -132,7 +132,7 @@ struct starpu_sched_ctx_performance_counters* sched_ctx_hypervisor_init(struct s
 {
 	hypervisor.min_tasks = 0;
 	hypervisor.nsched_ctxs = 0;
-	pthread_mutex_init(&act_hypervisor_mutex, NULL);
+	starpu_pthread_mutex_init(&act_hypervisor_mutex, NULL);
 	hypervisor.start_executing_time = starpu_timing_now();
 	int i;
 	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
@@ -154,7 +154,7 @@ struct starpu_sched_ctx_performance_counters* sched_ctx_hypervisor_init(struct s
 		hypervisor.sched_ctx_w[i].resize_ack.moved_workers = NULL;
 		hypervisor.sched_ctx_w[i].resize_ack.nmoved_workers = 0;
 		hypervisor.sched_ctx_w[i].resize_ack.acked_workers = NULL;
-		pthread_mutex_init(&hypervisor.sched_ctx_w[i].mutex, NULL);
+		starpu_pthread_mutex_init(&hypervisor.sched_ctx_w[i].mutex, NULL);
 
 		int j;
 		for(j = 0; j < STARPU_NMAXWORKERS; j++)
@@ -239,7 +239,7 @@ void sched_ctx_hypervisor_shutdown(void)
 		{
 			sched_ctx_hypervisor_stop_resize(hypervisor.sched_ctxs[i]);
 			sched_ctx_hypervisor_unregister_ctx(hypervisor.sched_ctxs[i]);
-			pthread_mutex_destroy(&hypervisor.sched_ctx_w[i].mutex);
+			starpu_pthread_mutex_destroy(&hypervisor.sched_ctx_w[i].mutex);
 		}
 	}
 	perf_counters->notify_idle_cycle = NULL;
@@ -252,17 +252,17 @@ void sched_ctx_hypervisor_shutdown(void)
 	free(perf_counters);
 	perf_counters = NULL;
 
-	pthread_mutex_destroy(&act_hypervisor_mutex);
+	starpu_pthread_mutex_destroy(&act_hypervisor_mutex);
 }
 
 /* the hypervisor is in charge only of the contexts registered to it*/
 void sched_ctx_hypervisor_register_ctx(unsigned sched_ctx, double total_flops)
 {
-	pthread_mutex_lock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	hypervisor.configurations[sched_ctx] = NULL;
 	hypervisor.resize_requests[sched_ctx] = NULL;
-	pthread_mutex_init(&hypervisor.conf_mut[sched_ctx], NULL);
-	pthread_mutex_init(&hypervisor.resize_mut[sched_ctx], NULL);
+	starpu_pthread_mutex_init(&hypervisor.conf_mut[sched_ctx], NULL);
+	starpu_pthread_mutex_init(&hypervisor.resize_mut[sched_ctx], NULL);
 
 	_add_config(sched_ctx);
 	hypervisor.sched_ctx_w[sched_ctx].sched_ctx = sched_ctx;
@@ -272,7 +272,7 @@ void sched_ctx_hypervisor_register_ctx(unsigned sched_ctx, double total_flops)
 	hypervisor.sched_ctx_w[sched_ctx].remaining_flops = total_flops;
 	if(strcmp(hypervisor.policy.name, "app_driven") == 0)
 		hypervisor.resize[sched_ctx] = 1;
-	pthread_mutex_unlock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 }
 
 static int _get_first_free_sched_ctx(int *sched_ctxs, int nsched_ctxs)
@@ -312,7 +312,7 @@ void sched_ctx_hypervisor_unregister_ctx(unsigned sched_ctx)
 {
 	if(hypervisor.policy.end_ctx)
 		hypervisor.policy.end_ctx(sched_ctx);
-	pthread_mutex_lock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	unsigned i;
 	for(i = 0; i < hypervisor.nsched_ctxs; i++)
 	{
@@ -330,12 +330,12 @@ void sched_ctx_hypervisor_unregister_ctx(unsigned sched_ctx)
 
 /* 	free(hypervisor.configurations[sched_ctx]); */
 /* 	free(hypervisor.resize_requests[sched_ctx]); */
-	pthread_mutex_destroy(&hypervisor.conf_mut[sched_ctx]);
-	pthread_mutex_destroy(&hypervisor.resize_mut[sched_ctx]);
+	starpu_pthread_mutex_destroy(&hypervisor.conf_mut[sched_ctx]);
+	starpu_pthread_mutex_destroy(&hypervisor.resize_mut[sched_ctx]);
 	if(hypervisor.nsched_ctxs == 1)
 		sched_ctx_hypervisor_stop_resize(hypervisor.sched_ctxs[0]);
 
-	pthread_mutex_unlock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 }
 
 static double _get_best_total_elapsed_flops(struct sched_ctx_hypervisor_wrapper* sc_w, int *npus, enum starpu_archtype req_arch)
@@ -535,7 +535,7 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 		}
 		else
 		{
-			int ret = pthread_mutex_trylock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
+			int ret = starpu_pthread_mutex_trylock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 			if(ret != EBUSY)
 			{
 				hypervisor.sched_ctx_w[sender_sched_ctx].resize_ack.receiver_sched_ctx = receiver_sched_ctx;
@@ -555,7 +555,7 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 				hypervisor.resize[sender_sched_ctx] = 0;
 //				hypervisor.resize[receiver_sched_ctx] = 0;
 
-				pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
+				starpu_pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 			}
 		}
 		struct sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
@@ -618,7 +618,7 @@ void sched_ctx_hypervisor_remove_workers_from_sched_ctx(int* workers_to_remove,
 				printf(" %d", workers_to_remove[j]);
 			printf("\n");
 
-			int ret = pthread_mutex_trylock(&hypervisor.sched_ctx_w[sched_ctx].mutex);
+			int ret = starpu_pthread_mutex_trylock(&hypervisor.sched_ctx_w[sched_ctx].mutex);
 			if(ret != EBUSY)
 			{
 
@@ -641,7 +641,7 @@ void sched_ctx_hypervisor_remove_workers_from_sched_ctx(int* workers_to_remove,
 				}
 
 				hypervisor.resize[sched_ctx] = 0;
-				pthread_mutex_unlock(&hypervisor.sched_ctx_w[sched_ctx].mutex);
+				starpu_pthread_mutex_unlock(&hypervisor.sched_ctx_w[sched_ctx].mutex);
 			}
 		}
  	}
@@ -662,7 +662,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 		if(hypervisor.sched_ctxs[i] != STARPU_NMAX_SCHED_CTXS)
 		{
 			struct sched_ctx_hypervisor_wrapper *sc_w = &hypervisor.sched_ctx_w[hypervisor.sched_ctxs[i]];
-			pthread_mutex_lock(&sc_w->mutex);
+			starpu_pthread_mutex_lock(&sc_w->mutex);
 			unsigned only_remove = 0;
 			if(sc_w->resize_ack.receiver_sched_ctx == -1 && hypervisor.sched_ctxs[i] != (int)sched_ctx &&
 			   sc_w->resize_ack.nmoved_workers > 0 && starpu_sched_ctx_contains_worker(worker, hypervisor.sched_ctxs[i]))
@@ -680,10 +680,10 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 			{
 				resize_ack = &sc_w->resize_ack;
 				sender_sched_ctx = hypervisor.sched_ctxs[i];
-				pthread_mutex_unlock(&sc_w->mutex);
+				starpu_pthread_mutex_unlock(&sc_w->mutex);
 				break;
 			}
-			pthread_mutex_unlock(&sc_w->mutex);
+			starpu_pthread_mutex_unlock(&sc_w->mutex);
 		}
 	}
 
@@ -691,7 +691,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 	if(resize_ack == NULL)
 		return 1;
 
-	int ret = pthread_mutex_trylock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
+	int ret = starpu_pthread_mutex_trylock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 	if(ret != EBUSY)
 	{
 		int *moved_workers = resize_ack->moved_workers;
@@ -746,10 +746,10 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 				free(resize_ack->acked_workers);
 
 			}
-			pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
+			starpu_pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 			return resize_completed;
 		}
-		pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
+		starpu_pthread_mutex_unlock(&hypervisor.sched_ctx_w[sender_sched_ctx].mutex);
 	}
 	return 0;
 }
@@ -766,9 +766,9 @@ void sched_ctx_hypervisor_resize(unsigned sched_ctx, int task_tag)
 	entry->sched_ctx = sched_ctx;
 	entry->task_tag = task_tag;
 
-	pthread_mutex_lock(&hypervisor.resize_mut[sched_ctx]);
+	starpu_pthread_mutex_lock(&hypervisor.resize_mut[sched_ctx]);
 	HASH_ADD_INT(hypervisor.resize_requests[sched_ctx], task_tag, entry);
-	pthread_mutex_unlock(&hypervisor.resize_mut[sched_ctx]);
+	starpu_pthread_mutex_unlock(&hypervisor.resize_mut[sched_ctx]);
 }
 
 /* notifies the hypervisor that the worker is no longer idle and a new task was pushed on its queue */
@@ -847,16 +847,16 @@ static void notify_post_exec_hook(unsigned sched_ctx, int task_tag)
 
 	unsigned conf_sched_ctx;
 	unsigned i;
-	pthread_mutex_lock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	unsigned ns = hypervisor.nsched_ctxs;
-	pthread_mutex_unlock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 
 	for(i = 0; i < ns; i++)
 	{
 		struct configuration_entry *entry;
 
 		conf_sched_ctx = hypervisor.sched_ctxs[i];
-		pthread_mutex_lock(&hypervisor.conf_mut[conf_sched_ctx]);
+		starpu_pthread_mutex_lock(&hypervisor.conf_mut[conf_sched_ctx]);
 
 		HASH_FIND_INT(hypervisor.configurations[conf_sched_ctx], &task_tag, entry);
 
@@ -868,12 +868,12 @@ static void notify_post_exec_hook(unsigned sched_ctx, int task_tag)
 			HASH_DEL(hypervisor.configurations[conf_sched_ctx], entry);
 			free(config);
 		}
-		pthread_mutex_unlock(&hypervisor.conf_mut[conf_sched_ctx]);
+		starpu_pthread_mutex_unlock(&hypervisor.conf_mut[conf_sched_ctx]);
 	}
 
 	if(hypervisor.resize[sched_ctx])
 	{
-		pthread_mutex_lock(&hypervisor.resize_mut[sched_ctx]);
+		starpu_pthread_mutex_lock(&hypervisor.resize_mut[sched_ctx]);
 
 		if(hypervisor.policy.handle_post_exec_hook)
 		{
@@ -889,16 +889,16 @@ static void notify_post_exec_hook(unsigned sched_ctx, int task_tag)
 			}
 
 		}
-		pthread_mutex_unlock(&hypervisor.resize_mut[sched_ctx]);
+		starpu_pthread_mutex_unlock(&hypervisor.resize_mut[sched_ctx]);
 	}
 	return;
 }
 
 static void notify_submitted_job(struct starpu_task *task, uint32_t footprint)
 {
-	pthread_mutex_lock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	hypervisor.sched_ctx_w[task->sched_ctx].submitted_flops += task->flops;
-	pthread_mutex_unlock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 
 	if(hypervisor.policy.handle_submitted_job)
 		hypervisor.policy.handle_submitted_job(task, footprint);
@@ -912,10 +912,10 @@ static void notify_delete_context(unsigned sched_ctx)
 
 void sched_ctx_hypervisor_size_ctxs(int *sched_ctxs, int nsched_ctxs, int *workers, int nworkers)
 {
-	pthread_mutex_lock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_lock(&act_hypervisor_mutex);
 	unsigned curr_nsched_ctxs = sched_ctxs == NULL ? hypervisor.nsched_ctxs : nsched_ctxs;
 	int *curr_sched_ctxs = sched_ctxs == NULL ? hypervisor.sched_ctxs : sched_ctxs;
-	pthread_mutex_unlock(&act_hypervisor_mutex);
+	starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 	unsigned s;
 	for(s = 0; s < curr_nsched_ctxs; s++)
 		hypervisor.resize[curr_sched_ctxs[s]] = 1;

+ 2 - 2
sched_ctx_hypervisor/src/sched_ctx_hypervisor_intern.h

@@ -67,8 +67,8 @@ struct sched_ctx_hypervisor
 	/* Set of pending resize requests for any context/tag pair.  */
 	struct resize_request_entry *resize_requests[STARPU_NMAX_SCHED_CTXS];
 
-	pthread_mutex_t conf_mut[STARPU_NMAX_SCHED_CTXS];
-	pthread_mutex_t resize_mut[STARPU_NMAX_SCHED_CTXS];
+	starpu_pthread_mutex_t conf_mut[STARPU_NMAX_SCHED_CTXS];
+	starpu_pthread_mutex_t resize_mut[STARPU_NMAX_SCHED_CTXS];
 	struct size_request *sr;
 	int check_min_tasks[STARPU_NMAX_SCHED_CTXS];