fix build with simgrid

Guard the pthread-based prototypes in the public header when pthread types are not usable there (MSVC, SimGrid), re-declare them in internal headers with the _starpu_pthread_* wrapper types, and switch the scheduling policies to those wrappers (and to _STARPU_PTHREAD_MUTEX_INITIALIZER).

Samuel Thibault, 12 years ago
commit b27f1f7932

+ 13 - 0  include/starpu_sched_ctx.h

@@ -92,11 +92,22 @@ void starpu_sched_ctx_set_policy_data(unsigned sched_ctx_id, void *policy_data);
 
 void* starpu_sched_ctx_get_policy_data(unsigned sched_ctx);
 
+/* When there is no available task for a worker, StarPU blocks this worker on a
+condition variable. This function specifies which condition variable (and the
+associated mutex) should be used to block (and to wake up) a worker. Note that
+multiple workers may use the same condition variable. For instance, in the case
+of a scheduling strategy with a single task queue, the same condition variable
+would be used to block and wake up all workers.  The initialization method of a
+scheduling strategy (init_sched) must call this function once per worker. */
+#if !defined(_MSC_VER) && !defined(STARPU_SIMGRID)
+void starpu_worker_set_sched_condition(unsigned sched_ctx_id, int workerid, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond);
+
 #ifdef STARPU_DEVEL
 #warning do we really need both starpu_sched_ctx_set_worker_mutex_and_cond and starpu_sched_ctx_init_worker_mutex_and_cond functions
 #endif
 
 void starpu_sched_ctx_set_worker_mutex_and_cond(unsigned sched_ctx_id, int workerid, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond);
+#endif
 
 void starpu_sched_ctx_get_worker_mutex_and_cond(unsigned sched_ctx_id, int workerid, pthread_mutex_t **sched_mutex, pthread_cond_t **sched_cond);
 
@@ -110,7 +121,9 @@ void starpu_delete_worker_collection_for_sched_ctx(unsigned sched_ctx_id);
 
 struct starpu_sched_ctx_worker_collection* starpu_get_worker_collection_of_sched_ctx(unsigned sched_ctx_id);
 
+#if !defined(_MSC_VER) && !defined(STARPU_SIMGRID)
 pthread_mutex_t* starpu_get_changing_ctx_mutex(unsigned sched_ctx_id);
+#endif
 
 void starpu_set_sched_ctx(unsigned *sched_ctx);
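
The comment added above states an API contract: a scheduling strategy's init_sched method must register a mutex/condition pair for every worker. A minimal sketch of what that looks like for a single-queue strategy, assuming an init callback that receives the context id (the callback name and the static storage are illustrative, not part of this patch; starpu_worker_get_count() is assumed to report the workers to cover):

#include <pthread.h>
#include <starpu.h>

/* Hypothetical policy state: one task queue, hence one mutex/cond pair
 * shared by all workers, exactly the case the comment describes. */
static pthread_mutex_t sched_mutex;
static pthread_cond_t  sched_cond;

static void example_init_sched(unsigned sched_ctx_id)	/* hypothetical */
{
	pthread_mutex_init(&sched_mutex, NULL);
	pthread_cond_init(&sched_cond, NULL);

	/* The contract is one call per worker; a single-queue strategy
	 * simply registers the same pair every time. */
	unsigned workerid, nworkers = starpu_worker_get_count();
	for (workerid = 0; workerid < nworkers; workerid++)
		starpu_worker_set_sched_condition(sched_ctx_id, (int)workerid,
						  &sched_mutex, &sched_cond);
}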
 

+ 1 - 1  src/common/barrier_counter.h

@@ -18,7 +18,7 @@
 
 struct _starpu_barrier_counter {
 	struct _starpu_barrier barrier;
-	pthread_cond_t cond2;
+	_starpu_pthread_cond_t cond2;
 };
 
 int _starpu_barrier_counter_init(struct _starpu_barrier_counter *barrier_c, int count);

+ 2 - 2  src/core/sched_ctx.c

@@ -19,8 +19,8 @@
 #include <common/utils.h>
 
 extern struct starpu_sched_ctx_worker_collection worker_list;
-static _starpu_pthread_mutex_t sched_ctx_manag = PTHREAD_MUTEX_INITIALIZER;
-static _starpu_pthread_mutex_t finished_submit_mutex = PTHREAD_MUTEX_INITIALIZER;
+static _starpu_pthread_mutex_t sched_ctx_manag = _STARPU_PTHREAD_MUTEX_INITIALIZER;
+static _starpu_pthread_mutex_t finished_submit_mutex = _STARPU_PTHREAD_MUTEX_INITIALIZER;
 struct starpu_task stop_submission_task = STARPU_TASK_INITIALIZER;
 pthread_key_t sched_ctx_key;
 unsigned with_hypervisor = 0;
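
These two hunks show the substance of the fix: raw pthread types and PTHREAD_MUTEX_INITIALIZER become StarPU's _starpu_pthread_* wrappers, which remain compilable when SimGrid simulates the threads. A rough sketch of the wrapper pattern, purely hypothetical since the real definitions live in StarPU's internal common headers and the SimGrid mapping shown is only one plausible choice:

#ifdef STARPU_SIMGRID
/* assumed mapping: route synchronization through SimGrid's xbt primitives */
#include <xbt/synchro_core.h>
typedef xbt_mutex_t _starpu_pthread_mutex_t;
typedef xbt_cond_t  _starpu_pthread_cond_t;
#define _STARPU_PTHREAD_MUTEX_INITIALIZER NULL	/* xbt mutexes are pointers */
#else
#include <pthread.h>
typedef pthread_mutex_t _starpu_pthread_mutex_t;
typedef pthread_cond_t  _starpu_pthread_cond_t;
#define _STARPU_PTHREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#endif

With wrappers of this shape, the initializer change above (_STARPU_PTHREAD_MUTEX_INITIALIZER) is what lets the static mutexes in sched_ctx.c build under both configurations.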

+ 4 - 0  src/core/sched_ctx.h

@@ -136,4 +136,8 @@ _starpu_pthread_mutex_t *_starpu_get_sched_mutex(struct _starpu_sched_ctx *sched
  take care: no mutex taken, the list of workers might not be updated */
 int starpu_get_workers_of_sched_ctx(unsigned sched_ctx_id, int *pus, enum starpu_archtype arch);
 
+#if defined(_MSC_VER) || defined(STARPU_SIMGRID)
+_starpu_pthread_mutex_t* starpu_get_changing_ctx_mutex(unsigned sched_ctx_id);
+#endif
+
 #endif // __SCHED_CONTEXT_H__
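
Note how this guard complements the one added to the public header: include/starpu_sched_ctx.h now hides the pthread-based prototype of starpu_get_changing_ctx_mutex whenever pthread types cannot appear there (MSVC, SimGrid), and this internal header supplies a wrapper-typed prototype for exactly those builds, so internal callers always see a declaration. Condensed side by side:

/* public header: only when real pthread types are available */
#if !defined(_MSC_VER) && !defined(STARPU_SIMGRID)
pthread_mutex_t* starpu_get_changing_ctx_mutex(unsigned sched_ctx_id);
#endif

/* internal header: the remaining builds, using the wrapper type */
#if defined(_MSC_VER) || defined(STARPU_SIMGRID)
_starpu_pthread_mutex_t* starpu_get_changing_ctx_mutex(unsigned sched_ctx_id);
#endif

The same split is applied to starpu_worker_set_sched_condition and starpu_worker_get_sched_condition in src/core/workers.h below.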

+ 6 - 0  src/core/workers.h

@@ -246,6 +246,12 @@ void _starpu_worker_set_status(int workerid, enum _starpu_worker_status status);
 /* TODO move */
 unsigned _starpu_execute_registered_progression_hooks(void);
 
+#if defined(_MSC_VER) || defined(STARPU_SIMGRID)
+void starpu_worker_set_sched_condition(unsigned sched_ctx_id, int workerid, _starpu_pthread_mutex_t *sched_mutex, _starpu_pthread_cond_t *sched_cond);
+
+void starpu_worker_get_sched_condition(unsigned sched_ctx_id, int workerid, _starpu_pthread_mutex_t **sched_mutex, _starpu_pthread_cond_t **sched_cond);
+#endif
+
 /* We keep an initial sched ctx which might be used in case no other ctx is available */
 struct _starpu_sched_ctx* _starpu_get_initial_sched_ctx(void);
 

+ 15 - 15  src/sched_policies/deque_modeling_policy_data_aware.c

@@ -228,8 +228,8 @@ static struct starpu_task *dmda_pop_every_task(unsigned sched_ctx_id)
 	int workerid = starpu_worker_get_id();
 	struct _starpu_fifo_taskq *fifo = dt->queue_array[workerid];
 
-	pthread_mutex_t *sched_mutex;
-	pthread_cond_t *sched_cond;
+	_starpu_pthread_mutex_t *sched_mutex;
+	_starpu_pthread_cond_t *sched_cond;
 	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, workerid, &sched_mutex, &sched_cond);
 	new_list = _starpu_fifo_pop_every_task(fifo, sched_mutex, workerid);
 
@@ -257,8 +257,8 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 
 	struct _starpu_fifo_taskq *fifo = dt->queue_array[best_workerid];
 
-	pthread_mutex_t *sched_mutex;
-	pthread_cond_t *sched_cond;
+	_starpu_pthread_mutex_t *sched_mutex;
+	_starpu_pthread_cond_t *sched_cond;
 	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, best_workerid, &sched_mutex, &sched_cond);
 
 #ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
@@ -365,8 +365,8 @@ static int _dm_push_task(struct starpu_task *task, unsigned prio, unsigned sched
 			}
 			
 			double exp_end;
-			pthread_mutex_t *sched_mutex;
-			pthread_cond_t *sched_cond;
+			_starpu_pthread_mutex_t *sched_mutex;
+			_starpu_pthread_cond_t *sched_cond;
 			starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, worker, &sched_mutex, &sched_cond);
 			
 			/* Sometimes workers didn't take the tasks as early as we expected */
@@ -483,8 +483,8 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 			}
 
 			/* Sometimes workers didn't take the tasks as early as we expected */
-			pthread_mutex_t *sched_mutex;
-			pthread_cond_t *sched_cond;
+			_starpu_pthread_mutex_t *sched_mutex;
+			_starpu_pthread_cond_t *sched_cond;
 			starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, worker, &sched_mutex, &sched_cond);
 			_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
@@ -685,7 +685,7 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 static int dmda_push_sorted_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-        pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+        _starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
         unsigned nworkers;
         int ret_val = -1;
 
@@ -706,7 +706,7 @@ static int dmda_push_sorted_task(struct starpu_task *task)
 static int dm_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-        pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+        _starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
         unsigned nworkers;
         int ret_val = -1;
 
@@ -726,7 +726,7 @@ static int dm_push_task(struct starpu_task *task)
 static int dmda_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-        pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+        _starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
         unsigned nworkers;
         int ret_val = -1;
 
@@ -845,8 +845,8 @@ static void dmda_pre_exec_hook(struct starpu_task *task)
 	double model = task->predicted;
 	double transfer_model = task->predicted_transfer;
 
-	pthread_mutex_t *sched_mutex;
-	pthread_cond_t *sched_cond;
+	_starpu_pthread_mutex_t *sched_mutex;
+	_starpu_pthread_cond_t *sched_cond;
 	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, workerid, &sched_mutex, &sched_cond);
 	/* Once the task is executing, we can update the predicted amount
 	 * of work. */
@@ -869,8 +869,8 @@ static void dmda_push_task_notify(struct starpu_task *task, int workerid, unsign
 			_starpu_get_job_associated_to_task(task)->nimpl);
 
 	double predicted_transfer = starpu_task_expected_data_transfer_time(memory_node, task);
-	pthread_mutex_t *sched_mutex;
-	pthread_cond_t *sched_cond;
+	_starpu_pthread_mutex_t *sched_mutex;
+	_starpu_pthread_cond_t *sched_cond;
 	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, workerid, &sched_mutex, &sched_cond);
 
 

+ 3 - 3  src/sched_policies/eager_central_policy.c

@@ -26,8 +26,8 @@
 
 typedef struct {
 	struct _starpu_fifo_taskq *fifo;
-	pthread_mutex_t sched_mutex;
-	pthread_cond_t sched_cond;
+	_starpu_pthread_mutex_t sched_mutex;
+	_starpu_pthread_cond_t sched_cond;
 } eager_center_policy_data;
 
 static void eager_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers) 
@@ -91,7 +91,7 @@ static int push_task_eager_policy(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
 	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
-	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
 	unsigned nworkers;
 	int ret_val = -1;
 	

+ 1 - 1  src/sched_policies/eager_central_priority_policy.c

@@ -140,7 +140,7 @@ static int _starpu_priority_push_task(struct starpu_task *task)
 	struct _starpu_priority_taskq *taskq = data->taskq;
 
 	/* if the context has no workers return */
-	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
         unsigned nworkers;
         int ret_val = -1;
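
Every push function touched by this commit fetches changing_ctx_mutex the same way, and the comment above ("if the context has no workers return") hints at why: the context's worker set can change concurrently, so the push runs under that mutex and bails out when no workers remain. A hypothetical condensation of the recurring pattern, where policy_enqueue stands in for the policy-specific queueing code and the worker-count helper name is assumed:

static int policy_enqueue(struct starpu_task *task);	/* hypothetical */

static int example_push_task(struct starpu_task *task)
{
	unsigned sched_ctx_id = task->sched_ctx;
	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
	unsigned nworkers;
	int ret_val = -1;

	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);	/* assumed helper */
	if (nworkers > 0)
		ret_val = policy_enqueue(task);
	_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);

	return ret_val;
}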
 

+ 9 - 9  src/sched_policies/heft.c

@@ -148,8 +148,8 @@ static void heft_pre_exec_hook(struct starpu_task *task)
 	double model = task->predicted;
 	double transfer_model = task->predicted_transfer;
 
-	pthread_mutex_t *sched_mutex;
-	pthread_cond_t *sched_cond;
+	_starpu_pthread_mutex_t *sched_mutex;
+	_starpu_pthread_cond_t *sched_cond;
 	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, workerid, &sched_mutex, &sched_cond);
 	/* Once the task is executing, we can update the predicted amount
 	 * of work. */
@@ -173,8 +173,8 @@ static void heft_push_task_notify(struct starpu_task *task, int workerid)
 			_starpu_get_job_associated_to_task(task)->nimpl);
 
 	double predicted_transfer = starpu_task_expected_data_transfer_time(memory_node, task);
-	pthread_mutex_t *sched_mutex;
-	pthread_cond_t *sched_cond;
+	_starpu_pthread_mutex_t *sched_mutex;
+	_starpu_pthread_cond_t *sched_cond;
 	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, workerid, &sched_mutex, &sched_cond);
 
 
@@ -225,8 +225,8 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = hd->queue_array[best_workerid];
 
-	pthread_mutex_t *sched_mutex;
-	pthread_cond_t *sched_cond;
+	_starpu_pthread_mutex_t *sched_mutex;
+	_starpu_pthread_cond_t *sched_cond;
 	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, best_workerid, &sched_mutex, &sched_cond);
 
 #ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
@@ -339,8 +339,8 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 		
 				/* Sometimes workers didn't take the tasks as early as we expected */
 				struct _starpu_fifo_taskq *fifo = hd->queue_array[worker];
-				pthread_mutex_t *sched_mutex;
-				pthread_cond_t *sched_cond;
+				_starpu_pthread_mutex_t *sched_mutex;
+				_starpu_pthread_cond_t *sched_cond;
 				starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, worker, &sched_mutex, &sched_cond);
 				_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 				fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
@@ -565,7 +565,7 @@ static int _heft_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 static int heft_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
 	unsigned nworkers; 
 	int ret_val = -1;
 	if (task->priority > 0)

+ 2 - 2  src/sched_policies/parallel_greedy.c

@@ -1,6 +1,6 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
- * Copyright (C) 2011  Université de Bordeaux 1
+ * Copyright (C) 2011-2012  Université de Bordeaux 1
  * Copyright (C) 2011  Télécom-SudParis
  * Copyright (C) 2011  INRIA
  *
@@ -176,7 +176,7 @@ static void deinitialize_pgreedy_policy(unsigned sched_ctx_id)
 static int push_task_pgreedy_policy(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
 	unsigned nworkers;
 	int ret_val = -1;
 

+ 10 - 10  src/sched_policies/parallel_heft.c

@@ -45,7 +45,7 @@ typedef struct {
 	double idle_power;
 /* When we push a task on a combined worker we need all the cpu workers it contains
  * to be locked at once */
-	pthread_mutex_t global_push_mutex;
+	_starpu_pthread_mutex_t global_push_mutex;
 } pheft_data;
 
 static double worker_exp_start[STARPU_NMAXWORKERS];
@@ -71,8 +71,8 @@ static void parallel_heft_pre_exec_hook(struct starpu_task *task)
 	if (isnan(model))
 		model = 0.0;
 
-	pthread_mutex_t *sched_mutex;
-	pthread_cond_t *sched_cond;
+	_starpu_pthread_mutex_t *sched_mutex;
+	_starpu_pthread_cond_t *sched_cond;
 	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, workerid, &sched_mutex, &sched_cond);
 
 	/* Once we have executed the task, we can update the predicted amount
@@ -106,8 +106,8 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 		task->predicted = exp_end_predicted - worker_exp_end[best_workerid];
 		/* TODO */
 		task->predicted_transfer = 0;
-		pthread_mutex_t *sched_mutex;
-		pthread_cond_t *sched_cond;
+		_starpu_pthread_mutex_t *sched_mutex;
+		_starpu_pthread_cond_t *sched_cond;
 		starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, best_workerid, &sched_mutex, &sched_cond);
 
 		_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
@@ -160,8 +160,8 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 			alias->predicted = exp_end_predicted - worker_exp_end[local_worker];
 			/* TODO */
 			alias->predicted_transfer = 0;
-			pthread_mutex_t *sched_mutex;
-			pthread_cond_t *sched_cond;
+			_starpu_pthread_mutex_t *sched_mutex;
+			_starpu_pthread_cond_t *sched_cond;
 			starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, local_worker, &sched_mutex, &sched_cond);
 			_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 			worker_exp_len[local_worker] += alias->predicted;
@@ -282,8 +282,8 @@ static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, uns
 
 		if(!starpu_worker_is_combined_worker(worker))
 		{
-			pthread_mutex_t *sched_mutex;
-			pthread_cond_t *sched_cond;
+			_starpu_pthread_mutex_t *sched_mutex;
+			_starpu_pthread_cond_t *sched_cond;
 			starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, worker, &sched_mutex, &sched_cond);
 			/* Sometimes workers didn't take the tasks as early as we expected */
 			_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
@@ -452,7 +452,7 @@ static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, uns
 static int parallel_heft_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
 	unsigned nworkers;
 	int ret_val = -1;
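
The comment on global_push_mutex above explains the constraint: pushing to a combined worker queues an alias of the task on each underlying CPU worker, and those per-worker locks must be taken as a group, so combined pushes are serialized through one outer mutex to keep two of them from interleaving their locking. A hypothetical condensation (hd, the alias queueing, and _STARPU_PTHREAD_COND_SIGNAL are assumptions; starpu_combined_worker_get_description is the public lookup for a combined worker's members):

int worker_size, i;
int *combined_workerid;
starpu_combined_worker_get_description(best_workerid, &worker_size, &combined_workerid);

_STARPU_PTHREAD_MUTEX_LOCK(&hd->global_push_mutex);
for (i = 0; i < worker_size; i++)
{
	int local_worker = combined_workerid[i];
	_starpu_pthread_mutex_t *sched_mutex;
	_starpu_pthread_cond_t *sched_cond;
	starpu_sched_ctx_get_worker_mutex_and_cond(sched_ctx_id, local_worker,
						   &sched_mutex, &sched_cond);
	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
	/* queue the task alias on local_worker here */
	_STARPU_PTHREAD_COND_SIGNAL(sched_cond);
	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
}
_STARPU_PTHREAD_MUTEX_UNLOCK(&hd->global_push_mutex);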
 

+ 4 - 1  src/sched_policies/random_policy.c

@@ -21,6 +21,9 @@
 #include <core/workers.h>
 #include <core/sched_ctx.h>
 #include <sched_policies/fifo_queues.h>
+#ifdef HAVE_AYUDAME_H
+#include <Ayudame.h>
+#endif
 
 static int _random_push_task(struct starpu_task *task, unsigned prio)
 {
@@ -82,7 +85,7 @@ static int _random_push_task(struct starpu_task *task, unsigned prio)
 static int random_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
 	unsigned nworkers;
         int ret_val = -1;
 

+ 2 - 2  src/sched_policies/work_stealing_policy.c

@@ -1,6 +1,6 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
- * Copyright (C) 2010-2011  Université de Bordeaux 1
+ * Copyright (C) 2010-2012  Université de Bordeaux 1
  * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  * Copyright (C) 2011, 2012  INRIA
  *
@@ -299,7 +299,7 @@ int ws_push_task(struct starpu_task *task)
 	struct _starpu_job *j = _starpu_get_job_associated_to_task(task); 
 	int workerid = starpu_worker_get_id();
 
-	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
+	_starpu_pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
         unsigned nworkers;
         int ret_val = -1;