
src: prefix names of internal types and functions with _starpu

Nathalie Furmento, 13 years ago
parent commit 906d935956
47 changed files, with 481 additions and 481 deletions
  1. src/common/barrier.c (+19 -19)
  2. src/common/barrier.h (+5 -5)
  3. src/common/rwlock.c (+23 -23)
  4. src/common/rwlock.h (+9 -9)
  5. src/common/starpu_spinlock.c (+5 -5)
  6. src/common/starpu_spinlock.h (+8 -8)
  7. src/common/timing.c (+4 -4)
  8. src/common/utils.h (+17 -17)
  9. src/core/debug.c (+2 -2)
  10. src/core/dependencies/cg.c (+5 -5)
  11. src/core/dependencies/implicit_data_deps.c (+11 -11)
  12. src/core/dependencies/tags.c (+9 -9)
  13. src/core/dependencies/tags.h (+2 -2)
  14. src/core/dependencies/task_deps.c (+4 -4)
  15. src/core/jobs.c (+19 -19)
  16. src/core/perfmodel/perfmodel_history.c (+24 -24)
  17. src/core/progress_hook.c (+9 -9)
  18. src/core/sched_policy.c (+4 -4)
  19. src/core/task.c (+20 -20)
  20. src/core/task_bundle.c (+16 -16)
  21. src/core/workers.c (+23 -23)
  22. src/datawizard/coherency.h (+1 -1)
  23. src/datawizard/copy_driver.c (+10 -10)
  24. src/datawizard/data_request.c (+24 -24)
  25. src/datawizard/data_request.h (+1 -1)
  26. src/datawizard/filters.c (+3 -3)
  27. src/datawizard/interfaces/data_interface.c (+20 -20)
  28. src/datawizard/memalloc.c (+17 -17)
  29. src/datawizard/user_interactions.c (+24 -24)
  30. src/drivers/cpu/driver_cpu.c (+10 -10)
  31. src/drivers/cuda/driver_cuda.c (+6 -6)
  32. src/drivers/driver_common/driver_common.c (+1 -1)
  33. src/drivers/gordon/driver_gordon.c (+11 -11)
  34. src/drivers/opencl/driver_opencl.c (+12 -12)
  35. src/profiling/bound.c (+25 -25)
  36. src/profiling/profiling.c (+13 -13)
  37. src/sched_policies/deque_modeling_policy_data_aware.c (+5 -5)
  38. src/sched_policies/deque_queues.c (+2 -2)
  39. src/sched_policies/eager_central_policy.c (+2 -2)
  40. src/sched_policies/eager_central_priority_policy.c (+9 -9)
  41. src/sched_policies/fifo_queues.c (+5 -5)
  42. src/sched_policies/heft.c (+12 -12)
  43. src/sched_policies/parallel_greedy.c (+6 -6)
  44. src/sched_policies/parallel_heft.c (+9 -9)
  45. src/sched_policies/random_policy.c (+2 -2)
  46. src/sched_policies/stack_queues.c (+5 -5)
  47. src/sched_policies/work_stealing_policy.c (+8 -8)

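All 47 files follow the same two-part renaming pattern: internal typedefs such as _starpu_barrier_t or starpu_spinlock_t become plain struct tags prefixed with _starpu, and the error-checking pthread wrapper macros gain a _STARPU_ prefix, so that nothing exported from src/ can clash with user code. A minimal before/after sketch of the pattern (illustrative only, not taken from the commit; the error checking of the real macros is omitted here):

/* Before: typedef'd internal type, wrapper macro with a generic name. */
#include <pthread.h>

typedef struct {
	int count;
	pthread_mutex_t mutex;
} _starpu_barrier_t;

#define PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m)

/* After: plain struct tag and _STARPU_-prefixed macro, both clearly
 * marked as internal and out of the way of application namespaces. */
struct _starpu_barrier {
	int count;
	pthread_mutex_t mutex;
};

#define _STARPU_PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m)
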
+ 19 - 19
src/common/barrier.c

@@ -17,34 +17,34 @@
 #include <common/barrier.h>
 #include <common/utils.h>
 
-int _starpu_barrier_init(_starpu_barrier_t *barrier, int count)
+int _starpu_barrier_init(struct _starpu_barrier *barrier, int count)
 {
 	barrier->count = count;
 	barrier->reached_start = 0;
 	barrier->reached_exit = 0;
-	PTHREAD_MUTEX_INIT(&barrier->mutex, NULL);
-	PTHREAD_MUTEX_INIT(&barrier->mutex_exit, NULL);
-	PTHREAD_COND_INIT(&barrier->cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&barrier->mutex, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&barrier->mutex_exit, NULL);
+	_STARPU_PTHREAD_COND_INIT(&barrier->cond, NULL);
 	return 0;
 }
 
 static
-int _starpu_barrier_test(_starpu_barrier_t *barrier)
+int _starpu_barrier_test(struct _starpu_barrier *barrier)
 {
     /*
      * Check whether any threads are known to be waiting; report
      * "BUSY" if so.
      */
-        PTHREAD_MUTEX_LOCK(&barrier->mutex_exit);
+        _STARPU_PTHREAD_MUTEX_LOCK(&barrier->mutex_exit);
         if (barrier->reached_exit != barrier->count) {
-                PTHREAD_MUTEX_UNLOCK(&barrier->mutex_exit);
+                _STARPU_PTHREAD_MUTEX_UNLOCK(&barrier->mutex_exit);
                 return EBUSY;
         }
-        PTHREAD_MUTEX_UNLOCK(&barrier->mutex_exit);
+        _STARPU_PTHREAD_MUTEX_UNLOCK(&barrier->mutex_exit);
         return 0;
 }
 
-int _starpu_barrier_destroy(_starpu_barrier_t *barrier)
+int _starpu_barrier_destroy(struct _starpu_barrier *barrier)
 {
 	int ret = _starpu_barrier_test(barrier);
 	while (ret == EBUSY) {
@@ -52,36 +52,36 @@ int _starpu_barrier_destroy(_starpu_barrier_t *barrier)
 	}
 	_STARPU_DEBUG("reached_exit %d\n", barrier->reached_exit);
 
-	PTHREAD_MUTEX_DESTROY(&barrier->mutex);
-	PTHREAD_MUTEX_DESTROY(&barrier->mutex_exit);
-	PTHREAD_COND_DESTROY(&barrier->cond);
+	_STARPU_PTHREAD_MUTEX_DESTROY(&barrier->mutex);
+	_STARPU_PTHREAD_MUTEX_DESTROY(&barrier->mutex_exit);
+	_STARPU_PTHREAD_COND_DESTROY(&barrier->cond);
 	return 0;
 }
 
-int _starpu_barrier_wait(_starpu_barrier_t *barrier)
+int _starpu_barrier_wait(struct _starpu_barrier *barrier)
 {
 	int ret=0;
 
         // Wait until all threads enter the barrier
-	PTHREAD_MUTEX_LOCK(&barrier->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&barrier->mutex);
 	barrier->reached_exit=0;
 	barrier->reached_start++;
 	if (barrier->reached_start == barrier->count)
 	{
 		barrier->reached_start = 0;
-		PTHREAD_COND_BROADCAST(&barrier->cond);
+		_STARPU_PTHREAD_COND_BROADCAST(&barrier->cond);
 		ret = PTHREAD_BARRIER_SERIAL_THREAD;
 	}
 	else
 	{
-                PTHREAD_COND_WAIT(&barrier->cond,&barrier->mutex);
+                _STARPU_PTHREAD_COND_WAIT(&barrier->cond,&barrier->mutex);
 	}
-	PTHREAD_MUTEX_UNLOCK(&barrier->mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&barrier->mutex);
 
         // Count number of threads that exit the barrier
-	PTHREAD_MUTEX_LOCK(&barrier->mutex_exit);
+	_STARPU_PTHREAD_MUTEX_LOCK(&barrier->mutex_exit);
 	barrier->reached_exit ++;
-	PTHREAD_MUTEX_UNLOCK(&barrier->mutex_exit);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&barrier->mutex_exit);
 
 	return ret;
 }

+ 5 - 5
src/common/barrier.h

@@ -19,20 +19,20 @@
 
 #include <pthread.h>
 
-typedef struct {
+struct _starpu_barrier {
 	int count;
 	int reached_start;
 	int reached_exit;
 	pthread_mutex_t mutex;
 	pthread_mutex_t mutex_exit;
 	pthread_cond_t cond;
-} _starpu_barrier_t;
+};
 
-int _starpu_barrier_init(_starpu_barrier_t *barrier, int count);
+int _starpu_barrier_init(struct _starpu_barrier *barrier, int count);
 
-int _starpu_barrier_destroy(_starpu_barrier_t *barrier);
+int _starpu_barrier_destroy(struct _starpu_barrier *barrier);
 
-int _starpu_barrier_wait(_starpu_barrier_t *barrier);
+int _starpu_barrier_wait(struct _starpu_barrier *barrier);
 
 #if !defined(PTHREAD_BARRIER_SERIAL_THREAD)
 #  define PTHREAD_BARRIER_SERIAL_THREAD -1
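For context, a hedged usage sketch of the renamed barrier API. It assumes it is compiled inside the StarPU source tree (common/barrier.h is an internal header); the worker function and thread count are invented for illustration:

#include <pthread.h>
#include <common/barrier.h>   /* internal StarPU header */

#define NWORKERS 4

static struct _starpu_barrier barrier;

static void *worker(void *arg)
{
	(void) arg;
	/* block until all NWORKERS threads have reached the barrier */
	int ret = _starpu_barrier_wait(&barrier);
	if (ret == PTHREAD_BARRIER_SERIAL_THREAD)
	{
		/* exactly one thread per round gets this return value */
	}
	return NULL;
}

int main(void)
{
	pthread_t tids[NWORKERS];
	int i;

	_starpu_barrier_init(&barrier, NWORKERS);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tids[i], NULL, worker, NULL);
	for (i = 0; i < NWORKERS; i++)
		pthread_join(&tids[i], NULL);
	_starpu_barrier_destroy(&barrier);
	return 0;
}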

+ 23 - 23
src/common/rwlock.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -21,7 +21,7 @@
 
 #include "rwlock.h"
 
-static void _take_busy_lock(starpu_rw_lock_t *lock)
+static void _starpu_take_busy_lock(struct _starpu_rw_lock *lock)
 {
 	uint32_t prev;
 	do {
@@ -29,12 +29,12 @@ static void _take_busy_lock(starpu_rw_lock_t *lock)
 	} while (prev);
 }
 
-static void _release_busy_lock(starpu_rw_lock_t *lock)
+static void _starpu_release_busy_lock(struct _starpu_rw_lock *lock)
 {
 	STARPU_RELEASE(&lock->busy);
 }
 
-void _starpu_init_rw_lock(starpu_rw_lock_t *lock)
+void _starpu_init_rw_lock(struct _starpu_rw_lock *lock)
 {
 	STARPU_ASSERT(lock);
 
@@ -44,14 +44,14 @@ void _starpu_init_rw_lock(starpu_rw_lock_t *lock)
 }
 
 
-int _starpu_take_rw_lock_write_try(starpu_rw_lock_t *lock)
+int _starpu_take_rw_lock_write_try(struct _starpu_rw_lock *lock)
 {
-	_take_busy_lock(lock);
+	_starpu_take_busy_lock(lock);
 	
 	if (lock->readercnt > 0 || lock->writer)
 	{
 		/* fail to take the lock */
-		_release_busy_lock(lock);
+		_starpu_release_busy_lock(lock);
 		return -1;
 	}
 	else {
@@ -60,19 +60,19 @@ int _starpu_take_rw_lock_write_try(starpu_rw_lock_t *lock)
 
 		/* no one was either writing nor reading */
 		lock->writer = 1;
-		_release_busy_lock(lock);
+		_starpu_release_busy_lock(lock);
 		return 0;
 	}
 }
 
-int _starpu_take_rw_lock_read_try(starpu_rw_lock_t *lock)
+int _starpu_take_rw_lock_read_try(struct _starpu_rw_lock *lock)
 {
-	_take_busy_lock(lock);
+	_starpu_take_busy_lock(lock);
 
 	if (lock->writer)
 	{
 		/* there is a writer ... */
-		_release_busy_lock(lock);
+		_starpu_release_busy_lock(lock);
 		return -1;
 	}
 	else {
@@ -81,7 +81,7 @@ int _starpu_take_rw_lock_read_try(starpu_rw_lock_t *lock)
 		/* no one is writing */
 		/* XXX check wrap arounds ... */
 		lock->readercnt++;
-		_release_busy_lock(lock);
+		_starpu_release_busy_lock(lock);
 
 		return 0;
 	}
@@ -89,15 +89,15 @@ int _starpu_take_rw_lock_read_try(starpu_rw_lock_t *lock)
 
 
 
-void _starpu_take_rw_lock_write(starpu_rw_lock_t *lock)
+void _starpu_take_rw_lock_write(struct _starpu_rw_lock *lock)
 {
 	do {
-		_take_busy_lock(lock);
+		_starpu_take_busy_lock(lock);
 		
 		if (lock->readercnt > 0 || lock->writer)
 		{
 			/* fail to take the lock */
-			_release_busy_lock(lock);
+			_starpu_release_busy_lock(lock);
 		}
 		else {
 			STARPU_ASSERT(lock->readercnt == 0);
@@ -105,21 +105,21 @@ void _starpu_take_rw_lock_write(starpu_rw_lock_t *lock)
 	
 			/* no one was either writing nor reading */
 			lock->writer = 1;
-			_release_busy_lock(lock);
+			_starpu_release_busy_lock(lock);
 			return;
 		}
 	} while (1);
 }
 
-void _starpu_take_rw_lock_read(starpu_rw_lock_t *lock)
+void _starpu_take_rw_lock_read(struct _starpu_rw_lock *lock)
 {
 	do {
-		_take_busy_lock(lock);
+		_starpu_take_busy_lock(lock);
 
 		if (lock->writer)
 		{
 			/* there is a writer ... */
-			_release_busy_lock(lock);
+			_starpu_release_busy_lock(lock);
 		}
 		else {
 			STARPU_ASSERT(lock->writer == 0);
@@ -127,16 +127,16 @@ void _starpu_take_rw_lock_read(starpu_rw_lock_t *lock)
 			/* no one is writing */
 			/* XXX check wrap arounds ... */
 			lock->readercnt++;
-			_release_busy_lock(lock);
+			_starpu_release_busy_lock(lock);
 
 			return;
 		}
 	} while (1);
 }
 
-void _starpu_release_rw_lock(starpu_rw_lock_t *lock)
+void _starpu_release_rw_lock(struct _starpu_rw_lock *lock)
 {
-	_take_busy_lock(lock);
+	_starpu_take_busy_lock(lock);
 	/* either writer or reader (exactly one !) */
 	if (lock->writer) 
 	{
@@ -148,5 +148,5 @@ void _starpu_release_rw_lock(starpu_rw_lock_t *lock)
 		STARPU_ASSERT(lock->writer == 0);
 		lock->readercnt--;
 	}
-	_release_busy_lock(lock);
+	_starpu_release_busy_lock(lock);
 }

+ 9 - 9
src/common/rwlock.h

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -22,30 +22,30 @@
 #include <starpu.h>
 
 /* Dummy implementation of a RW-lock using a spinlock. */
-typedef struct starpu_rw_lock_s {
+struct _starpu_rw_lock {
 	uint32_t busy;
 	uint8_t writer;
 	uint16_t readercnt;
-} starpu_rw_lock_t;
+};
 
 /* Initialize the RW-lock */
-void _starpu_init_rw_lock(starpu_rw_lock_t *lock);
+void _starpu_init_rw_lock(struct _starpu_rw_lock *lock);
 
 /* Grab the RW-lock in a write mode */
-void _starpu_take_rw_lock_write(starpu_rw_lock_t *lock);
+void _starpu_take_rw_lock_write(struct _starpu_rw_lock *lock);
 
 /* Grab the RW-lock in a read mode */
-void _starpu_take_rw_lock_read(starpu_rw_lock_t *lock);
+void _starpu_take_rw_lock_read(struct _starpu_rw_lock *lock);
 
 /* Try to grab the RW-lock in a write mode. Returns 0 in case of success, -1
  * otherwise. */
-int _starpu_take_rw_lock_write_try(starpu_rw_lock_t *lock);
+int _starpu_take_rw_lock_write_try(struct _starpu_rw_lock *lock);
 
 /* Try to grab the RW-lock in a read mode. Returns 0 in case of success, -1
  * otherwise. */
-int _starpu_take_rw_lock_read_try(starpu_rw_lock_t *lock);
+int _starpu_take_rw_lock_read_try(struct _starpu_rw_lock *lock);
 
 /* Unlock the RW-lock. */
-void _starpu_release_rw_lock(starpu_rw_lock_t *lock);
+void _starpu_release_rw_lock(struct _starpu_rw_lock *lock);
 
 #endif
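The RW-lock above is a spin-based construction: a busy flag taken by test-and-set protects the writer flag and the reader count. A standalone sketch of the same technique, using GCC atomic builtins where StarPU uses its STARPU_TEST_AND_SET/STARPU_RELEASE macros (readercnt wrap-around is ignored, as the XXX comments in the original also note):

#include <stdint.h>
#include <stdio.h>

struct rw_lock {
	uint32_t busy;      /* tiny spinlock guarding the two fields below */
	uint8_t writer;
	uint16_t readercnt;
};

static void take_busy_lock(struct rw_lock *lock)
{
	while (__sync_lock_test_and_set(&lock->busy, 1))
		; /* spin until the previous value was 0 */
}

static void release_busy_lock(struct rw_lock *lock)
{
	__sync_lock_release(&lock->busy);
}

static void take_read(struct rw_lock *lock)
{
	for (;;) {
		take_busy_lock(lock);
		if (!lock->writer) {
			lock->readercnt++;       /* no writer: become a reader */
			release_busy_lock(lock);
			return;
		}
		release_busy_lock(lock);         /* a writer is active, retry */
	}
}

static void take_write(struct rw_lock *lock)
{
	for (;;) {
		take_busy_lock(lock);
		if (!lock->writer && lock->readercnt == 0) {
			lock->writer = 1;        /* nobody reading or writing */
			release_busy_lock(lock);
			return;
		}
		release_busy_lock(lock);
	}
}

static void release_lock(struct rw_lock *lock)
{
	take_busy_lock(lock);
	if (lock->writer)
		lock->writer = 0;
	else
		lock->readercnt--;
	release_busy_lock(lock);
}

int main(void)
{
	struct rw_lock lock = { 0, 0, 0 };
	take_read(&lock);
	release_lock(&lock);
	take_write(&lock);
	release_lock(&lock);
	printf("ok\n");
	return 0;
}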

+ 5 - 5
src/common/starpu_spinlock.c

@@ -19,7 +19,7 @@
 #include <common/config.h>
 #include <starpu_util.h>
 
-int _starpu_spin_init(starpu_spinlock_t *lock)
+int _starpu_spin_init(struct _starpu_spinlock *lock)
 {
 #ifdef STARPU_SPINLOCK_CHECK
 //	memcpy(&lock->errcheck_lock, PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, sizeof(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP));
@@ -44,7 +44,7 @@ int _starpu_spin_init(starpu_spinlock_t *lock)
 #endif
 }
 
-int _starpu_spin_destroy(starpu_spinlock_t *lock STARPU_ATTRIBUTE_UNUSED)
+int _starpu_spin_destroy(struct _starpu_spinlock *lock STARPU_ATTRIBUTE_UNUSED)
 {
 #ifdef STARPU_SPINLOCK_CHECK
 	pthread_mutexattr_destroy(&lock->errcheck_attr);
@@ -61,7 +61,7 @@ int _starpu_spin_destroy(starpu_spinlock_t *lock STARPU_ATTRIBUTE_UNUSED)
 #endif
 }
 
-int _starpu_spin_lock(starpu_spinlock_t *lock)
+int _starpu_spin_lock(struct _starpu_spinlock *lock)
 {
 #ifdef STARPU_SPINLOCK_CHECK
 	int ret = pthread_mutex_lock(&lock->errcheck_lock);
@@ -82,7 +82,7 @@ int _starpu_spin_lock(starpu_spinlock_t *lock)
 #endif
 }
 
-int _starpu_spin_trylock(starpu_spinlock_t *lock)
+int _starpu_spin_trylock(struct _starpu_spinlock *lock)
 {
 #ifdef STARPU_SPINLOCK_CHECK
 	int ret = pthread_mutex_trylock(&lock->errcheck_lock);
@@ -101,7 +101,7 @@ int _starpu_spin_trylock(starpu_spinlock_t *lock)
 #endif
 }
 
-int _starpu_spin_unlock(starpu_spinlock_t *lock STARPU_ATTRIBUTE_UNUSED)
+int _starpu_spin_unlock(struct _starpu_spinlock *lock STARPU_ATTRIBUTE_UNUSED)
 {
 #ifdef STARPU_SPINLOCK_CHECK
 	int ret = pthread_mutex_unlock(&lock->errcheck_lock);

+ 8 - 8
src/common/starpu_spinlock.h

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2010-2011  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -23,7 +23,7 @@
 #include <common/utils.h>
 #include <common/config.h>
 
-typedef struct starpu_spinlock_s {
+struct _starpu_spinlock {
 #ifdef STARPU_SPINLOCK_CHECK
 	pthread_mutexattr_t errcheck_attr;
 	pthread_mutex_t errcheck_lock;
@@ -35,13 +35,13 @@ typedef struct starpu_spinlock_s {
 	uint32_t taken __attribute__ ((aligned(16)));
 #endif
 #endif
-} starpu_spinlock_t;
+};
 
-int _starpu_spin_init(starpu_spinlock_t *lock);
-int _starpu_spin_destroy(starpu_spinlock_t *lock);
+int _starpu_spin_init(struct _starpu_spinlock *lock);
+int _starpu_spin_destroy(struct _starpu_spinlock *lock);
 
-int _starpu_spin_lock(starpu_spinlock_t *lock);
-int _starpu_spin_trylock(starpu_spinlock_t *lock);
-int _starpu_spin_unlock(starpu_spinlock_t *lock);
+int _starpu_spin_lock(struct _starpu_spinlock *lock);
+int _starpu_spin_trylock(struct _starpu_spinlock *lock);
+int _starpu_spin_unlock(struct _starpu_spinlock *lock);
 
 #endif // __STARPU_SPINLOCK_H__

+ 4 - 4
src/common/timing.c

@@ -94,8 +94,8 @@ typedef union starpu_u_tick
 } starpu_tick_t;
 
 #define STARPU_GET_TICK(t) __asm__ volatile("rdtsc" : "=a" ((t).sub.low), "=d" ((t).sub.high))
-#define TICK_RAW_DIFF(t1, t2) ((t2).tick - (t1).tick)
-#define TICK_DIFF(t1, t2) (TICK_RAW_DIFF(t1, t2) - residual)
+#define STARPU_TICK_RAW_DIFF(t1, t2) ((t2).tick - (t1).tick)
+#define STARPU_TICK_DIFF(t1, t2) (STARPU_TICK_RAW_DIFF(t1, t2) - residual)
 
 static starpu_tick_t reference_start_tick;
 static double scale = 0.0;
@@ -116,7 +116,7 @@ void _starpu_timing_init(void)
     {
       STARPU_GET_TICK(t1);
       STARPU_GET_TICK(t2);
-      residual = STARPU_MIN(residual, TICK_RAW_DIFF(t1, t2));
+      residual = STARPU_MIN(residual, STARPU_TICK_RAW_DIFF(t1, t2));
     }
   
   {
@@ -129,7 +129,7 @@ void _starpu_timing_init(void)
     gettimeofday(&tv2,0);
     scale = ((tv2.tv_sec*1e6 + tv2.tv_usec) -
 	     (tv1.tv_sec*1e6 + tv1.tv_usec)) / 
-      (double)(TICK_DIFF(t1, t2));
+      (double)(STARPU_TICK_DIFF(t1, t2));
   }
 
   STARPU_GET_TICK(reference_start_tick);
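timing.c only renames its TICK_DIFF helpers, but the calibration they serve is worth spelling out: measure the smallest cost of two back-to-back rdtsc reads (the residual), then derive a ticks-to-microseconds scale from gettimeofday over a known interval. A standalone, x86-only sketch of that idea (not StarPU code):

#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>
#include <unistd.h>

static inline uint64_t get_tick(void)
{
	uint32_t lo, hi;
	__asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t) hi << 32) | lo;
}

int main(void)
{
	/* cost of reading the counter itself: the "residual" */
	uint64_t residual = UINT64_MAX;
	for (int i = 0; i < 10; i++) {
		uint64_t a = get_tick();
		uint64_t b = get_tick();
		if (b - a < residual)
			residual = b - a;
	}

	/* scale ticks to microseconds over a known sleep interval */
	struct timeval tv1, tv2;
	gettimeofday(&tv1, 0);
	uint64_t t1 = get_tick();
	usleep(500000);
	uint64_t t2 = get_tick();
	gettimeofday(&tv2, 0);

	double usecs = (tv2.tv_sec * 1e6 + tv2.tv_usec)
	             - (tv1.tv_sec * 1e6 + tv1.tv_usec);
	double scale = usecs / (double) (t2 - t1 - residual);

	printf("residual = %llu ticks, scale = %g us/tick\n",
	       (unsigned long long) residual, scale);
	return 0;
}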

+ 17 - 17
src/common/utils.h

@@ -56,25 +56,25 @@ int _starpu_check_mutex_deadlock(pthread_mutex_t *mutex);
 /* If FILE is currently on a comment line, eat it.  */
 void _starpu_drop_comments(FILE *f);
 
-#define PTHREAD_MUTEX_INIT(mutex, attr) { int p_ret = pthread_mutex_init((mutex), (attr)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_mutex_init: %s\n", strerror(p_ret)); STARPU_ABORT(); }}
-#define PTHREAD_MUTEX_DESTROY(mutex) { int p_ret = pthread_mutex_destroy(mutex); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_mutex_destroy: %s\n", strerror(p_ret)); STARPU_ABORT(); }}
-#define PTHREAD_MUTEX_LOCK(mutex) { int p_ret = pthread_mutex_lock(mutex); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_mutex_lock : %s\n", strerror(p_ret)); STARPU_ABORT(); }}
-#define PTHREAD_MUTEX_UNLOCK(mutex) { int p_ret = pthread_mutex_unlock(mutex); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_mutex_unlock : %s\n", strerror(p_ret)); STARPU_ABORT(); }}
+#define _STARPU_PTHREAD_MUTEX_INIT(mutex, attr) { int p_ret = pthread_mutex_init((mutex), (attr)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_mutex_init: %s\n", strerror(p_ret)); STARPU_ABORT(); }}
+#define _STARPU_PTHREAD_MUTEX_DESTROY(mutex) { int p_ret = pthread_mutex_destroy(mutex); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_mutex_destroy: %s\n", strerror(p_ret)); STARPU_ABORT(); }}
+#define _STARPU_PTHREAD_MUTEX_LOCK(mutex) { int p_ret = pthread_mutex_lock(mutex); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_mutex_lock : %s\n", strerror(p_ret)); STARPU_ABORT(); }}
+#define _STARPU_PTHREAD_MUTEX_UNLOCK(mutex) { int p_ret = pthread_mutex_unlock(mutex); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_mutex_unlock : %s\n", strerror(p_ret)); STARPU_ABORT(); }}
 
-#define PTHREAD_RWLOCK_INIT(rwlock, attr) { int p_ret = pthread_rwlock_init((rwlock), (attr)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_init : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_RWLOCK_RDLOCK(rwlock) { int p_ret = pthread_rwlock_rdlock(rwlock); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_rdlock : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_RWLOCK_WRLOCK(rwlock) { int p_ret = pthread_rwlock_wrlock(rwlock); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_wrlock : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_RWLOCK_UNLOCK(rwlock) { int p_ret = pthread_rwlock_unlock(rwlock); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_unlock : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_RWLOCK_DESTROY(rwlock) { int p_ret = pthread_rwlock_destroy(rwlock); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_destroy : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_RWLOCK_INIT(rwlock, attr) { int p_ret = pthread_rwlock_init((rwlock), (attr)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_init : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_RWLOCK_RDLOCK(rwlock) { int p_ret = pthread_rwlock_rdlock(rwlock); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_rdlock : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_RWLOCK_WRLOCK(rwlock) { int p_ret = pthread_rwlock_wrlock(rwlock); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_wrlock : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_RWLOCK_UNLOCK(rwlock) { int p_ret = pthread_rwlock_unlock(rwlock); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_unlock : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_RWLOCK_DESTROY(rwlock) { int p_ret = pthread_rwlock_destroy(rwlock); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_rwlock_destroy : %s\n", strerror(p_ret)); STARPU_ABORT();}}
 
-#define PTHREAD_COND_INIT(cond, attr) { int p_ret = pthread_cond_init((cond), (attr)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_init : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_COND_DESTROY(cond) { int p_ret = pthread_cond_destroy(cond); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_destroy : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_COND_SIGNAL(cond) { int p_ret = pthread_cond_signal(cond); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_signal : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_COND_BROADCAST(cond) { int p_ret = pthread_cond_broadcast(cond); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_broadcast : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_COND_WAIT(cond, mutex) { int p_ret = pthread_cond_wait((cond), (mutex)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_wait : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_COND_INIT(cond, attr) { int p_ret = pthread_cond_init((cond), (attr)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_init : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_COND_DESTROY(cond) { int p_ret = pthread_cond_destroy(cond); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_destroy : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_COND_SIGNAL(cond) { int p_ret = pthread_cond_signal(cond); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_signal : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_COND_BROADCAST(cond) { int p_ret = pthread_cond_broadcast(cond); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_broadcast : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_COND_WAIT(cond, mutex) { int p_ret = pthread_cond_wait((cond), (mutex)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_cond_wait : %s\n", strerror(p_ret)); STARPU_ABORT();}}
 
-#define PTHREAD_BARRIER_INIT(barrier, attr, count) { int p_ret = pthread_barrier_init((barrier), (attr), (count)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_barrier_init : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_BARRIER_DESTROY(barrier) { int p_ret = pthread_barrier_destroy((barrier)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_barrier_destroy : %s\n", strerror(p_ret)); STARPU_ABORT();}}
-#define PTHREAD_BARRIER_WAIT(barrier) { int p_ret = pthread_barrier_wait(barrier); if (STARPU_UNLIKELY(!((p_ret == 0) || (p_ret == PTHREAD_BARRIER_SERIAL_THREAD)))) { fprintf(stderr, "pthread_barrier_wait : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_BARRIER_INIT(barrier, attr, count) { int p_ret = pthread_barrier_init((barrier), (attr), (count)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_barrier_init : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_BARRIER_DESTROY(barrier) { int p_ret = pthread_barrier_destroy((barrier)); if (STARPU_UNLIKELY(p_ret)) { fprintf(stderr, "pthread_barrier_destroy : %s\n", strerror(p_ret)); STARPU_ABORT();}}
+#define _STARPU_PTHREAD_BARRIER_WAIT(barrier) { int p_ret = pthread_barrier_wait(barrier); if (STARPU_UNLIKELY(!((p_ret == 0) || (p_ret == PTHREAD_BARRIER_SERIAL_THREAD)))) { fprintf(stderr, "pthread_barrier_wait : %s\n", strerror(p_ret)); STARPU_ABORT();}}
 
 #endif // __COMMON_UTILS_H__
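The utils.h hunk is the core of the commit: every error-checking pthread wrapper loses its generic PTHREAD_* name in favour of a _STARPU_PTHREAD_* one. A standalone sketch of what such a wrapper does, with plain abort() standing in for the StarPU-specific STARPU_UNLIKELY/STARPU_ABORT machinery:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Call the pthread function and abort with a readable message on failure,
 * in the spirit of _STARPU_PTHREAD_MUTEX_LOCK/_UNLOCK. */
#define CHECKED_MUTEX_LOCK(mutex) \
	do { \
		int p_ret = pthread_mutex_lock(mutex); \
		if (p_ret) { \
			fprintf(stderr, "pthread_mutex_lock: %s\n", strerror(p_ret)); \
			abort(); \
		} \
	} while (0)

#define CHECKED_MUTEX_UNLOCK(mutex) \
	do { \
		int p_ret = pthread_mutex_unlock(mutex); \
		if (p_ret) { \
			fprintf(stderr, "pthread_mutex_unlock: %s\n", strerror(p_ret)); \
			abort(); \
		} \
	} while (0)

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static int counter;

int main(void)
{
	CHECKED_MUTEX_LOCK(&m);
	counter++;
	CHECKED_MUTEX_UNLOCK(&m);
	printf("counter = %d\n", counter);
	return 0;
}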

+ 2 - 2
src/core/debug.c

@@ -60,9 +60,9 @@ void _starpu_print_to_logfile(const char *format STARPU_ATTRIBUTE_UNUSED, ...)
 #ifdef STARPU_VERBOSE
 	va_list args;
 	va_start(args, format);
-	PTHREAD_MUTEX_LOCK(&logfile_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&logfile_mutex);
 	vfprintf(logfile, format, args);
-	PTHREAD_MUTEX_UNLOCK(&logfile_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&logfile_mutex);
 	va_end( args );
 #endif
 }

+ 5 - 5
src/core/dependencies/cg.c

@@ -98,10 +98,10 @@ void _starpu_notify_cg(starpu_cg_t *cg)
 			case STARPU_CG_APPS: {
 				/* this is a cg for an application waiting on a set of
 	 			 * tags, wake the thread */
-				PTHREAD_MUTEX_LOCK(&cg->succ.succ_apps.cg_mutex);
+				_STARPU_PTHREAD_MUTEX_LOCK(&cg->succ.succ_apps.cg_mutex);
 				cg->succ.succ_apps.completed = 1;
-				PTHREAD_COND_SIGNAL(&cg->succ.succ_apps.cg_cond);
-				PTHREAD_MUTEX_UNLOCK(&cg->succ.succ_apps.cg_mutex);
+				_STARPU_PTHREAD_COND_SIGNAL(&cg->succ.succ_apps.cg_cond);
+				_STARPU_PTHREAD_MUTEX_UNLOCK(&cg->succ.succ_apps.cg_mutex);
 				break;
 			}
 
@@ -174,7 +174,7 @@ void _starpu_notify_cg_list(struct starpu_cg_list_s *successors)
 		if (cg_type == STARPU_CG_TASK)
 		{
 			starpu_job_t j = cg->succ.job;
-			PTHREAD_MUTEX_LOCK(&j->sync_mutex);
+			_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
 		}			
 
 		_starpu_notify_cg(cg);
@@ -193,7 +193,7 @@ void _starpu_notify_cg_list(struct starpu_cg_list_s *successors)
 			if (j->submitted && (j->terminated > 0) && task->destroy && task->detach)
 				must_destroy_task = 1;
 
-			PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
 
 			if (must_destroy_task)
 				starpu_task_destroy(task);

+ 11 - 11
src/core/dependencies/implicit_data_deps.c

@@ -296,9 +296,9 @@ void _starpu_detect_implicit_data_deps(struct starpu_task *task)
 		if (mode & STARPU_SCRATCH)
 			continue;
 
-		PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
 		_starpu_detect_implicit_data_deps_with_handle(task, task, handle, mode);
-		PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 	}
         _STARPU_LOG_OUT();
 }
@@ -313,7 +313,7 @@ void _starpu_detect_implicit_data_deps(struct starpu_task *task)
  * dependency as this is not needed anymore. */
 void _starpu_release_data_enforce_sequential_consistency(struct starpu_task *task, starpu_data_handle handle)
 {
-	PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
 
 	if (handle->sequential_consistency)
 	{
@@ -391,13 +391,13 @@ void _starpu_release_data_enforce_sequential_consistency(struct starpu_task *tas
 		}
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 }
 
 void _starpu_add_post_sync_tasks(struct starpu_task *post_sync_task, starpu_data_handle handle)
 {
         _STARPU_LOG_IN();
-	PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
 
 	if (handle->sequential_consistency)
 	{
@@ -409,7 +409,7 @@ void _starpu_add_post_sync_tasks(struct starpu_task *post_sync_task, starpu_data
 		handle->post_sync_tasks = link;		
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
         _STARPU_LOG_OUT();
 }
 
@@ -418,7 +418,7 @@ void _starpu_unlock_post_sync_tasks(starpu_data_handle handle)
 	struct starpu_task_wrapper_list *post_sync_tasks = NULL;
 	unsigned do_submit_tasks = 0;
 
-	PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
 
 	if (handle->sequential_consistency)
 	{
@@ -434,7 +434,7 @@ void _starpu_unlock_post_sync_tasks(starpu_data_handle handle)
 
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 
 	if (do_submit_tasks)
 	{
@@ -456,7 +456,7 @@ void _starpu_unlock_post_sync_tasks(starpu_data_handle handle)
 int _starpu_data_wait_until_available(starpu_data_handle handle, starpu_access_mode mode)
 {
 	/* If sequential consistency is enabled, wait until data is available */
-	PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
 	int sequential_consistency = handle->sequential_consistency;
 	if (sequential_consistency)
 	{
@@ -471,7 +471,7 @@ int _starpu_data_wait_until_available(starpu_data_handle handle, starpu_access_m
 		/* It is not really a RW access, but we want to make sure that
 		 * all previous accesses are done */
 		_starpu_detect_implicit_data_deps_with_handle(sync_task, sync_task, handle, mode);
-		PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 
 		/* TODO detect if this is superflous */
 		int ret = starpu_task_submit(sync_task);
@@ -479,7 +479,7 @@ int _starpu_data_wait_until_available(starpu_data_handle handle, starpu_access_m
 		starpu_task_wait(sync_task);
 	}
 	else {
-		PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 	}
 
 	return 0;

+ 9 - 9
src/core/dependencies/tags.c

@@ -38,8 +38,8 @@ static starpu_cg_t *create_cg_apps(unsigned ntags)
 	cg->cg_type = STARPU_CG_APPS;
 
 	cg->succ.succ_apps.completed = 0;
-	PTHREAD_MUTEX_INIT(&cg->succ.succ_apps.cg_mutex, NULL);
-	PTHREAD_COND_INIT(&cg->succ.succ_apps.cg_cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&cg->succ.succ_apps.cg_mutex, NULL);
+	_STARPU_PTHREAD_COND_INIT(&cg->succ.succ_apps.cg_cond, NULL);
 
 	return cg;
 }
@@ -155,12 +155,12 @@ void _starpu_tag_set_ready(struct starpu_tag_s *tag)
 	 * lock again, resulting in a deadlock. */
 	_starpu_spin_unlock(&tag->lock);
 
-	PTHREAD_MUTEX_LOCK(&j->sync_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
 
 	/* enforce data dependencies */
 	_starpu_enforce_deps_starting_from_task(j, 1);
 
-	PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
 
 	_starpu_spin_lock(&tag->lock);
 }
@@ -331,15 +331,15 @@ int starpu_tag_wait_array(unsigned ntags, starpu_tag *id)
 		_starpu_spin_unlock(&tag_array[i]->lock);
 	}
 
-	PTHREAD_MUTEX_LOCK(&cg->succ.succ_apps.cg_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&cg->succ.succ_apps.cg_mutex);
 
 	while (!cg->succ.succ_apps.completed)
-		PTHREAD_COND_WAIT(&cg->succ.succ_apps.cg_cond, &cg->succ.succ_apps.cg_mutex);
+		_STARPU_PTHREAD_COND_WAIT(&cg->succ.succ_apps.cg_cond, &cg->succ.succ_apps.cg_mutex);
 
-	PTHREAD_MUTEX_UNLOCK(&cg->succ.succ_apps.cg_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&cg->succ.succ_apps.cg_mutex);
 
-	PTHREAD_MUTEX_DESTROY(&cg->succ.succ_apps.cg_mutex);
-	PTHREAD_COND_DESTROY(&cg->succ.succ_apps.cg_cond);
+	_STARPU_PTHREAD_MUTEX_DESTROY(&cg->succ.succ_apps.cg_mutex);
+	_STARPU_PTHREAD_COND_DESTROY(&cg->succ.succ_apps.cg_cond);
 
 	free(cg);
 

+ 2 - 2
src/core/dependencies/tags.h

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -45,7 +45,7 @@ typedef enum {
 struct starpu_job_s;
 
 struct starpu_tag_s {
-	starpu_spinlock_t lock;
+	struct _starpu_spinlock lock;
 	starpu_tag id; /* an identifier for the task */
 	starpu_tag_state state;
 

+ 4 - 4
src/core/dependencies/task_deps.c

@@ -69,7 +69,7 @@ void starpu_task_declare_deps_array(struct starpu_task *task, unsigned ndeps, st
 
 	job = _starpu_get_job_associated_to_task(task);
 
-	PTHREAD_MUTEX_LOCK(&job->sync_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&job->sync_mutex);
 
 	starpu_cg_t *cg = create_cg_task(ndeps, job);
 
@@ -85,11 +85,11 @@ void starpu_task_declare_deps_array(struct starpu_task *task, unsigned ndeps, st
 		STARPU_TRACE_TASK_DEPS(dep_job, job);
 		_starpu_bound_task_dep(job, dep_job);
 
-		PTHREAD_MUTEX_LOCK(&dep_job->sync_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(&dep_job->sync_mutex);
 		_starpu_task_add_succ(dep_job, cg);
-		PTHREAD_MUTEX_UNLOCK(&dep_job->sync_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&dep_job->sync_mutex);
 	}
 
 	
-	PTHREAD_MUTEX_UNLOCK(&job->sync_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&job->sync_mutex);
 }

+ 19 - 19
src/core/jobs.c

@@ -84,8 +84,8 @@ starpu_job_t __attribute__((malloc)) _starpu_job_create(struct starpu_task *task
 
 	_starpu_cg_list_init(&job->job_successors);
 
-	PTHREAD_MUTEX_INIT(&job->sync_mutex, NULL);
-	PTHREAD_COND_INIT(&job->sync_cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&job->sync_mutex, NULL);
+	_STARPU_PTHREAD_COND_INIT(&job->sync_cond, NULL);
 
 	job->bound_task = NULL;
 
@@ -101,13 +101,13 @@ starpu_job_t __attribute__((malloc)) _starpu_job_create(struct starpu_task *task
 
 void _starpu_job_destroy(starpu_job_t j)
 {
-	PTHREAD_COND_DESTROY(&j->sync_cond);
-	PTHREAD_MUTEX_DESTROY(&j->sync_mutex);
+	_STARPU_PTHREAD_COND_DESTROY(&j->sync_cond);
+	_STARPU_PTHREAD_MUTEX_DESTROY(&j->sync_mutex);
 
 	if (j->task_size > 1)
 	{
-		PTHREAD_BARRIER_DESTROY(&j->before_work_barrier);
-		PTHREAD_BARRIER_DESTROY(&j->after_work_barrier);
+		_STARPU_PTHREAD_BARRIER_DESTROY(&j->before_work_barrier);
+		_STARPU_PTHREAD_BARRIER_DESTROY(&j->after_work_barrier);
 	}
 
 	_starpu_cg_list_deinit(&j->job_successors);
@@ -121,7 +121,7 @@ void _starpu_wait_job(starpu_job_t j)
 	STARPU_ASSERT(!j->task->detach);
         _STARPU_LOG_IN();
 
-	PTHREAD_MUTEX_LOCK(&j->sync_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
 
 	/* We wait for the flag to have a value of 2 which means that both the
 	 * codelet's implementation and its callback have been executed. That
@@ -129,9 +129,9 @@ void _starpu_wait_job(starpu_job_t j)
 	 * executed (so that we cannot destroy the task while it is still being
 	 * manipulated by the driver). */
 	while (j->terminated != 2)
-		PTHREAD_COND_WAIT(&j->sync_cond, &j->sync_mutex);
+		_STARPU_PTHREAD_COND_WAIT(&j->sync_cond, &j->sync_mutex);
 
-	PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
         _STARPU_LOG_OUT();
 }
 
@@ -140,7 +140,7 @@ void _starpu_handle_job_termination(starpu_job_t j, unsigned job_is_already_lock
 	struct starpu_task *task = j->task;
 
 	if (!job_is_already_locked)
-		PTHREAD_MUTEX_LOCK(&j->sync_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
 
 	task->status = STARPU_TASK_FINISHED;
 
@@ -155,7 +155,7 @@ void _starpu_handle_job_termination(starpu_job_t j, unsigned job_is_already_lock
 	j->terminated = 1;
 
 	if (!job_is_already_locked)
-		PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
 
 	/* the callback is executed after the dependencies so that we may remove the tag 
  	 * of the task itself */
@@ -205,14 +205,14 @@ void _starpu_handle_job_termination(starpu_job_t j, unsigned job_is_already_lock
 		/* we do not desallocate the job structure if some is going to
 		 * wait after the task */
 		if (!job_is_already_locked)
-			PTHREAD_MUTEX_LOCK(&j->sync_mutex);
+			_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
 		/* A value of 2 is put to specify that not only the codelet but
 		 * also the callback were executed. */
 		j->terminated = 2;
-		PTHREAD_COND_BROADCAST(&j->sync_cond);
+		_STARPU_PTHREAD_COND_BROADCAST(&j->sync_cond);
 
 		if (!job_is_already_locked)
-			PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
 	}
 	else {
 		/* no one is going to synchronize with that task so we release
@@ -281,7 +281,7 @@ static unsigned _starpu_not_all_task_deps_are_fulfilled(starpu_job_t j, unsigned
 	struct starpu_cg_list_s *job_successors = &j->job_successors;
 
 	if (!job_is_already_locked)
-		PTHREAD_MUTEX_LOCK(&j->sync_mutex);	
+		_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);	
 
 	if (!j->submitted || (job_successors->ndeps != job_successors->ndeps_completed))
 	{
@@ -296,7 +296,7 @@ static unsigned _starpu_not_all_task_deps_are_fulfilled(starpu_job_t j, unsigned
 	}
 
 	if (!job_is_already_locked)
-		PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
 
 	return ret;
 }
@@ -380,15 +380,15 @@ int _starpu_push_local_task(struct starpu_worker_s *worker, struct starpu_task *
 	if (STARPU_UNLIKELY(!(worker->worker_mask & task->cl->where)))
 		return -ENODEV;
 
-	PTHREAD_MUTEX_LOCK(worker->sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(worker->sched_mutex);
 
 	if (back)
 		starpu_task_list_push_back(&worker->local_tasks, task);
 	else
 		starpu_task_list_push_front(&worker->local_tasks, task);
 
-	PTHREAD_COND_BROADCAST(worker->sched_cond);
-	PTHREAD_MUTEX_UNLOCK(worker->sched_mutex);
+	_STARPU_PTHREAD_COND_BROADCAST(worker->sched_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(worker->sched_mutex);
 
 	return 0;
 }

+ 24 - 24
src/core/perfmodel/perfmodel_history.c

@@ -534,18 +534,18 @@ static void get_model_debug_path(struct starpu_perfmodel *model, const char *arc
 int _starpu_register_model(struct starpu_perfmodel *model)
 {
 	/* If the model has already been loaded, there is nothing to do */
-	PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
 	if (model->is_loaded) {
-		PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
+		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
 		return 0;
 	}
-	PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
 
 	/* We have to make sure the model has not been loaded since the
          * last time we took the lock */
-	PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
 	if (model->is_loaded) {
-		PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
+		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
 		return 0;
 	}
 
@@ -575,7 +575,7 @@ int _starpu_register_model(struct starpu_perfmodel *model)
 	}
 #endif
 
-	PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
 	return 1;
 }
 
@@ -619,7 +619,7 @@ static void save_history_based_model(struct starpu_perfmodel *model)
 
 static void _starpu_dump_registered_models(void)
 {
-	PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
 
 	struct starpu_model_list_t *node;
 	node = registered_models;
@@ -633,14 +633,14 @@ static void _starpu_dump_registered_models(void)
 		/* XXX free node */
 	}
 
-	PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
 }
 
 void _starpu_initialize_registered_performance_models(void)
 {
 	registered_models = NULL;
 
-	PTHREAD_RWLOCK_INIT(&registered_models_rwlock, NULL);
+	_STARPU_PTHREAD_RWLOCK_INIT(&registered_models_rwlock, NULL);
 }
 
 void _starpu_deinitialize_registered_performance_models(void)
@@ -648,7 +648,7 @@ void _starpu_deinitialize_registered_performance_models(void)
 	if (_starpu_get_calibrate_flag())
 		_starpu_dump_registered_models();
 
-	PTHREAD_RWLOCK_DESTROY(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_DESTROY(&registered_models_rwlock);
 }
 
 /* We first try to grab the global lock in read mode to check whether the model
@@ -663,9 +663,9 @@ void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned s
 	
 	int already_loaded;
  
-	PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
 	already_loaded = model->is_loaded;
-	PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
 
 	if (already_loaded)
 		return;
@@ -673,18 +673,18 @@ void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned s
 	/* The model is still not loaded so we grab the lock in write mode, and
 	 * if it's not loaded once we have the lock, we do load it. */
 
-	PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
 
 	/* Was the model initialized since the previous test ? */
 	if (model->is_loaded)
 	{
-		PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
+		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
 		return;
 	}
 	
-	PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
+	_STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
 
-	PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
 
 	/* make sure the performance model directory exists (or create it) */
 	_starpu_create_sampling_directory_if_needed();
@@ -735,9 +735,9 @@ void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned s
 
 	model->is_loaded = 1;
 
-	PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
 
-	PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
 }
 
 /* This function is intended to be used by external tools that should read
@@ -886,9 +886,9 @@ double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfm
 		struct starpu_htbl32_node_s *history = per_arch_model->history;
 		struct starpu_history_entry_t *entry;
 
-		PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
+		_STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
 		entry = (struct starpu_history_entry_t *) _starpu_htbl_search_32(history, key);
-		PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
+		_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
 
 		if (entry && entry->nsample >= STARPU_CALIBRATION_MINIMUM)
 			exp = entry->mean;
@@ -917,9 +917,9 @@ double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, e
 	if (!history)
 		return -1.0;
 
-	PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
+	_STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
 	entry = (struct starpu_history_entry_t *) _starpu_htbl_search_32(history, key);
-	PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
 
 	exp = entry?entry->mean:-1.0;
 
@@ -942,7 +942,7 @@ void _starpu_update_perfmodel_history(starpu_job_t j, struct starpu_perfmodel *m
 {
 	if (model)
 	{
-		PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
+		_STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
 
 		struct starpu_per_arch_perfmodel_t *per_arch_model = &model->per_arch[arch][nimpl];
 
@@ -1053,6 +1053,6 @@ void _starpu_update_perfmodel_history(starpu_job_t j, struct starpu_perfmodel *m
 
 #endif
 		
-		PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
+		_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
 	}
 }
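The perfmodel hunks all serve one pattern: check the is_loaded flag under the read lock, and only if it is unset retake the lock in write mode and re-check before doing the expensive load. A minimal standalone sketch of that double-checked pattern, with load_model() standing in for the real history loading:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t models_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static int is_loaded;

static void load_model(void)
{
	/* stands in for reading the performance model from disk */
	printf("loading model\n");
}

static void ensure_model_loaded(void)
{
	/* Fast path: most calls only need the read lock. */
	pthread_rwlock_rdlock(&models_rwlock);
	int already = is_loaded;
	pthread_rwlock_unlock(&models_rwlock);
	if (already)
		return;

	/* Slow path: retake the lock in write mode and re-check, since
	 * another thread may have loaded the model in the meantime. */
	pthread_rwlock_wrlock(&models_rwlock);
	if (!is_loaded) {
		load_model();
		is_loaded = 1;
	}
	pthread_rwlock_unlock(&models_rwlock);
}

int main(void)
{
	ensure_model_loaded();
	ensure_model_loaded(); /* second call takes the fast path */
	return 0;
}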

+ 9 - 9
src/core/progress_hook.c

@@ -36,7 +36,7 @@ static int active_hook_cnt = 0;
 int starpu_progression_hook_register(unsigned (*func)(void *arg), void *arg)
 {
 	int hook;
-	PTHREAD_RWLOCK_WRLOCK(&progression_hook_rwlock);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&progression_hook_rwlock);
 	for (hook = 0; hook < NMAXHOOKS; hook++)
 	{
 		if (!hooks[hook].active)
@@ -47,13 +47,13 @@ int starpu_progression_hook_register(unsigned (*func)(void *arg), void *arg)
 			hooks[hook].active = 1;
 			active_hook_cnt++;
 
-			PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
+			_STARPU_PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
 			
 			return hook;
 		}
 	}
 
-	PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
 
 	starpu_wake_all_blocked_workers();
 
@@ -63,22 +63,22 @@ int starpu_progression_hook_register(unsigned (*func)(void *arg), void *arg)
 
 void starpu_progression_hook_deregister(int hook_id)
 {
-	PTHREAD_RWLOCK_WRLOCK(&progression_hook_rwlock);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&progression_hook_rwlock);
 
 	if (hooks[hook_id].active)
 		active_hook_cnt--;
 
 	hooks[hook_id].active = 0;
 
-	PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
 }
 
 unsigned _starpu_execute_registered_progression_hooks(void)
 {
 	/* If there is no hook registered, we short-cut loop. */
-	PTHREAD_RWLOCK_RDLOCK(&progression_hook_rwlock);
+	_STARPU_PTHREAD_RWLOCK_RDLOCK(&progression_hook_rwlock);
 	int no_hook = (active_hook_cnt == 0);
-	PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
 
 	if (no_hook)
 		return 1;
@@ -92,9 +92,9 @@ unsigned _starpu_execute_registered_progression_hooks(void)
 	{
 		unsigned active;
 
-		PTHREAD_RWLOCK_RDLOCK(&progression_hook_rwlock);
+		_STARPU_PTHREAD_RWLOCK_RDLOCK(&progression_hook_rwlock);
 		active = hooks[hook].active;
-		PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
+		_STARPU_PTHREAD_RWLOCK_UNLOCK(&progression_hook_rwlock);
 
 		unsigned may_block_hook = 1;
 

+ 4 - 4
src/core/sched_policy.c

@@ -253,8 +253,8 @@ static int _starpu_push_task_on_specific_worker(struct starpu_task *task, int wo
 		j->combined_workerid = workerid;
 		j->active_task_alias_count = 0;
 
-		PTHREAD_BARRIER_INIT(&j->before_work_barrier, NULL, worker_size);
-		PTHREAD_BARRIER_INIT(&j->after_work_barrier, NULL, worker_size);
+		_STARPU_PTHREAD_BARRIER_INIT(&j->before_work_barrier, NULL, worker_size);
+		_STARPU_PTHREAD_BARRIER_INIT(&j->after_work_barrier, NULL, worker_size);
 
 		for (i = 0; i < worker_size; i++)
 		{
@@ -359,7 +359,7 @@ void _starpu_wait_on_sched_event(void)
 {
 	struct starpu_worker_s *worker = _starpu_get_local_worker_key();
 
-	PTHREAD_MUTEX_LOCK(worker->sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(worker->sched_mutex);
 
 	_starpu_handle_all_pending_node_data_requests(worker->memory_node);
 
@@ -370,7 +370,7 @@ void _starpu_wait_on_sched_event(void)
 #endif
 	}
 
-	PTHREAD_MUTEX_UNLOCK(worker->sched_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(worker->sched_mutex);
 }
 
 /* The scheduling policy may put tasks directly into a worker's local queue so

+ 20 - 20
src/core/task.c

@@ -97,13 +97,13 @@ void starpu_task_deinit(struct starpu_task *task)
 	struct starpu_task_bundle *bundle = task->bundle;
 	if (bundle)
 	{
-		PTHREAD_MUTEX_LOCK(&bundle->mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);
 		int ret = starpu_task_bundle_remove(bundle, task);
 
 		/* Perhaps the bundle was destroyed when removing the last
 		 * entry */
 		if (ret != 1)
-			PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 	}
 
 	starpu_job_t j = (struct starpu_job_s *)task->starpu_private;
@@ -207,13 +207,13 @@ int _starpu_submit_job(starpu_job_t j)
 
 	_starpu_increment_nsubmitted_tasks();
 
-	PTHREAD_MUTEX_LOCK(&j->sync_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
 	
 	j->submitted = 1;
 
 	int ret = _starpu_enforce_deps_and_schedule(j, 1);
 
-	PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
 
         _STARPU_LOG_OUT();
         return ret;
@@ -338,14 +338,14 @@ int starpu_task_wait_for_all(void)
 	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
 		return -EDEADLK;
 
-	PTHREAD_MUTEX_LOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
 
 	STARPU_TRACE_TASK_WAIT_FOR_ALL;
 
 	while (nsubmitted > 0)
-		PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);
+		_STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);
 	
-	PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
 
 	return 0;
 }
@@ -359,59 +359,59 @@ int starpu_task_wait_for_no_ready(void)
 	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
 		return -EDEADLK;
 
-	PTHREAD_MUTEX_LOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
 
 	STARPU_TRACE_TASK_WAIT_FOR_ALL;
 
 	while (nready > 0)
-		PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);
+		_STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);
 	
-	PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
 
 	return 0;
 }
 
 void _starpu_decrement_nsubmitted_tasks(void)
 {
-	PTHREAD_MUTEX_LOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
 
 	if (--nsubmitted == 0)
-		PTHREAD_COND_BROADCAST(&submitted_cond);
+		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
 
 	STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);
 
-	PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
 
 }
 
 static void _starpu_increment_nsubmitted_tasks(void)
 {
-	PTHREAD_MUTEX_LOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
 
 	nsubmitted++;
 
 	STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);
 
-	PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
 }
 
 void _starpu_increment_nready_tasks(void)
 {
-	PTHREAD_MUTEX_LOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
 
 	nready++;
 
-	PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
 }
 
 void _starpu_decrement_nready_tasks(void)
 {
-	PTHREAD_MUTEX_LOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
 
 	if (--nready == 0)
-		PTHREAD_COND_BROADCAST(&submitted_cond);
+		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
 
-	PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
 
 }
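task.c shows the other recurring pattern touched by the rename: the nsubmitted/nready counters are guarded by a mutex and broadcast a condition when they drop to zero, which is what lets starpu_task_wait_for_all() block. A minimal standalone sketch of that counter-plus-condition pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t submitted_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t submitted_cond = PTHREAD_COND_INITIALIZER;
static int nsubmitted;

static void increment_nsubmitted(void)
{
	pthread_mutex_lock(&submitted_mutex);
	nsubmitted++;
	pthread_mutex_unlock(&submitted_mutex);
}

static void decrement_nsubmitted(void)
{
	pthread_mutex_lock(&submitted_mutex);
	if (--nsubmitted == 0)
		pthread_cond_broadcast(&submitted_cond); /* wake any waiters */
	pthread_mutex_unlock(&submitted_mutex);
}

static void wait_for_all(void)
{
	pthread_mutex_lock(&submitted_mutex);
	while (nsubmitted > 0)
		pthread_cond_wait(&submitted_cond, &submitted_mutex);
	pthread_mutex_unlock(&submitted_mutex);
}

static void *worker(void *arg)
{
	(void) arg;
	decrement_nsubmitted(); /* pretend the task completed */
	return NULL;
}

int main(void)
{
	pthread_t tid;
	increment_nsubmitted();
	pthread_create(&tid, NULL, worker, NULL);
	wait_for_all();
	pthread_join(&tid, NULL);
	printf("all tasks done\n");
	return 0;
}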
 

+ 16 - 16
src/core/task_bundle.c

@@ -27,7 +27,7 @@ void starpu_task_bundle_init(struct starpu_task_bundle *bundle)
 {
 	STARPU_ASSERT(bundle);
 
-	PTHREAD_MUTEX_INIT(&bundle->mutex, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&bundle->mutex, NULL);
 	bundle->closed = 0;
 
 	/* Start with an empty list */
@@ -51,7 +51,7 @@ void starpu_task_bundle_deinit(struct starpu_task_bundle *bundle)
 		free(entry);
 	}
 
-	PTHREAD_MUTEX_DESTROY(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_DESTROY(&bundle->mutex);
 
 	if (bundle->destroy)
 		free(bundle);
@@ -60,12 +60,12 @@ void starpu_task_bundle_deinit(struct starpu_task_bundle *bundle)
 /* Insert a task into a bundle. */
 int starpu_task_bundle_insert(struct starpu_task_bundle *bundle, struct starpu_task *task)
 {
-	PTHREAD_MUTEX_LOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);
 
 	if (bundle->closed)
 	{
 		/* The bundle is closed, we cannot add tasks anymore */
-		PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 		return -EPERM;
 	}
 
@@ -73,7 +73,7 @@ int starpu_task_bundle_insert(struct starpu_task_bundle *bundle, struct starpu_t
 	{
 		/* the task has already been submitted, it's too late to put it
 		 * into a bundle now. */
-		PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 		return -EINVAL;
 	}
 
@@ -99,7 +99,7 @@ int starpu_task_bundle_insert(struct starpu_task_bundle *bundle, struct starpu_t
 
 	task->bundle = bundle;
 
-	PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 	return 0;
 }
 
@@ -128,7 +128,7 @@ int starpu_task_bundle_remove(struct starpu_task_bundle *bundle, struct starpu_t
 		/* If the list is now empty, deinitialize the bundle */
 		if (bundle->closed && bundle->list == NULL)
 		{
-			PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 			starpu_task_bundle_deinit(bundle);
 			return 1;
 		}
@@ -160,12 +160,12 @@ int starpu_task_bundle_remove(struct starpu_task_bundle *bundle, struct starpu_t
  * automatically gets deinitialized when it becomes empty. */
 void starpu_task_bundle_close(struct starpu_task_bundle *bundle)
 {
-	PTHREAD_MUTEX_LOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);
 
 	/* If the bundle is already empty, we deinitialize it now. */
 	if (bundle->list == NULL)
 	{
-		PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 		starpu_task_bundle_deinit(bundle);
 		return;
 	}
@@ -173,7 +173,7 @@ void starpu_task_bundle_close(struct starpu_task_bundle *bundle)
 	/* Mark the bundle as closed */
 	bundle->closed = 1;
 
-	PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 
 }
 
@@ -183,7 +183,7 @@ double starpu_task_bundle_expected_length(struct starpu_task_bundle *bundle,  en
 	double expected_length = 0.0;
 
 	/* We expect the length of the bundle the be the sum of the different tasks length. */
-	PTHREAD_MUTEX_LOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);
 
 	struct starpu_task_bundle_entry *entry;
 	entry = bundle->list;
@@ -199,7 +199,7 @@ double starpu_task_bundle_expected_length(struct starpu_task_bundle *bundle,  en
 		entry = entry->next;
 	}
 	
-	PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 
 	return expected_length;
 }
@@ -210,7 +210,7 @@ double starpu_task_bundle_expected_power(struct starpu_task_bundle *bundle,  enu
 	double expected_power = 0.0;
 
 	/* We expect total consumption of the bundle the be the sum of the different tasks consumption. */
-	PTHREAD_MUTEX_LOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);
 
 	struct starpu_task_bundle_entry *entry;
 	entry = bundle->list;
@@ -226,7 +226,7 @@ double starpu_task_bundle_expected_power(struct starpu_task_bundle *bundle,  enu
 		entry = entry->next;
 	}
 	
-	PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 
 	return expected_power;
 }
@@ -285,7 +285,7 @@ static void insertion_handle_sorted(struct handle_list **listp, starpu_data_hand
 /* Return the time (in µs) expected to transfer all data used within the bundle */
 double starpu_task_bundle_expected_data_transfer_time(struct starpu_task_bundle *bundle, unsigned memory_node)
 {
-	PTHREAD_MUTEX_LOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);
 
 	struct handle_list *handles = NULL;
 
@@ -333,7 +333,7 @@ double starpu_task_bundle_expected_data_transfer_time(struct starpu_task_bundle
 		free(current);
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 
 	return total_exp;
 }

+ 23 - 23
src/core/workers.c

@@ -173,8 +173,8 @@ static void _starpu_launch_drivers(struct starpu_machine_config_s *config)
 
 		workerarg->config = config;
 
-		PTHREAD_MUTEX_INIT(&workerarg->mutex, NULL);
-		PTHREAD_COND_INIT(&workerarg->ready_cond, NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&workerarg->mutex, NULL);
+		_STARPU_PTHREAD_COND_INIT(&workerarg->ready_cond, NULL);
 
 		workerarg->worker_size = 1;
 		workerarg->combined_workerid = workerarg->workerid;
@@ -236,11 +236,11 @@ static void _starpu_launch_drivers(struct starpu_machine_config_s *config)
 					pthread_create(&gordon_worker_set.worker_thread, NULL, 
 							_starpu_gordon_worker, &gordon_worker_set);
 
-					PTHREAD_MUTEX_LOCK(&gordon_worker_set.mutex);
+					_STARPU_PTHREAD_MUTEX_LOCK(&gordon_worker_set.mutex);
 					while (!gordon_worker_set.set_is_initialized)
-						PTHREAD_COND_WAIT(&gordon_worker_set.ready_cond,
+						_STARPU_PTHREAD_COND_WAIT(&gordon_worker_set.ready_cond,
 									&gordon_worker_set.mutex);
-					PTHREAD_MUTEX_UNLOCK(&gordon_worker_set.mutex);
+					_STARPU_PTHREAD_MUTEX_UNLOCK(&gordon_worker_set.mutex);
 
 					gordon_inited = 1;
 				}
@@ -264,10 +264,10 @@ static void _starpu_launch_drivers(struct starpu_machine_config_s *config)
 			case STARPU_CPU_WORKER:
 			case STARPU_CUDA_WORKER:
 			case STARPU_OPENCL_WORKER:			  
-				PTHREAD_MUTEX_LOCK(&workerarg->mutex);
+				_STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
 				while (!workerarg->worker_is_initialized)
-					PTHREAD_COND_WAIT(&workerarg->ready_cond, &workerarg->mutex);
-				PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
+					_STARPU_PTHREAD_COND_WAIT(&workerarg->ready_cond, &workerarg->mutex);
+				_STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
 				break;
 #ifdef STARPU_USE_GORDON
 			case STARPU_GORDON_WORKER:
@@ -351,19 +351,19 @@ int starpu_init(struct starpu_conf *user_conf)
 	if (!getenv("STARPU_SILENT")) fprintf(stderr,"Warning: StarPU was configured with --enable-stats, which slows down a bit\n");
 #endif
 
-	PTHREAD_MUTEX_LOCK(&init_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
 	while (initialized == CHANGING)
 		/* Wait for the other one changing it */
-		PTHREAD_COND_WAIT(&init_cond, &init_mutex);
+		_STARPU_PTHREAD_COND_WAIT(&init_cond, &init_mutex);
 	init_count++;
 	if (initialized == INITIALIZED) {
 	  /* He initialized it, don't do it again, and let the others get the mutex */
-	  PTHREAD_MUTEX_UNLOCK(&init_mutex);
+	  _STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
 	  return 0;
 	  }
 	/* initialized == UNINITIALIZED */
 	initialized = CHANGING;
-	PTHREAD_MUTEX_UNLOCK(&init_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
 
 #ifdef __MINGW32__
 	WSADATA wsadata;
@@ -392,12 +392,12 @@ int starpu_init(struct starpu_conf *user_conf)
 
 	ret = _starpu_build_topology(&config);
 	if (ret) {
-		PTHREAD_MUTEX_LOCK(&init_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
 		init_count--;
 		initialized = UNINITIALIZED;
 		/* Let somebody else try to do it */
-		PTHREAD_COND_SIGNAL(&init_cond);
-		PTHREAD_MUTEX_UNLOCK(&init_mutex);
+		_STARPU_PTHREAD_COND_SIGNAL(&init_cond);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
 		return ret;
 	}
 
@@ -413,11 +413,11 @@ int starpu_init(struct starpu_conf *user_conf)
 	/* Launch "basic" workers (ie. non-combined workers) */
 	_starpu_launch_drivers(&config);
 
-	PTHREAD_MUTEX_LOCK(&init_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
 	initialized = INITIALIZED;
 	/* Tell everybody that we initialized */
-	PTHREAD_COND_BROADCAST(&init_cond);
-	PTHREAD_MUTEX_UNLOCK(&init_mutex);
+	_STARPU_PTHREAD_COND_BROADCAST(&init_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
 
 	_STARPU_DEBUG("Initialisation finished\n");
 	return 0;
@@ -510,7 +510,7 @@ static void _starpu_kill_all_workers(struct starpu_machine_config_s *config)
 void starpu_shutdown(void)
 {
 	const char *stats;
-	PTHREAD_MUTEX_LOCK(&init_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
 	init_count--;
 	if (init_count) {
 		_STARPU_DEBUG("Still somebody needing StarPU, don't deinitialize\n");
@@ -519,7 +519,7 @@ void starpu_shutdown(void)
 
 	/* We're last */
 	initialized = CHANGING;
-	PTHREAD_MUTEX_UNLOCK(&init_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
 
 	starpu_task_wait_for_no_ready();
 
@@ -560,11 +560,11 @@ void starpu_shutdown(void)
 
 	_starpu_close_debug_logfile();
 
-	PTHREAD_MUTEX_LOCK(&init_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
 	initialized = UNINITIALIZED;
 	/* Let someone else that wants to initialize it again do it */
-	PTHREAD_COND_SIGNAL(&init_cond);
-	PTHREAD_MUTEX_UNLOCK(&init_mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(&init_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
 
 	_STARPU_DEBUG("Shutdown finished\n");
 }
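The _STARPU_PTHREAD_* names substituted throughout these hunks are presumably StarPU's error-checking wrappers around the corresponding pthread calls, simply renamed from their unprefixed PTHREAD_* forms to mark them as internal. A minimal sketch of what such a wrapper could expand to, assuming it only checks the return code and aborts on failure; the actual definition in the common utility header may differ:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical sketch, not the actual StarPU definition: lock a mutex
 * and abort with a message if the pthread call reports an error. */
#define _STARPU_PTHREAD_MUTEX_LOCK(mutex)                                 \
	do {                                                              \
		int p_ret = pthread_mutex_lock(mutex);                    \
		if (p_ret) {                                              \
			fprintf(stderr, "pthread_mutex_lock failed: %s\n",\
				strerror(p_ret));                         \
			abort();                                          \
		}                                                         \
	} while (0)

Call sites keep the exact shape of a plain pthread_mutex_lock() call, which is why the change in every hunk is a mechanical rename.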

+ 1 - 1
src/datawizard/coherency.h

@@ -102,7 +102,7 @@ struct starpu_data_state_t {
 	unsigned refcnt;
 	starpu_access_mode current_mode;
 	/* protect meta data */
-	starpu_spinlock_t header_lock;
+	struct _starpu_spinlock header_lock;
 
 	/* Condition to make application wait for all transfers before freeing handle */
 	/* busy_count is the number of handle->refcnt, handle->per_node[*]->refcnt, and number of starpu_data_requesters */
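As with the function renames, the typedef starpu_spinlock_t becomes the tagged struct _starpu_spinlock, while the _starpu_spin_init/_starpu_spin_lock/_starpu_spin_unlock calls that operate on it (visible unchanged in later hunks) keep their names. A sketch of the renamed type and its typical use around handle metadata; the field layout here is assumed for illustration only:

#include <pthread.h>

/* Assumed layout; the real struct _starpu_spinlock may instead use an
 * atomic flag or fall back to a mutex depending on the platform. */
struct _starpu_spinlock {
	pthread_spinlock_t lock;
};

/* Typical protected update, as around handle->header_lock below:
 *   _starpu_spin_lock(&handle->header_lock);
 *   handle->busy_count++;
 *   _starpu_spin_unlock(&handle->header_lock);
 */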

+ 10 - 10
src/datawizard/copy_driver.c

@@ -34,7 +34,7 @@ void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
 
 	starpu_mem_node_descr * const descr = _starpu_get_memory_node_description();
 
-	PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);
+	_STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);
 
 	unsigned nconds = descr->condition_count[nodeid];
 	for (cond_id = 0; cond_id < nconds; cond_id++)
@@ -43,12 +43,12 @@ void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
 		condition  = &descr->conditions_attached_to_node[nodeid][cond_id];
 
 		/* wake anybody waiting on that condition */
-		PTHREAD_MUTEX_LOCK(condition->mutex);
-		PTHREAD_COND_BROADCAST(condition->cond);
-		PTHREAD_MUTEX_UNLOCK(condition->mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
+		_STARPU_PTHREAD_COND_BROADCAST(condition->cond);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
 	}
 
-	PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
 }
 
 void starpu_wake_all_blocked_workers(void)
@@ -58,7 +58,7 @@ void starpu_wake_all_blocked_workers(void)
 
 	starpu_mem_node_descr * const descr = _starpu_get_memory_node_description();
 
-	PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);
+	_STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);
 
 	unsigned nconds = descr->total_condition_count;
 	for (cond_id = 0; cond_id < nconds; cond_id++)
@@ -67,12 +67,12 @@ void starpu_wake_all_blocked_workers(void)
 		condition  = &descr->conditions_all[cond_id];
 
 		/* wake anybody waiting on that condition */
-		PTHREAD_MUTEX_LOCK(condition->mutex);
-		PTHREAD_COND_BROADCAST(condition->cond);
-		PTHREAD_MUTEX_UNLOCK(condition->mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
+		_STARPU_PTHREAD_COND_BROADCAST(condition->cond);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
 	}
 
-	PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
 }
 
 #ifdef STARPU_USE_FXT
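The wake-up helpers above walk the per-node (or global) condition table under a read lock and broadcast every registered condition so that blocked workers re-check for work. A stripped-down sketch of that fan-out, with the entry layout assumed for illustration:

#include <pthread.h>

/* Assumed, simplified shape of one registered (cond, mutex) pair; the
 * real memory-node descriptor keeps such tables per node and globally. */
struct cond_entry {
	pthread_cond_t *cond;
	pthread_mutex_t *mutex;
};

static void wake_all(struct cond_entry *entries, unsigned nconds,
		     pthread_rwlock_t *table_lock)
{
	unsigned i;

	pthread_rwlock_rdlock(table_lock);
	for (i = 0; i < nconds; i++)
	{
		/* wake anybody waiting on that condition */
		pthread_mutex_lock(entries[i].mutex);
		pthread_cond_broadcast(entries[i].cond);
		pthread_mutex_unlock(entries[i].mutex);
	}
	pthread_rwlock_unlock(table_lock);
}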

+ 24 - 24
src/datawizard/data_request.c

@@ -37,10 +37,10 @@ void _starpu_init_data_request_lists(void)
 	{
 		prefetch_requests[i] = starpu_data_request_list_new();
 		data_requests[i] = starpu_data_request_list_new();
-		PTHREAD_MUTEX_INIT(&data_requests_list_mutex[i], NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&data_requests_list_mutex[i], NULL);
 
 		data_requests_pending[i] = starpu_data_request_list_new();
-		PTHREAD_MUTEX_INIT(&data_requests_pending_list_mutex[i], NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&data_requests_pending_list_mutex[i], NULL);
 		
 		starpu_memstrategy_drop_prefetch[i]=0;
 	}
@@ -51,10 +51,10 @@ void _starpu_deinit_data_request_lists(void)
 	unsigned i;
 	for (i = 0; i < STARPU_MAXNODES; i++)
 	{
-		PTHREAD_MUTEX_DESTROY(&data_requests_pending_list_mutex[i]);
+		_STARPU_PTHREAD_MUTEX_DESTROY(&data_requests_pending_list_mutex[i]);
 		starpu_data_request_list_delete(data_requests_pending[i]);
 
-		PTHREAD_MUTEX_DESTROY(&data_requests_list_mutex[i]);
+		_STARPU_PTHREAD_MUTEX_DESTROY(&data_requests_list_mutex[i]);
 		starpu_data_request_list_delete(data_requests[i]);
 		starpu_data_request_list_delete(prefetch_requests[i]);
 	}
@@ -190,12 +190,12 @@ void _starpu_post_data_request(starpu_data_request_t r, uint32_t handling_node)
 	}
 
 	/* insert the request in the proper list */
-	PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[handling_node]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[handling_node]);
 	if (r->prefetch) {
 		starpu_data_request_list_push_back(prefetch_requests[handling_node], r);
 	} else
 		starpu_data_request_list_push_back(data_requests[handling_node], r);
-	PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[handling_node]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[handling_node]);
 
 #ifndef STARPU_NON_BLOCKING_DRIVERS
 	_starpu_wake_all_blocked_workers_on_node(handling_node);
@@ -363,9 +363,9 @@ static int starpu_handle_data_request(starpu_data_request_t r, unsigned may_allo
 		 * requests in the meantime. */
 		_starpu_spin_unlock(&handle->header_lock);
 
-		PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[r->handling_node]);
+		_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[r->handling_node]);
 		starpu_data_request_list_push_front(data_requests_pending[r->handling_node], r);
-		PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[r->handling_node]);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[r->handling_node]);
 
 		return -EAGAIN;
 	}
@@ -383,14 +383,14 @@ void _starpu_handle_node_data_requests(uint32_t src_node, unsigned may_alloc)
 	starpu_data_request_list_t new_data_requests;
 
 	/* take all the entries from the request list */
-        PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
+        _STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
 
 	starpu_data_request_list_t local_list = data_requests[src_node];
 
 	if (starpu_data_request_list_empty(local_list))
 	{
 		/* there is no request */
-                PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
+                _STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
 		return;
 	}
@@ -400,7 +400,7 @@ void _starpu_handle_node_data_requests(uint32_t src_node, unsigned may_alloc)
 	 * list, without concurrency issues.*/
 	data_requests[src_node] = starpu_data_request_list_new();
 
-	PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
 	new_data_requests = starpu_data_request_list_new();
 
@@ -418,9 +418,9 @@ void _starpu_handle_node_data_requests(uint32_t src_node, unsigned may_alloc)
 		}
 	}
 
-	PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
 	starpu_data_request_list_push_list_front(new_data_requests, data_requests[src_node]);
-	PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
 	starpu_data_request_list_delete(new_data_requests);
 	starpu_data_request_list_delete(local_list);
@@ -434,14 +434,14 @@ void _starpu_handle_node_prefetch_requests(uint32_t src_node, unsigned may_alloc
 	starpu_data_request_list_t new_prefetch_requests;
 
 	/* take all the entries from the request list */
-        PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
+        _STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
 
 	starpu_data_request_list_t local_list = prefetch_requests[src_node];
 	
 	if (starpu_data_request_list_empty(local_list))
 	{
 		/* there is no request */
-                PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
+                _STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 		return;
 	}
 
@@ -450,7 +450,7 @@ void _starpu_handle_node_prefetch_requests(uint32_t src_node, unsigned may_alloc
 	 * list, without concurrency issues.*/
 	prefetch_requests[src_node] = starpu_data_request_list_new();
 
-	PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
 	new_data_requests = starpu_data_request_list_new();
 	new_prefetch_requests = starpu_data_request_list_new();
@@ -486,10 +486,10 @@ void _starpu_handle_node_prefetch_requests(uint32_t src_node, unsigned may_alloc
 			starpu_data_request_list_push_back(new_data_requests, r);
 	}
 
-	PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
 	starpu_data_request_list_push_list_front(new_data_requests, data_requests[src_node]);
 	starpu_data_request_list_push_list_front(new_prefetch_requests, prefetch_requests[src_node]);
-	PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
 	starpu_data_request_list_delete(new_data_requests);
 	starpu_data_request_list_delete(new_prefetch_requests);
@@ -502,13 +502,13 @@ static void _handle_pending_node_data_requests(uint32_t src_node, unsigned force
 //
 	starpu_data_request_list_t new_data_requests_pending = starpu_data_request_list_new();
 
-	PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[src_node]);
 
 	/* for all entries of the list */
 	starpu_data_request_list_t local_list = data_requests_pending[src_node];
 	data_requests_pending[src_node] = starpu_data_request_list_new();
 
-	PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[src_node]);
 
 	while (!starpu_data_request_list_empty(local_list))
 	{
@@ -544,9 +544,9 @@ static void _handle_pending_node_data_requests(uint32_t src_node, unsigned force
 			}
 		}
 	}
-	PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[src_node]);
 	starpu_data_request_list_push_list_back(data_requests_pending[src_node], new_data_requests_pending);
-	PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[src_node]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[src_node]);
 
 	starpu_data_request_list_delete(local_list);
 	starpu_data_request_list_delete(new_data_requests_pending);
@@ -585,7 +585,7 @@ void _starpu_update_prefetch_status(starpu_data_request_t r){
 			_starpu_update_prefetch_status(next_req);
 	}
 
-	PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[r->handling_node]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[r->handling_node]);
 	
 	/* The request can be in a different list (handling request or the temp list)
 	 * we have to check that it is really in the prefetch list. */
@@ -602,5 +602,5 @@ void _starpu_update_prefetch_status(starpu_data_request_t r){
 			break;
 		}		
 	}
-	PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[r->handling_node]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[r->handling_node]);
 }

+ 1 - 1
src/datawizard/data_request.h

@@ -33,7 +33,7 @@ struct callback_list {
 };
 
 LIST_TYPE(starpu_data_request,
-	starpu_spinlock_t lock;
+	struct _starpu_spinlock lock;
 	unsigned refcnt;
 
 	starpu_data_handle handle;

+ 3 - 3
src/datawizard/filters.c

@@ -157,14 +157,14 @@ void starpu_data_partition(starpu_data_handle initial_handle, struct starpu_data
 		child->refcnt = 0;
 		child->busy_count = 0;
 		child->busy_waiting = 0;
-		PTHREAD_MUTEX_INIT(&child->busy_mutex, NULL);
-		PTHREAD_COND_INIT(&child->busy_cond, NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&child->busy_mutex, NULL);
+		_STARPU_PTHREAD_COND_INIT(&child->busy_cond, NULL);
 		child->reduction_refcnt = 0;
 		_starpu_spin_init(&child->header_lock);
 
 		child->sequential_consistency = initial_handle->sequential_consistency;
 
-		PTHREAD_MUTEX_INIT(&child->sequential_consistency_mutex, NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&child->sequential_consistency_mutex, NULL);
 		child->last_submitted_mode = STARPU_R;
 		child->last_submitted_writer = NULL;
 		child->last_submitted_readers = NULL;

+ 20 - 20
src/datawizard/interfaces/data_interface.c

@@ -32,7 +32,7 @@ struct handle_entry
 
 /* Hash table mapping host pointers to data handles.  */
 static struct handle_entry *registered_handles;
-static starpu_spinlock_t    registered_handles_lock;
+static struct _starpu_spinlock    registered_handles_lock;
 
 void _starpu_data_interface_init()
 {
@@ -105,8 +105,8 @@ static void _starpu_register_new_data(starpu_data_handle handle,
 	handle->refcnt = 0;
 	handle->busy_count = 0;
 	handle->busy_waiting = 0;
-	PTHREAD_MUTEX_INIT(&handle->busy_mutex, NULL);
-	PTHREAD_COND_INIT(&handle->busy_cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&handle->busy_mutex, NULL);
+	_STARPU_PTHREAD_COND_INIT(&handle->busy_cond, NULL);
 	_starpu_spin_init(&handle->header_lock);
 
 	/* first take care to properly lock the data */
@@ -126,7 +126,7 @@ static void _starpu_register_new_data(starpu_data_handle handle,
 	handle->sequential_consistency =
 		starpu_data_get_default_sequential_consistency_flag();
 
-	PTHREAD_MUTEX_INIT(&handle->sequential_consistency_mutex, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&handle->sequential_consistency_mutex, NULL);
 	handle->last_submitted_mode = STARPU_R;
 	handle->last_submitted_writer = NULL;
 	handle->last_submitted_readers = NULL;
@@ -361,7 +361,7 @@ void _starpu_data_free_interfaces(starpu_data_handle handle)
 	}
 }
 
-struct unregister_callback_arg {
+struct _starpu_unregister_callback_arg {
 	unsigned memory_node;
 	starpu_data_handle handle;
 	unsigned terminated;
@@ -375,16 +375,16 @@ struct unregister_callback_arg {
 void _starpu_data_check_not_busy(starpu_data_handle handle)
 {
 	if (!handle->busy_count && handle->busy_waiting) {
-		PTHREAD_MUTEX_LOCK(&handle->busy_mutex);
-		PTHREAD_COND_BROADCAST(&handle->busy_cond);
-		PTHREAD_MUTEX_UNLOCK(&handle->busy_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(&handle->busy_mutex);
+		_STARPU_PTHREAD_COND_BROADCAST(&handle->busy_cond);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->busy_mutex);
 	}
 }
 
 static void _starpu_data_unregister_fetch_data_callback(void *_arg)
 {
 	int ret;
-	struct unregister_callback_arg *arg = (struct unregister_callback_arg *) _arg;
+	struct _starpu_unregister_callback_arg *arg = (struct _starpu_unregister_callback_arg *) _arg;
 
 	starpu_data_handle handle = arg->handle;
 
@@ -396,10 +396,10 @@ static void _starpu_data_unregister_fetch_data_callback(void *_arg)
 	STARPU_ASSERT(!ret);
 	
 	/* unlock the caller */
-	PTHREAD_MUTEX_LOCK(&arg->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&arg->mutex);
 	arg->terminated = 1;
-	PTHREAD_COND_SIGNAL(&arg->cond);
-	PTHREAD_MUTEX_UNLOCK(&arg->mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(&arg->cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&arg->mutex);
 }
 
 /* Unregister the data handle, perhaps we don't need to update the home_node
@@ -418,12 +418,12 @@ static void _starpu_data_unregister(starpu_data_handle handle, unsigned coherent
 		int home_node = handle->home_node; 
 		if (home_node >= 0)
 		{
-			struct unregister_callback_arg arg;
+			struct _starpu_unregister_callback_arg arg;
 			arg.handle = handle;
 			arg.memory_node = (unsigned)home_node;
 			arg.terminated = 0;
-			PTHREAD_MUTEX_INIT(&arg.mutex, NULL);
-			PTHREAD_COND_INIT(&arg.cond, NULL);
+			_STARPU_PTHREAD_MUTEX_INIT(&arg.mutex, NULL);
+			_STARPU_PTHREAD_COND_INIT(&arg.cond, NULL);
 	
 			if (!_starpu_attempt_to_submit_data_request_from_apps(handle, STARPU_R,
 					_starpu_data_unregister_fetch_data_callback, &arg))
@@ -434,10 +434,10 @@ static void _starpu_data_unregister(starpu_data_handle handle, unsigned coherent
 				STARPU_ASSERT(!ret);
 			}
 			else {
-				PTHREAD_MUTEX_LOCK(&arg.mutex);
+				_STARPU_PTHREAD_MUTEX_LOCK(&arg.mutex);
 				while (!arg.terminated)
-					PTHREAD_COND_WAIT(&arg.cond, &arg.mutex);
-				PTHREAD_MUTEX_UNLOCK(&arg.mutex);
+					_STARPU_PTHREAD_COND_WAIT(&arg.cond, &arg.mutex);
+				_STARPU_PTHREAD_MUTEX_UNLOCK(&arg.mutex);
 			}
 			_starpu_release_data_on_node(handle, 0, &handle->per_node[home_node]);
 		}
@@ -454,9 +454,9 @@ static void _starpu_data_unregister(starpu_data_handle handle, unsigned coherent
 	_starpu_spin_unlock(&handle->header_lock);
 
 	/* Wait for all requests to finish (notably WT requests) */
-	PTHREAD_MUTEX_LOCK(&handle->busy_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&handle->busy_mutex);
 	while (handle->busy_count)
-		PTHREAD_COND_WAIT(&handle->busy_cond, &handle->busy_mutex);
+		_STARPU_PTHREAD_COND_WAIT(&handle->busy_cond, &handle->busy_mutex);
 
 	/* Wait for finished requests to release the handle */
 	_starpu_spin_lock(&handle->header_lock);
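The unregister path above is a completion wait: the caller initializes a mutex/condition pair, hands a callback to the data-request machinery, and sleeps until the callback sets terminated and signals. A self-contained sketch of that handshake using plain pthread calls and hypothetical names; the real code goes through the _STARPU_PTHREAD_* wrappers and _starpu_attempt_to_submit_data_request_from_apps:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for struct _starpu_unregister_callback_arg. */
struct wait_arg {
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	int terminated;
};

/* Plays the role of _starpu_data_unregister_fetch_data_callback:
 * report completion and wake the waiting caller. */
static void *completion_callback(void *_arg)
{
	struct wait_arg *arg = (struct wait_arg *) _arg;

	pthread_mutex_lock(&arg->mutex);
	arg->terminated = 1;
	pthread_cond_signal(&arg->cond);
	pthread_mutex_unlock(&arg->mutex);
	return NULL;
}

int main(void)
{
	struct wait_arg arg = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
	pthread_t cb;

	pthread_create(&cb, NULL, completion_callback, &arg);

	/* Caller side: block until the callback reports completion. */
	pthread_mutex_lock(&arg.mutex);
	while (!arg.terminated)
		pthread_cond_wait(&arg.cond, &arg.mutex);
	pthread_mutex_unlock(&arg.mutex);

	pthread_join(cb, NULL);
	printf("completion-wait handshake finished\n");
	return 0;
}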

+ 17 - 17
src/datawizard/memalloc.c

@@ -45,8 +45,8 @@ void _starpu_init_mem_chunk_lists(void)
 	unsigned i;
 	for (i = 0; i < STARPU_MAXNODES; i++)
 	{
-		PTHREAD_RWLOCK_INIT(&mc_rwlock[i], NULL);
-		PTHREAD_RWLOCK_INIT(&lru_rwlock[i], NULL);
+		_STARPU_PTHREAD_RWLOCK_INIT(&mc_rwlock[i], NULL);
+		_STARPU_PTHREAD_RWLOCK_INIT(&lru_rwlock[i], NULL);
 		mc_list[i] = starpu_mem_chunk_list_new();
 		starpu_lru_list[i] = starpu_mem_chunk_lru_list_new();
 		memchunk_cache[i] = starpu_mem_chunk_list_new();
@@ -576,7 +576,7 @@ static size_t reclaim_memory_generic(uint32_t node, unsigned force, size_t recla
 {
 	size_t freed = 0;
 
-	PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
 
 	starpu_lru(node);
 
@@ -587,7 +587,7 @@ static size_t reclaim_memory_generic(uint32_t node, unsigned force, size_t recla
 	if (reclaim && freed<reclaim)
 		freed += free_potentially_in_use_mc(node, force, reclaim);
 
-	PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
 
 	return freed;
 
@@ -641,11 +641,11 @@ static void register_mem_chunk(struct starpu_data_replicate_s *replicate, size_t
 	/* Put this memchunk in the list of memchunk in use */
 	mc = _starpu_memchunk_init(replicate, size, interface_size, automatically_allocated);
 
-	PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[dst_node]);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[dst_node]);
 
 	starpu_mem_chunk_list_push_back(mc_list[dst_node], mc);
 
-	PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[dst_node]);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[dst_node]);
 }
 
 /* This function is called when the handle is destroyed (eg. when calling
@@ -653,7 +653,7 @@ static void register_mem_chunk(struct starpu_data_replicate_s *replicate, size_t
  * specified handle into the cache. */
 void _starpu_request_mem_chunk_removal(starpu_data_handle handle, unsigned node)
 {
-	PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
 
 	/* iterate over the list of memory chunks and remove the entry */
 	starpu_mem_chunk_t mc, next_mc;
@@ -680,7 +680,7 @@ void _starpu_request_mem_chunk_removal(starpu_data_handle handle, unsigned node)
 	}
 
 	/* there was no corresponding buffer ... */
-	PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
 }
 
 static size_t _starpu_get_global_mem_size(int dst_node)
@@ -742,17 +742,17 @@ static ssize_t _starpu_allocate_interface(starpu_data_handle handle, struct star
 	uint32_t footprint = _starpu_compute_data_footprint(handle);
 
 	STARPU_TRACE_START_ALLOC_REUSE(dst_node);
-	PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
 
 	if (try_to_find_reusable_mem_chunk(dst_node, handle, footprint))
 	{
-		PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
+		_STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
 		_starpu_allocation_cache_hit(dst_node);
 		ssize_t data_size = _starpu_data_get_size(handle);
 		return data_size;
 	}
 
-	PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
 	STARPU_TRACE_END_ALLOC_REUSE(dst_node);
 #endif
 
@@ -854,11 +854,11 @@ unsigned starpu_data_test_if_allocated_on_node(starpu_data_handle handle, uint32
 
 void starpu_memchunk_recently_used(starpu_mem_chunk_t mc, unsigned node)
 {
-	PTHREAD_RWLOCK_WRLOCK(&lru_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&lru_rwlock[node]);
 	starpu_mem_chunk_lru_t mc_lru=starpu_mem_chunk_lru_new();
 	mc_lru->mc=mc;
 	starpu_mem_chunk_lru_list_push_front(starpu_lru_list[node],mc_lru);
-	PTHREAD_RWLOCK_UNLOCK(&lru_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&lru_rwlock[node]);
 }
 
 /* The mc_rwlock[node] rw-lock should be taken prior to calling this function.*/
@@ -882,7 +882,7 @@ static void starpu_memchunk_recently_used_move(starpu_mem_chunk_t mc, unsigned n
 
 static void starpu_lru(unsigned node)
 {
-	PTHREAD_RWLOCK_WRLOCK(&lru_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&lru_rwlock[node]);
 	while (!starpu_mem_chunk_lru_list_empty(starpu_lru_list[node]))
 	{
 		starpu_mem_chunk_lru_t mc_lru=starpu_mem_chunk_lru_list_front(starpu_lru_list[node]);
@@ -890,14 +890,14 @@ static void starpu_lru(unsigned node)
 		starpu_mem_chunk_lru_list_erase(starpu_lru_list[node], mc_lru);
 		starpu_mem_chunk_lru_delete(mc_lru);
 	}
-	PTHREAD_RWLOCK_UNLOCK(&lru_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&lru_rwlock[node]);
 }
 
 
 #ifdef STARPU_MEMORY_STATUS
 void _starpu_display_data_stats_by_node(int node)
 {
-	PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
 
 	if (!starpu_mem_chunk_list_empty(mc_list[node]))
 	{
@@ -915,6 +915,6 @@ void _starpu_display_data_stats_by_node(int node)
 
 	}
 
-	PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
+	_STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
 }
 #endif

+ 24 - 24
src/datawizard/user_interactions.c

@@ -121,8 +121,8 @@ int starpu_data_acquire_cb(starpu_data_handle handle,
 	wrapper->mode = mode;
 	wrapper->callback = callback;
 	wrapper->callback_arg = arg;
-	PTHREAD_COND_INIT(&wrapper->cond, NULL);
-	PTHREAD_MUTEX_INIT(&wrapper->lock, NULL);
+	_STARPU_PTHREAD_COND_INIT(&wrapper->cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&wrapper->lock, NULL);
 	wrapper->finished = 0;
 
 #ifdef STARPU_DEVEL
@@ -133,7 +133,7 @@ int starpu_data_acquire_cb(starpu_data_handle handle,
 	handle->busy_count++;
 	_starpu_spin_unlock(&handle->header_lock);
 
-	PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
 	int sequential_consistency = handle->sequential_consistency;
 	if (sequential_consistency)
 	{
@@ -153,14 +153,14 @@ int starpu_data_acquire_cb(starpu_data_handle handle,
 #endif
 
 		_starpu_detect_implicit_data_deps_with_handle(wrapper->pre_sync_task, wrapper->post_sync_task, handle, mode);
-		PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 
 		/* TODO detect if this is superfluous */
 		int ret = starpu_task_submit(wrapper->pre_sync_task);
 		STARPU_ASSERT(!ret);
 	}
 	else {
-		PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 
 		starpu_data_acquire_cb_pre_sync_callback(wrapper);
 	}
@@ -185,10 +185,10 @@ static inline void _starpu_data_acquire_continuation(void *arg)
 	_starpu_fetch_data_on_node(handle, ram_replicate, wrapper->mode, 0, NULL, NULL);
 	
 	/* continuation of starpu_data_acquire */
-	PTHREAD_MUTEX_LOCK(&wrapper->lock);
+	_STARPU_PTHREAD_MUTEX_LOCK(&wrapper->lock);
 	wrapper->finished = 1;
-	PTHREAD_COND_SIGNAL(&wrapper->cond);
-	PTHREAD_MUTEX_UNLOCK(&wrapper->lock);
+	_STARPU_PTHREAD_COND_SIGNAL(&wrapper->cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&wrapper->lock);
 }
 
 /* The data must be released by calling starpu_data_release later on */
@@ -214,7 +214,7 @@ int starpu_data_acquire(starpu_data_handle handle, starpu_access_mode mode)
 	};
 
 //	_STARPU_DEBUG("TAKE sequential_consistency_mutex starpu_data_acquire\n");
-	PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
 	int sequential_consistency = handle->sequential_consistency;
 	if (sequential_consistency)
 	{
@@ -232,7 +232,7 @@ int starpu_data_acquire(starpu_data_handle handle, starpu_access_mode mode)
 #endif
 
 		_starpu_detect_implicit_data_deps_with_handle(wrapper.pre_sync_task, wrapper.post_sync_task, handle, mode);
-		PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 
 		/* TODO detect if this is superfluous */
 		wrapper.pre_sync_task->synchronous = 1;
@@ -241,7 +241,7 @@ int starpu_data_acquire(starpu_data_handle handle, starpu_access_mode mode)
 		//starpu_task_wait(wrapper.pre_sync_task);
 	}
 	else {
-		PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 	}
 
 	/* we try to get the data, if we do not succeed immediately, we set a
@@ -255,10 +255,10 @@ int starpu_data_acquire(starpu_data_handle handle, starpu_access_mode mode)
 		STARPU_ASSERT(!ret);
 	}
 	else {
-		PTHREAD_MUTEX_LOCK(&wrapper.lock);
+		_STARPU_PTHREAD_MUTEX_LOCK(&wrapper.lock);
 		while (!wrapper.finished)
-			PTHREAD_COND_WAIT(&wrapper.cond, &wrapper.lock);
-		PTHREAD_MUTEX_UNLOCK(&wrapper.lock);
+			_STARPU_PTHREAD_COND_WAIT(&wrapper.cond, &wrapper.lock);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&wrapper.lock);
 	}
 
 	/* At that moment, the caller holds a reference to the piece of data.
@@ -296,10 +296,10 @@ static void _prefetch_data_on_node(void *arg)
 
 	if (!wrapper->async)
 	{
-		PTHREAD_MUTEX_LOCK(&wrapper->lock);
+		_STARPU_PTHREAD_MUTEX_LOCK(&wrapper->lock);
 		wrapper->finished = 1;
-		PTHREAD_COND_SIGNAL(&wrapper->cond);
-		PTHREAD_MUTEX_UNLOCK(&wrapper->lock);
+		_STARPU_PTHREAD_COND_SIGNAL(&wrapper->cond);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&wrapper->lock);
 	}
 
 	_starpu_spin_lock(&handle->header_lock);
@@ -322,8 +322,8 @@ int _starpu_prefetch_data_on_node_with_mode(starpu_data_handle handle, unsigned
 	wrapper->handle = handle;
 	wrapper->node = node;
 	wrapper->async = async;
-	PTHREAD_COND_INIT(&wrapper->cond, NULL);
-	PTHREAD_MUTEX_INIT(&wrapper->lock, NULL);
+	_STARPU_PTHREAD_COND_INIT(&wrapper->cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&wrapper->lock, NULL);
 	wrapper->finished = 0;
 
 	if (!_starpu_attempt_to_submit_data_request_from_apps(handle, mode, _prefetch_data_on_node, wrapper))
@@ -349,10 +349,10 @@ int _starpu_prefetch_data_on_node_with_mode(starpu_data_handle handle, unsigned
 
 	}
 	else if (!async) {
-		PTHREAD_MUTEX_LOCK(&wrapper->lock);
+		_STARPU_PTHREAD_MUTEX_LOCK(&wrapper->lock);
 		while (!wrapper->finished)
-			PTHREAD_COND_WAIT(&wrapper->cond, &wrapper->lock);
-		PTHREAD_MUTEX_UNLOCK(&wrapper->lock);
+			_STARPU_PTHREAD_COND_WAIT(&wrapper->cond, &wrapper->lock);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&wrapper->lock);
 	}
 
 	return 0;
@@ -401,9 +401,9 @@ void starpu_data_set_sequential_consistency_flag(starpu_data_handle handle, unsi
 			starpu_data_set_sequential_consistency_flag(child_handle, flag);
 	}
 
-	PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&handle->sequential_consistency_mutex);
 	handle->sequential_consistency = flag;
-	PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&handle->sequential_consistency_mutex);
 
 	_starpu_spin_unlock(&handle->header_lock);
 }

+ 10 - 10
src/drivers/cpu/driver_cpu.c

@@ -48,7 +48,7 @@ static int execute_job_on_cpu(starpu_job_t j, struct starpu_worker_s *cpu_args,
 	}
 
 	if (is_parallel_task)
-		PTHREAD_BARRIER_WAIT(&j->before_work_barrier);
+		_STARPU_PTHREAD_BARRIER_WAIT(&j->before_work_barrier);
 
 	_starpu_driver_start_job(cpu_args, j, &codelet_start, rank);
 
@@ -72,7 +72,7 @@ static int execute_job_on_cpu(starpu_job_t j, struct starpu_worker_s *cpu_args,
 	_starpu_driver_end_job(cpu_args, j, &codelet_end, rank);
 
 	if (is_parallel_task)
-		PTHREAD_BARRIER_WAIT(&j->after_work_barrier);
+		_STARPU_PTHREAD_BARRIER_WAIT(&j->after_work_barrier);
 
 	if (rank == 0)
 	{
@@ -112,10 +112,10 @@ void *_starpu_cpu_worker(void *arg)
 	STARPU_TRACE_WORKER_INIT_END
 
         /* tell the main thread that we are ready */
-	PTHREAD_MUTEX_LOCK(&cpu_arg->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&cpu_arg->mutex);
 	cpu_arg->worker_is_initialized = 1;
-	PTHREAD_COND_SIGNAL(&cpu_arg->ready_cond);
-	PTHREAD_MUTEX_UNLOCK(&cpu_arg->mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(&cpu_arg->ready_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&cpu_arg->mutex);
 
         starpu_job_t j;
 	struct starpu_task *task;
@@ -128,7 +128,7 @@ void *_starpu_cpu_worker(void *arg)
 		_starpu_datawizard_progress(memnode, 1);
 		STARPU_TRACE_END_PROGRESS(memnode);
 
-		PTHREAD_MUTEX_LOCK(cpu_arg->sched_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(cpu_arg->sched_mutex);
 
 		task = _starpu_pop_task(cpu_arg);
 	
@@ -137,12 +137,12 @@ void *_starpu_cpu_worker(void *arg)
 			if (_starpu_worker_can_block(memnode))
 				_starpu_block_worker(workerid, cpu_arg->sched_cond, cpu_arg->sched_mutex);
 
-			PTHREAD_MUTEX_UNLOCK(cpu_arg->sched_mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(cpu_arg->sched_mutex);
 
 			continue;
 		};
 
-		PTHREAD_MUTEX_UNLOCK(cpu_arg->sched_mutex);	
+		_STARPU_PTHREAD_MUTEX_UNLOCK(cpu_arg->sched_mutex);	
 
 		STARPU_ASSERT(task);
 		j = _starpu_get_job_associated_to_task(task);
@@ -167,9 +167,9 @@ void *_starpu_cpu_worker(void *arg)
 			STARPU_ASSERT(task != j->task);
 			free(task);
 
-			PTHREAD_MUTEX_LOCK(&j->sync_mutex);
+			_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
 			rank = j->active_task_alias_count++;
-			PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
 
 			struct starpu_combined_worker_s *combined_worker;
 			combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);

+ 6 - 6
src/drivers/cuda/driver_cuda.c

@@ -283,10 +283,10 @@ void *_starpu_cuda_worker(void *arg)
 	STARPU_TRACE_WORKER_INIT_END
 
 	/* tell the main thread that this one is ready */
-	PTHREAD_MUTEX_LOCK(&args->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&args->mutex);
 	args->worker_is_initialized = 1;
-	PTHREAD_COND_SIGNAL(&args->ready_cond);
-	PTHREAD_MUTEX_UNLOCK(&args->mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(&args->ready_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&args->mutex);
 
 	struct starpu_job_s * j;
 	struct starpu_task *task;
@@ -298,7 +298,7 @@ void *_starpu_cuda_worker(void *arg)
 		_starpu_datawizard_progress(memnode, 1);
 		STARPU_TRACE_END_PROGRESS(memnode);
 
-		PTHREAD_MUTEX_LOCK(args->sched_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(args->sched_mutex);
 
 		task = _starpu_pop_task(args);
 	
@@ -307,12 +307,12 @@ void *_starpu_cuda_worker(void *arg)
 			if (_starpu_worker_can_block(memnode))
 				_starpu_block_worker(workerid, args->sched_cond, args->sched_mutex);
 
-			PTHREAD_MUTEX_UNLOCK(args->sched_mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(args->sched_mutex);
 
 			continue;
 		};
 
-		PTHREAD_MUTEX_UNLOCK(args->sched_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(args->sched_mutex);
 
 		STARPU_ASSERT(task);
 		j = _starpu_get_job_associated_to_task(task);

+ 1 - 1
src/drivers/driver_common/driver_common.c

@@ -153,7 +153,7 @@ void _starpu_block_worker(int workerid, pthread_cond_t *cond, pthread_mutex_t *m
 	starpu_clock_gettime(&start_time);
 	_starpu_worker_register_sleeping_start_date(workerid, &start_time);
 
-	PTHREAD_COND_WAIT(cond, mutex);
+	_STARPU_PTHREAD_COND_WAIT(cond, mutex);
 
 	_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
 	STARPU_TRACE_WORKER_SLEEP_END

+ 11 - 11
src/drivers/gordon/driver_gordon.c

@@ -57,10 +57,10 @@ void *gordon_worker_progress(void *arg)
 		(gordon_set_arg->workers[0].bindid + 1)%(gordon_set_arg->config->nhwcores);
 	_starpu_bind_thread_on_cpu(gordon_set_arg->config, prog_thread_bind_id);
 
-	PTHREAD_MUTEX_LOCK(&progress_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&progress_mutex);
 	progress_thread_is_inited = 1;
-	PTHREAD_COND_SIGNAL(&progress_cond);
-	PTHREAD_MUTEX_UNLOCK(&progress_mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(&progress_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&progress_mutex);
 
 	while (1) {
 		/* the Gordon runtime needs to make sure that we poll it 
@@ -446,24 +446,24 @@ void *_starpu_gordon_worker(void *arg)
 	 */
 
 	/* launch the progression thread */
-	PTHREAD_MUTEX_INIT(&progress_mutex, NULL);
-	PTHREAD_COND_INIT(&progress_cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&progress_mutex, NULL);
+	_STARPU_PTHREAD_COND_INIT(&progress_cond, NULL);
 	
 	pthread_create(&progress_thread, NULL, gordon_worker_progress, gordon_set_arg);
 
 	/* wait for the progression thread to be ready */
-	PTHREAD_MUTEX_LOCK(&progress_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&progress_mutex);
 	while (!progress_thread_is_inited)
-		PTHREAD_COND_WAIT(&progress_cond, &progress_mutex);
-	PTHREAD_MUTEX_UNLOCK(&progress_mutex);
+		_STARPU_PTHREAD_COND_WAIT(&progress_cond, &progress_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&progress_mutex);
 
 	_STARPU_DEBUG("progress thread is running ... \n");
 	
 	/* tell the core that gordon is ready */
-	PTHREAD_MUTEX_LOCK(&gordon_set_arg->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&gordon_set_arg->mutex);
 	gordon_set_arg->set_is_initialized = 1;
-	PTHREAD_COND_SIGNAL(&gordon_set_arg->ready_cond);
-	PTHREAD_MUTEX_UNLOCK(&gordon_set_arg->mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(&gordon_set_arg->ready_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&gordon_set_arg->mutex);
 
 	gordon_worker_inject(gordon_set_arg);
 

+ 12 - 12
src/drivers/opencl/driver_opencl.c

@@ -124,7 +124,7 @@ cl_int _starpu_opencl_init_context(int devid)
 {
 	cl_int err;
 
-	PTHREAD_MUTEX_LOCK(&big_lock);
+	_STARPU_PTHREAD_MUTEX_LOCK(&big_lock);
 
         _STARPU_DEBUG("Initialising context for dev %d\n", devid);
 
@@ -144,7 +144,7 @@ cl_int _starpu_opencl_init_context(int devid)
         transfer_queues[devid] = clCreateCommandQueue(contexts[devid], devices[devid], props, &err);
         if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
 
-	PTHREAD_MUTEX_UNLOCK(&big_lock);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&big_lock);
 
 	limit_gpu_mem_if_needed(devid);
 
@@ -155,7 +155,7 @@ cl_int _starpu_opencl_deinit_context(int devid)
 {
         cl_int err;
 
-	PTHREAD_MUTEX_LOCK(&big_lock);
+	_STARPU_PTHREAD_MUTEX_LOCK(&big_lock);
 
         _STARPU_DEBUG("De-initialising context for dev %d\n", devid);
 
@@ -172,7 +172,7 @@ cl_int _starpu_opencl_deinit_context(int devid)
 
         contexts[devid] = NULL;
 
-	PTHREAD_MUTEX_UNLOCK(&big_lock);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&big_lock);
 
         return CL_SUCCESS;
 }
@@ -333,7 +333,7 @@ cl_int _starpu_opencl_copy_rect_ram_to_opencl(void *ptr, unsigned src_node STARP
 
 void _starpu_opencl_init(void)
 {
-	PTHREAD_MUTEX_LOCK(&big_lock);
+	_STARPU_PTHREAD_MUTEX_LOCK(&big_lock);
         if (!init_done) {
                 cl_platform_id platform_id[STARPU_OPENCL_PLATFORM_MAX];
                 cl_uint nb_platforms;
@@ -405,7 +405,7 @@ void _starpu_opencl_init(void)
 
                 init_done=1;
         }
-	PTHREAD_MUTEX_UNLOCK(&big_lock);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&big_lock);
 }
 
 static unsigned _starpu_opencl_get_device_name(int dev, char *name, int lname);
@@ -449,10 +449,10 @@ void *_starpu_opencl_worker(void *arg)
 	STARPU_TRACE_WORKER_INIT_END
 
 	/* tell the main thread that this one is ready */
-	PTHREAD_MUTEX_LOCK(&args->mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&args->mutex);
 	args->worker_is_initialized = 1;
-	PTHREAD_COND_SIGNAL(&args->ready_cond);
-	PTHREAD_MUTEX_UNLOCK(&args->mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(&args->ready_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&args->mutex);
 
 	struct starpu_job_s * j;
 	struct starpu_task *task;
@@ -464,7 +464,7 @@ void *_starpu_opencl_worker(void *arg)
 		_starpu_datawizard_progress(memnode, 1);
 		STARPU_TRACE_END_PROGRESS(memnode);
 
-		PTHREAD_MUTEX_LOCK(args->sched_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(args->sched_mutex);
 
 		task = _starpu_pop_task(args);
 		
@@ -473,12 +473,12 @@ void *_starpu_opencl_worker(void *arg)
 			if (_starpu_worker_can_block(memnode))
 				_starpu_block_worker(workerid, args->sched_cond, args->sched_mutex);
 
-			PTHREAD_MUTEX_UNLOCK(args->sched_mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(args->sched_mutex);
 
 			continue;
 		};
 
-		PTHREAD_MUTEX_UNLOCK(args->sched_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(args->sched_mutex);
 
 		STARPU_ASSERT(task);
 		j = _starpu_get_job_associated_to_task(task);

+ 25 - 25
src/profiling/bound.c

@@ -121,7 +121,7 @@ void starpu_bound_start(int deps, int prio)
 	struct bound_task *t;
 	struct bound_tag_dep *td;
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 
 	tp = task_pools;
 	task_pools = NULL;
@@ -137,7 +137,7 @@ void starpu_bound_start(int deps, int prio)
 	recorddeps = deps;
 	recordprio = prio;
 
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 
 	for ( ; tp; tp = tp->next)
 		free(tp);
@@ -195,10 +195,10 @@ void _starpu_bound_record(starpu_job_t j)
 	if (!good_job(j))
 		return;
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 	/* Re-check, this time with mutex held */
 	if (!_starpu_bound_recording) {
-		PTHREAD_MUTEX_UNLOCK(&mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 		return;
 	}
 
@@ -229,7 +229,7 @@ void _starpu_bound_record(starpu_job_t j)
 		tp->n++;
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 }
 
 void _starpu_bound_tag_dep(starpu_tag id, starpu_tag dep_id)
@@ -239,10 +239,10 @@ void _starpu_bound_tag_dep(starpu_tag id, starpu_tag dep_id)
 	if (!_starpu_bound_recording || !recorddeps)
 		return;
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 	/* Re-check, this time with mutex held */
 	if (!_starpu_bound_recording || !recorddeps) {
-		PTHREAD_MUTEX_UNLOCK(&mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 		return;
 	}
 
@@ -251,7 +251,7 @@ void _starpu_bound_tag_dep(starpu_tag id, starpu_tag dep_id)
 	td->dep_tag = dep_id;
 	td->next = tag_deps;
 	tag_deps = td;
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 }
 
 void _starpu_bound_task_dep(starpu_job_t j, starpu_job_t dep_j)
@@ -264,10 +264,10 @@ void _starpu_bound_task_dep(starpu_job_t j, starpu_job_t dep_j)
 	if (!good_job(j) || !good_job(dep_j))
 		return;
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 	/* Re-check, this time with mutex held */
 	if (!_starpu_bound_recording || !recorddeps) {
-		PTHREAD_MUTEX_UNLOCK(&mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 		return;
 	}
 
@@ -276,7 +276,7 @@ void _starpu_bound_task_dep(starpu_job_t j, starpu_job_t dep_j)
 	t = j->bound_task;
 	t->deps = (struct bound_task **) realloc(t->deps, ++t->depsn * sizeof(t->deps[0]));
 	t->deps[t->depsn-1] = dep_j->bound_task;
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 }
 
 static struct bound_task *find_job(unsigned long id)
@@ -299,10 +299,10 @@ void _starpu_bound_job_id_dep(starpu_job_t j, unsigned long id)
 	if (!good_job(j))
 		return;
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 	/* Re-check, this time with mutex held */
 	if (!_starpu_bound_recording || !recorddeps) {
-		PTHREAD_MUTEX_UNLOCK(&mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 		return;
 	}
 
@@ -310,20 +310,20 @@ void _starpu_bound_job_id_dep(starpu_job_t j, unsigned long id)
 	dep_t = find_job(id);
 	if (!dep_t) {
 		fprintf(stderr,"dependency %lu not found !\n", id);
-		PTHREAD_MUTEX_UNLOCK(&mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 		return;
 	}
 	t = j->bound_task;
 	t->deps = (struct bound_task **) realloc(t->deps, ++t->depsn * sizeof(t->deps[0]));
 	t->deps[t->depsn-1] = dep_t;
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 }
 
 void starpu_bound_stop(void)
 {
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 	_starpu_bound_recording = 0;
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 }
 
 static void _starpu_get_tasks_times(int nw, int nt, double *times) {
@@ -385,7 +385,7 @@ void starpu_bound_print_lp(FILE *output)
 	int nw; /* Number of different workers */
 	int t, w;
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 	nw = starpu_worker_get_count();
 
 	if (recorddeps) {
@@ -630,7 +630,7 @@ void starpu_bound_print_lp(FILE *output)
 		}
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 }
 
 /*
@@ -648,7 +648,7 @@ void starpu_bound_print_mps(FILE *output)
 		return;
 	}
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 
 	nw = starpu_worker_get_count();
 	nt = 0;
@@ -707,7 +707,7 @@ void starpu_bound_print_mps(FILE *output)
 		fprintf(output, "ENDATA\n");
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 }
 
 /*
@@ -844,7 +844,7 @@ void starpu_bound_print(FILE *output, int integer __attribute__ ((unused))) {
 		return;
 	}
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 	glp_prob *lp = _starpu_bound_glp_resolve(integer);
 	if (lp) {
 		struct bound_task_pool * tp;
@@ -875,7 +875,7 @@ void starpu_bound_print(FILE *output, int integer __attribute__ ((unused))) {
 	} else {
 		fprintf(stderr, "Simplex failed\n");
 	}
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 #else /* HAVE_GLPK_H */
 	fprintf(output, "Please rebuild StarPU with glpk installed.\n");
 #endif /* HAVE_GLPK_H */
@@ -890,7 +890,7 @@ void starpu_bound_compute(double *res, double *integer_res __attribute__ ((unuse
 		return;
 	}
 
-	PTHREAD_MUTEX_LOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 	glp_prob *lp = _starpu_bound_glp_resolve(integer);
 	if (lp) {
 		ret = glp_get_obj_val(lp);
@@ -899,7 +899,7 @@ void starpu_bound_compute(double *res, double *integer_res __attribute__ ((unuse
 		glp_delete_prob(lp);
 	} else
 		ret = 0.;
-	PTHREAD_MUTEX_UNLOCK(&mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 	*res = ret;
 #else /* HAVE_GLPK_H */
 	*res = 0.;

+ 13 - 13
src/profiling/profiling.c

@@ -98,7 +98,7 @@ void _starpu_profiling_init(void)
 	const char *env;
 	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
 	{
-		PTHREAD_MUTEX_INIT(&worker_info_mutex[worker], NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&worker_info_mutex[worker], NULL);
 		_starpu_worker_reset_profiling_info(worker);
 	}
 	if ((env = getenv("STARPU_PROFILING")) && atoi(env))
@@ -174,19 +174,19 @@ static void _starpu_worker_reset_profiling_info_with_lock(int workerid)
 
 void _starpu_worker_reset_profiling_info(int workerid)
 {
-	PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
 	_starpu_worker_reset_profiling_info_with_lock(workerid);
-	PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
 }
 
 void _starpu_worker_register_sleeping_start_date(int workerid, struct timespec *sleeping_start)
 {
 	if (profiling)
 	{
-		PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
+		_STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
 		worker_registered_sleeping_start[workerid] = 1;	
 		memcpy(&sleeping_start_date[workerid], sleeping_start, sizeof(struct timespec));
-		PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
 	}
 }
 
@@ -194,10 +194,10 @@ void _starpu_worker_register_executing_start_date(int workerid, struct timespec
 {
 	if (profiling)
 	{
-		PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
+		_STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
 		worker_registered_executing_start[workerid] = 1;	
 		memcpy(&executing_start_date[workerid], executing_start, sizeof(struct timespec));
-		PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
 	}
 }
 
@@ -205,7 +205,7 @@ void _starpu_worker_update_profiling_info_sleeping(int workerid, struct timespec
 {
 	if (profiling)
 	{
-		PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
+		_STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
 
                 /* Perhaps that profiling was enabled while the worker was
                  * already blocked, so we don't measure (end - start), but 
@@ -225,7 +225,7 @@ void _starpu_worker_update_profiling_info_sleeping(int workerid, struct timespec
 
 		worker_registered_sleeping_start[workerid] = 0;	
 
-		PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
 	}
 }
 
@@ -234,7 +234,7 @@ void _starpu_worker_update_profiling_info_executing(int workerid, struct timespe
 {
 	if (profiling)
 	{
-		PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
+		_STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
 
 		if (executing_time)
 			starpu_timespec_accumulate(&worker_info[workerid].executing_time, executing_time);
@@ -244,7 +244,7 @@ void _starpu_worker_update_profiling_info_executing(int workerid, struct timespe
 		worker_info[workerid].power_consumed += power_consumed;
 		worker_info[workerid].executed_tasks += executed_tasks;
 	
-		PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
 	} else /* Not thread safe, shouldn't be too much a problem */
 		worker_info[workerid].executed_tasks += executed_tasks;
 }
@@ -257,7 +257,7 @@ int starpu_worker_get_profiling_info(int workerid, struct starpu_worker_profilin
 		info->executed_tasks = worker_info[workerid].executed_tasks;
 	}
 
-	PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
 
 	if (info)
 	{
@@ -290,7 +290,7 @@ int starpu_worker_get_profiling_info(int workerid, struct starpu_worker_profilin
 
 	_starpu_worker_reset_profiling_info_with_lock(workerid);
 
-	PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
 
 	return 0;
 }

+ 5 - 5
src/sched_policies/deque_modeling_policy_data_aware.c

@@ -207,7 +207,7 @@ int _starpu_fifo_push_sorted_task(struct starpu_fifo_taskq_s *fifo_queue, pthrea
 {
 	struct starpu_task_list *list = &fifo_queue->taskq;
 
-	PTHREAD_MUTEX_LOCK(sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 
 	STARPU_TRACE_JOB_PUSH(task, 0);
 
@@ -261,8 +261,8 @@ int _starpu_fifo_push_sorted_task(struct starpu_fifo_taskq_s *fifo_queue, pthrea
 	fifo_queue->ntasks++;
 	fifo_queue->nprocessed++;
 
-	PTHREAD_COND_SIGNAL(sched_cond);
-	PTHREAD_MUTEX_UNLOCK(sched_mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(sched_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
 
 	return 0;
 }
@@ -595,8 +595,8 @@ static void initialize_dmda_policy(struct starpu_machine_topology_s *topology,
 	{
 		queue_array[workerid] = _starpu_create_fifo();
 	
-		PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
-		PTHREAD_COND_INIT(&sched_cond[workerid], NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
+		_STARPU_PTHREAD_COND_INIT(&sched_cond[workerid], NULL);
 	
 		starpu_worker_set_sched_condition(workerid, &sched_cond[workerid], &sched_mutex[workerid]);
 	}

+ 2 - 2
src/sched_policies/deque_queues.c

@@ -89,7 +89,7 @@ struct starpu_job_list_s *_starpu_deque_pop_every_task(struct starpu_deque_jobq_
 	struct starpu_job_list_s *new_list, *old_list;
 
 	/* block until some task is available in that queue */
-	PTHREAD_MUTEX_LOCK(sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 
 	if (deque_queue->njobs == 0)
 	{
@@ -138,7 +138,7 @@ struct starpu_job_list_s *_starpu_deque_pop_every_task(struct starpu_deque_jobq_
 		}
 	}
 	
-	PTHREAD_MUTEX_UNLOCK(sched_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
 
 	return new_list;
 }

+ 2 - 2
src/sched_policies/eager_central_policy.c

@@ -35,8 +35,8 @@ static void initialize_eager_center_policy(struct starpu_machine_topology_s *top
 	/* there is only a single queue in that trivial design */
 	fifo = _starpu_create_fifo();
 
-	PTHREAD_MUTEX_INIT(&sched_mutex, NULL);
-	PTHREAD_COND_INIT(&sched_cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex, NULL);
+	_STARPU_PTHREAD_COND_INIT(&sched_cond, NULL);
 
 	unsigned workerid;
 	for (workerid = 0; workerid < topology->nworkers; workerid++)

+ 9 - 9
src/sched_policies/eager_central_priority_policy.c

@@ -84,8 +84,8 @@ static void initialize_eager_center_priority_policy(struct starpu_machine_topolo
 	/* only a single queue (even though there are several internally) */
 	taskq = _starpu_create_priority_taskq();
 
-	PTHREAD_MUTEX_INIT(&global_sched_mutex, NULL);
-	PTHREAD_COND_INIT(&global_sched_cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&global_sched_mutex, NULL);
+	_STARPU_PTHREAD_COND_INIT(&global_sched_cond, NULL);
 
 	unsigned workerid;
 	for (workerid = 0; workerid < topology->nworkers; workerid++)
@@ -104,7 +104,7 @@ static void deinitialize_eager_center_priority_policy(struct starpu_machine_topo
 static int _starpu_priority_push_task(struct starpu_task *task)
 {
 	/* wake people waiting for a task */
-	PTHREAD_MUTEX_LOCK(&global_sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&global_sched_mutex);
 
 	STARPU_TRACE_JOB_PUSH(task, 1);
 	
@@ -114,8 +114,8 @@ static int _starpu_priority_push_task(struct starpu_task *task)
 	taskq->ntasks[priolevel]++;
 	taskq->total_ntasks++;
 
-	PTHREAD_COND_SIGNAL(&global_sched_cond);
-	PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(&global_sched_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
 
 	return 0;
 }
@@ -125,15 +125,15 @@ static struct starpu_task *_starpu_priority_pop_task(void)
 	struct starpu_task *task = NULL;
 
 	/* block until some event happens */
-	PTHREAD_MUTEX_LOCK(&global_sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&global_sched_mutex);
 
 	if ((taskq->total_ntasks == 0) && _starpu_machine_is_running())
 	{
 #ifdef STARPU_NON_BLOCKING_DRIVERS
-		PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
 		return NULL;
 #else
-		PTHREAD_COND_WAIT(&global_sched_cond, &global_sched_mutex);
+		_STARPU_PTHREAD_COND_WAIT(&global_sched_cond, &global_sched_mutex);
 #endif
 	}
 
@@ -151,7 +151,7 @@ static struct starpu_task *_starpu_priority_pop_task(void)
 		} while (!task && priolevel-- > 0);
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
 
 	return task;
 }

+ 5 - 5
src/sched_policies/fifo_queues.c

@@ -51,7 +51,7 @@ void _starpu_destroy_fifo(struct starpu_fifo_taskq_s *fifo)
 
 int _starpu_fifo_push_task(struct starpu_fifo_taskq_s *fifo_queue, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond, struct starpu_task *task)
 {
-	PTHREAD_MUTEX_LOCK(sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 
 	STARPU_TRACE_JOB_PUSH(task, 0);
 	/* TODO: if prio, put at back */
@@ -59,8 +59,8 @@ int _starpu_fifo_push_task(struct starpu_fifo_taskq_s *fifo_queue, pthread_mutex
 	fifo_queue->ntasks++;
 	fifo_queue->nprocessed++;
 
-	PTHREAD_COND_SIGNAL(sched_cond);
-	PTHREAD_MUTEX_UNLOCK(sched_mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(sched_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
 
 	return 0;
 }
@@ -96,7 +96,7 @@ struct starpu_task *_starpu_fifo_pop_every_task(struct starpu_fifo_taskq_s *fifo
 	struct starpu_task *new_list = NULL;
 	struct starpu_task *new_list_tail = NULL;
 	
-	PTHREAD_MUTEX_LOCK(sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 
 	size = fifo_queue->ntasks;
 
@@ -140,7 +140,7 @@ struct starpu_task *_starpu_fifo_pop_every_task(struct starpu_fifo_taskq_s *fifo
 		fifo_queue->ntasks -= new_list_size;
 	}
 
-	PTHREAD_MUTEX_UNLOCK(sched_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
 
 	return new_list;
 }

+ 12 - 12
src/sched_policies/heft.c

@@ -91,8 +91,8 @@ static void heft_init(struct starpu_machine_topology_s *topology,
 		exp_end[workerid] = exp_start[workerid]; 
 		ntasks[workerid] = 0;
 
-		PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
-		PTHREAD_COND_INIT(&sched_cond[workerid], NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
+		_STARPU_PTHREAD_COND_INIT(&sched_cond[workerid], NULL);
 	
 		starpu_worker_set_sched_condition(workerid, &sched_cond[workerid], &sched_mutex[workerid]);
 	}
@@ -106,12 +106,12 @@ static void heft_post_exec_hook(struct starpu_task *task)
 	
 	/* Once we have executed the task, we can update the predicted amount
 	 * of work. */
-	PTHREAD_MUTEX_LOCK(&sched_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&sched_mutex[workerid]);
 	exp_len[workerid] -= model + transfer_model;
 	exp_start[workerid] = starpu_timing_now();
 	exp_end[workerid] = exp_start[workerid] + exp_len[workerid];
 	ntasks[workerid]--;
-	PTHREAD_MUTEX_UNLOCK(&sched_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_mutex[workerid]);
 }
 
 static void heft_push_task_notify(struct starpu_task *task, int workerid)
@@ -126,7 +126,7 @@ static void heft_push_task_notify(struct starpu_task *task, int workerid)
 	double predicted_transfer = starpu_task_expected_data_transfer_time(memory_node, task);
 
 	/* Update the predictions */
-	PTHREAD_MUTEX_LOCK(&sched_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&sched_mutex[workerid]);
 
 	/* Sometimes workers didn't take the tasks as early as we expected */
 	exp_start[workerid] = STARPU_MAX(exp_start[workerid], starpu_timing_now());
@@ -159,7 +159,7 @@ static void heft_push_task_notify(struct starpu_task *task, int workerid)
 
 	ntasks[workerid]++;
 
-	PTHREAD_MUTEX_UNLOCK(&sched_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_mutex[workerid]);
 }
 
 static int push_task_on_best_worker(struct starpu_task *task, int best_workerid, double predicted, double predicted_transfer, int prio)
@@ -167,7 +167,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	/* make sure someone could execute that task! */
 	STARPU_ASSERT(best_workerid != -1);
 
-	PTHREAD_MUTEX_LOCK(&sched_mutex[best_workerid]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&sched_mutex[best_workerid]);
 
 	/* Sometimes workers didn't take the tasks as early as we expected */
 	exp_start[best_workerid] = STARPU_MAX(exp_start[best_workerid], starpu_timing_now());
@@ -189,7 +189,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	exp_len[best_workerid] += predicted_transfer;
 
 	ntasks[best_workerid]++;
-	PTHREAD_MUTEX_UNLOCK(&sched_mutex[best_workerid]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_mutex[best_workerid]);
 
 	task->predicted = predicted;
 	task->predicted_transfer = predicted_transfer;
@@ -410,13 +410,13 @@ static int _heft_push_task(struct starpu_task *task, unsigned prio)
 		/* Remove the task from the bundle since we have made a
 		 * decision for it, and that other tasks should not consider it
 		 * anymore. */
-		PTHREAD_MUTEX_LOCK(&bundle->mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);
 		int ret = starpu_task_bundle_remove(bundle, task);
 
 		/* Perhaps the bundle was destroyed when removing the last
 		 * entry */
 		if (ret != 1)
-			PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
+			_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 
 	}
 	else {
@@ -444,8 +444,8 @@ static void heft_deinit(__attribute__ ((unused)) struct starpu_machine_topology_
 	unsigned workerid;
 	for (workerid = 0; workerid < nworkers; workerid++)
 	{
-		PTHREAD_MUTEX_DESTROY(&sched_mutex[workerid]);
-		PTHREAD_COND_DESTROY(&sched_cond[workerid]);
+		_STARPU_PTHREAD_MUTEX_DESTROY(&sched_mutex[workerid]);
+		_STARPU_PTHREAD_COND_DESTROY(&sched_cond[workerid]);
 	}
 }
 

+ 6 - 6
src/sched_policies/parallel_greedy.c

@@ -89,13 +89,13 @@ static void initialize_pgreedy_policy(struct starpu_machine_topology_s *topology
 		}
 	}
 
-	PTHREAD_MUTEX_INIT(&sched_mutex, NULL);
-	PTHREAD_COND_INIT(&sched_cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex, NULL);
+	_STARPU_PTHREAD_COND_INIT(&sched_cond, NULL);
 
 	for (workerid = 0; workerid < nworkers; workerid++)
 	{
-		PTHREAD_MUTEX_INIT(&master_sched_mutex[workerid], NULL);
-		PTHREAD_COND_INIT(&master_sched_cond[workerid], NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&master_sched_mutex[workerid], NULL);
+		_STARPU_PTHREAD_COND_INIT(&master_sched_cond[workerid], NULL);
 	}
 
 	for (workerid = 0; workerid < nworkers; workerid++)
@@ -207,8 +207,8 @@ static struct starpu_task *pop_task_pgreedy_policy(void)
 
 			//fprintf(stderr, "POP -> size %d best_size %d\n", worker_size, best_size);
 
-			PTHREAD_BARRIER_INIT(&j->before_work_barrier, NULL, worker_size);
-			PTHREAD_BARRIER_INIT(&j->after_work_barrier, NULL, worker_size);
+			_STARPU_PTHREAD_BARRIER_INIT(&j->before_work_barrier, NULL, worker_size);
+			_STARPU_PTHREAD_BARRIER_INIT(&j->after_work_barrier, NULL, worker_size);
 
 			/* Dispatch task aliases to the different slaves */
 			for (i = 1; i < worker_size; i++)

+ 9 - 9
src/sched_policies/parallel_heft.c

@@ -58,12 +58,12 @@ static void parallel_heft_post_exec_hook(struct starpu_task *task)
 	
 	/* Once we have executed the task, we can update the predicted amount
 	 * of work. */
-	PTHREAD_MUTEX_LOCK(&sched_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_LOCK(&sched_mutex[workerid]);
 	worker_exp_len[workerid] -= model + transfer_model;
 	worker_exp_start[workerid] = starpu_timing_now();
 	worker_exp_end[workerid] = worker_exp_start[workerid] + worker_exp_len[workerid];
 	ntasks[workerid]--;
-	PTHREAD_MUTEX_UNLOCK(&sched_mutex[workerid]);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_mutex[workerid]);
 }
 
 static int push_task_on_best_worker(struct starpu_task *task, int best_workerid, double exp_end_predicted, int prio)
@@ -83,7 +83,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 
 	int ret = 0;
 
-	PTHREAD_MUTEX_LOCK(&big_lock);
+	_STARPU_PTHREAD_MUTEX_LOCK(&big_lock);
 
 	if (is_basic_worker)
 	{
@@ -110,8 +110,8 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 		j->combined_workerid = best_workerid;
 		j->active_task_alias_count = 0;
 
-		PTHREAD_BARRIER_INIT(&j->before_work_barrier, NULL, worker_size);
-		PTHREAD_BARRIER_INIT(&j->after_work_barrier, NULL, worker_size);
+		_STARPU_PTHREAD_BARRIER_INIT(&j->before_work_barrier, NULL, worker_size);
+		_STARPU_PTHREAD_BARRIER_INIT(&j->after_work_barrier, NULL, worker_size);
 
 		int i;
 		for (i = 0; i < worker_size; i++)
@@ -134,7 +134,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&big_lock);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&big_lock);
 
 	return ret;
 }
@@ -406,13 +406,13 @@ static void initialize_parallel_heft_policy(struct starpu_machine_topology_s *to
 		worker_exp_end[workerid] = worker_exp_start[workerid]; 
 		ntasks[workerid] = 0;
 	
-		PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
-		PTHREAD_COND_INIT(&sched_cond[workerid], NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
+		_STARPU_PTHREAD_COND_INIT(&sched_cond[workerid], NULL);
 	
 		starpu_worker_set_sched_condition(workerid, &sched_cond[workerid], &sched_mutex[workerid]);
 	}
 
-	PTHREAD_MUTEX_INIT(&big_lock, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&big_lock, NULL);
 
 	/* We pre-compute an array of all the perfmodel archs that are applicable */
 	unsigned total_worker_count = nworkers + ncombinedworkers;

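The before_work_barrier/after_work_barrier pair initialised in the parallel_heft and parallel_greedy hunks above is what lets every member of a combined worker start and finish a parallel task together. A simplified, hypothetical illustration of that rendez-vous pattern follows (not actual StarPU driver code; only the two barrier fields come from the diff above):

	#include <pthread.h>

	/* Hypothetical, trimmed-down stand-in for the internal job structure;
	 * only the two barriers initialised above are kept. */
	struct example_job {
		pthread_barrier_t before_work_barrier;
		pthread_barrier_t after_work_barrier;
	};

	/* Each worker of the combined worker meets at the first barrier, runs its
	 * share of the kernel, then meets again at the second barrier. */
	static void run_parallel_alias(struct example_job *j, void (*kernel)(void))
	{
		pthread_barrier_wait(&j->before_work_barrier);
		kernel();
		pthread_barrier_wait(&j->after_work_barrier);
	}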
+ 2 - 2
src/sched_policies/random_policy.c

@@ -77,8 +77,8 @@ static void initialize_random_policy(struct starpu_machine_topology_s *topology,
 	unsigned workerid;
 	for (workerid = 0; workerid < nworkers; workerid++)
 	{
-		PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
-		PTHREAD_COND_INIT(&sched_cond[workerid], NULL);
+		_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
+		_STARPU_PTHREAD_COND_INIT(&sched_cond[workerid], NULL);
 	
 		starpu_worker_set_sched_condition(workerid, &sched_cond[workerid], &sched_mutex[workerid]);
 	}

+ 5 - 5
src/sched_policies/stack_queues.c

@@ -59,7 +59,7 @@ unsigned _starpu_get_stack_nprocessed(struct starpu_stack_jobq_s *stack_queue)
 
 void _starpu_stack_push_task(struct starpu_stack_jobq_s *stack_queue, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond, starpu_job_t task)
 {
-	PTHREAD_MUTEX_LOCK(sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 	total_number_of_jobs++;
 
 	STARPU_TRACE_JOB_PUSH(task, 0);
@@ -70,8 +70,8 @@ void _starpu_stack_push_task(struct starpu_stack_jobq_s *stack_queue, pthread_mu
 	stack_queue->njobs++;
 	stack_queue->nprocessed++;
 
-	PTHREAD_COND_SIGNAL(sched_cond);
-	PTHREAD_MUTEX_UNLOCK(sched_mutex);
+	_STARPU_PTHREAD_COND_SIGNAL(sched_cond);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
 }
 
 starpu_job_t _starpu_stack_pop_task(struct starpu_stack_jobq_s *stack_queue, pthread_mutex_t *sched_mutex, int workerid __attribute__ ((unused)))
@@ -94,9 +94,9 @@ starpu_job_t _starpu_stack_pop_task(struct starpu_stack_jobq_s *stack_queue, pth
 
 		/* we are sure that we got it now, so at worst, some people thought 
 		 * there remained some work and will soon discover it is not true */
-		PTHREAD_MUTEX_LOCK(sched_mutex);
+		_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 		total_number_of_jobs--;
-		PTHREAD_MUTEX_UNLOCK(sched_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
 	}
 	
 	return j;

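For context, the push side shown above signals sched_cond while holding sched_mutex; the matching consumer side (a worker waiting for work) follows the standard condition-variable pattern. The sketch below is a hypothetical illustration using raw pthread calls; the actual StarPU pop functions use the renamed _STARPU_PTHREAD_* wrappers and their own queue bookkeeping:

	#include <pthread.h>

	static unsigned njobs;	/* assumed to be protected by sched_mutex */

	static void wait_for_job(pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond)
	{
		pthread_mutex_lock(sched_mutex);
		while (njobs == 0)
			pthread_cond_wait(sched_cond, sched_mutex); /* atomically releases the mutex while sleeping */
		/* ... a job is now available: pop it from the queue here ... */
		pthread_mutex_unlock(sched_mutex);
	}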
+ 8 - 8
src/sched_policies/work_stealing_policy.c

@@ -145,13 +145,13 @@ static struct starpu_task *ws_pop_task(void)
 
 	q = queue_array[workerid];
 
-	PTHREAD_MUTEX_LOCK(&global_sched_mutex);
+	_STARPU_PTHREAD_MUTEX_LOCK(&global_sched_mutex);
 
 	task = _starpu_deque_pop_task(q, -1);
 	if (task) {
 		/* there was a local task */
 		performed_total++;
-		PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
+		_STARPU_PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
 		return task;
 	}
 	
@@ -165,7 +165,7 @@ static struct starpu_task *ws_pop_task(void)
 		performed_total++;
 	}
 
-	PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
+	_STARPU_PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
 
 	return task;
 }
@@ -179,7 +179,7 @@ static int ws_push_task(struct starpu_task *task)
         struct starpu_deque_jobq_s *deque_queue;
 	deque_queue = queue_array[workerid];
 
-        PTHREAD_MUTEX_LOCK(&global_sched_mutex);
+        _STARPU_PTHREAD_MUTEX_LOCK(&global_sched_mutex);
 	// XXX reuse ?
         //total_number_of_jobs++;
 
@@ -188,8 +188,8 @@ static int ws_push_task(struct starpu_task *task)
         deque_queue->njobs++;
         deque_queue->nprocessed++;
 
-        PTHREAD_COND_SIGNAL(&global_sched_cond);
-        PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
+        _STARPU_PTHREAD_COND_SIGNAL(&global_sched_cond);
+        _STARPU_PTHREAD_MUTEX_UNLOCK(&global_sched_mutex);
 
         return 0;
 }
@@ -200,8 +200,8 @@ static void initialize_ws_policy(struct starpu_machine_topology_s *topology,
 	nworkers = topology->nworkers;
 	rr_worker = 0;
 
-	PTHREAD_MUTEX_INIT(&global_sched_mutex, NULL);
-	PTHREAD_COND_INIT(&global_sched_cond, NULL);
+	_STARPU_PTHREAD_MUTEX_INIT(&global_sched_mutex, NULL);
+	_STARPU_PTHREAD_COND_INIT(&global_sched_cond, NULL);
 
 	unsigned workerid;
 	for (workerid = 0; workerid < nworkers; workerid++)