
src: Update the LIST_TYPE macro to generate list functions and type
names according to the StarPU coding conventions.

src/common/list.h: definition of LIST_TYPE
<other files>: use of the automatically defined types and functions

Nathalie Furmento, 13 years ago
commit efc9c3a136
58 changed files with 610 additions and 608 deletions
  1. mpi/starpu_mpi.c (+47, -47)
  2. mpi/starpu_mpi_private.h (+2, -2)
  3. src/common/fxt.h (+1, -1)
  4. src/common/list.h (+46, -49)
  5. src/core/dependencies/cg.c (+3, -3)
  6. src/core/dependencies/cg.h (+4, -2)
  7. src/core/dependencies/data_concurrency.c (+19, -19)
  8. src/core/dependencies/data_concurrency.h (+1, -1)
  9. src/core/dependencies/dependencies.c (+2, -2)
  10. src/core/dependencies/implicit_data_deps.c (+8, -8)
  11. src/core/dependencies/tags.c (+3, -3)
  12. src/core/dependencies/tags.h (+5, -5)
  13. src/core/dependencies/task_deps.c (+6, -6)
  14. src/core/jobs.c (+15, -15)
  15. src/core/jobs.h (+10, -10)
  16. src/core/perfmodel/perfmodel.c (+1, -1)
  17. src/core/perfmodel/perfmodel.h (+5, -6)
  18. src/core/perfmodel/perfmodel_history.c (+4, -4)
  19. src/core/sched_policy.c (+2, -2)
  20. src/core/sched_policy.h (+1, -1)
  21. src/core/task.c (+8, -8)
  22. src/core/task.h (+3, -3)
  23. src/core/workers.c (+2, -2)
  24. src/core/workers.h (+1, -1)
  25. src/datawizard/coherency.c (+25, -25)
  26. src/datawizard/coherency.h (+19, -19)
  27. src/datawizard/copy_driver.c (+9, -6)
  28. src/datawizard/copy_driver.h (+7, -7)
  29. src/datawizard/data_request.c (+87, -87)
  30. src/datawizard/data_request.h (+20, -19)
  31. src/datawizard/filters.c (+8, -8)
  32. src/datawizard/footprint.c (+2, -2)
  33. src/datawizard/footprint.h (+2, -2)
  34. src/datawizard/interfaces/data_interface.c (+12, -12)
  35. src/datawizard/memalloc.c (+76, -76)
  36. src/datawizard/memalloc.h (+7, -7)
  37. src/datawizard/reduction.c (+5, -5)
  38. src/datawizard/user_interactions.c (+9, -9)
  39. src/datawizard/write_back.c (+3, -3)
  40. src/debug/latency.c (+3, -3)
  41. src/debug/structures_size.c (+2, -2)
  42. src/debug/traces/starpu_fxt.c (+27, -27)
  43. src/drivers/cpu/driver_cpu.c (+2, -2)
  44. src/drivers/cuda/driver_cuda.c (+2, -2)
  45. src/drivers/driver_common/driver_common.c (+3, -3)
  46. src/drivers/driver_common/driver_common.h (+8, -8)
  47. src/drivers/gordon/driver_gordon.c (+22, -22)
  48. src/drivers/opencl/driver_opencl.c (+3, -3)
  49. src/profiling/bound.c (+7, -7)
  50. src/profiling/bound.h (+3, -3)
  51. src/sched_policies/deque_queues.c (+19, -19)
  52. src/sched_policies/deque_queues.h (+2, -2)
  53. src/sched_policies/parallel_greedy.c (+1, -1)
  54. src/sched_policies/parallel_heft.c (+1, -1)
  55. src/sched_policies/stack_queues.c (+8, -8)
  56. src/sched_policies/stack_queues.h (+3, -3)
  57. src/sched_policies/work_stealing_policy.c (+2, -2)
  58. src/util/execute_on_all.c (+2, -2)
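
As a quick illustration of the new conventions, here is a minimal, hand-written usage sketch of the API generated by the updated macro. It is not part of the commit; the element name "foo" and the include path are assumptions made for the example, and the old generated names are recalled in comments.

#include <stdlib.h>
#include <common/list.h>	/* provides LIST_TYPE (include path assumed) */

/* Generates struct foo, struct foo_list and the foo_* / foo_list_* accessors. */
LIST_TYPE(foo,
	int value;
)

int main(void)
{
	struct foo_list *l = foo_list_new();	/* was: foo_list_t l = foo_list_new(); */
	struct foo *e = foo_new();		/* was: foo_t e = foo_new(); */
	e->value = 42;
	foo_list_push_front(l, e);

	struct foo *i;	/* the iterator is now just a pointer to a cell (was: foo_itor_t) */
	for (i = foo_list_begin(l); i != foo_list_end(l); i = foo_list_next(i))
		(void)i->value;

	foo_delete(foo_list_pop_front(l));
	foo_list_delete(l);
	return 0;
}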

+ 47 - 47
mpi/starpu_mpi.c

@@ -27,13 +27,13 @@
 //#define USE_STARPU_ACTIVITY	1
 
 static void submit_mpi_req(void *arg);
-static void handle_request_termination(struct starpu_mpi_req_s *req);
+static void handle_request_termination(struct _starpu_mpi_req *req);
 
 /* The list of requests that have been newly submitted by the application */
-static starpu_mpi_req_list_t new_requests;
+static struct _starpu_mpi_req_list *new_requests;
 
 /* The list of detached requests that have already been submitted to MPI */
-static starpu_mpi_req_list_t detached_requests;
+static struct _starpu_mpi_req_list *detached_requests;
 static pthread_mutex_t detached_requests_mutex;
 
 static pthread_cond_t cond;
@@ -51,7 +51,7 @@ static int posted_requests = 0;
  *	Isend
  */
 
-static void starpu_mpi_isend_func(struct starpu_mpi_req_s *req)
+static void starpu_mpi_isend_func(struct _starpu_mpi_req *req)
 {
         _STARPU_MPI_LOG_IN();
 	void *ptr = starpu_mpi_handle_to_ptr(req->data_handle);
@@ -73,11 +73,11 @@ static void starpu_mpi_isend_func(struct starpu_mpi_req_s *req)
         _STARPU_MPI_LOG_OUT();
 }
 
-static struct starpu_mpi_req_s *_starpu_mpi_isend_common(starpu_data_handle_t data_handle,
-				int dest, int mpi_tag, MPI_Comm comm,
-				unsigned detached, void (*callback)(void *), void *arg)
+static struct _starpu_mpi_req *_starpu_mpi_isend_common(starpu_data_handle_t data_handle,
+							int dest, int mpi_tag, MPI_Comm comm,
+							unsigned detached, void (*callback)(void *), void *arg)
 {
-	struct starpu_mpi_req_s *req = calloc(1, sizeof(struct starpu_mpi_req_s));
+	struct _starpu_mpi_req *req = calloc(1, sizeof(struct _starpu_mpi_req));
 	STARPU_ASSERT(req);
 
         _STARPU_MPI_LOG_IN();
@@ -116,7 +116,7 @@ int starpu_mpi_isend(starpu_data_handle_t data_handle, starpu_mpi_req *public_re
         _STARPU_MPI_LOG_IN();
 	STARPU_ASSERT(public_req);
 
-	struct starpu_mpi_req_s *req;
+	struct _starpu_mpi_req *req;
 	req = _starpu_mpi_isend_common(data_handle, dest, mpi_tag, comm, 0, NULL, NULL);
 
 	STARPU_ASSERT(req);
@@ -144,7 +144,7 @@ int starpu_mpi_isend_detached(starpu_data_handle_t data_handle,
  *	Irecv
  */
 
-static void starpu_mpi_irecv_func(struct starpu_mpi_req_s *req)
+static void starpu_mpi_irecv_func(struct _starpu_mpi_req *req)
 {
         _STARPU_MPI_LOG_IN();
 	void *ptr = starpu_mpi_handle_to_ptr(req->data_handle);
@@ -165,10 +165,10 @@ static void starpu_mpi_irecv_func(struct starpu_mpi_req_s *req)
         _STARPU_MPI_LOG_OUT();
 }
 
-static struct starpu_mpi_req_s *_starpu_mpi_irecv_common(starpu_data_handle_t data_handle, int source, int mpi_tag, MPI_Comm comm, unsigned detached, void (*callback)(void *), void *arg)
+static struct _starpu_mpi_req *_starpu_mpi_irecv_common(starpu_data_handle_t data_handle, int source, int mpi_tag, MPI_Comm comm, unsigned detached, void (*callback)(void *), void *arg)
 {
         _STARPU_MPI_LOG_IN();
-	struct starpu_mpi_req_s *req = calloc(1, sizeof(struct starpu_mpi_req_s));
+	struct _starpu_mpi_req *req = calloc(1, sizeof(struct _starpu_mpi_req));
 	STARPU_ASSERT(req);
 
         INC_POSTED_REQUESTS(1);
@@ -205,7 +205,7 @@ int starpu_mpi_irecv(starpu_data_handle_t data_handle, starpu_mpi_req *public_re
         _STARPU_MPI_LOG_IN();
 	STARPU_ASSERT(public_req);
 
-	struct starpu_mpi_req_s *req;
+	struct _starpu_mpi_req *req;
 	req = _starpu_mpi_irecv_common(data_handle, source, mpi_tag, comm, 0, NULL, NULL);
 
 	STARPU_ASSERT(req);
@@ -268,11 +268,11 @@ int starpu_mpi_send(starpu_data_handle_t data_handle, int dest, int mpi_tag, MPI
  *	Wait
  */
 
-static void starpu_mpi_wait_func(struct starpu_mpi_req_s *waiting_req)
+static void starpu_mpi_wait_func(struct _starpu_mpi_req *waiting_req)
 {
         _STARPU_MPI_LOG_IN();
 	/* Which is the mpi request we are waiting for ? */
-	struct starpu_mpi_req_s *req = waiting_req->other_request;
+	struct _starpu_mpi_req *req = waiting_req->other_request;
 
 	req->ret = MPI_Wait(&req->request, waiting_req->status);
         STARPU_ASSERT(req->ret == MPI_SUCCESS);
@@ -285,9 +285,9 @@ int starpu_mpi_wait(starpu_mpi_req *public_req, MPI_Status *status)
 {
         _STARPU_MPI_LOG_IN();
 	int ret;
-	struct starpu_mpi_req_s *waiting_req = calloc(1, sizeof(struct starpu_mpi_req_s));
+	struct _starpu_mpi_req *waiting_req = calloc(1, sizeof(struct _starpu_mpi_req));
 	STARPU_ASSERT(waiting_req);
-	struct starpu_mpi_req_s *req = *public_req;
+	struct _starpu_mpi_req *req = *public_req;
 
         INC_POSTED_REQUESTS(1);
 
@@ -329,11 +329,11 @@ int starpu_mpi_wait(starpu_mpi_req *public_req, MPI_Status *status)
  * 	Test
  */
 
-static void starpu_mpi_test_func(struct starpu_mpi_req_s *testing_req)
+static void starpu_mpi_test_func(struct _starpu_mpi_req *testing_req)
 {
         _STARPU_MPI_LOG_IN();
 	/* Which is the mpi request we are testing for ? */
-	struct starpu_mpi_req_s *req = testing_req->other_request;
+	struct _starpu_mpi_req *req = testing_req->other_request;
 
         _STARPU_MPI_DEBUG("Test request %p - mpitag %d - TYPE %s %d\n", &req->request, req->mpi_tag, (req->request_type == RECV_REQ)?"recv : source":"send : dest", req->srcdst);
 	req->ret = MPI_Test(&req->request, testing_req->flag, testing_req->status);
@@ -359,7 +359,7 @@ int starpu_mpi_test(starpu_mpi_req *public_req, int *flag, MPI_Status *status)
 
 	STARPU_ASSERT(public_req);
 
-	struct starpu_mpi_req_s *req = *public_req;
+	struct _starpu_mpi_req *req = *public_req;
 
 	STARPU_ASSERT(!req->detached);
 
@@ -369,9 +369,9 @@ int starpu_mpi_test(starpu_mpi_req *public_req, int *flag, MPI_Status *status)
 
 	if (submitted)
 	{
-		struct starpu_mpi_req_s *testing_req = calloc(1, sizeof(struct starpu_mpi_req_s));
+		struct _starpu_mpi_req *testing_req = calloc(1, sizeof(struct _starpu_mpi_req));
                 STARPU_ASSERT(testing_req);
-                //		memset(testing_req, 0, sizeof(struct starpu_mpi_req_s));
+                //		memset(testing_req, 0, sizeof(struct _starpu_mpi_req));
 
 		/* Initialize the request structure */
 		_STARPU_PTHREAD_MUTEX_INIT(&(testing_req->req_mutex), NULL);
@@ -415,7 +415,7 @@ int starpu_mpi_test(starpu_mpi_req *public_req, int *flag, MPI_Status *status)
  *	Barrier
  */
 
-static void starpu_mpi_barrier_func(struct starpu_mpi_req_s *barrier_req)
+static void starpu_mpi_barrier_func(struct _starpu_mpi_req *barrier_req)
 {
         _STARPU_MPI_LOG_IN();
 
@@ -430,7 +430,7 @@ int starpu_mpi_barrier(MPI_Comm comm)
 {
         _STARPU_MPI_LOG_IN();
 	int ret;
-	struct starpu_mpi_req_s *barrier_req = calloc(1, sizeof(struct starpu_mpi_req_s));
+	struct _starpu_mpi_req *barrier_req = calloc(1, sizeof(struct _starpu_mpi_req));
 	STARPU_ASSERT(barrier_req);
 
 	/* Initialize the request structure */
@@ -475,7 +475,7 @@ static char *starpu_mpi_request_type(unsigned request_type)
 }
 #endif
 
-static void handle_request_termination(struct starpu_mpi_req_s *req)
+static void handle_request_termination(struct _starpu_mpi_req *req)
 {
         _STARPU_MPI_LOG_IN();
 
@@ -507,12 +507,12 @@ static void handle_request_termination(struct starpu_mpi_req_s *req)
 static void submit_mpi_req(void *arg)
 {
         _STARPU_MPI_LOG_IN();
-	struct starpu_mpi_req_s *req = arg;
+	struct _starpu_mpi_req *req = arg;
 
         INC_POSTED_REQUESTS(-1);
 
 	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
-	starpu_mpi_req_list_push_front(new_requests, req);
+	_starpu_mpi_req_list_push_front(new_requests, req);
         _STARPU_MPI_DEBUG("Pushing new request type %d\n", req->request_type);
 	_STARPU_PTHREAD_COND_BROADCAST(&cond);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
@@ -529,7 +529,7 @@ static unsigned progression_hook_func(void *arg __attribute__((unused)))
 	unsigned may_block = 1;
 
 	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
-	if (!starpu_mpi_req_list_empty(detached_requests))
+	if (!_starpu_mpi_req_list_empty(detached_requests))
 	{
 		_STARPU_PTHREAD_COND_SIGNAL(&cond);
 		may_block = 0;
@@ -549,15 +549,15 @@ static void test_detached_requests(void)
         _STARPU_MPI_LOG_IN();
 	int flag;
 	MPI_Status status;
-	struct starpu_mpi_req_s *req, *next_req;
+	struct _starpu_mpi_req *req, *next_req;
 
 	_STARPU_PTHREAD_MUTEX_LOCK(&detached_requests_mutex);
 
-	for (req = starpu_mpi_req_list_begin(detached_requests);
-		req != starpu_mpi_req_list_end(detached_requests);
+	for (req = _starpu_mpi_req_list_begin(detached_requests);
+		req != _starpu_mpi_req_list_end(detached_requests);
 		req = next_req)
 	{
-		next_req = starpu_mpi_req_list_next(req);
+		next_req = _starpu_mpi_req_list_next(req);
 
 		_STARPU_PTHREAD_MUTEX_UNLOCK(&detached_requests_mutex);
 
@@ -573,7 +573,7 @@ static void test_detached_requests(void)
 		_STARPU_PTHREAD_MUTEX_LOCK(&detached_requests_mutex);
 
 		if (flag)
-			starpu_mpi_req_list_erase(detached_requests, req);
+			_starpu_mpi_req_list_erase(detached_requests, req);
 
 #ifdef STARPU_DEVEL
 #warning TODO fix memleak
@@ -587,7 +587,7 @@ static void test_detached_requests(void)
         _STARPU_MPI_LOG_OUT();
 }
 
-static void handle_new_request(struct starpu_mpi_req_s *req)
+static void handle_new_request(struct _starpu_mpi_req *req)
 {
         _STARPU_MPI_LOG_IN();
 	STARPU_ASSERT(req);
@@ -599,7 +599,7 @@ static void handle_new_request(struct starpu_mpi_req_s *req)
 	if (req->detached)
 	{
 		_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
-		starpu_mpi_req_list_push_front(detached_requests, req);
+		_starpu_mpi_req_list_push_front(detached_requests, req);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 
 		starpu_wake_all_blocked_workers();
@@ -644,12 +644,12 @@ static void *progress_thread_func(void *arg)
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 
 	_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
-	while (running || posted_requests || !(starpu_mpi_req_list_empty(new_requests)) || !(starpu_mpi_req_list_empty(detached_requests))) {
+	while (running || posted_requests || !(_starpu_mpi_req_list_empty(new_requests)) || !(_starpu_mpi_req_list_empty(detached_requests))) {
 		/* shall we block ? */
-		unsigned block = starpu_mpi_req_list_empty(new_requests);
+		unsigned block = _starpu_mpi_req_list_empty(new_requests);
 
 #ifndef USE_STARPU_ACTIVITY
-		block = block && starpu_mpi_req_list_empty(detached_requests);
+		block = block && _starpu_mpi_req_list_empty(detached_requests);
 #endif
 
 		if (block)
@@ -664,10 +664,10 @@ static void *progress_thread_func(void *arg)
 		_STARPU_PTHREAD_MUTEX_LOCK(&mutex);
 
 		/* get one request */
-		struct starpu_mpi_req_s *req;
-		while (!starpu_mpi_req_list_empty(new_requests))
+		struct _starpu_mpi_req *req;
+		while (!_starpu_mpi_req_list_empty(new_requests))
 		{
-			req = starpu_mpi_req_list_pop_back(new_requests);
+			req = _starpu_mpi_req_list_pop_back(new_requests);
 
 			/* handling a request is likely to block for a while
 			 * (on a sync_data_with_mem call), we want to let the
@@ -679,8 +679,8 @@ static void *progress_thread_func(void *arg)
 		}
 	}
 
-	STARPU_ASSERT(starpu_mpi_req_list_empty(detached_requests));
-	STARPU_ASSERT(starpu_mpi_req_list_empty(new_requests));
+	STARPU_ASSERT(_starpu_mpi_req_list_empty(detached_requests));
+	STARPU_ASSERT(_starpu_mpi_req_list_empty(new_requests));
         STARPU_ASSERT(posted_requests == 0);
 
         if (initialize_mpi) {
@@ -737,10 +737,10 @@ int _starpu_mpi_initialize(int initialize_mpi, int *rank, int *world_size)
 {
 	_STARPU_PTHREAD_MUTEX_INIT(&mutex, NULL);
 	_STARPU_PTHREAD_COND_INIT(&cond, NULL);
-	new_requests = starpu_mpi_req_list_new();
+	new_requests = _starpu_mpi_req_list_new();
 
 	_STARPU_PTHREAD_MUTEX_INIT(&detached_requests_mutex, NULL);
-	detached_requests = starpu_mpi_req_list_new();
+	detached_requests = _starpu_mpi_req_list_new();
 
         _STARPU_PTHREAD_MUTEX_INIT(&mutex_posted_requests, NULL);
 
@@ -800,8 +800,8 @@ int starpu_mpi_shutdown(void)
 #endif
 
 	/* free the request queues */
-	starpu_mpi_req_list_delete(detached_requests);
-	starpu_mpi_req_list_delete(new_requests);
+	_starpu_mpi_req_list_delete(detached_requests);
+	_starpu_mpi_req_list_delete(new_requests);
 
 	return 0;
 }
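
The test_detached_requests() loop above shows the erase-while-iterating pattern with the renamed accessors. Below is a condensed sketch of that pattern with the locking stripped out; it assumes the struct _starpu_mpi_req list declared in mpi/starpu_mpi_private.h (see the next diff), and can_forget() is a hypothetical predicate used only for the example.

#include <stdlib.h>
#include "starpu_mpi_private.h"	/* declares struct _starpu_mpi_req and its list type (path assumed) */

/* Hypothetical predicate: here we simply drop detached requests. */
static int can_forget(struct _starpu_mpi_req *req)
{
	return req->detached;
}

static void drop_requests(struct _starpu_mpi_req_list *reqs)
{
	struct _starpu_mpi_req *req, *next_req;

	for (req = _starpu_mpi_req_list_begin(reqs);
	     req != _starpu_mpi_req_list_end(reqs);
	     req = next_req)
	{
		/* Fetch the successor before possibly unlinking the current cell. */
		next_req = _starpu_mpi_req_list_next(req);

		if (can_forget(req))
		{
			_starpu_mpi_req_list_erase(reqs, req);
			free(req);	/* requests were calloc()ed; the real code still leaks them (see the TODO above) */
		}
	}
}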

+ 2 - 2
mpi/starpu_mpi_private.h

@@ -60,7 +60,7 @@
 #define TEST_REQ        3
 #define BARRIER_REQ     4
 
-LIST_TYPE(starpu_mpi_req,
+LIST_TYPE(_starpu_mpi_req,
 	/* description of the data at StarPU level */
 	starpu_data_handle_t data_handle;
 
@@ -89,7 +89,7 @@ LIST_TYPE(starpu_mpi_req,
 
 	/* In the case of a Wait/Test request, we are going to post a request
 	 * to test the completion of another request */
-	struct starpu_mpi_req_s *other_request;
+	struct _starpu_mpi_req *other_request;
 
 	/* in the case of detached requests */
 	unsigned detached;

+ 1 - 1
src/common/fxt.h

@@ -260,7 +260,7 @@ do {										\
 
 #define _STARPU_TRACE_TAG_DONE(tag)						\
 do {										\
-        struct starpu_job_s *job = (tag)->job;                                  \
+        struct _starpu_job *job = (tag)->job;                                  \
         const char *model_name = _starpu_get_model_name((job));                       \
 	if (model_name)                                                         \
 	{									\

+ 46 - 49
src/common/list.h

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010-2011  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -24,9 +24,9 @@
  * *********************************************************
 * LIST_TYPE(FOO, content);
 *  - declares the following types:
- *      + for cells: FOO_t
- *      + for lists: FOO_list_t
- *      + for iterators: FOO_itor_t
+ *      + for cells: FOO
+ *      + for lists: FOO_list
+ *      + for iterators: FOO
 *  - declares the following accessors:
 *     * create a cell
  *   FOO_t      FOO_new(void);  
@@ -58,6 +58,14 @@
  *   FOO_t      FOO_list_front(FOO_list_t);
 *     * check that the linked list is consistent
  *   int	FOO_list_check(FOO_list_t);
+ *     * iterator on the first cell of the list
+ *   FOO_t      FOO_list_begin(FOO_list_t);
+ *     * iterator past the last cell of the list (NULL)
+ *   FOO_t      FOO_list_end(FOO_list_t);
+ *     * iterator on the next cell
+ *   FOO_t      FOO_list_next(FOO_t)
+ *     * number of cells in the list
+ *   int        FOO_list_size(FOO_list_t)
  * *********************************************************
 * Usage examples:
 *  - at the start, we have:
@@ -80,7 +88,7 @@
  *  e->b = 1;
  *  ma_structure_list_push_front(l, e);
 *  - list iterator:
- *  ma_structure_itor_t i;
+ *  struct ma_structure *i;
  *  for(i  = ma_structure_list_begin(l);
  *      i != ma_structure_list_end(l);
  *      i  = ma_structure_list_next(i))
@@ -95,82 +103,71 @@
 /**@hideinitializer
  * Generates a new type for list of elements */
 #define LIST_TYPE(ENAME, DECL) \
-  LIST_DECLARE_TYPE(ENAME) \
   LIST_CREATE_TYPE(ENAME, DECL)
 
 /**@hideinitializer
- * Forward type declaration for lists */
-#define LIST_DECLARE_TYPE(ENAME) \
-  /** automatic type: ENAME##_list_t is a list of ENAME##_t */ \
-  typedef struct ENAME##_list_s* ENAME##_list_t; \
-  /** automatic type: defines ENAME##_t */ \
-  typedef struct ENAME##_s* ENAME##_t; \
-  /** automatic type: ENAME##_itor_t is an iterator on lists of ENAME##_t */ \
-  typedef ENAME##_t ENAME##_itor_t;
-
-/**@hideinitializer
  * The effective type declaration for lists */
 #define LIST_CREATE_TYPE(ENAME, DECL) \
-  /** from automatic type: ENAME##_t */ \
-  struct ENAME##_s \
+  /** from automatic type: struct ENAME */ \
+  struct ENAME \
   { \
-    struct ENAME##_s*_prev; /**< @internal previous cell */ \
-    struct ENAME##_s*_next; /**< @internal next cell */ \
+    struct ENAME *_prev; /**< @internal previous cell */ \
+    struct ENAME *_next; /**< @internal next cell */ \
     DECL \
   }; \
   /** @internal */ \
-  struct ENAME##_list_s \
+  struct ENAME##_list \
   { \
-    struct ENAME##_s* _head; /**< @internal head of the list */ \
-    struct ENAME##_s* _tail; /**< @internal tail of the list */ \
+    struct ENAME *_head; /**< @internal head of the list */ \
+    struct ENAME *_tail; /**< @internal tail of the list */ \
   }; \
-  /** @internal */static inline ENAME##_t ENAME##_new(void) \
-    { ENAME##_t e = (ENAME##_t)malloc(sizeof(struct ENAME##_s)); \
+  /** @internal */static inline struct ENAME *ENAME##_new(void) \
+    { struct ENAME *e = (struct ENAME *)malloc(sizeof(struct ENAME)); \
       e->_next = NULL; e->_prev = NULL; return e; } \
-  /** @internal */static inline void ENAME##_delete(ENAME##_t e) \
+  /** @internal */static inline void ENAME##_delete(struct ENAME *e) \
     { free(e); } \
-  /** @internal */static inline void ENAME##_list_push_front(ENAME##_list_t l, ENAME##_t e) \
+  /** @internal */static inline void ENAME##_list_push_front(struct ENAME##_list *l, struct ENAME *e) \
     { if(l->_tail == NULL) l->_tail = e; else l->_head->_prev = e; \
       e->_prev = NULL; e->_next = l->_head; l->_head = e; } \
-  /** @internal */static inline void ENAME##_list_push_back(ENAME##_list_t l, ENAME##_t e) \
+  /** @internal */static inline void ENAME##_list_push_back(struct ENAME##_list *l, struct ENAME *e) \
     { if(l->_head == NULL) l->_head = e; else l->_tail->_next = e; \
       e->_next = NULL; e->_prev = l->_tail; l->_tail = e; } \
-  /** @internal */static inline void ENAME##_list_push_list_front(ENAME##_list_t l1, ENAME##_list_t l2) \
+  /** @internal */static inline void ENAME##_list_push_list_front(struct ENAME##_list *l1, struct ENAME##_list *l2) \
     { if (l2->_head == NULL) { l2->_head = l1->_head; l2->_tail = l1->_tail; } \
       else if (l1->_head != NULL) { l1->_tail->_next = l2->_head; l2->_head->_prev = l1->_tail; l2->_head = l1->_head; } } \
-  /** @internal */static inline void ENAME##_list_push_list_back(ENAME##_list_t l1, ENAME##_list_t l2) \
+  /** @internal */static inline void ENAME##_list_push_list_back(struct ENAME##_list *l1, struct ENAME##_list *l2) \
     { if(l1->_head == NULL) { l1->_head = l2->_head; l1->_tail = l2->_tail; } \
       else if (l2->_head != NULL) { l1->_tail->_next = l2->_head; l2->_head->_prev = l1->_tail; l1->_tail = l2->_head; } } \
-  /** @internal */static inline ENAME##_t ENAME##_list_front(ENAME##_list_t l) \
+  /** @internal */static inline struct ENAME *ENAME##_list_front(struct ENAME##_list *l) \
     { return l->_head; } \
-  /** @internal */static inline ENAME##_t ENAME##_list_back(ENAME##_list_t l) \
+  /** @internal */static inline struct ENAME *ENAME##_list_back(struct ENAME##_list *l) \
     { return l->_tail; } \
-  /** @internal */static inline ENAME##_list_t ENAME##_list_new(void) \
-    { ENAME##_list_t l; l=(ENAME##_list_t)malloc(sizeof(struct ENAME##_list_s)); \
+  /** @internal */static inline struct ENAME##_list *ENAME##_list_new(void) \
+    { struct ENAME##_list *l; l=(struct ENAME##_list *)malloc(sizeof(struct ENAME##_list)); \
       l->_head=NULL; l->_tail=l->_head; return l; } \
-  /** @internal */static inline int ENAME##_list_empty(ENAME##_list_t l) \
+  /** @internal */static inline int ENAME##_list_empty(struct ENAME##_list *l) \
     { return (l->_head == NULL); } \
-  /** @internal */static inline void ENAME##_list_delete(ENAME##_list_t l) \
+  /** @internal */static inline void ENAME##_list_delete(struct ENAME##_list *l) \
     { free(l); } \
-  /** @internal */static inline void ENAME##_list_erase(ENAME##_list_t l, ENAME##_t c) \
-    { ENAME##_t p = c->_prev; if(p) p->_next = c->_next; else l->_head = c->_next; \
+  /** @internal */static inline void ENAME##_list_erase(struct ENAME##_list *l, struct ENAME *c) \
+    { struct ENAME *p = c->_prev; if(p) p->_next = c->_next; else l->_head = c->_next; \
       if(c->_next) c->_next->_prev = p; else l->_tail = p; } \
-  /** @internal */static inline ENAME##_t ENAME##_list_pop_front(ENAME##_list_t l) \
-    { ENAME##_t e = ENAME##_list_front(l); \
+  /** @internal */static inline struct ENAME *ENAME##_list_pop_front(struct ENAME##_list *l) \
+    { struct ENAME *e = ENAME##_list_front(l); \
       ENAME##_list_erase(l, e); return e; } \
-  /** @internal */static inline ENAME##_t ENAME##_list_pop_back(ENAME##_list_t l) \
-    { ENAME##_t e = ENAME##_list_back(l); \
+  /** @internal */static inline struct ENAME *ENAME##_list_pop_back(struct ENAME##_list *l) \
+    { struct ENAME *e = ENAME##_list_back(l); \
       ENAME##_list_erase(l, e); return e; } \
-  /** @internal */static inline ENAME##_itor_t ENAME##_list_begin(ENAME##_list_t l) \
+  /** @internal */static inline struct ENAME *ENAME##_list_begin(struct ENAME##_list *l) \
     { return l->_head; } \
-  /** @internal */static inline ENAME##_itor_t ENAME##_list_end(ENAME##_list_t l __attribute__ ((unused))) \
+  /** @internal */static inline struct ENAME *ENAME##_list_end(struct ENAME##_list *l __attribute__ ((unused))) \
     { return NULL; } \
-  /** @internal */static inline ENAME##_itor_t ENAME##_list_next(ENAME##_itor_t i) \
+  /** @internal */static inline struct ENAME *ENAME##_list_next(struct ENAME *i) \
     { return i->_next; } \
-  /** @internal */static inline int ENAME##_list_size(ENAME##_list_t l) \
-    { ENAME##_itor_t i=l->_head; int k=0; while(i!=NULL){k++;i=i->_next;} return k; } \
-  /** @internal */static inline int ENAME##_list_check(ENAME##_list_t l) \
-    { ENAME##_itor_t i=l->_head; while(i) \
+  /** @internal */static inline int ENAME##_list_size(struct ENAME##_list *l) \
+    { struct ENAME *i=l->_head; int k=0; while(i!=NULL){k++;i=i->_next;} return k; } \
+  /** @internal */static inline int ENAME##_list_check(struct ENAME##_list *l) \
+    { struct ENAME *i=l->_head; while(i) \
     { if ((i->_next == NULL) && i != l->_tail) return 0; \
       if (i->_next == i) return 0; \
       i=i->_next;} return 1; }
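
For reference, this is roughly what the updated macro now expands to for a hypothetical LIST_TYPE(foo, int value;). It is a hand-written approximation of the definition above, trimmed to the structures and a few representative accessors.

#include <stdlib.h>

struct foo
{
	struct foo *_prev;	/* previous cell */
	struct foo *_next;	/* next cell */
	int value;		/* the user-supplied DECL */
};

struct foo_list
{
	struct foo *_head;	/* head of the list */
	struct foo *_tail;	/* tail of the list */
};

static inline struct foo *foo_new(void)
{
	struct foo *e = (struct foo *)malloc(sizeof(struct foo));
	e->_next = NULL; e->_prev = NULL;
	return e;
}

static inline void foo_list_push_front(struct foo_list *l, struct foo *e)
{
	if (l->_tail == NULL) l->_tail = e; else l->_head->_prev = e;
	e->_prev = NULL; e->_next = l->_head; l->_head = e;
}

static inline struct foo *foo_list_begin(struct foo_list *l) { return l->_head; }
static inline struct foo *foo_list_end(struct foo_list *l __attribute__ ((unused))) { return NULL; }
static inline struct foo *foo_list_next(struct foo *i) { return i->_next; }

The foo_t, foo_list_t and foo_itor_t typedefs previously produced by LIST_DECLARE_TYPE are gone; callers now spell out the struct pointer types, as the diffs below show.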

+ 3 - 3
src/core/dependencies/cg.c

@@ -91,7 +91,7 @@ void _starpu_notify_cg(struct _starpu_cg *cg)
 
 		struct _starpu_tag *tag;
 		struct _starpu_cg_list *tag_successors, *job_successors;
-		starpu_job_t j;
+		struct _starpu_job *j;
 
 		/* the group is now completed */
 		switch (cg->cg_type) {
@@ -173,7 +173,7 @@ void _starpu_notify_cg_list(struct _starpu_cg_list *successors)
 
 		if (cg_type == STARPU_CG_TASK)
 		{
-			starpu_job_t j = cg->succ.job;
+			struct _starpu_job *j = cg->succ.job;
 			_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
 		}			
 
@@ -181,7 +181,7 @@ void _starpu_notify_cg_list(struct _starpu_cg_list *successors)
 
 		if (cg_type == STARPU_CG_TASK)
 		{
-			starpu_job_t j = cg->succ.job;
+			struct _starpu_job *j = cg->succ.job;
 			
 			/* In case this task was immediately terminated, since
 			 * _starpu_notify_cg_list already hold the sync_mutex

+ 4 - 2
src/core/dependencies/cg.h

@@ -31,6 +31,8 @@
 #define STARPU_NMAXDEPS	256
 #endif
 
+struct _starpu_job;
+
 /* Completion Group list */
 struct _starpu_cg_list {
 	unsigned nsuccs; /* how many successors ? */
@@ -62,7 +64,7 @@ struct _starpu_cg {
 		struct _starpu_tag *tag;
 
 		/* STARPU_CG_TASK */
-		struct starpu_job_s *job;
+		struct _starpu_job *job;
 
 		/* STARPU_CG_APPS */
 		/* in case this completion group is related to an application,
@@ -81,6 +83,6 @@ void _starpu_cg_list_deinit(struct _starpu_cg_list *list);
 void _starpu_add_successor_to_cg_list(struct _starpu_cg_list *successors, struct _starpu_cg *cg);
 void _starpu_notify_cg(struct _starpu_cg *cg);
 void _starpu_notify_cg_list(struct _starpu_cg_list *successors);
-void _starpu_notify_task_dependencies(struct starpu_job_s *j);
+void _starpu_notify_task_dependencies(struct _starpu_job *j);
 
 #endif // __CG_H__
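
Note that cg.h gains a forward declaration of struct _starpu_job so that its prototypes can use the new struct name directly, without including jobs.h. A small illustrative sketch of why an incomplete struct declaration is enough here (everything except the _starpu_job tag is hypothetical):

struct _starpu_job;			/* incomplete type: no need to include jobs.h */

struct example_cg			/* hypothetical container, for illustration only */
{
	struct _starpu_job *job;	/* a pointer to an incomplete type is fine */
};

void example_notify(struct _starpu_job *j);	/* prototypes only need the struct tag */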

+ 19 - 19
src/core/dependencies/data_concurrency.c

@@ -31,34 +31,34 @@
  */
 
 /* the header lock must be taken by the caller */
-static starpu_data_requester_t may_unlock_data_req_list_head(starpu_data_handle_t handle)
+static struct _starpu_data_requester *may_unlock_data_req_list_head(starpu_data_handle_t handle)
 {
-	starpu_data_requester_list_t req_list;
+	struct _starpu_data_requester_list *req_list;
 
 	if (handle->reduction_refcnt > 0)
 	{
 		req_list = handle->reduction_req_list;
 	}
 	else {
-		if (starpu_data_requester_list_empty(handle->reduction_req_list))
+		if (_starpu_data_requester_list_empty(handle->reduction_req_list))
 			req_list = handle->req_list;
 		else
 			req_list = handle->reduction_req_list;
 	}
 
 	/* if there is no one to unlock ... */
-	if (starpu_data_requester_list_empty(req_list))
+	if (_starpu_data_requester_list_empty(req_list))
 		return NULL;
 
 	/* if there is no reference to the data anymore, we can use it */
 	if (handle->refcnt == 0)
-		return starpu_data_requester_list_pop_front(req_list);
+		return _starpu_data_requester_list_pop_front(req_list);
 
 	if (handle->current_mode == STARPU_W)
 		return NULL;
 
 	/* data->current_mode == STARPU_R, so we can process more readers */
-	starpu_data_requester_t r = starpu_data_requester_list_front(req_list);
+	struct _starpu_data_requester *r = _starpu_data_requester_list_front(req_list);
 
 	enum starpu_access_mode r_mode = r->mode;
 	if (r_mode == STARPU_RW)
@@ -68,7 +68,7 @@ static starpu_data_requester_t may_unlock_data_req_list_head(starpu_data_handle_
 	 * access, we only proceed if the current mode is the same as the
 	 * requested mode. */
 	if (r_mode == handle->current_mode)
-		return starpu_data_requester_list_pop_front(req_list);
+		return _starpu_data_requester_list_pop_front(req_list);
 	else
 		return NULL;
 }
@@ -80,7 +80,7 @@ static starpu_data_requester_t may_unlock_data_req_list_head(starpu_data_handle_
 static unsigned _starpu_attempt_to_submit_data_request(unsigned request_from_codelet,
 						       starpu_data_handle_t handle, enum starpu_access_mode mode,
 						       void (*callback)(void *), void *argcb,
-						       starpu_job_t j, unsigned buffer_index)
+						       struct _starpu_job *j, unsigned buffer_index)
 {
 	if (mode == STARPU_RW)
 		mode = STARPU_W;
@@ -143,7 +143,7 @@ static unsigned _starpu_attempt_to_submit_data_request(unsigned request_from_cod
 		
 		handle->busy_count++;
 		/* enqueue the request */
-		starpu_data_requester_t r = starpu_data_requester_new();
+		struct _starpu_data_requester *r = _starpu_data_requester_new();
 			r->mode = mode;
 			r->is_requested_by_codelet = request_from_codelet;
 			r->j = j;
@@ -152,10 +152,10 @@ static unsigned _starpu_attempt_to_submit_data_request(unsigned request_from_cod
 			r->argcb = argcb;
 
 		/* We put the requester in a specific list if this is a reduction task */
-		starpu_data_requester_list_t req_list =
+		struct _starpu_data_requester_list *req_list =
 			is_a_reduction_task?handle->reduction_req_list:handle->req_list;
 
-		starpu_data_requester_list_push_back(req_list, r);
+		_starpu_data_requester_list_push_back(req_list, r);
 
 		/* failed */
 		put_in_list = 1;
@@ -185,7 +185,7 @@ unsigned _starpu_attempt_to_submit_data_request_from_apps(starpu_data_handle_t h
 	return _starpu_attempt_to_submit_data_request(0, handle, mode, callback, argcb, NULL, 0);
 }
 
-static unsigned attempt_to_submit_data_request_from_job(starpu_job_t j, unsigned buffer_index)
+static unsigned attempt_to_submit_data_request_from_job(struct _starpu_job *j, unsigned buffer_index)
 {
 	/* Note that we do not access j->task->buffers, but j->ordered_buffers
 	 * which is a sorted copy of it. */
@@ -196,7 +196,7 @@ static unsigned attempt_to_submit_data_request_from_job(starpu_job_t j, unsigned
 
 }
 
-static unsigned _submit_job_enforce_data_deps(starpu_job_t j, unsigned start_buffer_index)
+static unsigned _submit_job_enforce_data_deps(struct _starpu_job *j, unsigned start_buffer_index)
 {
 	unsigned buf;
 
@@ -216,7 +216,7 @@ static unsigned _submit_job_enforce_data_deps(starpu_job_t j, unsigned start_buf
    with concurrent data-access at the same time in the scheduling engine (eg.
    there can be 2 tasks reading a piece of data, but there cannot be one
    reading and another writing) */
-unsigned _starpu_submit_job_enforce_data_deps(starpu_job_t j)
+unsigned _starpu_submit_job_enforce_data_deps(struct _starpu_job *j)
 {
 	struct starpu_codelet *cl = j->task->cl;
 
@@ -232,9 +232,9 @@ unsigned _starpu_submit_job_enforce_data_deps(starpu_job_t j)
 	return _submit_job_enforce_data_deps(j, 0);
 }
 
-static unsigned unlock_one_requester(starpu_data_requester_t r)
+static unsigned unlock_one_requester(struct _starpu_data_requester *r)
 {
-	starpu_job_t j = r->j;
+	struct _starpu_job *j = r->j;
 	unsigned nbuffers = j->task->cl->nbuffers;
 	unsigned buffer_index = r->buffer_index;
 
@@ -280,7 +280,7 @@ void _starpu_notify_data_dependencies(starpu_data_handle_t handle)
 	}
 
 
-	starpu_data_requester_t r;
+	struct _starpu_data_requester *r;
 	while ((r = may_unlock_data_req_list_head(handle)))
 	{
 		/* STARPU_RW accesses are treated as STARPU_W */
@@ -305,7 +305,7 @@ void _starpu_notify_data_dependencies(starpu_data_handle_t handle)
 		{
 			/* We need to put the request back because we must
 			 * perform a reduction before. */
-			starpu_data_requester_list_push_front(handle->req_list, r);
+			_starpu_data_requester_list_push_front(handle->req_list, r);
 		}
 		else {
 			/* The data is now attributed to that request so we put a
@@ -338,7 +338,7 @@ void _starpu_notify_data_dependencies(starpu_data_handle_t handle)
 				r->ready_data_callback(r->argcb);
 			}
 
-			starpu_data_requester_delete(r);
+			_starpu_data_requester_delete(r);
 			
 			_starpu_spin_lock(&handle->header_lock);
 			STARPU_ASSERT(handle->busy_count > 0);

+ 1 - 1
src/core/dependencies/data_concurrency.h

@@ -20,7 +20,7 @@
 
 #include <core/jobs.h>
 
-unsigned _starpu_submit_job_enforce_data_deps(starpu_job_t j);
+unsigned _starpu_submit_job_enforce_data_deps(struct _starpu_job *j);
 
 void _starpu_notify_data_dependencies(starpu_data_handle_t handle);
 

+ 2 - 2
src/core/dependencies/dependencies.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2010  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -25,7 +25,7 @@
 #include <core/dependencies/data_concurrency.h>
 
 /* We assume that j->sync_mutex is taken by the caller */
-void _starpu_notify_dependencies(struct starpu_job_s *j)
+void _starpu_notify_dependencies(struct _starpu_job *j)
 {
 	STARPU_ASSERT(j);
 	STARPU_ASSERT(j->task);

+ 8 - 8
src/core/dependencies/implicit_data_deps.c

@@ -58,7 +58,7 @@ static void _starpu_add_reader_after_writer(starpu_data_handle_t handle, struct
 #endif
 		handle->last_submitted_ghost_writer_id_is_valid)
 	{
-		starpu_job_t pre_sync_job = _starpu_get_job_associated_to_task(pre_sync_task);
+		struct _starpu_job *pre_sync_job = _starpu_get_job_associated_to_task(pre_sync_task);
 		_STARPU_TRACE_GHOST_TASK_DEPS(handle->last_submitted_ghost_writer_id, pre_sync_job->job_id);
 		_starpu_bound_job_id_dep(pre_sync_job, handle->last_submitted_ghost_writer_id);
 		_STARPU_DEP_DEBUG("dep ID%lu -> %p\n", handle->last_submitted_ghost_writer_id, pre_sync_task);
@@ -98,7 +98,7 @@ static void _starpu_add_writer_after_readers(starpu_data_handle_t handle, struct
 #endif
 	{
 		/* Declare all dependencies with ghost readers */
-		starpu_job_t pre_sync_job = _starpu_get_job_associated_to_task(pre_sync_task);
+		struct _starpu_job *pre_sync_job = _starpu_get_job_associated_to_task(pre_sync_task);
 
 		struct _starpu_jobid_list *ghost_readers_id = handle->last_submitted_ghost_readers_id;
 		while (ghost_readers_id)
@@ -145,7 +145,7 @@ static void _starpu_add_writer_after_writer(starpu_data_handle_t handle, struct
 	{
 		if (handle->last_submitted_ghost_writer_id_is_valid)
 		{
-			starpu_job_t pre_sync_job = _starpu_get_job_associated_to_task(pre_sync_task);
+			struct _starpu_job *pre_sync_job = _starpu_get_job_associated_to_task(pre_sync_task);
 			_STARPU_TRACE_GHOST_TASK_DEPS(handle->last_submitted_ghost_writer_id, pre_sync_job->job_id);
 			_starpu_bound_job_id_dep(pre_sync_job, handle->last_submitted_ghost_writer_id);
 			_STARPU_DEP_DEBUG("dep ID%lu -> %p\n", handle->last_submitted_ghost_writer_id, pre_sync_task);
@@ -189,8 +189,8 @@ void _starpu_detect_implicit_data_deps_with_handle(struct starpu_task *pre_sync_
 
 	if (handle->sequential_consistency)
 	{
-		starpu_job_t pre_sync_job = _starpu_get_job_associated_to_task(pre_sync_task);
-		starpu_job_t post_sync_job = _starpu_get_job_associated_to_task(post_sync_task);
+		struct _starpu_job *pre_sync_job = _starpu_get_job_associated_to_task(pre_sync_task);
+		struct _starpu_job *post_sync_job = _starpu_get_job_associated_to_task(post_sync_task);
 
 		/* Skip tasks that are associated to a reduction phase so that
 		 * they do not interfere with the application. */
@@ -280,7 +280,7 @@ void _starpu_detect_implicit_data_deps(struct starpu_task *task)
 
 	/* We don't want to enforce a sequential consistency for tasks that are
 	 * not visible to the application. */
-	starpu_job_t j = _starpu_get_job_associated_to_task(task);
+	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 	if (j->reduction_task)
 		return;
 
@@ -330,7 +330,7 @@ void _starpu_release_data_enforce_sequential_consistency(struct starpu_task *tas
 			{
 				/* Save the previous writer as the ghost last writer */
 				handle->last_submitted_ghost_writer_id_is_valid = 1;
-				starpu_job_t ghost_job = _starpu_get_job_associated_to_task(task);
+				struct _starpu_job *ghost_job = _starpu_get_job_associated_to_task(task);
 				handle->last_submitted_ghost_writer_id = ghost_job->job_id;
 			}
 			
@@ -360,7 +360,7 @@ void _starpu_release_data_enforce_sequential_consistency(struct starpu_task *tas
 #endif
 				{
 					/* Save the job id of the reader task in the ghost reader linked list */
-					starpu_job_t ghost_reader_job = _starpu_get_job_associated_to_task(task);
+					struct _starpu_job *ghost_reader_job = _starpu_get_job_associated_to_task(task);
 					struct _starpu_jobid_list *link = (struct _starpu_jobid_list *) malloc(sizeof(struct _starpu_jobid_list));
 					STARPU_ASSERT(link);
 					link->next = handle->last_submitted_ghost_readers_id;

+ 3 - 3
src/core/dependencies/tags.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010, 2011  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -147,7 +147,7 @@ void _starpu_tag_set_ready(struct _starpu_tag *tag)
 	/* mark this tag as ready to run */
 	tag->state = STARPU_READY;
 	/* declare it to the scheduler ! */
-	struct starpu_job_s *j = tag->job;
+	struct _starpu_job *j = tag->job;
 
 	/* In case the task job is going to be scheduled immediately, and if
 	 * the task is "empty", calling _starpu_push_task would directly try to enforce
@@ -197,7 +197,7 @@ void starpu_tag_notify_from_apps(starpu_tag_t id)
 	_starpu_notify_tag_dependencies(tag);
 }
 
-void _starpu_tag_declare(starpu_tag_t id, struct starpu_job_s *job)
+void _starpu_tag_declare(starpu_tag_t id, struct _starpu_job *job)
 {
 	_STARPU_TRACE_TAG(id, job);
 	job->task->use_tag = 1;

+ 5 - 5
src/core/dependencies/tags.h

@@ -42,7 +42,7 @@ enum _starpu_tag_state {
 	STARPU_DONE
 };
 
-struct starpu_job_s;
+struct _starpu_job;
 
 struct _starpu_tag {
 	struct _starpu_spinlock lock;
@@ -51,18 +51,18 @@ struct _starpu_tag {
 
 	struct _starpu_cg_list tag_successors;
 
-	struct starpu_job_s *job; /* which job is associated to the tag if any ? */
+	struct _starpu_job *job; /* which job is associated to the tag if any ? */
 
 	unsigned is_assigned;
 	unsigned is_submitted;
 };
 
-void _starpu_notify_dependencies(struct starpu_job_s *j);
+void _starpu_notify_dependencies(struct _starpu_job *j);
 void _starpu_notify_tag_dependencies(struct _starpu_tag *tag);
 
-void _starpu_tag_declare(starpu_tag_t id, struct starpu_job_s *job);
+void _starpu_tag_declare(starpu_tag_t id, struct _starpu_job *job);
 void _starpu_tag_set_ready(struct _starpu_tag *tag);
 
-unsigned _starpu_submit_job_enforce_task_deps(struct starpu_job_s *j);
+unsigned _starpu_submit_job_enforce_task_deps(struct _starpu_job *j);
 
 #endif // __TAGS_H__

+ 6 - 6
src/core/dependencies/task_deps.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2010  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -26,7 +26,7 @@
 #include <core/dependencies/data_concurrency.h>
 #include <profiling/bound.h>
 
-static struct _starpu_cg *create_cg_task(unsigned ntags, starpu_job_t j)
+static struct _starpu_cg *create_cg_task(unsigned ntags, struct _starpu_job *j)
 {
 	struct _starpu_cg *cg = (struct _starpu_cg *) malloc(sizeof(struct _starpu_cg));
 	STARPU_ASSERT(cg);
@@ -42,7 +42,7 @@ static struct _starpu_cg *create_cg_task(unsigned ntags, starpu_job_t j)
 }
 
 /* the job lock must be taken */
-static void _starpu_task_add_succ(starpu_job_t j, struct _starpu_cg *cg)
+static void _starpu_task_add_succ(struct _starpu_job *j, struct _starpu_cg *cg)
 {
 	STARPU_ASSERT(j);
 
@@ -54,7 +54,7 @@ static void _starpu_task_add_succ(starpu_job_t j, struct _starpu_cg *cg)
 	}
 }
 
-void _starpu_notify_task_dependencies(starpu_job_t j)
+void _starpu_notify_task_dependencies(struct _starpu_job *j)
 {
 	_starpu_notify_cg_list(&j->job_successors);
 }
@@ -65,7 +65,7 @@ void starpu_task_declare_deps_array(struct starpu_task *task, unsigned ndeps, st
 	if (ndeps == 0)
 		return;
 
-	starpu_job_t job;
+	struct _starpu_job *job;
 
 	job = _starpu_get_job_associated_to_task(task);
 
@@ -78,7 +78,7 @@ void starpu_task_declare_deps_array(struct starpu_task *task, unsigned ndeps, st
 	{
 		struct starpu_task *dep_task = task_array[i];
 
-		starpu_job_t dep_job;
+		struct _starpu_job *dep_job;
 		dep_job = _starpu_get_job_associated_to_task(dep_task);
 		STARPU_ASSERT(dep_job != job);
 

+ 15 - 15
src/core/jobs.c

@@ -27,7 +27,7 @@
 #include <profiling/bound.h>
 #include <starpu_top.h>
 
-size_t _starpu_job_get_data_size(starpu_job_t j)
+size_t _starpu_job_get_data_size(struct _starpu_job *j)
 {
 	size_t size = 0;
 
@@ -50,18 +50,18 @@ static unsigned job_cnt = 0;
 
 void _starpu_exclude_task_from_dag(struct starpu_task *task)
 {
-	starpu_job_t j = _starpu_get_job_associated_to_task(task);
+	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 
 	j->exclude_from_dag = 1;
 }
 
-/* create an internal starpu_job_t structure to encapsulate the task */
-starpu_job_t __attribute__((malloc)) _starpu_job_create(struct starpu_task *task)
+/* create an internal struct _starpu_job structure to encapsulate the task */
+struct _starpu_job* __attribute__((malloc)) _starpu_job_create(struct starpu_task *task)
 {
-	starpu_job_t job;
+	struct _starpu_job *job;
         _STARPU_LOG_IN();
 
-	job = starpu_job_new();
+	job = _starpu_job_new();
 
 	job->nimpl =0; /* best implementation */
 	job->task = task;
@@ -99,7 +99,7 @@ starpu_job_t __attribute__((malloc)) _starpu_job_create(struct starpu_task *task
 	return job;
 }
 
-void _starpu_job_destroy(starpu_job_t j)
+void _starpu_job_destroy(struct _starpu_job *j)
 {
 	_STARPU_PTHREAD_COND_DESTROY(&j->sync_cond);
 	_STARPU_PTHREAD_MUTEX_DESTROY(&j->sync_mutex);
@@ -112,10 +112,10 @@ void _starpu_job_destroy(starpu_job_t j)
 
 	_starpu_cg_list_deinit(&j->job_successors);
 
-	starpu_job_delete(j);
+	_starpu_job_delete(j);
 }
 
-void _starpu_wait_job(starpu_job_t j)
+void _starpu_wait_job(struct _starpu_job *j)
 {
 	STARPU_ASSERT(j->task);
 	STARPU_ASSERT(!j->task->detach);
@@ -135,7 +135,7 @@ void _starpu_wait_job(starpu_job_t j)
         _STARPU_LOG_OUT();
 }
 
-void _starpu_handle_job_termination(starpu_job_t j, unsigned job_is_already_locked)
+void _starpu_handle_job_termination(struct _starpu_job *j, unsigned job_is_already_locked)
 {
 	struct starpu_task *task = j->task;
 
@@ -237,7 +237,7 @@ void _starpu_handle_job_termination(starpu_job_t j, unsigned job_is_already_lock
 
 /* This function is called when a new task is submitted to StarPU 
  * it returns 1 if the tag deps are not fulfilled, 0 otherwise */
-static unsigned _starpu_not_all_tag_deps_are_fulfilled(starpu_job_t j)
+static unsigned _starpu_not_all_tag_deps_are_fulfilled(struct _starpu_job *j)
 {
 	unsigned ret;
 
@@ -274,7 +274,7 @@ static unsigned _starpu_not_all_tag_deps_are_fulfilled(starpu_job_t j)
 #ifdef STARPU_DEVEL
 #warning TODO remove the job_is_already_locked parameter
 #endif
-static unsigned _starpu_not_all_task_deps_are_fulfilled(starpu_job_t j, unsigned job_is_already_locked)
+static unsigned _starpu_not_all_task_deps_are_fulfilled(struct _starpu_job *j, unsigned job_is_already_locked)
 {
 	unsigned ret;
 
@@ -310,7 +310,7 @@ static unsigned _starpu_not_all_task_deps_are_fulfilled(starpu_job_t j, unsigned
 #ifdef STARPU_DEVEL
 #warning TODO remove the job_is_already_locked parameter
 #endif
-unsigned _starpu_enforce_deps_and_schedule(starpu_job_t j, unsigned job_is_already_locked)
+unsigned _starpu_enforce_deps_and_schedule(struct _starpu_job *j, unsigned job_is_already_locked)
 {
 	unsigned ret;
         _STARPU_LOG_IN();
@@ -345,7 +345,7 @@ unsigned _starpu_enforce_deps_and_schedule(starpu_job_t j, unsigned job_is_alrea
 #ifdef STARPU_DEVEL
 #warning TODO remove the job_is_already_locked parameter
 #endif
-unsigned _starpu_enforce_deps_starting_from_task(starpu_job_t j, unsigned job_is_already_locked)
+unsigned _starpu_enforce_deps_starting_from_task(struct _starpu_job *j, unsigned job_is_already_locked)
 {
 	unsigned ret;
 
@@ -393,7 +393,7 @@ int _starpu_push_local_task(struct _starpu_worker *worker, struct starpu_task *t
 	return 0;
 }
 
-const char *_starpu_get_model_name(starpu_job_t j)
+const char *_starpu_get_model_name(struct _starpu_job *j)
 {
 	if (!j)
 		return NULL;

+ 10 - 10
src/core/jobs.h

@@ -54,7 +54,7 @@ typedef void (*_starpu_cl_func)(void **, void *);
 #define _STARPU_OPENCL_MAY_PERFORM(j)	((j)->task->cl->where & STARPU_OPENCL)
 
 /* A job is the internal representation of a task. */
-LIST_TYPE(starpu_job,
+LIST_TYPE(_starpu_job,
 
 	/* The implementation associated to the job */
 	unsigned nimpl;
@@ -129,29 +129,29 @@ LIST_TYPE(starpu_job,
 	pthread_barrier_t after_work_barrier;
 )
 
-/* Create an internal starpu_job_t structure to encapsulate the task. */
-starpu_job_t __attribute__((malloc)) _starpu_job_create(struct starpu_task *task);
+/* Create an internal struct _starpu_job structure to encapsulate the task. */
+struct _starpu_job* __attribute__((malloc)) _starpu_job_create(struct starpu_task *task);
 
 /* Destroy the data structure associated to the job structure */
-void _starpu_job_destroy(starpu_job_t j);
+void _starpu_job_destroy(struct _starpu_job *j);
 
 /* Wait for the termination of the job */
-void _starpu_wait_job(starpu_job_t j);
+void _starpu_wait_job(struct _starpu_job *j);
 
 /* Specify that the task should not appear in the DAG generated by debug tools. */
 void _starpu_exclude_task_from_dag(struct starpu_task *task);
 
 /* try to submit job j, enqueue it if it's not schedulable yet */
-unsigned _starpu_enforce_deps_and_schedule(starpu_job_t j, unsigned job_is_already_locked);
-unsigned _starpu_enforce_deps_starting_from_task(starpu_job_t j, unsigned job_is_already_locked);
+unsigned _starpu_enforce_deps_and_schedule(struct _starpu_job *j, unsigned job_is_already_locked);
+unsigned _starpu_enforce_deps_starting_from_task(struct _starpu_job *j, unsigned job_is_already_locked);
 
 
 /* This function must be called after the execution of a job, this triggers all
  * job's dependencies and perform the callback function if any. */
-void _starpu_handle_job_termination(starpu_job_t j, unsigned job_is_already_locked);
+void _starpu_handle_job_termination(struct _starpu_job *j, unsigned job_is_already_locked);
 
 /* Get the sum of the size of the data accessed by the job. */
-size_t _starpu_job_get_data_size(starpu_job_t j);
+size_t _starpu_job_get_data_size(struct _starpu_job *j);
 
 /* Get a task from the local pool of tasks that were explicitly attributed to
  * that worker. */
@@ -164,6 +164,6 @@ struct starpu_task *_starpu_pop_local_task(struct _starpu_worker *worker);
 int _starpu_push_local_task(struct _starpu_worker *worker, struct starpu_task *task, int back);
 
 /* Returns the symbol associated to that job if any. */
-const char *_starpu_get_model_name(starpu_job_t j);
+const char *_starpu_get_model_name(struct _starpu_job *j);
 
 #endif // __JOBS_H__

+ 1 - 1
src/core/perfmodel/perfmodel.c

@@ -161,7 +161,7 @@ void _starpu_load_perfmodel(struct starpu_perfmodel *model)
 static double starpu_model_expected_perf(struct starpu_task *task, struct starpu_perfmodel *model, enum starpu_perf_archtype arch,  unsigned nimpl)
 {
 	if (model) {
-		starpu_job_t j = _starpu_get_job_associated_to_task(task);
+		struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 		switch (model->type) {
 			case STARPU_PER_ARCH:
 

+ 5 - 6
src/core/perfmodel/perfmodel.h

@@ -29,8 +29,7 @@
 #include <stdio.h>
 
 struct starpu_buffer_descr;
-struct starpu_jobq_s;
-struct starpu_job_s;
+struct _starpu_job;
 enum starpu_perf_archtype;
 
 ///* File format */
@@ -46,7 +45,7 @@ void _starpu_get_perf_model_dir_codelets(char *path, size_t maxlen);
 void _starpu_get_perf_model_dir_bus(char *path, size_t maxlen);
 void _starpu_get_perf_model_dir_debug(char *path, size_t maxlen);
 
-double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct starpu_job_s *j, unsigned nimpl);
+double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j, unsigned nimpl);
 int _starpu_register_model(struct starpu_perfmodel *model);
 void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned scan_history);
 void _starpu_load_perfmodel(struct starpu_perfmodel *model);
@@ -54,10 +53,10 @@ void _starpu_initialize_registered_performance_models(void);
 void _starpu_deinitialize_registered_performance_models(void);
 
 double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model,
-					enum starpu_perf_archtype arch, struct starpu_job_s *j, unsigned nimpl);
+					enum starpu_perf_archtype arch, struct _starpu_job *j, unsigned nimpl);
 double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model,
-					enum starpu_perf_archtype arch, struct starpu_job_s *j, unsigned nimpl);
-void _starpu_update_perfmodel_history(struct starpu_job_s *j, struct starpu_perfmodel *model, enum starpu_perf_archtype arch,
+					enum starpu_perf_archtype arch, struct _starpu_job *j, unsigned nimpl);
+void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfmodel *model, enum starpu_perf_archtype arch,
 				unsigned cpuid, double measured, unsigned nimpl);
 
 void _starpu_create_sampling_directory_if_needed(void);

+ 4 - 4
src/core/perfmodel/perfmodel_history.c

@@ -856,7 +856,7 @@ void starpu_perfmodel_debugfilepath(struct starpu_perfmodel *model,
 	get_model_debug_path(model, archname, path, maxlen);
 }
 
-double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct starpu_job_s *j, unsigned nimpl)
+double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j, unsigned nimpl)
 {
 	double exp = -1.0;
 	size_t size = _starpu_job_get_data_size(j);
@@ -870,7 +870,7 @@ double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model
 	return exp;
 }
 
-double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct starpu_job_s *j,unsigned nimpl)
+double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j,unsigned nimpl)
 {
 	double exp = -1.0;
 	size_t size = _starpu_job_get_data_size(j);
@@ -902,7 +902,7 @@ double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfm
 	return exp;
 }
 
-double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct starpu_job_s *j,unsigned nimpl)
+double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j,unsigned nimpl)
 {
 	double exp;
 	struct starpu_per_arch_perfmodel *per_arch_model;
@@ -938,7 +938,7 @@ double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, e
 	return exp;
 }
 
-void _starpu_update_perfmodel_history(starpu_job_t j, struct starpu_perfmodel *model, enum starpu_perf_archtype arch, unsigned cpuid STARPU_ATTRIBUTE_UNUSED, double measured, unsigned nimpl)
+void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfmodel *model, enum starpu_perf_archtype arch, unsigned cpuid STARPU_ATTRIBUTE_UNUSED, double measured, unsigned nimpl)
 {
 	if (model)
 	{

+ 2 - 2
src/core/sched_policy.c

@@ -248,7 +248,7 @@ static int _starpu_push_task_on_specific_worker(struct starpu_task *task, int wo
 		int ret = 0;
 		int i;
 
-		starpu_job_t j = _starpu_get_job_associated_to_task(task);
+		struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 		j->task_size = worker_size;
 		j->combined_workerid = workerid;
 		j->active_task_alias_count = 0;
@@ -269,7 +269,7 @@ static int _starpu_push_task_on_specific_worker(struct starpu_task *task, int wo
 }
 
 /* the generic interface that call the proper underlying implementation */
-int _starpu_push_task(starpu_job_t j, unsigned job_is_already_locked)
+int _starpu_push_task(struct _starpu_job *j, unsigned job_is_already_locked)
 {
 	struct starpu_task *task = j->task;
         _STARPU_LOG_IN();

+ 1 - 1
src/core/sched_policy.h

@@ -28,7 +28,7 @@ struct starpu_sched_policy *_starpu_get_sched_policy(void);
 void _starpu_init_sched_policy(struct _starpu_machine_config *config);
 void _starpu_deinit_sched_policy(struct _starpu_machine_config *config);
 
-int _starpu_push_task(starpu_job_t task, unsigned job_is_already_locked);
+int _starpu_push_task(struct _starpu_job *task, unsigned job_is_already_locked);
 /* pop a task that can be executed on the worker */
 struct starpu_task *_starpu_pop_task(struct _starpu_worker *worker);
 /* pop every task that can be executed on the worker */

+ 8 - 8
src/core/task.c

@@ -106,7 +106,7 @@ void starpu_task_deinit(struct starpu_task *task)
 			_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
 	}
 
-	starpu_job_t j = (struct starpu_job_s *)task->starpu_private;
+	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
 
 	if (j)
 		_starpu_job_destroy(j);
@@ -169,7 +169,7 @@ int starpu_task_wait(struct starpu_task *task)
 		return -EDEADLK;
 	}
 
-	starpu_job_t j = (struct starpu_job_s *)task->starpu_private;
+	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
 
 	_starpu_wait_job(j);
 
@@ -182,22 +182,22 @@ int starpu_task_wait(struct starpu_task *task)
 	return 0;
 }
 
-starpu_job_t _starpu_get_job_associated_to_task(struct starpu_task *task)
+struct _starpu_job *_starpu_get_job_associated_to_task(struct starpu_task *task)
 {
 	STARPU_ASSERT(task);
 
 	if (!task->starpu_private)
 	{
-		starpu_job_t j = _starpu_job_create(task);
+		struct _starpu_job *j = _starpu_job_create(task);
 		task->starpu_private = j;
 	}
 
-	return (struct starpu_job_s *)task->starpu_private;
+	return (struct _starpu_job *)task->starpu_private;
 }
 
 /* NB in case we have a regenerable task, it is possible that the job was
  * already counted. */
-int _starpu_submit_job(starpu_job_t j)
+int _starpu_submit_job(struct _starpu_job *j)
 {
         _STARPU_LOG_IN();
 	/* notify bound computation of a new task */
@@ -290,10 +290,10 @@ int starpu_task_submit(struct starpu_task *task)
 	if (profiling)
 		_starpu_clock_gettime(&info->submit_time);
 
-	/* internally, StarPU manipulates a starpu_job_t which is a wrapper around a
+	/* internally, StarPU manipulates a struct _starpu_job * which is a wrapper around a
 	* task structure, it is possible that this job structure was already
 	* allocated, for instance to enforce task dependencies. */
-	starpu_job_t j = _starpu_get_job_associated_to_task(task);
+	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 
 	ret = _starpu_submit_job(j);
 

+ 3 - 3
src/core/task.h

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010-2011  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -38,11 +38,11 @@ void _starpu_set_current_task(struct starpu_task *task);
 
 /* NB the second argument makes it possible to count regenerable tasks only
  * once. */
-int _starpu_submit_job(starpu_job_t j);
+int _starpu_submit_job(struct _starpu_job *j);
 
 /* Returns the job structure (which is the internal data structure associated
  * to a task). */
-starpu_job_t _starpu_get_job_associated_to_task(struct starpu_task *task);
+struct _starpu_job *_starpu_get_job_associated_to_task(struct starpu_task *task);
 
 struct starpu_task *_starpu_create_task_alias(struct starpu_task *task);
 

+ 2 - 2
src/core/workers.c

@@ -184,7 +184,7 @@ static void _starpu_launch_drivers(struct _starpu_machine_config *config)
 		 * may be executed by another thread than that of the Gordon
 		 * driver so that we cannot call the push_codelet_output method
 		 * directly */
-		workerarg->terminated_jobs = starpu_job_list_new();
+		workerarg->terminated_jobs = _starpu_job_list_new();
 
 		starpu_task_list_init(&workerarg->local_tasks);
 	
@@ -470,7 +470,7 @@ static void _starpu_terminate_workers(struct _starpu_machine_config *config)
 		}
 
 		STARPU_ASSERT(starpu_task_list_empty(&worker->local_tasks));
-		starpu_job_list_delete(worker->terminated_jobs);
+		_starpu_job_list_delete(worker->terminated_jobs);
 	}
 }
 

+ 1 - 1
src/core/workers.h

@@ -72,7 +72,7 @@ struct _starpu_worker {
 	pthread_mutex_t *sched_mutex; /* mutex protecting sched_cond */
 	struct starpu_task_list local_tasks; /* this queue contains tasks that have been explicitely submitted to that queue */
 	struct _starpu_worker_set *set; /* in case this worker belongs to a set */
-	struct starpu_job_list_s *terminated_jobs; /* list of pending jobs which were executed */
+	struct _starpu_job_list *terminated_jobs; /* list of pending jobs which were executed */
 	unsigned worker_is_running;
 	unsigned worker_is_initialized;
 	enum _starpu_worker_status status; /* what is the worker doing now ? (eg. CALLBACK) */

+ 25 - 25
src/datawizard/coherency.c

@@ -109,7 +109,7 @@ uint32_t _starpu_select_src_node(starpu_data_handle_t handle, unsigned destinati
 
 /* this may be called once the data is fetched with header and STARPU_RW-lock hold */
 void _starpu_update_data_state(starpu_data_handle_t handle,
-			       struct starpu_data_replicate_s *requesting_replicate,
+			       struct _starpu_data_replicate *requesting_replicate,
 			       enum starpu_access_mode mode)
 {
 	/* There is nothing to do for relaxed coherency modes (scratch or
@@ -138,7 +138,7 @@ void _starpu_update_data_state(starpu_data_handle_t handle,
 			uint32_t node;
 			for (node = 0; node < nnodes; node++)
 			{
-				struct starpu_data_replicate_s *replicate = &handle->per_node[node];
+				struct _starpu_data_replicate *replicate = &handle->per_node[node];
 				if (replicate->state != STARPU_INVALID)
 					replicate->state = STARPU_SHARED;
 			}
@@ -264,9 +264,9 @@ static int determine_request_path(starpu_data_handle_t handle,
 /* handle->lock should be taken. r is returned locked. The node parameter
  * indicate either the source of the request, or the destination for a
  * write-only request. */
-static starpu_data_request_t _starpu_search_existing_data_request(struct starpu_data_replicate_s *replicate, unsigned node, enum starpu_access_mode mode, unsigned is_prefetch)
+static struct _starpu_data_request *_starpu_search_existing_data_request(struct _starpu_data_replicate *replicate, unsigned node, enum starpu_access_mode mode, unsigned is_prefetch)
 {
-	starpu_data_request_t r;
+	struct _starpu_data_request *r;
 
 	r = replicate->request[node];
 
@@ -321,10 +321,10 @@ static starpu_data_request_t _starpu_search_existing_data_request(struct starpu_
  */
 
 /* This function is called with handle's header lock taken */
-starpu_data_request_t _starpu_create_request_to_fetch_data(starpu_data_handle_t handle,
-				struct starpu_data_replicate_s *dst_replicate,
-                                enum starpu_access_mode mode, unsigned is_prefetch,
-                                void (*callback_func)(void *), void *callback_arg)
+struct _starpu_data_request *_starpu_create_request_to_fetch_data(starpu_data_handle_t handle,
+								  struct _starpu_data_replicate *dst_replicate,
+								  enum starpu_access_mode mode, unsigned is_prefetch,
+								  void (*callback_func)(void *), void *callback_arg)
 {
 	unsigned requesting_node = dst_replicate->memory_node;
 
@@ -379,7 +379,7 @@ starpu_data_request_t _starpu_create_request_to_fetch_data(starpu_data_handle_t
 					src_nodes, dst_nodes, handling_nodes);
 
 	STARPU_ASSERT(nhops >= 1 && nhops <= 4);
-	starpu_data_request_t requests[nhops];
+	struct _starpu_data_request *requests[nhops];
 
 	/* Did we reuse a request for that hop ? */
 	int reused_requests[nhops];
@@ -388,14 +388,14 @@ starpu_data_request_t _starpu_create_request_to_fetch_data(starpu_data_handle_t
 	int hop;
 	for (hop = 0; hop < nhops; hop++)
 	{
-		starpu_data_request_t r;
+		struct _starpu_data_request *r;
 
 		unsigned hop_src_node = src_nodes[hop];
 		unsigned hop_dst_node = dst_nodes[hop];
 		unsigned hop_handling_node = handling_nodes[hop];
 
-		struct starpu_data_replicate_s *hop_src_replicate;
-		struct starpu_data_replicate_s *hop_dst_replicate;
+		struct _starpu_data_replicate *hop_src_replicate;
+		struct _starpu_data_replicate *hop_dst_replicate;
 
 		/* Only the first request is independant */
 		unsigned ndeps = (hop == 0)?0:1;
@@ -413,8 +413,8 @@ starpu_data_request_t _starpu_create_request_to_fetch_data(starpu_data_handle_t
 		if (!r) {
 			/* Create a new request if there was no request to reuse */
 			r = _starpu_create_data_request(handle, hop_src_replicate,
-					hop_dst_replicate, hop_handling_node,
-					mode, ndeps, is_prefetch);
+							hop_dst_replicate, hop_handling_node,
+							mode, ndeps, is_prefetch);
 		}
 
 		requests[hop] = r; 
@@ -423,7 +423,7 @@ starpu_data_request_t _starpu_create_request_to_fetch_data(starpu_data_handle_t
 	/* Chain these requests */
 	for (hop = 0; hop < nhops; hop++)
 	{
-		starpu_data_request_t r;
+		struct _starpu_data_request *r;
 		r = requests[hop];
 
 		if (hop != nhops - 1)
@@ -451,7 +451,7 @@ starpu_data_request_t _starpu_create_request_to_fetch_data(starpu_data_handle_t
 	return requests[nhops - 1];
 }
 
-int _starpu_fetch_data_on_node(starpu_data_handle_t handle, struct starpu_data_replicate_s *dst_replicate,
+int _starpu_fetch_data_on_node(starpu_data_handle_t handle, struct _starpu_data_replicate *dst_replicate,
 			       enum starpu_access_mode mode, unsigned is_prefetch,
 			       void (*callback_func)(void *), void *callback_arg)
 {
@@ -466,9 +466,9 @@ int _starpu_fetch_data_on_node(starpu_data_handle_t handle, struct starpu_data_r
 		dst_replicate->handle->busy_count++;
 	}
 
-	starpu_data_request_t r;
+	struct _starpu_data_request *r;
 	r = _starpu_create_request_to_fetch_data(handle, dst_replicate, mode,
-					is_prefetch, callback_func, callback_arg);
+						 is_prefetch, callback_func, callback_arg);
 
 	/* If no request was created, the handle was already up-to-date on the
 	 * node. In this case, _starpu_create_request_to_fetch_data has already
@@ -483,12 +483,12 @@ int _starpu_fetch_data_on_node(starpu_data_handle_t handle, struct starpu_data_r
         return ret;
 }
 
-static int prefetch_data_on_node(starpu_data_handle_t handle, struct starpu_data_replicate_s *replicate, enum starpu_access_mode mode)
+static int prefetch_data_on_node(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, enum starpu_access_mode mode)
 {
 	return _starpu_fetch_data_on_node(handle, replicate, mode, 1, NULL, NULL);
 }
 
-static int fetch_data(starpu_data_handle_t handle, struct starpu_data_replicate_s *replicate, enum starpu_access_mode mode)
+static int fetch_data(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, enum starpu_access_mode mode)
 {
 	return _starpu_fetch_data_on_node(handle, replicate, mode, 0, NULL, NULL);
 }
@@ -510,7 +510,7 @@ uint32_t _starpu_data_get_footprint(starpu_data_handle_t handle)
 
 /* in case the data was accessed on a write mode, do not forget to 
  * make it accessible again once it is possible ! */
-void _starpu_release_data_on_node(starpu_data_handle_t handle, uint32_t default_wt_mask, struct starpu_data_replicate_s *replicate)
+void _starpu_release_data_on_node(starpu_data_handle_t handle, uint32_t default_wt_mask, struct _starpu_data_replicate *replicate)
 {
 	uint32_t wt_mask;
 	wt_mask = default_wt_mask | handle->wt_mask;
@@ -548,7 +548,7 @@ void _starpu_release_data_on_node(starpu_data_handle_t handle, uint32_t default_
 		_starpu_spin_unlock(&handle->header_lock);
 }
 
-static void _starpu_set_data_requested_flag_if_needed(struct starpu_data_replicate_s *replicate)
+static void _starpu_set_data_requested_flag_if_needed(struct _starpu_data_replicate *replicate)
 {
 // XXX : this is just a hint, so we don't take the lock ...
 //	pthread_spin_lock(&handle->header_lock);
@@ -576,7 +576,7 @@ int starpu_prefetch_task_input_on_node(struct starpu_task *task, uint32_t node)
 		if (mode & (STARPU_SCRATCH|STARPU_REDUX))
 			continue;
 
-		struct starpu_data_replicate_s *replicate = &handle->per_node[node];
+		struct _starpu_data_replicate *replicate = &handle->per_node[node];
 		prefetch_data_on_node(handle, replicate, mode);
 
 		_starpu_set_data_requested_flag_if_needed(replicate);
@@ -607,7 +607,7 @@ int _starpu_fetch_task_input(struct starpu_task *task, uint32_t mask)
 		starpu_data_handle_t handle = descrs[index].handle;
 		enum starpu_access_mode mode = descrs[index].mode;
 
-		struct starpu_data_replicate_s *local_replicate;
+		struct _starpu_data_replicate *local_replicate;
 
 		if (mode & (STARPU_SCRATCH|STARPU_REDUX))
 		{
@@ -665,7 +665,7 @@ void _starpu_push_task_output(struct starpu_task *task, uint32_t mask)
 		starpu_data_handle_t handle = descrs[index].handle;
 		enum starpu_access_mode mode = descrs[index].mode;
 
-		struct starpu_data_replicate_s *replicate;
+		struct _starpu_data_replicate *replicate;
 
 		if (mode & STARPU_RW)
 		{
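
The hunks above create one data request per hop of the path computed by determine_request_path() and then chain them so that each hop only starts once the previous one has completed; the completion handler in data_request.c walks next_req[] and posts the successors. The chaining itself lies outside the context shown here, but with the next_req / next_req_count fields declared in data_request.h it amounts to something like the following sketch (chain_hops is not a real function):

    /* Sketch: chain requests[0..nhops-1] so that completing hop i
     * triggers hop i+1; only requests[0] starts with ndeps == 0. */
    static void chain_hops(struct _starpu_data_request *requests[], unsigned nhops)
    {
            unsigned hop;
            for (hop = 0; hop + 1 < nhops; hop++)
            {
                    struct _starpu_data_request *r = requests[hop];
                    r->next_req[r->next_req_count++] = requests[hop + 1];
            }
    }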

+ 19 - 19
src/datawizard/coherency.h

@@ -38,7 +38,7 @@ enum _starpu_cache_state {
 };
 
 /* this should contain the information relative to a given data replicate  */
-LIST_TYPE(starpu_data_replicate,
+LIST_TYPE(_starpu_data_replicate,
 	starpu_data_handle_t handle;
 
 	/* describe the actual data layout */
@@ -68,7 +68,7 @@ LIST_TYPE(starpu_data_replicate,
 	uint8_t automatically_allocated;
 
         /* Pointer to memchunk for LRU strategy */
-	struct starpu_mem_chunk_s * mc;
+	struct _starpu_mem_chunk * mc;
  
 	/* To help the scheduling policies to make some decision, we
 	   may keep a track of the tasks that are likely to request 
@@ -78,10 +78,10 @@ LIST_TYPE(starpu_data_replicate,
 	   use this hint can simply ignore it.
 	 */
 	uint8_t requested[STARPU_MAXNODES];
-	struct starpu_data_request_s *request[STARPU_MAXNODES];
+	struct _starpu_data_request *request[STARPU_MAXNODES];
 )
 
-struct starpu_data_requester_list_s;
+struct _starpu_data_requester_list;
 
 struct _starpu_jobid_list {
 	unsigned long id;
@@ -95,7 +95,7 @@ struct _starpu_task_wrapper_list {
 };
 
 struct _starpu_data_state {
-	struct starpu_data_requester_list_s *req_list;
+	struct _starpu_data_requester_list *req_list;
 	/* the number of requests currently in the scheduling engine (not in
 	 * the req_list anymore), i.e. the number of holders of the
 	 * current_mode rwlock */
@@ -124,8 +124,8 @@ struct _starpu_data_state {
 	unsigned nchildren;
 
 	/* describe the state of the data in term of coherency */
-	struct starpu_data_replicate_s per_node[STARPU_MAXNODES];
-	struct starpu_data_replicate_s per_worker[STARPU_NMAXWORKERS];
+	struct _starpu_data_replicate per_node[STARPU_MAXNODES];
+	struct _starpu_data_replicate per_worker[STARPU_NMAXWORKERS];
 
 	struct starpu_data_interface_ops *ops;
 
@@ -195,7 +195,7 @@ struct _starpu_data_state {
 	/* List of requesters that are specific to the pending reduction. This
 	 * list is used when the requests in the req_list list are frozen until
 	 * the end of the reduction. */
-	struct starpu_data_requester_list_s *reduction_req_list;
+	struct _starpu_data_requester_list *reduction_req_list;
 
 	starpu_data_handle_t reduction_tmp_handles[STARPU_NMAXWORKERS];
 
@@ -219,16 +219,16 @@ void _starpu_display_msi_stats(void);
 
 /* This does not take a reference on the handle, the caller has to do it,
  * e.g. through _starpu_attempt_to_submit_data_request_from_apps() */
-int _starpu_fetch_data_on_node(struct _starpu_data_state *state, struct starpu_data_replicate_s *replicate,
-				enum starpu_access_mode mode, unsigned is_prefetch,
-				void (*callback_func)(void *), void *callback_arg);
+int _starpu_fetch_data_on_node(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate,
+			       enum starpu_access_mode mode, unsigned is_prefetch,
+			       void (*callback_func)(void *), void *callback_arg);
 /* This releases a reference on the handle */
 void _starpu_release_data_on_node(struct _starpu_data_state *state, uint32_t default_wt_mask,
-				struct starpu_data_replicate_s *replicate);
+				  struct _starpu_data_replicate *replicate);
 
 void _starpu_update_data_state(starpu_data_handle_t handle,
-				struct starpu_data_replicate_s *requesting_replicate,
-				enum starpu_access_mode mode);
+			       struct _starpu_data_replicate *requesting_replicate,
+			       enum starpu_access_mode mode);
 
 uint32_t _starpu_get_data_refcnt(struct _starpu_data_state *state, uint32_t node);
 
@@ -247,12 +247,12 @@ unsigned starpu_data_test_if_allocated_on_node(starpu_data_handle_t handle, uint
 
 uint32_t _starpu_select_src_node(struct _starpu_data_state *state, unsigned destination);
 
-starpu_data_request_t _starpu_create_request_to_fetch_data(starpu_data_handle_t handle,
-							   struct starpu_data_replicate_s *dst_replicate,
-							   enum starpu_access_mode mode, unsigned is_prefetch,
-							   void (*callback_func)(void *), void *callback_arg);
+struct _starpu_data_request *_starpu_create_request_to_fetch_data(starpu_data_handle_t handle,
+								  struct _starpu_data_replicate *dst_replicate,
+								  enum starpu_access_mode mode, unsigned is_prefetch,
+								  void (*callback_func)(void *), void *callback_arg);
 
-void _starpu_redux_init_data_replicate(starpu_data_handle_t handle, struct starpu_data_replicate_s *replicate, int workerid);
+void _starpu_redux_init_data_replicate(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, int workerid);
 void _starpu_data_start_reduction_mode(starpu_data_handle_t handle);
 void _starpu_data_end_reduction_mode(starpu_data_handle_t handle);
 void _starpu_data_end_reduction_mode_terminate(starpu_data_handle_t handle);
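
The LIST_TYPE invocations above are the heart of this commit: with the updated macro in src/common/list.h, LIST_TYPE(_starpu_foo, ...) declares struct _starpu_foo for the elements, struct _starpu_foo_list for the list itself, and a family of _starpu_foo_list_* helpers, replacing the former starpu_foo_s / starpu_foo_list_t / starpu_foo_list_* names. As a rough sketch of the interface generated for the replicate type (link field names and exact prototypes are assumptions; the authoritative definitions are in list.h):

    struct _starpu_data_replicate {
            struct _starpu_data_replicate *_prev, *_next;  /* intrusive links (names assumed) */
            /* ... plus the fields listed in the LIST_TYPE body above ... */
    };

    struct _starpu_data_replicate_list;  /* list head, kept opaque in this sketch */

    struct _starpu_data_replicate_list *_starpu_data_replicate_list_new(void);
    void _starpu_data_replicate_list_delete(struct _starpu_data_replicate_list *list);
    int  _starpu_data_replicate_list_empty(struct _starpu_data_replicate_list *list);
    void _starpu_data_replicate_list_push_front(struct _starpu_data_replicate_list *list,
                                                struct _starpu_data_replicate *replicate);
    void _starpu_data_replicate_list_push_back(struct _starpu_data_replicate_list *list,
                                               struct _starpu_data_replicate *replicate);
    struct _starpu_data_replicate *_starpu_data_replicate_list_pop_front(struct _starpu_data_replicate_list *list);
    struct _starpu_data_replicate *_starpu_data_replicate_list_begin(struct _starpu_data_replicate_list *list);
    struct _starpu_data_replicate *_starpu_data_replicate_list_end(struct _starpu_data_replicate_list *list);
    struct _starpu_data_replicate *_starpu_data_replicate_list_next(struct _starpu_data_replicate *replicate);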

+ 9 - 6
src/datawizard/copy_driver.c

@@ -82,7 +82,10 @@ void starpu_wake_all_blocked_workers(void)
 static unsigned communication_cnt = 0;
 #endif
 
-static int copy_data_1_to_1_generic(starpu_data_handle_t handle, struct starpu_data_replicate_s *src_replicate, struct starpu_data_replicate_s *dst_replicate, struct starpu_data_request_s *req STARPU_ATTRIBUTE_UNUSED)
+static int copy_data_1_to_1_generic(starpu_data_handle_t handle,
+				    struct _starpu_data_replicate *src_replicate,
+				    struct _starpu_data_replicate *dst_replicate,
+				    struct _starpu_data_request *req STARPU_ATTRIBUTE_UNUSED)
 {
 	int ret = 0;
 
@@ -231,11 +234,11 @@ static int copy_data_1_to_1_generic(starpu_data_handle_t handle, struct starpu_d
 }
 
 int __attribute__((warn_unused_result)) _starpu_driver_copy_data_1_to_1(starpu_data_handle_t handle,
-						struct starpu_data_replicate_s *src_replicate,
-						struct starpu_data_replicate_s *dst_replicate,
-						unsigned donotread,
-						struct starpu_data_request_s *req,
-						unsigned may_alloc)
+									struct _starpu_data_replicate *src_replicate,
+									struct _starpu_data_replicate *dst_replicate,
+									unsigned donotread,
+									struct _starpu_data_request *req,
+									unsigned may_alloc)
 {
 	if (!donotread)
 	{

+ 7 - 7
src/datawizard/copy_driver.h

@@ -33,8 +33,8 @@
 #include <starpu_opencl.h>
 #endif
 
-struct starpu_data_request_s;
-struct starpu_data_replicate_s;
+struct _starpu_data_request;
+struct _starpu_data_replicate;
 
 /* this is a structure that can be queried to see whether an asynchronous
  * transfer has terminated or not */
@@ -56,11 +56,11 @@ struct _starpu_async_channel {
 void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid);
 
 int _starpu_driver_copy_data_1_to_1(starpu_data_handle_t handle,
-					struct starpu_data_replicate_s *src_replicate,
-					struct starpu_data_replicate_s *dst_replicate,
-					unsigned donotread,
-					struct starpu_data_request_s *req,
-					unsigned may_alloc);
+				    struct _starpu_data_replicate *src_replicate,
+				    struct _starpu_data_replicate *dst_replicate,
+				    unsigned donotread,
+				    struct _starpu_data_request *req,
+				    unsigned may_alloc);
 
 unsigned _starpu_driver_test_request_completion(struct _starpu_async_channel *async_channel);
 void _starpu_driver_wait_request_completion(struct _starpu_async_channel *async_channel);
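
The two forward declarations at the top of this header are the standard C idiom for keeping headers decoupled: copy_driver.h only passes pointers to the request and replicate types, so declaring the struct tags is enough and neither data_request.h nor coherency.h needs to be included; the renaming simply has to be mirrored here because struct tags must match exactly wherever they are used. The idiom in isolation, with hypothetical widget/gadget names:

    /* widget.h (hypothetical) */
    struct gadget;                            /* declared here, defined elsewhere */

    void widget_attach(struct gadget *g);     /* pointers to incomplete types are fine */

    /* widget.c includes gadget.h when it needs the full definition */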

+ 87 - 87
src/datawizard/data_request.c

@@ -20,12 +20,12 @@
 #include <datawizard/datawizard.h>
 
 /* requests that have not been treated at all */
-static starpu_data_request_list_t data_requests[STARPU_MAXNODES];
-static starpu_data_request_list_t prefetch_requests[STARPU_MAXNODES];
+static struct _starpu_data_request_list *data_requests[STARPU_MAXNODES];
+static struct _starpu_data_request_list *prefetch_requests[STARPU_MAXNODES];
 static pthread_mutex_t data_requests_list_mutex[STARPU_MAXNODES];
 
 /* requests that are not terminated (eg. async transfers) */
-static starpu_data_request_list_t data_requests_pending[STARPU_MAXNODES];
+static struct _starpu_data_request_list *data_requests_pending[STARPU_MAXNODES];
 static pthread_mutex_t data_requests_pending_list_mutex[STARPU_MAXNODES];
 
 int starpu_memstrategy_drop_prefetch[STARPU_MAXNODES];
@@ -35,11 +35,11 @@ void _starpu_init_data_request_lists(void)
 	unsigned i;
 	for (i = 0; i < STARPU_MAXNODES; i++)
 	{
-		prefetch_requests[i] = starpu_data_request_list_new();
-		data_requests[i] = starpu_data_request_list_new();
+		prefetch_requests[i] = _starpu_data_request_list_new();
+		data_requests[i] = _starpu_data_request_list_new();
 		_STARPU_PTHREAD_MUTEX_INIT(&data_requests_list_mutex[i], NULL);
 
-		data_requests_pending[i] = starpu_data_request_list_new();
+		data_requests_pending[i] = _starpu_data_request_list_new();
 		_STARPU_PTHREAD_MUTEX_INIT(&data_requests_pending_list_mutex[i], NULL);
 		
 		starpu_memstrategy_drop_prefetch[i]=0;
@@ -52,16 +52,16 @@ void _starpu_deinit_data_request_lists(void)
 	for (i = 0; i < STARPU_MAXNODES; i++)
 	{
 		_STARPU_PTHREAD_MUTEX_DESTROY(&data_requests_pending_list_mutex[i]);
-		starpu_data_request_list_delete(data_requests_pending[i]);
+		_starpu_data_request_list_delete(data_requests_pending[i]);
 
 		_STARPU_PTHREAD_MUTEX_DESTROY(&data_requests_list_mutex[i]);
-		starpu_data_request_list_delete(data_requests[i]);
-		starpu_data_request_list_delete(prefetch_requests[i]);
+		_starpu_data_request_list_delete(data_requests[i]);
+		_starpu_data_request_list_delete(prefetch_requests[i]);
 	}
 }
 
 /* this should be called with the lock r->handle->header_lock taken */
-static void starpu_data_request_destroy(starpu_data_request_t r)
+static void starpu_data_request_destroy(struct _starpu_data_request *r)
 {
 	unsigned node;
 
@@ -79,19 +79,19 @@ static void starpu_data_request_destroy(starpu_data_request_t r)
 	STARPU_ASSERT(r->dst_replicate->request[node] == r);
 	r->dst_replicate->request[node] = NULL;
 	//fprintf(stderr, "DESTROY REQ %p (%d) refcnt %d\n", r, node, r->refcnt);
-	starpu_data_request_delete(r);
+	_starpu_data_request_delete(r);
 }
 
 /* handle->lock should already be taken !  */
-starpu_data_request_t _starpu_create_data_request(starpu_data_handle_t handle,
-						  struct starpu_data_replicate_s *src_replicate,
-						  struct starpu_data_replicate_s *dst_replicate,
-						  uint32_t handling_node,
-						  enum starpu_access_mode mode,
-						  unsigned ndeps,
-						  unsigned is_prefetch)
+struct _starpu_data_request *_starpu_create_data_request(starpu_data_handle_t handle,
+							 struct _starpu_data_replicate *src_replicate,
+							 struct _starpu_data_replicate *dst_replicate,
+							 uint32_t handling_node,
+							 enum starpu_access_mode mode,
+							 unsigned ndeps,
+							 unsigned is_prefetch)
 {
-	starpu_data_request_t r = starpu_data_request_new();
+	struct _starpu_data_request *r = _starpu_data_request_new();
 
 	_starpu_spin_init(&r->lock);
 
@@ -131,7 +131,7 @@ starpu_data_request_t _starpu_create_data_request(starpu_data_handle_t handle,
 	return r;
 }
 
-int _starpu_wait_data_request_completion(starpu_data_request_t r, unsigned may_alloc)
+int _starpu_wait_data_request_completion(struct _starpu_data_request *r, unsigned may_alloc)
 {
 	int retval;
 	int do_delete = 0;
@@ -175,7 +175,7 @@ int _starpu_wait_data_request_completion(starpu_data_request_t r, unsigned may_a
 }
 
 /* this is non blocking */
-void _starpu_post_data_request(starpu_data_request_t r, uint32_t handling_node)
+void _starpu_post_data_request(struct _starpu_data_request *r, uint32_t handling_node)
 {
 //	_STARPU_DEBUG("POST REQUEST\n");
 
@@ -192,9 +192,9 @@ void _starpu_post_data_request(starpu_data_request_t r, uint32_t handling_node)
 	/* insert the request in the proper list */
 	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[handling_node]);
 	if (r->prefetch) {
-		starpu_data_request_list_push_back(prefetch_requests[handling_node], r);
+		_starpu_data_request_list_push_back(prefetch_requests[handling_node], r);
 	} else
-		starpu_data_request_list_push_back(data_requests[handling_node], r);
+		_starpu_data_request_list_push_back(data_requests[handling_node], r);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[handling_node]);
 
 #ifndef STARPU_NON_BLOCKING_DRIVERS
@@ -203,7 +203,7 @@ void _starpu_post_data_request(starpu_data_request_t r, uint32_t handling_node)
 }
 
 /* We assume that r->lock is taken by the caller */
-void _starpu_data_request_append_callback(starpu_data_request_t r, void (*callback_func)(void *), void *callback_arg)
+void _starpu_data_request_append_callback(struct _starpu_data_request *r, void (*callback_func)(void *), void *callback_arg)
 {
 	STARPU_ASSERT(r);
 
@@ -220,14 +220,14 @@ void _starpu_data_request_append_callback(starpu_data_request_t r, void (*callba
 }
 
 /* This method is called with handle's header_lock taken */
-static void starpu_handle_data_request_completion(starpu_data_request_t r)
+static void starpu_handle_data_request_completion(struct _starpu_data_request *r)
 {
 	unsigned do_delete = 0;
 	starpu_data_handle_t handle = r->handle;
 	enum starpu_access_mode mode = r->mode;
 
-	struct starpu_data_replicate_s *src_replicate = r->src_replicate;
-	struct starpu_data_replicate_s *dst_replicate = r->dst_replicate;
+	struct _starpu_data_replicate *src_replicate = r->src_replicate;
+	struct _starpu_data_replicate *dst_replicate = r->dst_replicate;
 
 
 #ifdef STARPU_MEMORY_STATUS
@@ -267,7 +267,7 @@ static void starpu_handle_data_request_completion(starpu_data_request_t r)
 	unsigned chained_req;
 	for (chained_req = 0; chained_req < r->next_req_count; chained_req++)
 	{
-		struct starpu_data_request_s *next_req = r->next_req[chained_req];
+		struct _starpu_data_request *next_req = r->next_req[chained_req];
 		STARPU_ASSERT(next_req->ndeps > 0);
 		next_req->ndeps--;
 		_starpu_post_data_request(next_req, next_req->handling_node);
@@ -323,15 +323,15 @@ static void starpu_handle_data_request_completion(starpu_data_request_t r)
 }
 
 /* TODO : accounting to see how much time was spent working for other people ... */
-static int starpu_handle_data_request(starpu_data_request_t r, unsigned may_alloc)
+static int starpu_handle_data_request(struct _starpu_data_request *r, unsigned may_alloc)
 {
 	starpu_data_handle_t handle = r->handle;
 
 	_starpu_spin_lock(&handle->header_lock);
 	_starpu_spin_lock(&r->lock);
 
-	struct starpu_data_replicate_s *src_replicate = r->src_replicate;
-	struct starpu_data_replicate_s *dst_replicate = r->dst_replicate;
+	struct _starpu_data_replicate *src_replicate = r->src_replicate;
+	struct _starpu_data_replicate *dst_replicate = r->dst_replicate;
 
 	enum starpu_access_mode r_mode = r->mode;
 
@@ -345,7 +345,7 @@ static int starpu_handle_data_request(starpu_data_request_t r, unsigned may_allo
 	/* the header of the data must be locked by the worker that submitted the request */
 
 	r->retval = _starpu_driver_copy_data_1_to_1(handle, src_replicate,
-			dst_replicate, !(r_mode & STARPU_R), r, may_alloc);
+						    dst_replicate, !(r_mode & STARPU_R), r, may_alloc);
 
 	if (r->retval == -ENOMEM)
 	{
@@ -364,7 +364,7 @@ static int starpu_handle_data_request(starpu_data_request_t r, unsigned may_allo
 		_starpu_spin_unlock(&handle->header_lock);
 
 		_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[r->handling_node]);
-		starpu_data_request_list_push_front(data_requests_pending[r->handling_node], r);
+		_starpu_data_request_list_push_front(data_requests_pending[r->handling_node], r);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[r->handling_node]);
 
 		return -EAGAIN;
@@ -379,15 +379,15 @@ static int starpu_handle_data_request(starpu_data_request_t r, unsigned may_allo
 
 void _starpu_handle_node_data_requests(uint32_t src_node, unsigned may_alloc)
 {
-	starpu_data_request_t r;
-	starpu_data_request_list_t new_data_requests;
+	struct _starpu_data_request *r;
+	struct _starpu_data_request_list *new_data_requests;
 
 	/* take all the entries from the request list */
         _STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
 
-	starpu_data_request_list_t local_list = data_requests[src_node];
+	struct _starpu_data_request_list *local_list = data_requests[src_node];
 
-	if (starpu_data_request_list_empty(local_list))
+	if (_starpu_data_request_list_empty(local_list))
 	{
 		/* there is no request */
                 _STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
@@ -398,47 +398,47 @@ void _starpu_handle_node_data_requests(uint32_t src_node, unsigned may_alloc)
 	/* There is an entry: we create a new empty list to replace the list of
 	 * requests, and we handle the request(s) one by one in the former
 	 * list, without concurrency issues.*/
-	data_requests[src_node] = starpu_data_request_list_new();
+	data_requests[src_node] = _starpu_data_request_list_new();
 
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
-	new_data_requests = starpu_data_request_list_new();
+	new_data_requests = _starpu_data_request_list_new();
 
 	/* for all entries of the list */
-	while (!starpu_data_request_list_empty(local_list))
+	while (!_starpu_data_request_list_empty(local_list))
 	{
                 int res;
 
-		r = starpu_data_request_list_pop_front(local_list);
+		r = _starpu_data_request_list_pop_front(local_list);
 
 		res = starpu_handle_data_request(r, may_alloc);
 		if (res == -ENOMEM)
 		{
-			starpu_data_request_list_push_back(new_data_requests, r);
+			_starpu_data_request_list_push_back(new_data_requests, r);
 		}
 	}
 
 	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
-	starpu_data_request_list_push_list_front(new_data_requests, data_requests[src_node]);
+	_starpu_data_request_list_push_list_front(new_data_requests, data_requests[src_node]);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
-	starpu_data_request_list_delete(new_data_requests);
-	starpu_data_request_list_delete(local_list);
+	_starpu_data_request_list_delete(new_data_requests);
+	_starpu_data_request_list_delete(local_list);
 }
 
 void _starpu_handle_node_prefetch_requests(uint32_t src_node, unsigned may_alloc){
 	starpu_memstrategy_drop_prefetch[src_node]=0;
 
-	starpu_data_request_t r;
-	starpu_data_request_list_t new_data_requests;
-	starpu_data_request_list_t new_prefetch_requests;
+	struct _starpu_data_request *r;
+	struct _starpu_data_request_list *new_data_requests;
+	struct _starpu_data_request_list *new_prefetch_requests;
 
 	/* take all the entries from the request list */
         _STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
 
-	starpu_data_request_list_t local_list = prefetch_requests[src_node];
+	struct _starpu_data_request_list *local_list = prefetch_requests[src_node];
 	
-	if (starpu_data_request_list_empty(local_list))
+	if (_starpu_data_request_list_empty(local_list))
 	{
 		/* there is no request */
                 _STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
@@ -448,72 +448,72 @@ void _starpu_handle_node_prefetch_requests(uint32_t src_node, unsigned may_alloc
 	/* There is an entry: we create a new empty list to replace the list of
 	 * requests, and we handle the request(s) one by one in the former
 	 * list, without concurrency issues.*/
-	prefetch_requests[src_node] = starpu_data_request_list_new();
+	prefetch_requests[src_node] = _starpu_data_request_list_new();
 
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
-	new_data_requests = starpu_data_request_list_new();
-	new_prefetch_requests = starpu_data_request_list_new();
+	new_data_requests = _starpu_data_request_list_new();
+	new_prefetch_requests = _starpu_data_request_list_new();
 
 	/* for all entries of the list */
-	while (!starpu_data_request_list_empty(local_list))
+	while (!_starpu_data_request_list_empty(local_list))
 	{
                 int res;
 
-		r = starpu_data_request_list_pop_front(local_list);
+		r = _starpu_data_request_list_pop_front(local_list);
 
 		res = starpu_handle_data_request(r, may_alloc);
 		if (res == -ENOMEM )
 		{
 			starpu_memstrategy_drop_prefetch[src_node]=1;
 			if (r->prefetch)
-				starpu_data_request_list_push_back(new_prefetch_requests, r);
-			else 
+				_starpu_data_request_list_push_back(new_prefetch_requests, r);
+			else
 			{
 				/* Prefetch request promoted while in tmp list*/
-				starpu_data_request_list_push_back(new_data_requests, r);
+				_starpu_data_request_list_push_back(new_data_requests, r);
 			}
 			break;
 		}
 	}
 
-	while(!starpu_data_request_list_empty(local_list) && starpu_memstrategy_drop_prefetch[src_node])
+	while(!_starpu_data_request_list_empty(local_list) && starpu_memstrategy_drop_prefetch[src_node])
 	{
-		r = starpu_data_request_list_pop_front(local_list);
+		r = _starpu_data_request_list_pop_front(local_list);
 		if (r->prefetch)
-			starpu_data_request_list_push_back(new_prefetch_requests, r);
+			_starpu_data_request_list_push_back(new_prefetch_requests, r);
 		else
-			starpu_data_request_list_push_back(new_data_requests, r);
+			_starpu_data_request_list_push_back(new_data_requests, r);
 	}
 
 	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_list_mutex[src_node]);
-	starpu_data_request_list_push_list_front(new_data_requests, data_requests[src_node]);
-	starpu_data_request_list_push_list_front(new_prefetch_requests, prefetch_requests[src_node]);
+	_starpu_data_request_list_push_list_front(new_data_requests, data_requests[src_node]);
+	_starpu_data_request_list_push_list_front(new_prefetch_requests, prefetch_requests[src_node]);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_list_mutex[src_node]);
 
-	starpu_data_request_list_delete(new_data_requests);
-	starpu_data_request_list_delete(new_prefetch_requests);
-	starpu_data_request_list_delete(local_list);
+	_starpu_data_request_list_delete(new_data_requests);
+	_starpu_data_request_list_delete(new_prefetch_requests);
+	_starpu_data_request_list_delete(local_list);
 }
 
 static void _handle_pending_node_data_requests(uint32_t src_node, unsigned force)
 {
 //	_STARPU_DEBUG("_starpu_handle_pending_node_data_requests ...\n");
 //
-	starpu_data_request_list_t new_data_requests_pending = starpu_data_request_list_new();
+	struct _starpu_data_request_list *new_data_requests_pending = _starpu_data_request_list_new();
 
 	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[src_node]);
 
 	/* for all entries of the list */
-	starpu_data_request_list_t local_list = data_requests_pending[src_node];
-	data_requests_pending[src_node] = starpu_data_request_list_new();
+	struct _starpu_data_request_list *local_list = data_requests_pending[src_node];
+	data_requests_pending[src_node] = _starpu_data_request_list_new();
 
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[src_node]);
 
-	while (!starpu_data_request_list_empty(local_list))
+	while (!_starpu_data_request_list_empty(local_list))
 	{
-		starpu_data_request_t r;
-		r = starpu_data_request_list_pop_front(local_list);
+		struct _starpu_data_request *r;
+		r = _starpu_data_request_list_pop_front(local_list);
 
 		starpu_data_handle_t handle = r->handle;
 		
@@ -540,16 +540,16 @@ static void _handle_pending_node_data_requests(uint32_t src_node, unsigned force
 				_starpu_spin_unlock(&r->lock);
 				_starpu_spin_unlock(&handle->header_lock);
 
-				starpu_data_request_list_push_back(new_data_requests_pending, r);
+				_starpu_data_request_list_push_back(new_data_requests_pending, r);
 			}
 		}
 	}
 	_STARPU_PTHREAD_MUTEX_LOCK(&data_requests_pending_list_mutex[src_node]);
-	starpu_data_request_list_push_list_back(data_requests_pending[src_node], new_data_requests_pending);
+	_starpu_data_request_list_push_list_back(data_requests_pending[src_node], new_data_requests_pending);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&data_requests_pending_list_mutex[src_node]);
 
-	starpu_data_request_list_delete(local_list);
-	starpu_data_request_list_delete(new_data_requests_pending);
+	_starpu_data_request_list_delete(local_list);
+	_starpu_data_request_list_delete(new_data_requests_pending);
 }
 
 void _starpu_handle_pending_node_data_requests(uint32_t src_node)
@@ -565,14 +565,14 @@ void _starpu_handle_all_pending_node_data_requests(uint32_t src_node)
 int _starpu_check_that_no_data_request_exists(uint32_t node)
 {
 	/* XXX lock that !!! that's a quick'n'dirty test */
-	int no_request = starpu_data_request_list_empty(data_requests[node]);
-	int no_pending = starpu_data_request_list_empty(data_requests_pending[node]);
+	int no_request = _starpu_data_request_list_empty(data_requests[node]);
+	int no_pending = _starpu_data_request_list_empty(data_requests_pending[node]);
 
 	return (no_request && no_pending);
 }
 
 
-void _starpu_update_prefetch_status(starpu_data_request_t r){
+void _starpu_update_prefetch_status(struct _starpu_data_request *r){
 	STARPU_ASSERT(r->prefetch > 0);
 	r->prefetch=0;
 	
@@ -580,7 +580,7 @@ void _starpu_update_prefetch_status(starpu_data_request_t r){
 	unsigned chained_req;
 	for (chained_req = 0; chained_req < r->next_req_count; chained_req++)
 	{
-		struct starpu_data_request_s *next_req = r->next_req[chained_req];
+		struct _starpu_data_request *next_req = r->next_req[chained_req];
 		if (next_req->prefetch)
 			_starpu_update_prefetch_status(next_req);
 	}
@@ -589,16 +589,16 @@ void _starpu_update_prefetch_status(starpu_data_request_t r){
 	
 	/* The request can be in a different list (handling request or the temp list)
 	 * we have to check that it is really in the prefetch list. */
-	starpu_data_request_t r_iter;
-	for (r_iter = starpu_data_request_list_begin(prefetch_requests[r->handling_node]);
-	     r_iter != starpu_data_request_list_end(prefetch_requests[r->handling_node]);
-	     r_iter = starpu_data_request_list_next(r_iter))
+	struct _starpu_data_request *r_iter;
+	for (r_iter = _starpu_data_request_list_begin(prefetch_requests[r->handling_node]);
+	     r_iter != _starpu_data_request_list_end(prefetch_requests[r->handling_node]);
+	     r_iter = _starpu_data_request_list_next(r_iter))
 	{
 		
 		if (r==r_iter)
 		{
-			starpu_data_request_list_erase(prefetch_requests[r->handling_node],r);
-			starpu_data_request_list_push_front(data_requests[r->handling_node],r);
+			_starpu_data_request_list_erase(prefetch_requests[r->handling_node],r);
+			_starpu_data_request_list_push_front(data_requests[r->handling_node],r);
 			break;
 		}		
 	}
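
Most of this file follows the same drain pattern, seen above in _starpu_handle_node_data_requests(): swap the shared per-node list for a fresh one while holding its mutex, process the detached list without any lock held, and splice whatever could not be served back onto the shared list. A condensed sketch of that pattern using the renamed helpers (drain_requests and handle_one are placeholders for the real functions, and the _STARPU_PTHREAD_* wrappers are replaced by plain pthread calls):

    static void drain_requests(unsigned node)
    {
            struct _starpu_data_request_list *local_list, *leftovers;

            /* detach the shared list under the mutex */
            pthread_mutex_lock(&data_requests_list_mutex[node]);
            local_list = data_requests[node];
            data_requests[node] = _starpu_data_request_list_new();
            pthread_mutex_unlock(&data_requests_list_mutex[node]);

            leftovers = _starpu_data_request_list_new();

            /* serve the detached requests without holding the mutex */
            while (!_starpu_data_request_list_empty(local_list))
            {
                    struct _starpu_data_request *r = _starpu_data_request_list_pop_front(local_list);
                    if (handle_one(r) == -ENOMEM)   /* could not be served yet */
                            _starpu_data_request_list_push_back(leftovers, r);
            }

            /* put unserved requests back in front of the shared list */
            pthread_mutex_lock(&data_requests_list_mutex[node]);
            _starpu_data_request_list_push_list_front(leftovers, data_requests[node]);
            pthread_mutex_unlock(&data_requests_list_mutex[node]);

            _starpu_data_request_list_delete(leftovers);
            _starpu_data_request_list_delete(local_list);
    }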

+ 20 - 19
src/datawizard/data_request.h

@@ -24,7 +24,7 @@
 #include <common/list.h>
 #include <common/starpu_spinlock.h>
 
-struct starpu_data_replicate_s;
+struct _starpu_data_replicate;
 
 struct _starpu_callback_list {
 	void (*callback_func)(void *);
@@ -32,13 +32,13 @@ struct _starpu_callback_list {
 	struct _starpu_callback_list *next;
 };
 
-LIST_TYPE(starpu_data_request,
+LIST_TYPE(_starpu_data_request,
 	struct _starpu_spinlock lock;
 	unsigned refcnt;
 
 	starpu_data_handle_t handle;
-	struct starpu_data_replicate_s *src_replicate;
-	struct starpu_data_replicate_s *dst_replicate;
+	struct _starpu_data_replicate *src_replicate;
+	struct _starpu_data_replicate *dst_replicate;
 
 	uint32_t handling_node;
 
@@ -55,7 +55,7 @@ LIST_TYPE(starpu_data_request,
 	unsigned ndeps;
 
 	/* in case we have a chain of request (eg. for nvidia multi-GPU) */
-	struct starpu_data_request_s *next_req[STARPU_MAXNODES];
+	struct _starpu_data_request *next_req[STARPU_MAXNODES];
 	/* who should perform the next request ? */
 	unsigned next_req_count;
 
@@ -68,7 +68,7 @@ LIST_TYPE(starpu_data_request,
 
 /* Everyone that wants to access some piece of data will post a request.
  * Not only StarPU internals, but also the application may put such requests */
-LIST_TYPE(starpu_data_requester,
+LIST_TYPE(_starpu_data_requester,
 	/* what kind of access is requested ? */
 	enum starpu_access_mode mode;
 
@@ -76,7 +76,7 @@ LIST_TYPE(starpu_data_requester,
 	unsigned is_requested_by_codelet;
 
 	/* in case this is a codelet that will do the access */
-	struct starpu_job_s *j;
+	struct _starpu_job *j;
 	unsigned buffer_index;
 
 	/* if this is more complicated ... (eg. application request) 
@@ -88,7 +88,7 @@ LIST_TYPE(starpu_data_requester,
 
 void _starpu_init_data_request_lists(void);
 void _starpu_deinit_data_request_lists(void);
-void _starpu_post_data_request(starpu_data_request_t r, uint32_t handling_node);
+void _starpu_post_data_request(struct _starpu_data_request *r, uint32_t handling_node);
 void _starpu_handle_node_data_requests(uint32_t src_node, unsigned may_alloc);
 void _starpu_handle_node_prefetch_requests(uint32_t src_node, unsigned may_alloc);
 
@@ -97,18 +97,19 @@ void _starpu_handle_all_pending_node_data_requests(uint32_t src_node);
 
 int _starpu_check_that_no_data_request_exists(uint32_t node);
 
-starpu_data_request_t _starpu_create_data_request(starpu_data_handle_t handle,
-				struct starpu_data_replicate_s *src_replicate,
-				struct starpu_data_replicate_s *dst_replicate,
-				uint32_t handling_node,
-				enum starpu_access_mode mode,
-				unsigned ndeps,
-				unsigned is_prefetch);
+struct _starpu_data_request *_starpu_create_data_request(starpu_data_handle_t handle,
+							 struct _starpu_data_replicate *src_replicate,
+							 struct _starpu_data_replicate *dst_replicate,
+							 uint32_t handling_node,
+							 enum starpu_access_mode mode,
+							 unsigned ndeps,
+							 unsigned is_prefetch);
 
-int _starpu_wait_data_request_completion(starpu_data_request_t r, unsigned may_alloc);
+int _starpu_wait_data_request_completion(struct _starpu_data_request *r, unsigned may_alloc);
 
-void _starpu_data_request_append_callback(starpu_data_request_t r,
-			void (*callback_func)(void *), void *callback_arg);
+void _starpu_data_request_append_callback(struct _starpu_data_request *r,
+					  void (*callback_func)(void *),
+					  void *callback_arg);
 
-void _starpu_update_prefetch_status(starpu_data_request_t r);
+void _starpu_update_prefetch_status(struct _starpu_data_request *r);
 #endif // __DATA_REQUEST_H__
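
For reference, the begin/end/next helpers generated for the list types declared above give the usual iteration idiom; _starpu_update_prefetch_status() in data_request.c uses exactly this shape. In isolation (walk_requests and its argument are placeholders):

    static void walk_requests(struct _starpu_data_request_list *list)
    {
            struct _starpu_data_request *r;
            for (r = _starpu_data_request_list_begin(list);
                 r != _starpu_data_request_list_end(list);
                 r = _starpu_data_request_list_next(r))
            {
                    /* inspect r; to erase while walking, fetch the next element
                     * before erasing, as memalloc.c does with next_mc */
            }
    }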

+ 8 - 8
src/datawizard/filters.c

@@ -152,8 +152,8 @@ void starpu_data_partition(starpu_data_handle_t initial_handle, struct starpu_da
 		child->is_readonly = initial_handle->is_readonly;
 
 		/* initialize the chunk lock */
-		child->req_list = starpu_data_requester_list_new();
-		child->reduction_req_list = starpu_data_requester_list_new();
+		child->req_list = _starpu_data_requester_list_new();
+		child->reduction_req_list = _starpu_data_requester_list_new();
 		child->refcnt = 0;
 		child->busy_count = 0;
 		child->busy_waiting = 0;
@@ -185,8 +185,8 @@ void starpu_data_partition(starpu_data_handle_t initial_handle, struct starpu_da
 		unsigned node;
 		for (node = 0; node < STARPU_MAXNODES; node++)
 		{
-			struct starpu_data_replicate_s *initial_replicate; 
-			struct starpu_data_replicate_s *child_replicate;
+			struct _starpu_data_replicate *initial_replicate; 
+			struct _starpu_data_replicate *child_replicate;
 
 			initial_replicate = &initial_handle->per_node[node];
 			child_replicate = &child->per_node[node];
@@ -208,7 +208,7 @@ void starpu_data_partition(starpu_data_handle_t initial_handle, struct starpu_da
 		unsigned worker;
 		for (worker = 0; worker < nworkers; worker++)
 		{
-			struct starpu_data_replicate_s *child_replicate;
+			struct _starpu_data_replicate *child_replicate;
 			child_replicate = &child->per_worker[worker];
 			
 			child_replicate->state = STARPU_INVALID;
@@ -271,8 +271,8 @@ void starpu_data_unpartition(starpu_data_handle_t root_handle, uint32_t gatherin
 		STARPU_ASSERT(ret == 0); 
 
 		_starpu_data_free_interfaces(&root_handle->children[child]);
-		starpu_data_requester_list_delete(child_handle->req_list);
-		starpu_data_requester_list_delete(child_handle->reduction_req_list);
+		_starpu_data_requester_list_delete(child_handle->req_list);
+		_starpu_data_requester_list_delete(child_handle->reduction_req_list);
 	}
 
 	/* the gathering_node should now have a valid copy of all the children.
@@ -297,7 +297,7 @@ void starpu_data_unpartition(starpu_data_handle_t root_handle, uint32_t gatherin
 
 		for (child = 0; child < root_handle->nchildren; child++)
 		{
-			struct starpu_data_replicate_s *local = &root_handle->children[child].per_node[node];
+			struct _starpu_data_replicate *local = &root_handle->children[child].per_node[node];
 
 			if (local->state == STARPU_INVALID) {
 				/* One of the bits is missing */

+ 2 - 2
src/datawizard/footprint.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -18,7 +18,7 @@
 #include <datawizard/footprint.h>
 #include <common/hash.h>
 
-uint32_t _starpu_compute_buffers_footprint(starpu_job_t j)
+uint32_t _starpu_compute_buffers_footprint(struct _starpu_job *j)
 {
 	if (j->footprint_is_computed)
 		return j->footprint;

+ 2 - 2
src/datawizard/footprint.h

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -24,7 +24,7 @@
 
 /* Compute the footprint that characterizes the job and cache it into the job
  * structure. */
-uint32_t _starpu_compute_buffers_footprint(struct starpu_job_s *j);
+uint32_t _starpu_compute_buffers_footprint(struct _starpu_job *j);
 
 /* Compute the footprint that characterizes the layout of the data handle. */
 uint32_t _starpu_compute_data_footprint(starpu_data_handle_t handle);

+ 12 - 12
src/datawizard/interfaces/data_interface.c

@@ -101,7 +101,7 @@ static void _starpu_register_new_data(starpu_data_handle_t handle,
 	STARPU_ASSERT(handle);
 
 	/* initialize the new lock */
-	handle->req_list = starpu_data_requester_list_new();
+	handle->req_list = _starpu_data_requester_list_new();
 	handle->refcnt = 0;
 	handle->busy_count = 0;
 	handle->busy_waiting = 0;
@@ -138,7 +138,7 @@ static void _starpu_register_new_data(starpu_data_handle_t handle,
 	handle->init_cl = NULL;
 
 	handle->reduction_refcnt = 0;
-	handle->reduction_req_list = starpu_data_requester_list_new();
+	handle->reduction_req_list = _starpu_data_requester_list_new();
 
 #ifdef STARPU_USE_FXT
 	handle->last_submitted_ghost_writer_id_is_valid = 0;
@@ -160,7 +160,7 @@ static void _starpu_register_new_data(starpu_data_handle_t handle,
 	unsigned node;
 	for (node = 0; node < STARPU_MAXNODES; node++)
 	{
-		struct starpu_data_replicate_s *replicate;
+		struct _starpu_data_replicate *replicate;
 		replicate = &handle->per_node[node];
 		
 		replicate->memory_node = node;
@@ -184,7 +184,7 @@ static void _starpu_register_new_data(starpu_data_handle_t handle,
 	unsigned nworkers = starpu_worker_get_count();
 	for (worker = 0; worker < nworkers; worker++)
 	{
-		struct starpu_data_replicate_s *replicate;
+		struct _starpu_data_replicate *replicate;
 		replicate = &handle->per_worker[worker];
 		replicate->allocated = 0;
 		replicate->automatically_allocated = 0;
@@ -238,7 +238,7 @@ static starpu_data_handle_t _starpu_data_handle_allocate(struct starpu_data_inte
 		handle->stats_invalidated[node]=0;
 #endif
 
-		struct starpu_data_replicate_s *replicate;
+		struct _starpu_data_replicate *replicate;
 		replicate = &handle->per_node[node];
 		/* relaxed_coherency = 0 */
 
@@ -252,7 +252,7 @@ static starpu_data_handle_t _starpu_data_handle_allocate(struct starpu_data_inte
 	unsigned nworkers = starpu_worker_get_count();
 	for (worker = 0; worker < nworkers; worker++)
 	{
-		struct starpu_data_replicate_s *replicate;
+		struct _starpu_data_replicate *replicate;
 		replicate = &handle->per_worker[worker];
 
 		replicate->handle = handle;
@@ -389,7 +389,7 @@ static void _starpu_data_unregister_fetch_data_callback(void *_arg)
 
 	STARPU_ASSERT(handle);
 
-	struct starpu_data_replicate_s *replicate = &handle->per_node[arg->memory_node];
+	struct _starpu_data_replicate *replicate = &handle->per_node[arg->memory_node];
 
 	ret = _starpu_fetch_data_on_node(handle, replicate, STARPU_R, 0, NULL, NULL);
 	STARPU_ASSERT(!ret);
@@ -428,7 +428,7 @@ static void _starpu_data_unregister(starpu_data_handle_t handle, unsigned cohere
 					_starpu_data_unregister_fetch_data_callback, &arg))
 			{
 				/* no one has locked this data yet, so we proceed immediately */
-				struct starpu_data_replicate_s *home_replicate = &handle->per_node[home_node];
+				struct _starpu_data_replicate *home_replicate = &handle->per_node[home_node];
 				int ret = _starpu_fetch_data_on_node(handle, home_replicate, STARPU_R, 0, NULL, NULL);
 				STARPU_ASSERT(!ret);
 			}
@@ -465,7 +465,7 @@ static void _starpu_data_unregister(starpu_data_handle_t handle, unsigned cohere
 	unsigned node;
 	for (node = 0; node < STARPU_MAXNODES; node++)
 	{
-		struct starpu_data_replicate_s *local = &handle->per_node[node];
+		struct _starpu_data_replicate *local = &handle->per_node[node];
 
 		if (local->allocated && local->automatically_allocated){
 			/* free the data copy in a lazy fashion */
@@ -473,8 +473,8 @@ static void _starpu_data_unregister(starpu_data_handle_t handle, unsigned cohere
 		}
 	}
 
-	starpu_data_requester_list_delete(handle->req_list);
-	starpu_data_requester_list_delete(handle->reduction_req_list);
+	_starpu_data_requester_list_delete(handle->req_list);
+	_starpu_data_requester_list_delete(handle->reduction_req_list);
 
 	free(handle);
 }
@@ -500,7 +500,7 @@ void starpu_data_invalidate(starpu_data_handle_t handle)
 	unsigned node;
 	for (node = 0; node < STARPU_MAXNODES; node++)
 	{
-		struct starpu_data_replicate_s *local = &handle->per_node[node];
+		struct _starpu_data_replicate *local = &handle->per_node[node];
 
 		if (local->allocated && local->automatically_allocated){
 			/* free the data copy in a lazy fashion */

+ 76 - 76
src/datawizard/memalloc.c

@@ -27,13 +27,13 @@ static pthread_rwlock_t mc_rwlock[STARPU_MAXNODES];
 static pthread_rwlock_t lru_rwlock[STARPU_MAXNODES];
 
 /* Last Recently used memory chunkgs */
-static starpu_mem_chunk_lru_list_t starpu_lru_list[STARPU_MAXNODES];
+static struct _starpu_mem_chunk_lru_list *starpu_lru_list[STARPU_MAXNODES];
 
 /* Potentially in use memory chunks */
-static starpu_mem_chunk_list_t mc_list[STARPU_MAXNODES];
+static struct _starpu_mem_chunk_list *mc_list[STARPU_MAXNODES];
 
 /* Explicitly caches memory chunks that can be reused */
-static starpu_mem_chunk_list_t memchunk_cache[STARPU_MAXNODES];
+static struct _starpu_mem_chunk_list *memchunk_cache[STARPU_MAXNODES];
 
 /* When reclaiming memory to allocate, we reclaim MAX(what_is_to_reclaim_on_device, data_size_coefficient*data_size) */
 const unsigned starpu_memstrategy_data_size_coefficient=2;
@@ -47,9 +47,9 @@ void _starpu_init_mem_chunk_lists(void)
 	{
 		_STARPU_PTHREAD_RWLOCK_INIT(&mc_rwlock[i], NULL);
 		_STARPU_PTHREAD_RWLOCK_INIT(&lru_rwlock[i], NULL);
-		mc_list[i] = starpu_mem_chunk_list_new();
-		starpu_lru_list[i] = starpu_mem_chunk_lru_list_new();
-		memchunk_cache[i] = starpu_mem_chunk_list_new();
+		mc_list[i] = _starpu_mem_chunk_list_new();
+		starpu_lru_list[i] = _starpu_mem_chunk_lru_list_new();
+		memchunk_cache[i] = _starpu_mem_chunk_list_new();
 	}
 }
 
@@ -58,9 +58,9 @@ void _starpu_deinit_mem_chunk_lists(void)
 	unsigned i;
 	for (i = 0; i < STARPU_MAXNODES; i++)
 	{
-		starpu_mem_chunk_list_delete(mc_list[i]);
-		starpu_mem_chunk_list_delete(memchunk_cache[i]);
-		starpu_mem_chunk_lru_list_delete(starpu_lru_list[i]);
+		_starpu_mem_chunk_list_delete(mc_list[i]);
+		_starpu_mem_chunk_list_delete(memchunk_cache[i]);
+		_starpu_mem_chunk_lru_list_delete(starpu_lru_list[i]);
 	}
 }
 
@@ -130,7 +130,7 @@ static unsigned may_free_subtree(starpu_data_handle_t handle, unsigned node)
 }
 
 static void transfer_subtree_to_node(starpu_data_handle_t handle, unsigned src_node,
-						unsigned dst_node)
+				     unsigned dst_node)
 {
 	unsigned i;
 	unsigned last = 0;
@@ -139,8 +139,8 @@ static void transfer_subtree_to_node(starpu_data_handle_t handle, unsigned src_n
 
 	if (handle->nchildren == 0)
 	{
-		struct starpu_data_replicate_s *src_replicate = &handle->per_node[src_node];
-		struct starpu_data_replicate_s *dst_replicate = &handle->per_node[dst_node];
+		struct _starpu_data_replicate *src_replicate = &handle->per_node[src_node];
+		struct _starpu_data_replicate *dst_replicate = &handle->per_node[dst_node];
 
 		/* this is a leaf */
 		switch(src_replicate->state) {
@@ -205,7 +205,7 @@ static void transfer_subtree_to_node(starpu_data_handle_t handle, unsigned src_n
 	}
 }
 
-static size_t free_memory_on_node(starpu_mem_chunk_t mc, uint32_t node)
+static size_t free_memory_on_node(struct _starpu_mem_chunk *mc, uint32_t node)
 {
 	size_t freed = 0;
 
@@ -218,7 +218,7 @@ static size_t free_memory_on_node(starpu_mem_chunk_t mc, uint32_t node)
 	 * anymore ? */
 	unsigned data_was_deleted = mc->data_was_deleted;
 
-	struct starpu_data_replicate_s *replicate = mc->replicate;
+	struct _starpu_data_replicate *replicate = mc->replicate;
 
 //	while (_starpu_spin_trylock(&handle->header_lock))
 //		_starpu_datawizard_progress(_starpu_get_local_memory_node());
@@ -269,7 +269,7 @@ static size_t free_memory_on_node(starpu_mem_chunk_t mc, uint32_t node)
 
 
 
-static size_t do_free_mem_chunk(starpu_mem_chunk_t mc, unsigned node)
+static size_t do_free_mem_chunk(struct _starpu_mem_chunk *mc, unsigned node)
 {
 	size_t size;
 
@@ -279,17 +279,17 @@ static size_t do_free_mem_chunk(starpu_mem_chunk_t mc, unsigned node)
 	size = free_memory_on_node(mc, node);
 
 	/* remove the mem_chunk from the list */
-	starpu_mem_chunk_list_erase(mc_list[node], mc);
+	_starpu_mem_chunk_list_erase(mc_list[node], mc);
 
 	free(mc->chunk_interface);
-	starpu_mem_chunk_delete(mc);
+	_starpu_mem_chunk_delete(mc);
 
 	return size;
 }
 
 /* This function is called for memory chunks that are possibly in used (ie. not
  * in the cache). They should therefore still be associated to a handle. */
-static size_t try_to_free_mem_chunk(starpu_mem_chunk_t mc, unsigned node)
+static size_t try_to_free_mem_chunk(struct _starpu_mem_chunk *mc, unsigned node)
 {
 	size_t freed = 0;
 
@@ -355,7 +355,7 @@ static size_t try_to_free_mem_chunk(starpu_mem_chunk_t mc, unsigned node)
 /* We assume that mc_rwlock[node] is taken. is_already_in_mc_list indicates
  * that the mc is already in the list of buffers that are possibly used, and
  * therefore not in the cache. */
-static void reuse_mem_chunk(unsigned node, struct starpu_data_replicate_s *new_replicate, starpu_mem_chunk_t mc, unsigned is_already_in_mc_list)
+static void reuse_mem_chunk(unsigned node, struct _starpu_data_replicate *new_replicate, struct _starpu_mem_chunk *mc, unsigned is_already_in_mc_list)
 {
 	starpu_data_handle_t old_data;
 	old_data = mc->data;
@@ -366,10 +366,10 @@ static void reuse_mem_chunk(unsigned node, struct starpu_data_replicate_s *new_r
 
 	if (!is_already_in_mc_list)
 	{
-		starpu_mem_chunk_list_erase(memchunk_cache[node], mc);
+		_starpu_mem_chunk_list_erase(memchunk_cache[node], mc);
 	}
 
-	struct starpu_data_replicate_s *old_replicate = mc->replicate;
+	struct _starpu_data_replicate *old_replicate = mc->replicate;
 	old_replicate->allocated = 0;
 	old_replicate->automatically_allocated = 0;
 	old_replicate->initialized = 0;
@@ -390,11 +390,11 @@ static void reuse_mem_chunk(unsigned node, struct starpu_data_replicate_s *new_r
 	/* reinsert the mem chunk in the list of active memory chunks */
 	if (!is_already_in_mc_list)
 	{
-		starpu_mem_chunk_list_push_front(mc_list[node], mc);
+		_starpu_mem_chunk_list_push_front(mc_list[node], mc);
 	}
 }
 
-static unsigned try_to_reuse_mem_chunk(starpu_mem_chunk_t mc, unsigned node, starpu_data_handle_t new_data, unsigned is_already_in_mc_list)
+static unsigned try_to_reuse_mem_chunk(struct _starpu_mem_chunk *mc, unsigned node, starpu_data_handle_t new_data, unsigned is_already_in_mc_list)
 {
 	unsigned success = 0;
 
@@ -438,15 +438,15 @@ static int _starpu_data_interface_compare(void *data_interface_a, struct starpu_
 }
 
 /* This function must be called with mc_rwlock[node] taken in write mode */
-static starpu_mem_chunk_t _starpu_memchunk_cache_lookup_locked(uint32_t node, starpu_data_handle_t handle)
+static struct _starpu_mem_chunk *_starpu_memchunk_cache_lookup_locked(uint32_t node, starpu_data_handle_t handle)
 {
 	uint32_t footprint = _starpu_compute_data_footprint(handle);
 
 	/* go through all buffers in the cache */
-	starpu_mem_chunk_t mc;
-	for (mc = starpu_mem_chunk_list_begin(memchunk_cache[node]);
-	     mc != starpu_mem_chunk_list_end(memchunk_cache[node]);
-	     mc = starpu_mem_chunk_list_next(mc))
+	struct _starpu_mem_chunk *mc;
+	for (mc = _starpu_mem_chunk_list_begin(memchunk_cache[node]);
+	     mc != _starpu_mem_chunk_list_end(memchunk_cache[node]);
+	     mc = _starpu_mem_chunk_list_next(mc))
 	{
 		if (mc->footprint == footprint)
 		{
@@ -457,7 +457,7 @@ static starpu_mem_chunk_t _starpu_memchunk_cache_lookup_locked(uint32_t node, st
 			/* Cache hit */
 
 			/* Remove from the cache */
-			starpu_mem_chunk_list_erase(memchunk_cache[node], mc);
+			_starpu_mem_chunk_list_erase(memchunk_cache[node], mc);
 			return mc;
 		}
 	}
@@ -471,7 +471,7 @@ static starpu_mem_chunk_t _starpu_memchunk_cache_lookup_locked(uint32_t node, st
  * mc_rwlock[node] taken in write mode. */
 static unsigned try_to_find_reusable_mem_chunk(unsigned node, starpu_data_handle_t data, uint32_t footprint)
 {
-	starpu_mem_chunk_t mc, next_mc;
+	struct _starpu_mem_chunk *mc, *next_mc;
 
 	/* go through all buffers in the cache */
 	mc = _starpu_memchunk_cache_lookup_locked(node, data);
@@ -483,14 +483,14 @@ static unsigned try_to_find_reusable_mem_chunk(unsigned node, starpu_data_handle
 	}
 
 	/* now look for some non essential data in the active list */
-	for (mc = starpu_mem_chunk_list_begin(mc_list[node]);
-	     mc != starpu_mem_chunk_list_end(mc_list[node]);
+	for (mc = _starpu_mem_chunk_list_begin(mc_list[node]);
+	     mc != _starpu_mem_chunk_list_end(mc_list[node]);
 	     mc = next_mc)
 	{
 		/* there is a risk that the memory chunk is freed before next
 		 * iteration starts: so we compute the next element of the list
 		 * now */
-		next_mc = starpu_mem_chunk_list_next(mc);
+		next_mc = _starpu_mem_chunk_list_next(mc);
 
 		if (mc->data->is_not_important && (mc->footprint == footprint))
 		{
@@ -510,22 +510,22 @@ static unsigned try_to_find_reusable_mem_chunk(unsigned node, starpu_data_handle
  */
 static size_t flush_memchunk_cache(uint32_t node, size_t reclaim)
 {
-	starpu_mem_chunk_t mc, next_mc;
+	struct _starpu_mem_chunk *mc, *next_mc;
 
 	size_t freed = 0;
 
-	for (mc = starpu_mem_chunk_list_begin(memchunk_cache[node]);
-	     mc != starpu_mem_chunk_list_end(memchunk_cache[node]);
+	for (mc = _starpu_mem_chunk_list_begin(memchunk_cache[node]);
+	     mc != _starpu_mem_chunk_list_end(memchunk_cache[node]);
 	     mc = next_mc)
 	{
-		next_mc = starpu_mem_chunk_list_next(mc);
+		next_mc = _starpu_mem_chunk_list_next(mc);
 
 		freed += free_memory_on_node(mc, node);
 
-		starpu_mem_chunk_list_erase(memchunk_cache[node], mc);
+		_starpu_mem_chunk_list_erase(memchunk_cache[node], mc);
 
 		free(mc->chunk_interface);
-		starpu_mem_chunk_delete(mc);
+		_starpu_mem_chunk_delete(mc);
 		if (reclaim && freed>reclaim)
 			break;
 	}
@@ -543,16 +543,16 @@ static size_t free_potentially_in_use_mc(uint32_t node, unsigned force, size_t r
 {
 	size_t freed = 0;
 
-	starpu_mem_chunk_t mc, next_mc;
+	struct _starpu_mem_chunk *mc, *next_mc;
 
-	for (mc = starpu_mem_chunk_list_begin(mc_list[node]);
-	     mc != starpu_mem_chunk_list_end(mc_list[node]);
+	for (mc = _starpu_mem_chunk_list_begin(mc_list[node]);
+	     mc != _starpu_mem_chunk_list_end(mc_list[node]);
 	     mc = next_mc)
 	{
 		/* there is a risk that the memory chunk is freed
 		   before next iteration starts: so we compute the next
 		   element of the list now */
-		next_mc = starpu_mem_chunk_list_next(mc);
+		next_mc = _starpu_mem_chunk_list_next(mc);
 
 		if (!force)
 		{
@@ -603,9 +603,9 @@ size_t _starpu_free_all_automatically_allocated_buffers(uint32_t node)
 	return reclaim_memory_generic(node, 1, 0);
 }
 
-static starpu_mem_chunk_t _starpu_memchunk_init(struct starpu_data_replicate_s *replicate, size_t size, size_t interface_size, unsigned automatically_allocated)
+static struct _starpu_mem_chunk *_starpu_memchunk_init(struct _starpu_data_replicate *replicate, size_t size, size_t interface_size, unsigned automatically_allocated)
 {
-	starpu_mem_chunk_t mc = starpu_mem_chunk_new();
+	struct _starpu_mem_chunk *mc = _starpu_mem_chunk_new();
 	starpu_data_handle_t handle = replicate->handle;
 
 	STARPU_ASSERT(handle);
@@ -629,11 +629,11 @@ static starpu_mem_chunk_t _starpu_memchunk_init(struct starpu_data_replicate_s *
 	return mc;
 }
 
-static void register_mem_chunk(struct starpu_data_replicate_s *replicate, size_t size, unsigned automatically_allocated)
+static void register_mem_chunk(struct _starpu_data_replicate *replicate, size_t size, unsigned automatically_allocated)
 {
 	unsigned dst_node = replicate->memory_node;
 
-	starpu_mem_chunk_t mc;
+	struct _starpu_mem_chunk *mc;
 
 	/* the interface was already filled by ops->allocate_data_on_node */
 	size_t interface_size = replicate->handle->ops->interface_size;
@@ -643,7 +643,7 @@ static void register_mem_chunk(struct starpu_data_replicate_s *replicate, size_t
 
 	_STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[dst_node]);
 
-	starpu_mem_chunk_list_push_back(mc_list[dst_node], mc);
+	_starpu_mem_chunk_list_push_back(mc_list[dst_node], mc);
 
 	_STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[dst_node]);
 }
@@ -656,22 +656,22 @@ void _starpu_request_mem_chunk_removal(starpu_data_handle_t handle, unsigned nod
 	_STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
 
 	/* iterate over the list of memory chunks and remove the entry */
-	starpu_mem_chunk_t mc, next_mc;
-	for (mc = starpu_mem_chunk_list_begin(mc_list[node]);
-	     mc != starpu_mem_chunk_list_end(mc_list[node]);
+	struct _starpu_mem_chunk *mc, *next_mc;
+	for (mc = _starpu_mem_chunk_list_begin(mc_list[node]);
+	     mc != _starpu_mem_chunk_list_end(mc_list[node]);
 	     mc = next_mc)
 	{
-		next_mc = starpu_mem_chunk_list_next(mc);
+		next_mc = _starpu_mem_chunk_list_next(mc);
 
 		if (mc->data == handle) {
 			/* we found the data */
 			mc->data_was_deleted = 1;
 
 			/* remove it from the main list */
-			starpu_mem_chunk_list_erase(mc_list[node], mc);
+			_starpu_mem_chunk_list_erase(mc_list[node], mc);
 
 			/* put it in the list of buffers to be removed */
-			starpu_mem_chunk_list_push_front(memchunk_cache[node], mc);
+			_starpu_mem_chunk_list_push_front(memchunk_cache[node], mc);
 
 			/* Note that we do not stop here because there can be
 			 * multiple replicates associated to the same handle on
@@ -730,7 +730,7 @@ static size_t _starpu_get_global_mem_size(int dst_node)
  *
  */
 
-static ssize_t _starpu_allocate_interface(starpu_data_handle_t handle, struct starpu_data_replicate_s *replicate, uint32_t dst_node, unsigned is_prefetch)
+static ssize_t _starpu_allocate_interface(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, uint32_t dst_node, unsigned is_prefetch)
 {
 	unsigned attempts = 0;
 	ssize_t allocated_memory;
@@ -811,7 +811,7 @@ static ssize_t _starpu_allocate_interface(starpu_data_handle_t handle, struct st
 	return allocated_memory;
 }
 
-int _starpu_allocate_memory_on_node(starpu_data_handle_t handle, struct starpu_data_replicate_s *replicate, unsigned is_prefetch)
+int _starpu_allocate_memory_on_node(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, unsigned is_prefetch)
 {
 	ssize_t allocated_memory;
 
@@ -852,28 +852,28 @@ unsigned starpu_data_test_if_allocated_on_node(starpu_data_handle_t handle, uint
 	return handle->per_node[memory_node].allocated;
 }
 
-void _starpu_memchunk_recently_used(starpu_mem_chunk_t mc, unsigned node)
+void _starpu_memchunk_recently_used(struct _starpu_mem_chunk *mc, unsigned node)
 {
 	_STARPU_PTHREAD_RWLOCK_WRLOCK(&lru_rwlock[node]);
-	starpu_mem_chunk_lru_t mc_lru=starpu_mem_chunk_lru_new();
+	struct _starpu_mem_chunk_lru *mc_lru=_starpu_mem_chunk_lru_new();
 	mc_lru->mc=mc;
-	starpu_mem_chunk_lru_list_push_front(starpu_lru_list[node],mc_lru);
+	_starpu_mem_chunk_lru_list_push_front(starpu_lru_list[node],mc_lru);
 	_STARPU_PTHREAD_RWLOCK_UNLOCK(&lru_rwlock[node]);
 }
 
 /* The mc_rwlock[node] rw-lock should be taken prior to calling this function.*/
-static void starpu_memchunk_recently_used_move(starpu_mem_chunk_t mc, unsigned node)
+static void _starpu_memchunk_recently_used_move(struct _starpu_mem_chunk *mc, unsigned node)
 {
 	/* XXX Sometimes the memchunk is not in the list... */
-	starpu_mem_chunk_t mc_iter;
-	for (mc_iter = starpu_mem_chunk_list_begin(mc_list[node]);
-	     mc_iter != starpu_mem_chunk_list_end(mc_list[node]);
-	     mc_iter = starpu_mem_chunk_list_next(mc_iter) )
+	struct _starpu_mem_chunk *mc_iter;
+	for (mc_iter = _starpu_mem_chunk_list_begin(mc_list[node]);
+	     mc_iter != _starpu_mem_chunk_list_end(mc_list[node]);
+	     mc_iter = _starpu_mem_chunk_list_next(mc_iter) )
 	{
 		if (mc_iter==mc)
 		{
-			starpu_mem_chunk_list_erase(mc_list[node], mc);
-			starpu_mem_chunk_list_push_back(mc_list[node], mc);
+			_starpu_mem_chunk_list_erase(mc_list[node], mc);
+			_starpu_mem_chunk_list_push_back(mc_list[node], mc);
 			return;
 		}
 
@@ -883,12 +883,12 @@ static void starpu_memchunk_recently_used_move(starpu_mem_chunk_t mc, unsigned n
 static void starpu_lru(unsigned node)
 {
 	_STARPU_PTHREAD_RWLOCK_WRLOCK(&lru_rwlock[node]);
-	while (!starpu_mem_chunk_lru_list_empty(starpu_lru_list[node]))
+	while (!_starpu_mem_chunk_lru_list_empty(starpu_lru_list[node]))
 	{
-		starpu_mem_chunk_lru_t mc_lru=starpu_mem_chunk_lru_list_front(starpu_lru_list[node]);
-		starpu_memchunk_recently_used_move(mc_lru->mc, node);
-		starpu_mem_chunk_lru_list_erase(starpu_lru_list[node], mc_lru);
-		starpu_mem_chunk_lru_delete(mc_lru);
+		struct _starpu_mem_chunk_lru *mc_lru=_starpu_mem_chunk_lru_list_front(starpu_lru_list[node]);
+		_starpu_memchunk_recently_used_move(mc_lru->mc, node);
+		_starpu_mem_chunk_lru_list_erase(starpu_lru_list[node], mc_lru);
+		_starpu_mem_chunk_lru_delete(mc_lru);
 	}
 	_STARPU_PTHREAD_RWLOCK_UNLOCK(&lru_rwlock[node]);
 }
@@ -899,16 +899,16 @@ void _starpu_display_data_stats_by_node(int node)
 {
 	_STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
 
-	if (!starpu_mem_chunk_list_empty(mc_list[node]))
+	if (!_starpu_mem_chunk_list_empty(mc_list[node]))
 	{
 		fprintf(stderr, "#-------\n");
 		fprintf(stderr, "Data on Node #%d\n",node);
 
-		starpu_mem_chunk_t mc;
+		struct _starpu_mem_chunk *mc;
 
-		for (mc = starpu_mem_chunk_list_begin(mc_list[node]);
-		     mc != starpu_mem_chunk_list_end(mc_list[node]);
-		     mc = starpu_mem_chunk_list_next(mc))
+		for (mc = _starpu_mem_chunk_list_begin(mc_list[node]);
+		     mc != _starpu_mem_chunk_list_end(mc_list[node]);
+		     mc = _starpu_mem_chunk_list_next(mc))
 		{
 			_starpu_display_data_handle_stats(mc->data);
 		}

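Note on the loops above: flush_memchunk_cache, free_potentially_in_use_mc and _starpu_request_mem_chunk_removal all rely on the same erase-while-iterating idiom, now spelled with the _starpu_mem_chunk_list_* names. A minimal sketch of that idiom, assuming <datawizard/memalloc.h> is included; discard_matching_chunks and should_discard are illustrative placeholders, not StarPU symbols:

#include <datawizard/memalloc.h>

/* Sketch of the erase-while-iterating idiom used throughout memalloc.c:
 * read the successor before the current chunk may be erased or freed,
 * so the traversal never follows a dangling pointer. */
static void discard_matching_chunks(struct _starpu_mem_chunk_list *list)
{
	struct _starpu_mem_chunk *mc, *next_mc;

	for (mc = _starpu_mem_chunk_list_begin(list);
	     mc != _starpu_mem_chunk_list_end(list);
	     mc = next_mc)
	{
		/* fetch the successor first: erasing mc below would otherwise
		 * invalidate _starpu_mem_chunk_list_next(mc) */
		next_mc = _starpu_mem_chunk_list_next(mc);

		if (should_discard(mc))	/* hypothetical predicate */
		{
			_starpu_mem_chunk_list_erase(list, mc);
			_starpu_mem_chunk_delete(mc);
		}
	}
}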
+ 7 - 7
src/datawizard/memalloc.h

@@ -26,9 +26,9 @@
 #include <datawizard/coherency.h>
 #include <datawizard/copy_driver.h>
 
-struct starpu_data_replicate_s;
+struct _starpu_data_replicate;
 
-LIST_TYPE(starpu_mem_chunk,
+LIST_TYPE(_starpu_mem_chunk,
 	starpu_data_handle_t data;
 	size_t size;
 
@@ -48,20 +48,20 @@ LIST_TYPE(starpu_mem_chunk,
 	/* A buffer that is used for SCRATCH or reduction cannnot be used with
 	 * filters. */
 	unsigned relaxed_coherency;
-	struct starpu_data_replicate_s *replicate;
+	struct _starpu_data_replicate *replicate;
 )
 
 /* LRU list */
-LIST_TYPE(starpu_mem_chunk_lru,
-	starpu_mem_chunk_t mc;
+LIST_TYPE(_starpu_mem_chunk_lru,
+	struct _starpu_mem_chunk *mc;
 )
 
 void _starpu_init_mem_chunk_lists(void);
 void _starpu_deinit_mem_chunk_lists(void);
 void _starpu_request_mem_chunk_removal(starpu_data_handle_t handle, unsigned node);
-int _starpu_allocate_memory_on_node(starpu_data_handle_t handle, struct starpu_data_replicate_s *replicate, unsigned is_prefetch);
+int _starpu_allocate_memory_on_node(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, unsigned is_prefetch);
 size_t _starpu_free_all_automatically_allocated_buffers(uint32_t node);
-void _starpu_memchunk_recently_used(starpu_mem_chunk_t mc, unsigned node);
+void _starpu_memchunk_recently_used(struct _starpu_mem_chunk *mc, unsigned node);
 
 void _starpu_display_data_stats_by_node(int node);
 #endif

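For readers following the renaming: the element type, list type and accessor functions used above are all generated by LIST_TYPE in src/common/list.h, so no prototypes appear in this header. The sketch below lists the interface that LIST_TYPE(_starpu_mem_chunk, ...) is expected to generate under the new convention; it is inferred from the call sites in this commit (including the _starpu_job_list_* and _starpu_mem_chunk_lru_list_* uses elsewhere), and the authoritative definitions remain in list.h.

/* Inferred interface generated by LIST_TYPE(_starpu_mem_chunk, ...):
 *   struct _starpu_mem_chunk       -- one element, carrying the declared fields
 *   struct _starpu_mem_chunk_list  -- the list itself
 */
struct _starpu_mem_chunk      *_starpu_mem_chunk_new(void);
void                           _starpu_mem_chunk_delete(struct _starpu_mem_chunk *mc);

struct _starpu_mem_chunk_list *_starpu_mem_chunk_list_new(void);
void                           _starpu_mem_chunk_list_delete(struct _starpu_mem_chunk_list *l);
int                            _starpu_mem_chunk_list_empty(struct _starpu_mem_chunk_list *l);
void                           _starpu_mem_chunk_list_push_front(struct _starpu_mem_chunk_list *l, struct _starpu_mem_chunk *mc);
void                           _starpu_mem_chunk_list_push_back(struct _starpu_mem_chunk_list *l, struct _starpu_mem_chunk *mc);
struct _starpu_mem_chunk      *_starpu_mem_chunk_list_front(struct _starpu_mem_chunk_list *l);
struct _starpu_mem_chunk      *_starpu_mem_chunk_list_pop_front(struct _starpu_mem_chunk_list *l);
struct _starpu_mem_chunk      *_starpu_mem_chunk_list_pop_back(struct _starpu_mem_chunk_list *l);
void                           _starpu_mem_chunk_list_erase(struct _starpu_mem_chunk_list *l, struct _starpu_mem_chunk *mc);
struct _starpu_mem_chunk      *_starpu_mem_chunk_list_begin(struct _starpu_mem_chunk_list *l);
struct _starpu_mem_chunk      *_starpu_mem_chunk_list_end(struct _starpu_mem_chunk_list *l);
struct _starpu_mem_chunk      *_starpu_mem_chunk_list_next(struct _starpu_mem_chunk *mc);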
+ 5 - 5
src/datawizard/reduction.c

@@ -41,7 +41,7 @@ void starpu_data_set_reduction_methods(starpu_data_handle_t handle,
 	_starpu_spin_unlock(&handle->header_lock);
 }
 
-void _starpu_redux_init_data_replicate(starpu_data_handle_t handle, struct starpu_data_replicate_s *replicate, int workerid)
+void _starpu_redux_init_data_replicate(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, int workerid)
 {
 	STARPU_ASSERT(replicate);
 	STARPU_ASSERT(replicate->allocated);
@@ -86,7 +86,7 @@ void _starpu_data_start_reduction_mode(starpu_data_handle_t handle)
 	unsigned nworkers = starpu_worker_get_count();
 	for (worker = 0; worker < nworkers; worker++)
 	{
-		struct starpu_data_replicate_s *replicate;
+		struct _starpu_data_replicate *replicate;
 		replicate = &handle->per_worker[worker];
 		replicate->initialized = 0;
 	}
@@ -198,7 +198,7 @@ void _starpu_data_end_reduction_mode(starpu_data_handle_t handle)
 		 * when they try to access the handle (normal tasks are
 		 * data requests to that handle are frozen until the
 		 * data is coherent again). */
-		starpu_job_t j = _starpu_get_job_associated_to_task(redux_task);
+		struct _starpu_job *j = _starpu_get_job_associated_to_task(redux_task);
 		j->reduction_task = 1;
 
 		redux_task->cl = handle->redux_cl;
@@ -227,7 +227,7 @@ void _starpu_data_end_reduction_mode(starpu_data_handle_t handle)
 			 * when they try to access the handle (normal tasks are
 			 * data requests to that handle are frozen until the
 			 * data is coherent again). */
-			starpu_job_t j = _starpu_get_job_associated_to_task(redux_task);
+			struct _starpu_job *j = _starpu_get_job_associated_to_task(redux_task);
 			j->reduction_task = 1;
 	
 			redux_task->cl = handle->redux_cl;
@@ -257,7 +257,7 @@ void _starpu_data_end_reduction_mode_terminate(starpu_data_handle_t handle)
 	unsigned worker;
 	for (worker = 0; worker < nworkers; worker++)
 	{
-		struct starpu_data_replicate_s *replicate;
+		struct _starpu_data_replicate *replicate;
 		replicate = &handle->per_worker[worker];
 		replicate->initialized = 0;
 

+ 9 - 9
src/datawizard/user_interactions.c

@@ -27,7 +27,7 @@
  * memory node. */
 int starpu_data_request_allocation(starpu_data_handle_t handle, uint32_t node)
 {
-	starpu_data_request_t r;
+	struct _starpu_data_request *r;
 
 	STARPU_ASSERT(handle);
 
@@ -85,10 +85,10 @@ static void _starpu_data_acquire_continuation_non_blocking(void *arg)
 
 	STARPU_ASSERT(handle);
 
-	struct starpu_data_replicate_s *ram_replicate = &handle->per_node[0];
+	struct _starpu_data_replicate *ram_replicate = &handle->per_node[0];
 
 	ret = _starpu_fetch_data_on_node(handle, ram_replicate, wrapper->mode, 1,
-			_starpu_data_acquire_fetch_data_callback, wrapper);
+					 _starpu_data_acquire_fetch_data_callback, wrapper);
 	STARPU_ASSERT(!ret);
 }
 
@@ -146,7 +146,7 @@ int starpu_data_acquire_cb(starpu_data_handle_t handle,
 		wrapper->post_sync_task->detach = 1;
 
 #ifdef STARPU_USE_FXT
-                starpu_job_t job = _starpu_get_job_associated_to_task(wrapper->pre_sync_task);
+                struct _starpu_job *job = _starpu_get_job_associated_to_task(wrapper->pre_sync_task);
                 job->model_name = "acquire_cb_pre";
                 job = _starpu_get_job_associated_to_task(wrapper->post_sync_task);
                 job->model_name = "acquire_cb_post";
@@ -180,7 +180,7 @@ static inline void _starpu_data_acquire_continuation(void *arg)
 
 	STARPU_ASSERT(handle);
 
-	struct starpu_data_replicate_s *ram_replicate = &handle->per_node[0];
+	struct _starpu_data_replicate *ram_replicate = &handle->per_node[0];
 
 	_starpu_fetch_data_on_node(handle, ram_replicate, wrapper->mode, 0, NULL, NULL);
 	
@@ -225,7 +225,7 @@ int starpu_data_acquire(starpu_data_handle_t handle, enum starpu_access_mode mod
 		wrapper.post_sync_task->detach = 1;
 
 #ifdef STARPU_USE_FXT
-                starpu_job_t job = _starpu_get_job_associated_to_task(wrapper.pre_sync_task);
+                struct _starpu_job *job = _starpu_get_job_associated_to_task(wrapper.pre_sync_task);
                 job->model_name = "acquire_pre";
                 job = _starpu_get_job_associated_to_task(wrapper.post_sync_task);
                 job->model_name = "acquire_post";
@@ -250,7 +250,7 @@ int starpu_data_acquire(starpu_data_handle_t handle, enum starpu_access_mode mod
 	if (!_starpu_attempt_to_submit_data_request_from_apps(handle, mode, _starpu_data_acquire_continuation, &wrapper))
 	{
 		/* no one has locked this data yet, so we proceed immediately */
-		struct starpu_data_replicate_s *ram_replicate = &handle->per_node[0];
+		struct _starpu_data_replicate *ram_replicate = &handle->per_node[0];
 		int ret = _starpu_fetch_data_on_node(handle, ram_replicate, mode, 0, NULL, NULL);
 		STARPU_ASSERT(!ret);
 	}
@@ -290,7 +290,7 @@ static void _prefetch_data_on_node(void *arg)
 	starpu_data_handle_t handle = wrapper->handle;
         int ret;
 
-	struct starpu_data_replicate_s *replicate = &handle->per_node[wrapper->node];
+	struct _starpu_data_replicate *replicate = &handle->per_node[wrapper->node];
 	ret = _starpu_fetch_data_on_node(handle, replicate, STARPU_R, wrapper->async, NULL, NULL);
         STARPU_ASSERT(!ret);
 
@@ -329,7 +329,7 @@ int _starpu_prefetch_data_on_node_with_mode(starpu_data_handle_t handle, unsigne
 	if (!_starpu_attempt_to_submit_data_request_from_apps(handle, mode, _prefetch_data_on_node, wrapper))
 	{
 		/* we can immediately proceed */
-		struct starpu_data_replicate_s *replicate = &handle->per_node[node];
+		struct _starpu_data_replicate *replicate = &handle->per_node[node];
 		_starpu_fetch_data_on_node(handle, replicate, mode, async, NULL, NULL);
 
 		/* remove the "lock"/reference */

+ 3 - 3
src/datawizard/write_back.c

@@ -28,7 +28,7 @@ static void wt_callback(void *arg) {
 }
 
 void _starpu_write_through_data(starpu_data_handle_t handle, uint32_t requesting_node, 
-					   uint32_t write_through_mask)
+				uint32_t write_through_mask)
 {
 	if ((write_through_mask & ~(1<<requesting_node)) == 0) {
 		/* nothing will be done ... */
@@ -53,9 +53,9 @@ void _starpu_write_through_data(starpu_data_handle_t handle, uint32_t requesting
 				handle->busy_count++;
 				handle->current_mode = STARPU_R;
 
-				starpu_data_request_t r;
+				struct _starpu_data_request *r;
 				r = _starpu_create_request_to_fetch_data(handle, &handle->per_node[node],
-						STARPU_R, 1, wt_callback, handle);
+									 STARPU_R, 1, wt_callback, handle);
 
 			        /* If no request was created, the handle was already up-to-date on the
 			         * node */

+ 3 - 3
src/debug/latency.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2010-2011  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -34,7 +34,7 @@ void _starpu_benchmark_ping_pong(starpu_data_handle_t handle,
 		handle->busy_count++;
 		_starpu_spin_unlock(&handle->header_lock);
 
-		struct starpu_data_replicate_s *replicate_0 = &handle->per_node[node0];
+		struct _starpu_data_replicate *replicate_0 = &handle->per_node[node0];
 		ret = _starpu_fetch_data_on_node(handle, replicate_0, STARPU_RW, 0, NULL, NULL);
 		STARPU_ASSERT(!ret);
 		_starpu_release_data_on_node(handle, 0, replicate_0);
@@ -44,7 +44,7 @@ void _starpu_benchmark_ping_pong(starpu_data_handle_t handle,
 		handle->busy_count++;
 		_starpu_spin_unlock(&handle->header_lock);
 
-		struct starpu_data_replicate_s *replicate_1 = &handle->per_node[node1];
+		struct _starpu_data_replicate *replicate_1 = &handle->per_node[node1];
 		ret = _starpu_fetch_data_on_node(handle, replicate_1, STARPU_RW, 0, NULL, NULL);
 		STARPU_ASSERT(!ret);
 		_starpu_release_data_on_node(handle, 0, replicate_1);

+ 2 - 2
src/debug/structures_size.c

@@ -25,8 +25,8 @@ void _starpu_debug_display_structures_size(void)
 {
 	fprintf(stderr, "struct starpu_task\t\t%u bytes\t(%x)\n",
 			(unsigned) sizeof(struct starpu_task), (unsigned) sizeof(struct starpu_task));
-	fprintf(stderr, "struct starpu_job_s\t\t%u bytes\t(%x)\n",
-			(unsigned) sizeof(struct starpu_job_s), (unsigned) sizeof(struct starpu_job_s));
+	fprintf(stderr, "struct _starpu_job\t\t%u bytes\t(%x)\n",
+			(unsigned) sizeof(struct _starpu_job), (unsigned) sizeof(struct _starpu_job));
 	fprintf(stderr, "struct _starpu_data_state\t%u bytes\t(%x)\n",
 			(unsigned) sizeof(struct _starpu_data_state), (unsigned) sizeof(struct _starpu_data_state));
 	fprintf(stderr, "struct _starpu_tag\t\t%u bytes\t(%x)\n",

+ 27 - 27
src/debug/traces/starpu_fxt.c

@@ -89,13 +89,13 @@ static double last_activity_flush_timestamp[STARPU_NMAXWORKERS];
 static double accumulated_sleep_time[STARPU_NMAXWORKERS];
 static double accumulated_exec_time[STARPU_NMAXWORKERS];
 
-LIST_TYPE(symbol_name,
+LIST_TYPE(_starpu_symbol_name,
 	char *name;
 )
 
-static symbol_name_list_t symbol_list;
+static struct _starpu_symbol_name_list *symbol_list;
 
-LIST_TYPE(communication,
+LIST_TYPE(_starpu_communication,
 	unsigned comid;
 	float comm_start;	
 	float bandwidth;
@@ -103,7 +103,7 @@ LIST_TYPE(communication,
 	unsigned dst_node;
 )
 
-static communication_list_t communication_list;
+static struct _starpu_communication_list *communication_list;
 
 /*
  * Paje trace file tools
@@ -293,10 +293,10 @@ static void handle_worker_deinit_end(struct fxt_ev_64 *ev, struct starpu_fxt_opt
 
 static void create_paje_state_if_not_found(char *name, struct starpu_fxt_options *options)
 {
-	symbol_name_itor_t itor;
-	for (itor = symbol_name_list_begin(symbol_list);
-		itor != symbol_name_list_end(symbol_list);
-		itor = symbol_name_list_next(itor))
+	struct _starpu_symbol_name *itor;
+	for (itor = _starpu_symbol_name_list_begin(symbol_list);
+		itor != _starpu_symbol_name_list_end(symbol_list);
+		itor = _starpu_symbol_name_list_next(itor))
 	{
 		if (!strcmp(name, itor->name))
 		{
@@ -306,12 +306,12 @@ static void create_paje_state_if_not_found(char *name, struct starpu_fxt_options
 	}
 
 	/* it's the first time ... */
-	symbol_name_t entry = symbol_name_new();
-		entry->name = malloc(strlen(name));
-		strcpy(entry->name, name);
+	struct _starpu_symbol_name *entry = _starpu_symbol_name_new();
+	entry->name = malloc(strlen(name));
+	strcpy(entry->name, name);
+
+	_starpu_symbol_name_list_push_front(symbol_list, entry);
 
-	symbol_name_list_push_front(symbol_list, entry);
-	
 	/* choose some colour ... that's disguting yes */
 	unsigned hash_symbol_red = get_colour_symbol_red(name);
 	unsigned hash_symbol_green = get_colour_symbol_green(name);
@@ -514,14 +514,14 @@ static void handle_start_driver_copy(struct fxt_ev_64 *ev, struct starpu_fxt_opt
 		}
 
 		/* create a structure to store the start of the communication, this will be matched later */
-		communication_t com = communication_new();
+		struct _starpu_communication *com = _starpu_communication_new();
 		com->comid = comid;
 		com->comm_start = get_event_time_stamp(ev, options);
 
 		com->src_node = src;
 		com->dst_node = dst;
 
-		communication_list_push_back(communication_list, com);
+		_starpu_communication_list_push_back(communication_list, com);
 	}
 
 }
@@ -543,10 +543,10 @@ static void handle_end_driver_copy(struct fxt_ev_64 *ev, struct starpu_fxt_optio
 		}
 
 		/* look for a data transfer to match */
-		communication_itor_t itor;
-		for (itor = communication_list_begin(communication_list);
-			itor != communication_list_end(communication_list);
-			itor = communication_list_next(itor))
+		struct _starpu_communication *itor;
+		for (itor = _starpu_communication_list_begin(communication_list);
+			itor != _starpu_communication_list_end(communication_list);
+			itor = _starpu_communication_list_next(itor))
 		{
 			if (itor->comid == comid)
 			{
@@ -555,7 +555,7 @@ static void handle_end_driver_copy(struct fxt_ev_64 *ev, struct starpu_fxt_optio
 
 				itor->bandwidth = bandwidth;
 
-				communication_t com = communication_new();
+				struct _starpu_communication *com = _starpu_communication_new();
 				com->comid = comid;
 				com->comm_start = get_event_time_stamp(ev, options);
 				com->bandwidth = -bandwidth;
@@ -563,7 +563,7 @@ static void handle_end_driver_copy(struct fxt_ev_64 *ev, struct starpu_fxt_optio
 				com->src_node = itor->src_node;
 				com->dst_node = itor->dst_node;
 
-				communication_list_push_back(communication_list, com);
+				_starpu_communication_list_push_back(communication_list, com);
 
 				break;
 			}
@@ -770,10 +770,10 @@ void _starpu_fxt_display_bandwidth(struct starpu_fxt_options *options)
 
 	char *prefix = options->file_prefix;
 
-	communication_itor_t itor;
-	for (itor = communication_list_begin(communication_list);
-		itor != communication_list_end(communication_list);
-		itor = communication_list_next(itor))
+	struct _starpu_communication *itor;
+	for (itor = _starpu_communication_list_begin(communication_list);
+		itor != _starpu_communication_list_end(communication_list);
+		itor = _starpu_communication_list_next(itor))
 	{
 		current_bandwidth_per_node[itor->src_node] +=  itor->bandwidth;
 		if (out_paje_file)
@@ -815,8 +815,8 @@ void starpu_fxt_parse_new_file(char *filename_in, struct starpu_fxt_options *opt
 	/* create a htable to identify each worker(tid) */
 	hcreate(STARPU_NMAXWORKERS);
 
-	symbol_list = symbol_name_list_new(); 
-	communication_list = communication_list_new();
+	symbol_list = _starpu_symbol_name_list_new();
+	communication_list = _starpu_communication_list_new();
 
 	char *prefix = options->file_prefix;
 

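The lookup-then-insert logic of create_paje_state_if_not_found above condenses, with the renamed list API, to the sketch below. find_or_add_symbol is a hypothetical helper meant to live in this same file (the _starpu_symbol_name list is declared here by LIST_TYPE), and strdup is used so the copy reserves the terminating NUL, which malloc(strlen(name)) followed by strcpy does not.

#include <string.h>

/* Hypothetical helper condensing the lookup-or-insert pattern above. */
static struct _starpu_symbol_name *find_or_add_symbol(struct _starpu_symbol_name_list *l, const char *name)
{
	struct _starpu_symbol_name *itor;

	for (itor = _starpu_symbol_name_list_begin(l);
	     itor != _starpu_symbol_name_list_end(l);
	     itor = _starpu_symbol_name_list_next(itor))
		if (!strcmp(name, itor->name))
			return itor;	/* already known */

	/* first time this symbol is seen */
	struct _starpu_symbol_name *entry = _starpu_symbol_name_new();
	entry->name = strdup(name);	/* copies the trailing '\0' as well */
	_starpu_symbol_name_list_push_front(l, entry);
	return entry;
}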
+ 2 - 2
src/drivers/cpu/driver_cpu.c

@@ -25,7 +25,7 @@
 #include "driver_cpu.h"
 #include <core/sched_policy.h>
 
-static int execute_job_on_cpu(starpu_job_t j, struct _starpu_worker *cpu_args, int is_parallel_task, int rank, enum starpu_perf_archtype perf_arch)
+static int execute_job_on_cpu(struct _starpu_job *j, struct _starpu_worker *cpu_args, int is_parallel_task, int rank, enum starpu_perf_archtype perf_arch)
 {
 	int ret;
 	struct timespec codelet_start, codelet_end;
@@ -117,7 +117,7 @@ void *_starpu_cpu_worker(void *arg)
 	_STARPU_PTHREAD_COND_SIGNAL(&cpu_arg->ready_cond);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&cpu_arg->mutex);
 
-        starpu_job_t j;
+        struct _starpu_job *j;
 	struct starpu_task *task;
 
 	int res;

+ 2 - 2
src/drivers/cuda/driver_cuda.c

@@ -174,7 +174,7 @@ void _starpu_init_cuda(void)
 	assert(ncudagpus <= STARPU_MAXCUDADEVS);
 }
 
-static int execute_job_on_cuda(starpu_job_t j, struct _starpu_worker *args)
+static int execute_job_on_cuda(struct _starpu_job *j, struct _starpu_worker *args)
 {
 	int ret;
 	uint32_t mask = 0;
@@ -292,7 +292,7 @@ void *_starpu_cuda_worker(void *arg)
 	_STARPU_PTHREAD_COND_SIGNAL(&args->ready_cond);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&args->mutex);
 
-	struct starpu_job_s * j;
+	struct _starpu_job * j;
 	struct starpu_task *task;
 	int res;
 

+ 3 - 3
src/drivers/driver_common/driver_common.c

@@ -25,7 +25,7 @@
 #include <drivers/driver_common/driver_common.h>
 #include <starpu_top.h>
 
-void _starpu_driver_start_job(struct _starpu_worker *args, starpu_job_t j, struct timespec *codelet_start, int rank)
+void _starpu_driver_start_job(struct _starpu_worker *args, struct _starpu_job *j, struct timespec *codelet_start, int rank)
 {
 	struct starpu_task *task = j->task;
 	struct starpu_codelet *cl = task->cl;
@@ -59,7 +59,7 @@ void _starpu_driver_start_job(struct _starpu_worker *args, starpu_job_t j, struc
 	_STARPU_TRACE_START_CODELET_BODY(j);
 }
 
-void _starpu_driver_end_job(struct _starpu_worker *args, starpu_job_t j, struct timespec *codelet_end, int rank)
+void _starpu_driver_end_job(struct _starpu_worker *args, struct _starpu_job *j, struct timespec *codelet_end, int rank)
 {
 	struct starpu_task *task = j->task;
 	struct starpu_codelet *cl = task->cl;
@@ -85,7 +85,7 @@ void _starpu_driver_end_job(struct _starpu_worker *args, starpu_job_t j, struct
 
 	args->status = STATUS_UNKNOWN;
 }
-void _starpu_driver_update_job_feedback(starpu_job_t j, struct _starpu_worker *worker_args,
+void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker_args,
 					enum starpu_perf_archtype perf_arch,
 					struct timespec *codelet_start, struct timespec *codelet_end, double conversion_time)
 {

+ 8 - 8
src/drivers/driver_common/driver_common.h

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2010, 2011  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -23,13 +23,13 @@
 #include <core/jobs.h>
 #include <common/utils.h>
 
-void _starpu_driver_start_job(struct _starpu_worker *args, starpu_job_t j,
-		struct timespec *codelet_start, int rank);
-void _starpu_driver_end_job(struct _starpu_worker *args, starpu_job_t j,
-		struct timespec *codelet_end, int rank);
-void _starpu_driver_update_job_feedback(starpu_job_t j, struct _starpu_worker *worker_args,
-		enum starpu_perf_archtype perf_arch,
-		struct timespec *codelet_start, struct timespec *codelet_end, double);
+void _starpu_driver_start_job(struct _starpu_worker *args, struct _starpu_job *j,
+			      struct timespec *codelet_start, int rank);
+void _starpu_driver_end_job(struct _starpu_worker *args, struct _starpu_job *j,
+			    struct timespec *codelet_end, int rank);
+void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker_args,
+					enum starpu_perf_archtype perf_arch,
+					struct timespec *codelet_start, struct timespec *codelet_end, double);
 
 void _starpu_block_worker(int workerid, pthread_cond_t *cond, pthread_mutex_t *mutex);
 

+ 22 - 22
src/drivers/gordon/driver_gordon.c

@@ -38,10 +38,10 @@ struct gordon_task_wrapper_s {
 	/* who has executed that ? */
 	struct _starpu_worker *worker;
 
-	struct starpu_job_list_s *list;	/* StarPU */
+	struct _starpu_job_list *list;	/* StarPU */
 	struct gordon_ppu_job_s *gordon_job; /* gordon*/
 
-	struct starpu_job_s *j; /* if there is a single task */
+	struct _starpu_job *j; /* if there is a single task */
 
 	/* debug */
 	unsigned terminated;
@@ -78,7 +78,7 @@ void *gordon_worker_progress(void *arg)
 	return NULL;
 }
 
-static void starpu_to_gordon_buffers(starpu_job_t j, struct gordon_ppu_job_s *gordon_job, uint32_t memory_node)
+static void starpu_to_gordon_buffers(struct _starpu_job *j, struct gordon_ppu_job_s *gordon_job, uint32_t memory_node)
 {
 	unsigned buffer;
 	unsigned nin = 0, ninout = 0, nout = 0;
@@ -152,7 +152,7 @@ static void starpu_to_gordon_buffers(starpu_job_t j, struct gordon_ppu_job_s *go
 
 /* we assume the data are already available so that the data interface fields are 
  * already filled */
-static struct gordon_task_wrapper_s *starpu_to_gordon_job(starpu_job_t j)
+static struct gordon_task_wrapper_s *starpu_to_gordon_job(struct _starpu_job *j)
 {
 	struct gordon_ppu_job_s *gordon_job = gordon_alloc_jobs(1, 0);
 	struct gordon_task_wrapper_s *task_wrapper =
@@ -174,7 +174,7 @@ static struct gordon_task_wrapper_s *starpu_to_gordon_job(starpu_job_t j)
 	return task_wrapper;
 }
 
-static void handle_terminated_job(starpu_job_t j)
+static void handle_terminated_job(struct _starpu_job *j)
 {
 	_starpu_push_task_output(j->task, 0);
 	_starpu_handle_job_termination(j, 0);
@@ -184,7 +184,7 @@ static void handle_terminated_job(starpu_job_t j)
 static void gordon_callback_list_func(void *arg)
 {
 	struct gordon_task_wrapper_s *task_wrapper = arg; 
-	struct starpu_job_list_s *wrapper_list; 
+	struct _starpu_job_list *wrapper_list; 
 
 	/* we don't know who will execute that codelet : so we actually defer the
  	 * execution of the StarPU codelet and the job termination later */
@@ -200,9 +200,9 @@ static void gordon_callback_list_func(void *arg)
 	unsigned task_cnt = 0;
 
 	/* XXX 0 was hardcoded */
-	while (!starpu_job_list_empty(wrapper_list))
+	while (!_starpu_job_list_empty(wrapper_list))
 	{
-		starpu_job_t j = starpu_job_list_pop_back(wrapper_list);
+		struct _starpu_job *j = _starpu_job_list_pop_back(wrapper_list);
 
 		struct gordon_ppu_job_s * gordon_task = &task_wrapper->gordon_job[task_cnt];
 		struct starpu_perfmodel *model = j->task->cl->model;
@@ -222,7 +222,7 @@ static void gordon_callback_list_func(void *arg)
 	}
 
 	/* the job list was allocated by the gordon driver itself */
-	starpu_job_list_delete(wrapper_list);
+	_starpu_job_list_delete(wrapper_list);
 
 	starpu_wake_all_blocked_workers();
 	free(task_wrapper->gordon_job);
@@ -249,7 +249,7 @@ static void gordon_callback_func(void *arg)
 	free(task_wrapper);
 }
 
-int inject_task(starpu_job_t j, struct _starpu_worker *worker)
+int inject_task(struct _starpu_job *j, struct _starpu_worker *worker)
 {
 	struct starpu_task *task = j->task;
 	int ret = _starpu_fetch_task_input(task, 0);
@@ -269,16 +269,16 @@ int inject_task(starpu_job_t j, struct _starpu_worker *worker)
 	return 0;
 }
 
-int inject_task_list(struct starpu_job_list_s *list, struct _starpu_worker *worker)
+int inject_task_list(struct _starpu_job_list *list, struct _starpu_worker *worker)
 {
 	/* first put back all tasks that can not be performed by Gordon */
 	unsigned nvalids = 0;
 	unsigned ninvalids = 0;
-	starpu_job_t j;
+	struct _starpu_job *j;
 
 	// TODO !
 //	
-//	for (j = starpu_job_list_begin(list); j != starpu_job_list_end(list); j = starpu_job_list_next(j) )
+//	for (j = _starpu_job_list_begin(list); j != _starpu_job_list_end(list); j = _starpu_job_list_next(j) )
 //	{
 //		if (!_STARPU_GORDON_MAY_PERFORM(j)) {
 //			// XXX TODO
@@ -290,7 +290,7 @@ int inject_task_list(struct starpu_job_list_s *list, struct _starpu_worker *work
 //		}
 //	}
 
-	nvalids = job_list_size(list);
+	nvalids = _job_list_size(list);
 //	_STARPU_DEBUG("nvalids %d \n", nvalids);
 
 	
@@ -305,7 +305,7 @@ int inject_task_list(struct starpu_job_list_s *list, struct _starpu_worker *work
 	task_wrapper->worker = worker;
 	
 	unsigned index;
-	for (j = starpu_job_list_begin(list), index = 0; j != starpu_job_list_end(list); j = starpu_job_list_next(j), index++)
+	for (j = _starpu_job_list_begin(list), index = 0; j != _starpu_job_list_end(list); j = _starpu_job_list_next(j), index++)
 	{
 		int ret;
 
@@ -345,12 +345,12 @@ void *gordon_worker_inject(struct _starpu_worker_set *arg)
 #warning we should look into the local job list here !
 #endif
 
-			struct starpu_job_list_s *list = _starpu_pop_every_task();
+			struct _starpu_job_list *list = _starpu_pop_every_task();
 			/* XXX 0 is hardcoded */
 			if (list)
 			{
 				/* partition lists */
-				unsigned size = job_list_size(list);
+				unsigned size = _starpu_job_list_size(list);
 				unsigned nchunks = (size<2*arg->nworkers)?size:(2*arg->nworkers);
 				//unsigned nchunks = (size<arg->nworkers)?size:(arg->nworkers);
 
@@ -360,20 +360,20 @@ void *gordon_worker_inject(struct _starpu_worker_set *arg)
 				unsigned chunk;
 				for (chunk = 0; chunk < nchunks; chunk++)
 				{
-					struct starpu_job_list_s *chunk_list;
+					struct _starpu_job_list *chunk_list;
 					if (chunk != (nchunks -1))
 					{
 						/* split the list in 2 parts : list = chunk_list | tail */
-						chunk_list = starpu_job_list_new();
+						chunk_list = _starpu_job_list_new();
 
 						/* find the end */
 						chunk_list->_head = list->_head;
 
-						starpu_job_itor_t it_j = starpu_job_list_begin(list);
+						struct _starpu_job *it_j = _starpu_job_list_begin(list);
 						unsigned ind;
 						for (ind = 0; ind < chunksize; ind++)
 						{
-							it_j = starpu_job_list_next(it_j);
+							it_j = _starpu_job_list_next(it_j);
 						}
 
 						/* it_j should be the first element of the new list (tail) */
@@ -395,7 +395,7 @@ void *gordon_worker_inject(struct _starpu_worker_set *arg)
 			}
 #else
 			/* gordon should accept a little more work */
-			starpu_job_t j;
+			struct _starpu_job *j;
 			j =  _starpu_pop_task();
 	//		_STARPU_DEBUG("pop task %p\n", j);
 			if (j) {

+ 3 - 3
src/drivers/opencl/driver_opencl.c

@@ -409,7 +409,7 @@ void _starpu_opencl_init(void)
 }
 
 static unsigned _starpu_opencl_get_device_name(int dev, char *name, int lname);
-static int _starpu_opencl_execute_job(starpu_job_t j, struct _starpu_worker *args);
+static int _starpu_opencl_execute_job(struct _starpu_job *j, struct _starpu_worker *args);
 
 void *_starpu_opencl_worker(void *arg)
 {
@@ -454,7 +454,7 @@ void *_starpu_opencl_worker(void *arg)
 	_STARPU_PTHREAD_COND_SIGNAL(&args->ready_cond);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&args->mutex);
 
-	struct starpu_job_s * j;
+	struct _starpu_job * j;
 	struct starpu_task *task;
 	int res;
 
@@ -547,7 +547,7 @@ unsigned _starpu_opencl_get_device_count(void)
 	return nb_devices;
 }
 
-static int _starpu_opencl_execute_job(starpu_job_t j, struct _starpu_worker *args)
+static int _starpu_opencl_execute_job(struct _starpu_job *j, struct _starpu_worker *args)
 {
 	int ret;
 	uint32_t mask = 0;

+ 7 - 7
src/profiling/bound.c

@@ -149,7 +149,7 @@ void starpu_bound_start(int deps, int prio)
 		free(td);
 }
 
-static int good_job(starpu_job_t j)
+static int good_job(struct _starpu_job *j)
 {
 	/* No codelet, nothing to measure */
 	if (j->exclude_from_dag)
@@ -165,7 +165,7 @@ static int good_job(starpu_job_t j)
 	return 1;
 }
 
-static void new_task(starpu_job_t j)
+static void new_task(struct _starpu_job *j)
 {
 	struct bound_task *t;
 
@@ -187,7 +187,7 @@ static void new_task(starpu_job_t j)
 	tasks = t;
 }
 
-void _starpu_bound_record(starpu_job_t j)
+void _starpu_bound_record(struct _starpu_job *j)
 {
 	if (!_starpu_bound_recording)
 		return;
@@ -254,7 +254,7 @@ void _starpu_bound_tag_dep(starpu_tag_t id, starpu_tag_t dep_id)
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
 }
 
-void _starpu_bound_task_dep(starpu_job_t j, starpu_job_t dep_j)
+void _starpu_bound_task_dep(struct _starpu_job *j, struct _starpu_job *dep_j)
 {
 	struct bound_task *t;
 
@@ -289,7 +289,7 @@ static struct bound_task *find_job(unsigned long id)
 	return NULL;
 }
 
-void _starpu_bound_job_id_dep(starpu_job_t j, unsigned long id)
+void _starpu_bound_job_id_dep(struct _starpu_job *j, unsigned long id)
 {
 	struct bound_task *t, *dep_t;
 
@@ -331,7 +331,7 @@ static void _starpu_get_tasks_times(int nw, int nt, double *times) {
 	int w, t;
 	for (w = 0; w < nw; w++) {
 		for (t = 0, tp = task_pools; tp; t++, tp = tp->next) {
-			struct starpu_job_s j = {
+			struct _starpu_job j = {
 				.footprint = tp->footprint,
 				.footprint_is_computed = 1,
 			};
@@ -395,7 +395,7 @@ void starpu_bound_print_lp(FILE *output)
 
 		nt = 0;
 		for (t1 = tasks; t1; t1 = t1->next) {
-			struct starpu_job_s j = {
+			struct _starpu_job j = {
 				.footprint = t1->footprint,
 				.footprint_is_computed = 1,
 			};

+ 3 - 3
src/profiling/bound.h

@@ -25,15 +25,15 @@
 extern int _starpu_bound_recording;
 
 /* Record task for bound computation */
-extern void _starpu_bound_record(starpu_job_t j);
+extern void _starpu_bound_record(struct _starpu_job *j);
 
 /* Record tag dependency: id depends on dep_id */
 extern void _starpu_bound_tag_dep(starpu_tag_t id, starpu_tag_t dep_id);
 
 /* Record task dependency: j depends on dep_j */
-extern void _starpu_bound_task_dep(starpu_job_t j, starpu_job_t dep_j);
+extern void _starpu_bound_task_dep(struct _starpu_job *j, struct _starpu_job *dep_j);
 
 /* Record job id dependency: j depends on job_id */
-extern void _starpu_bound_job_id_dep(starpu_job_t dep_j, unsigned long job_id);
+extern void _starpu_bound_job_id_dep(struct _starpu_job *dep_j, unsigned long job_id);
 
 #endif // __BOUND_H__

+ 19 - 19
src/sched_policies/deque_queues.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2010-2011  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  * Copyright (C) 2011  Télécom-SudParis
  *
  * StarPU is free software; you can redistribute it and/or modify
@@ -31,7 +31,7 @@ struct _starpu_deque_jobq *_starpu_create_deque(void)
 	deque = (struct _starpu_deque_jobq *) malloc(sizeof(struct _starpu_deque_jobq));
 
 	/* note that not all mechanisms (eg. the semaphore) have to be used */
-	deque->jobq = starpu_job_list_new();
+	deque->jobq = _starpu_job_list_new();
 	deque->njobs = 0;
 	deque->nprocessed = 0;
 
@@ -44,7 +44,7 @@ struct _starpu_deque_jobq *_starpu_create_deque(void)
 
 void _starpu_destroy_deque(struct _starpu_deque_jobq *deque)
 {
-	starpu_job_list_delete(deque->jobq);
+	_starpu_job_list_delete(deque->jobq);
 	free(deque);
 }
 
@@ -60,7 +60,7 @@ unsigned _starpu_get_deque_nprocessed(struct _starpu_deque_jobq *deque_queue)
 
 struct starpu_task *_starpu_deque_pop_task(struct _starpu_deque_jobq *deque_queue, int workerid __attribute__ ((unused)))
 {
-	starpu_job_t j = NULL;
+	struct _starpu_job *j = NULL;
 
 	if ((deque_queue->njobs == 0) && _starpu_machine_is_running())
 	{
@@ -68,9 +68,9 @@ struct starpu_task *_starpu_deque_pop_task(struct _starpu_deque_jobq *deque_queu
 	}
 
 	/* TODO find a task that suits workerid */
-	for (j  = starpu_job_list_begin(deque_queue->jobq);
-	     j != starpu_job_list_end(deque_queue->jobq);
-	     j  = starpu_job_list_next(j))
+	for (j  = _starpu_job_list_begin(deque_queue->jobq);
+	     j != _starpu_job_list_end(deque_queue->jobq);
+	     j  = _starpu_job_list_next(j))
 	{
 		unsigned nimpl;
 		STARPU_ASSERT(j);
@@ -79,7 +79,7 @@ struct starpu_task *_starpu_deque_pop_task(struct _starpu_deque_jobq *deque_queu
 			if (starpu_worker_can_execute_task(workerid, j->task, nimpl))
 			{
 				j->nimpl = nimpl;
-				j = starpu_job_list_pop_front(deque_queue->jobq);
+				j = _starpu_job_list_pop_front(deque_queue->jobq);
 				deque_queue->njobs--;
 				_STARPU_TRACE_JOB_POP(j, 0);
 				return j->task;
@@ -89,9 +89,9 @@ struct starpu_task *_starpu_deque_pop_task(struct _starpu_deque_jobq *deque_queu
 	return NULL;
 }
 
-struct starpu_job_list_s *_starpu_deque_pop_every_task(struct _starpu_deque_jobq *deque_queue, pthread_mutex_t *sched_mutex, int workerid)
+struct _starpu_job_list *_starpu_deque_pop_every_task(struct _starpu_deque_jobq *deque_queue, pthread_mutex_t *sched_mutex, int workerid)
 {
-	struct starpu_job_list_s *new_list, *old_list;
+	struct _starpu_job_list *new_list, *old_list;
 
 	/* block until some task is available in that queue */
 	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
@@ -103,20 +103,20 @@ struct starpu_job_list_s *_starpu_deque_pop_every_task(struct _starpu_deque_jobq
 	else {
 		/* there is a task */
 		old_list = deque_queue->jobq;
-		new_list = starpu_job_list_new();
+		new_list = _starpu_job_list_new();
 
 		unsigned new_list_size = 0;
 
-		starpu_job_itor_t i;
-		starpu_job_t next_job;
+		struct _starpu_job *i;
+		struct _starpu_job *next_job;
 		/* note that this starts at the _head_ of the list, so we put
  		 * elements at the back of the new list */
-		for(i = starpu_job_list_begin(old_list);
-			i != starpu_job_list_end(old_list);
+		for(i = _starpu_job_list_begin(old_list);
+			i != _starpu_job_list_end(old_list);
 			i  = next_job)
 		{
 			unsigned nimpl;
-			next_job = starpu_job_list_next(i);
+			next_job = _starpu_job_list_next(i);
 
 			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
 			if (starpu_worker_can_execute_task(workerid, i->task, nimpl))
@@ -124,8 +124,8 @@ struct starpu_job_list_s *_starpu_deque_pop_every_task(struct _starpu_deque_jobq
 				/* this elements can be moved into the new list */
 				new_list_size++;
 				
-				starpu_job_list_erase(old_list, i);
-				starpu_job_list_push_back(new_list, i);
+				_starpu_job_list_erase(old_list, i);
+				_starpu_job_list_push_back(new_list, i);
 				i->nimpl = nimpl;
 			}
 		}
@@ -133,7 +133,7 @@ struct starpu_job_list_s *_starpu_deque_pop_every_task(struct _starpu_deque_jobq
 		if (new_list_size == 0)
 		{
 			/* the new list is empty ... */
-			starpu_job_list_delete(new_list);
+			_starpu_job_list_delete(new_list);
 			new_list = NULL;
 		}
 		else

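Putting the renamed job-list calls together, the life cycle of a queue such as the deque above looks roughly as follows. This is a sketch only, assuming <core/jobs.h> is included: locking, error handling and the per-worker feasibility test are omitted, and push_one_and_drain/some_job are illustrative names, not StarPU symbols.

/* Rough life cycle of a struct _starpu_job_list backed queue (sketch). */
static void push_one_and_drain(struct _starpu_job *some_job)
{
	struct _starpu_job_list *q = _starpu_job_list_new();

	_starpu_job_list_push_back(q, some_job);	/* enqueue at the tail */

	while (!_starpu_job_list_empty(q))
	{
		struct _starpu_job *j = _starpu_job_list_pop_front(q);	/* dequeue from the head */
		/* ... hand j->task over to a worker ... */
		(void) j;
	}

	_starpu_job_list_delete(q);	/* the (now empty) list is freed */
}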
+ 2 - 2
src/sched_policies/deque_queues.h

@@ -25,7 +25,7 @@
 
 struct _starpu_deque_jobq {
 	/* the actual list */
-	starpu_job_list_t jobq;
+	struct _starpu_job_list *jobq;
 
 	/* the number of tasks currently in the queue */
 	unsigned njobs;
@@ -43,7 +43,7 @@ struct _starpu_deque_jobq *_starpu_create_deque(void);
 void _starpu_destroy_deque(struct _starpu_deque_jobq *deque);
 
 struct starpu_task *_starpu_deque_pop_task(struct _starpu_deque_jobq *deque_queue, int workerid);
-struct starpu_job_list_s *_starpu_deque_pop_every_task(struct _starpu_deque_jobq *deque_queue, pthread_mutex_t *sched_mutex, int workerid);
+struct _starpu_job_list *_starpu_deque_pop_every_task(struct _starpu_deque_jobq *deque_queue, pthread_mutex_t *sched_mutex, int workerid);
 
 unsigned _starpu_get_deque_njobs(struct _starpu_deque_jobq *deque_queue);
 unsigned _starpu_get_deque_nprocessed(struct _starpu_deque_jobq *deque_queue);

+ 1 - 1
src/sched_policies/parallel_greedy.c

@@ -200,7 +200,7 @@ static struct starpu_task *pop_task_pgreedy_policy(void)
 			int worker_size = combined_worker->worker_size;
 			int *combined_workerid = combined_worker->combined_workerid;
 
-			starpu_job_t j = _starpu_get_job_associated_to_task(task);
+			struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 			j->task_size = worker_size;
 			j->combined_workerid = best_workerid;
 			j->active_task_alias_count = 0;

+ 1 - 1
src/sched_policies/parallel_heft.c

@@ -105,7 +105,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 		int worker_size = combined_worker->worker_size;
 		int *combined_workerid = combined_worker->combined_workerid;
 
-		starpu_job_t j = _starpu_get_job_associated_to_task(task);
+		struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 		j->task_size = worker_size;
 		j->combined_workerid = best_workerid;
 		j->active_task_alias_count = 0;

+ 8 - 8
src/sched_policies/stack_queues.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2010-2011  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -36,7 +36,7 @@ struct _starpu_stack_jobq *_starpu_create_stack(void)
 	struct _starpu_stack_jobq *stack;
 	stack = (struct _starpu_stack_jobq *) malloc(sizeof(struct _starpu_stack_jobq));
 
-	stack->jobq = starpu_job_list_new();
+	stack->jobq = _starpu_job_list_new();
 	stack->njobs = 0;
 	stack->nprocessed = 0;
 
@@ -57,16 +57,16 @@ unsigned _starpu_get_stack_nprocessed(struct _starpu_stack_jobq *stack_queue)
 	return stack_queue->nprocessed;
 }
 
-void _starpu_stack_push_task(struct _starpu_stack_jobq *stack_queue, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond, starpu_job_t task)
+void _starpu_stack_push_task(struct _starpu_stack_jobq *stack_queue, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond, struct _starpu_job *task)
 {
 	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
 	total_number_of_jobs++;
 
 	_STARPU_TRACE_JOB_PUSH(task, 0);
 	if (task->task->priority)
-		starpu_job_list_push_back(stack_queue->jobq, task);
+		_starpu_job_list_push_back(stack_queue->jobq, task);
 	else
-		starpu_job_list_push_front(stack_queue->jobq, task);
+		_starpu_job_list_push_front(stack_queue->jobq, task);
 	stack_queue->njobs++;
 	stack_queue->nprocessed++;
 
@@ -74,9 +74,9 @@ void _starpu_stack_push_task(struct _starpu_stack_jobq *stack_queue, pthread_mut
 	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
 }
 
-starpu_job_t _starpu_stack_pop_task(struct _starpu_stack_jobq *stack_queue, pthread_mutex_t *sched_mutex, int workerid __attribute__ ((unused)))
+struct _starpu_job *_starpu_stack_pop_task(struct _starpu_stack_jobq *stack_queue, pthread_mutex_t *sched_mutex, int workerid __attribute__ ((unused)))
 {
-	starpu_job_t j = NULL;
+	struct _starpu_job *j = NULL;
 
 	if (stack_queue->njobs == 0)
 		return NULL;
@@ -85,7 +85,7 @@ starpu_job_t _starpu_stack_pop_task(struct _starpu_stack_jobq *stack_queue, pthr
 	if (stack_queue->njobs > 0) 
 	{
 		/* there is a task */
-		j = starpu_job_list_pop_back(stack_queue->jobq);
+		j = _starpu_job_list_pop_back(stack_queue->jobq);
 	
 		STARPU_ASSERT(j);
 		stack_queue->njobs--;

+ 3 - 3
src/sched_policies/stack_queues.h

@@ -25,7 +25,7 @@
 
 struct _starpu_stack_jobq {
 	/* the actual list */
-	starpu_job_list_t jobq;
+	struct _starpu_job_list *jobq;
 
 	/* the number of tasks currently in the queue */
 	unsigned njobs;
@@ -41,9 +41,9 @@ struct _starpu_stack_jobq {
 
 struct _starpu_stack_jobq *_starpu_create_stack(void);
 
-void _starpu_stack_push_task(struct _starpu_stack_jobq *stack, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond, starpu_job_t task);
+void _starpu_stack_push_task(struct _starpu_stack_jobq *stack, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond, struct _starpu_job *task);
 
-starpu_job_t _starpu_stack_pop_task(struct _starpu_stack_jobq *stack, pthread_mutex_t *sched_mutex, int workerid);
+struct _starpu_job *_starpu_stack_pop_task(struct _starpu_stack_jobq *stack, pthread_mutex_t *sched_mutex, int workerid);
 
 void _starpu_init_stack_queues_mechanisms(void);
 

+ 2 - 2
src/sched_policies/work_stealing_policy.c

@@ -172,7 +172,7 @@ static struct starpu_task *ws_pop_task(void)
 
 static int ws_push_task(struct starpu_task *task)
 {
-	starpu_job_t j = _starpu_get_job_associated_to_task(task);
+	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 
 	int workerid = starpu_worker_get_id();
 
@@ -184,7 +184,7 @@ static int ws_push_task(struct starpu_task *task)
         //total_number_of_jobs++;
 
         _STARPU_TRACE_JOB_PUSH(task, 0);
-        starpu_job_list_push_front(deque_queue->jobq, j);
+        _starpu_job_list_push_front(deque_queue->jobq, j);
         deque_queue->njobs++;
         deque_queue->nprocessed++;
 

+ 2 - 2
src/util/execute_on_all.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009, 2010  Université de Bordeaux 1
- * Copyright (C) 2010  Centre National de la Recherche Scientifique
+ * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -68,7 +68,7 @@ void starpu_execute_on_each_worker(void (*func)(void *), void *arg, uint32_t whe
 		tasks[worker]->destroy = 0;
 
 #ifdef STARPU_USE_FXT
-                starpu_job_t job = _starpu_get_job_associated_to_task(tasks[worker]);
+                struct _starpu_job *job = _starpu_get_job_associated_to_task(tasks[worker]);
                 job->model_name = "execute_on_all_wrapper";
 #endif