Browse Source

sched_ctx: renaming

	starpu_create_sched_ctx --> starpu_sched_ctx_create
	starpu_create_sched_ctx_inside_interval --> starpu_sched_ctx_create_inside_interval
	starpu_delete_sched_ctx --> starpu_sched_ctx_delete
	starpu_add_workers_to_sched_ctx --> starpu_sched_ctx_add_workers
	starpu_remove_workers_from_sched_ctx --> starpu_sched_ctx_remove_workers
	starpu_set_sched_ctx_policy_data --> starpu_sched_ctx_set_policy_data
	starpu_get_sched_ctx_policy_data --> starpu_sched_ctx_get_policy_data
Nathalie Furmento 12 years ago
parent
commit
a00f51f94a

+ 6 - 6
doc/chapters/advanced-api.texi

@@ -537,20 +537,20 @@ StarPU permits on one hand grouping workers in combined workers in order to exec
 In contrast when we group workers in scheduling contexts we submit starpu tasks to them and we schedule them with the policy assigned to the context.
 Scheduling contexts can be created, deleted and modified dynamically.
 
-@deftypefun unsigned starpu_create_sched_ctx (const char *@var{policy_name}, int *@var{workerids_ctx}, int @var{nworkers_ctx}, const char *@var{sched_ctx_name})
+@deftypefun unsigned starpu_sched_ctx_create (const char *@var{policy_name}, int *@var{workerids_ctx}, int @var{nworkers_ctx}, const char *@var{sched_ctx_name})
 This function creates a scheduling context which uses the scheduling policy indicated in the first argument and assigns the workers indicated in the second argument to execute the tasks submitted to it.
 The return value represents the identifier of the context that has just been created. It will be further used to indicate the context the tasks will be submitted to. The return value should be at most @code{STARPU_NMAX_SCHED_CTXS}.
 @end deftypefun
 
-@deftypefun void starpu_delete_sched_ctx (unsigned @var{sched_ctx_id}, unsigned @var{inheritor_sched_ctx_id})
+@deftypefun void starpu_sched_ctx_delete (unsigned @var{sched_ctx_id}, unsigned @var{inheritor_sched_ctx_id})
 Deletes scheduling context @var{sched_ctx_id} and lets scheduling context @var{inheritor_sched_ctx_id} take over its workers.
 @end deftypefun
 
-@deftypefun void starpu_add_workers_to_sched_ctx ({int *}@var{workerids_ctx}, int @var{nworkers_ctx}, unsigned @var{sched_ctx})
+@deftypefun void starpu_sched_ctx_add_workers ({int *}@var{workerids_ctx}, int @var{nworkers_ctx}, unsigned @var{sched_ctx})
 This function adds dynamically the workers indicated in the first argument to the context indicated in the last argument. The last argument cannot be greater than  @code{STARPU_NMAX_SCHED_CTXS}.
 @end deftypefun
 
-@deftypefun void starpu_remove_workers_from_sched_ctx ({int *}@var{workerids_ctx}, int @var{nworkers_ctx}, unsigned @var{sched_ctx})
+@deftypefun void starpu_sched_ctx_remove_workers ({int *}@var{workerids_ctx}, int @var{nworkers_ctx}, unsigned @var{sched_ctx})
 This function removes the workers indicated in the first argument from the context indicated in the last argument. The last argument cannot be greater than  @code{STARPU_NMAX_SCHED_CTXS}.
 @end deftypefun
 
@@ -713,12 +713,12 @@ This function returns the condition variables associated to a worker in a contex
 It is used in the policy to access to the local queue of the worker
 @end deftypefun
 
-@deftypefun void starpu_set_sched_ctx_policy_data (unsigned @var{sched_ctx_id}, {void *} @var{policy_data})
+@deftypefun void starpu_sched_ctx_set_policy_data (unsigned @var{sched_ctx_id}, {void *} @var{policy_data})
 Each scheduling policy uses some specific data (queues, variables, additional condition variables).
 It is memorized in a local structure. This function assigns it to a scheduling context.
 @end deftypefun
 
-@deftypefun void* starpu_get_sched_ctx_policy_data (unsigned @var{sched_ctx})
+@deftypefun void* starpu_sched_ctx_get_policy_data (unsigned @var{sched_ctx})
 Returns the policy data previously assigned to a context
 @end deftypefun
 

+ 2 - 2
doc/chapters/perf-optimization.texi

@@ -210,7 +210,7 @@ If the application programmer plans to launch several parallel kernels simultane
 Meanwhile, if the application programmer is aware of the demands of these kernels and of the specificity of the machine used to execute them, the workers can be divided between several contexts. 
 These scheduling contexts will isolate the execution of each kernel and they will permit the use of a scheduling policy proper to each one of them.
 In order to create the contexts, you have to know the identifiers of the workers running within StarPU. 
-By passing a set of workers together with the scheduling policy to the function @code{starpu_create_sched_ctx}, you will get an identifier of the context created which you will use to indicate the context you want to submit the tasks to.
+By passing a set of workers together with the scheduling policy to the function @code{starpu_sched_ctx_create}, you will get an identifier of the context created which you will use to indicate the context you want to submit the tasks to.
 
 @cartouche
 @smallexample
@@ -219,7 +219,7 @@ int workerids[3] = @{1, 3, 10@};
 
 /* @b{indicate the scheduling policy to be used within the context, the list of 
    workers assigned to it, the number of workers, the name of the context} */
-int id_ctx = starpu_create_sched_ctx("heft", workerids, 3, "my_ctx");
+int id_ctx = starpu_sched_ctx_create("heft", workerids, 3, "my_ctx");
 
 /* @b{let StarPU know that the following tasks will be submitted to this context} */
 starpu_set_sched_ctx(id);

+ 2 - 2
examples/sched_ctx/sched_ctx.c

@@ -76,8 +76,8 @@ int main(int argc, char **argv)
 #endif
 
 	/*create contexts however you want*/
-	unsigned sched_ctx1 = starpu_create_sched_ctx("dmda", procs1, nprocs1, "ctx1");
-	unsigned sched_ctx2 = starpu_create_sched_ctx("dmda", procs2, nprocs2, "ctx2");
+	unsigned sched_ctx1 = starpu_sched_ctx_create("dmda", procs1, nprocs1, "ctx1");
+	unsigned sched_ctx2 = starpu_sched_ctx_create("dmda", procs2, nprocs2, "ctx2");
 
 	/*indicate what to do with the resources when context 2 finishes (it depends on your application)*/
 	starpu_sched_ctx_set_inheritor(sched_ctx2, sched_ctx1);

+ 3 - 3
examples/sched_ctx_utils/sched_ctx_utils.c

@@ -83,7 +83,7 @@ void* start_bench(void *val){
 	{
 		pthread_mutex_lock(&mut);
 		if(first){
-			starpu_delete_sched_ctx(p->ctx, p->the_other_ctx);
+			starpu_sched_ctx_delete(p->ctx, p->the_other_ctx);
 		}
 		
 		first = 0;
@@ -213,7 +213,7 @@ void construct_contexts(void (*bench)(unsigned, unsigned))
 	}
 	printf("\n ");
 
-	p1.ctx = starpu_create_sched_ctx("heft", procs, nprocs1, "sched_ctx1");
+	p1.ctx = starpu_sched_ctx_create("heft", procs, nprocs1, "sched_ctx1");
 	p2.the_other_ctx = (int)p1.ctx;
 	p1.procs = procs;
 	p1.nprocs = nprocs1;
@@ -240,7 +240,7 @@ void construct_contexts(void (*bench)(unsigned, unsigned))
 	}
 	printf("\n");
 
-	p2.ctx = starpu_create_sched_ctx("heft", procs2, nprocs2, "sched_ctx2");
+	p2.ctx = starpu_sched_ctx_create("heft", procs2, nprocs2, "sched_ctx2");
 	p1.the_other_ctx = (int)p2.ctx;
 	p2.procs = procs2;
 	p2.nprocs = nprocs2;

+ 5 - 5
examples/scheduler/dummy_sched.c

@@ -29,7 +29,7 @@ typedef struct dummy_sched_data {
 
 static void dummy_sched_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	struct dummy_sched_data *data = (struct dummy_sched_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct dummy_sched_data *data = (struct dummy_sched_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	
 	unsigned i;
 	int workerid;
@@ -64,14 +64,14 @@ static void init_dummy_sched(unsigned sched_ctx_id)
 	pthread_mutex_init(&data->sched_mutex, NULL);
 	pthread_cond_init(&data->sched_cond, NULL);
 
-	starpu_set_sched_ctx_policy_data(sched_ctx_id, (void*)data);		
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)data);		
 
 	FPRINTF(stderr, "Initialising Dummy scheduler\n");
 }
 
 static void deinit_dummy_sched(unsigned sched_ctx_id)
 {
-	struct dummy_sched_data *data = (struct dummy_sched_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct dummy_sched_data *data = (struct dummy_sched_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	STARPU_ASSERT(starpu_task_list_empty(&data->sched_list));
 
@@ -88,7 +88,7 @@ static void deinit_dummy_sched(unsigned sched_ctx_id)
 static int push_task_dummy(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	struct dummy_sched_data *data = (struct dummy_sched_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct dummy_sched_data *data = (struct dummy_sched_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	pthread_mutex_lock(&data->sched_mutex);
 
@@ -109,7 +109,7 @@ static struct starpu_task *pop_task_dummy(unsigned sched_ctx_id)
 	 * through the entire list until we find a task that is executable from
 	 * the calling worker. So we just take the head of the list and give it
 	 * to the worker. */
-	struct dummy_sched_data *data = (struct dummy_sched_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct dummy_sched_data *data = (struct dummy_sched_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	return starpu_task_list_pop_back(&data->sched_list);
 }
 

+ 7 - 7
include/starpu_sched_ctx.h

@@ -76,21 +76,21 @@ void starpu_call_poped_task_cb(int workerid, unsigned sched_ctx_id, double flops
 void starpu_call_pushed_task_cb(int workerid, unsigned sched_ctx_id);
 #endif //STARPU_USE_SCHED_CTX_HYPERVISOR
 
-unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids_ctx, int nworkers_ctx, const char *sched_ctx_name);
+unsigned starpu_sched_ctx_create(const char *policy_name, int *workerids_ctx, int nworkers_ctx, const char *sched_ctx_name);
 
-unsigned starpu_create_sched_ctx_inside_interval(const char *policy_name, const char *sched_name,
+unsigned starpu_sched_ctx_create_inside_interval(const char *policy_name, const char *sched_name,
 						 int min_ncpus, int max_ncpus, int min_ngpus, int max_ngpus,
 						 unsigned allow_overlap);
 
-void starpu_delete_sched_ctx(unsigned sched_ctx_id, unsigned inheritor_sched_ctx_id);
+void starpu_sched_ctx_delete(unsigned sched_ctx_id, unsigned inheritor_sched_ctx_id);
 
-void starpu_add_workers_to_sched_ctx(int *workerids_ctx, int nworkers_ctx, unsigned sched_ctx);
+void starpu_sched_ctx_add_workers(int *workerids_ctx, int nworkers_ctx, unsigned sched_ctx);
 
-void starpu_remove_workers_from_sched_ctx(int *workerids_ctx, int nworkers_ctx, unsigned sched_ctx);
+void starpu_sched_ctx_remove_workers(int *workerids_ctx, int nworkers_ctx, unsigned sched_ctx);
 
-void starpu_set_sched_ctx_policy_data(unsigned sched_ctx_id, void *policy_data);
+void starpu_sched_ctx_set_policy_data(unsigned sched_ctx_id, void *policy_data);
 
-void* starpu_get_sched_ctx_policy_data(unsigned sched_ctx);
+void* starpu_sched_ctx_get_policy_data(unsigned sched_ctx);
 
 struct starpu_sched_ctx_worker_collection* starpu_create_worker_collection_for_sched_ctx(unsigned sched_ctx_id, int type);
 

+ 2 - 2
sched_ctx_hypervisor/examples/app_driven_test/app_driven_test.c

@@ -107,8 +107,8 @@ int main()
 	for(i = 0; i < nres2; i++)
 		ressources2[i] = nres1+i;
 
-	unsigned sched_ctx1 = starpu_create_sched_ctx("heft", ressources1, nres1, "sched_ctx1");
-	unsigned sched_ctx2 = starpu_create_sched_ctx("heft", ressources2, nres2, "sched_ctx2");
+	unsigned sched_ctx1 = starpu_sched_ctx_create("heft", ressources1, nres1, "sched_ctx1");
+	unsigned sched_ctx2 = starpu_sched_ctx_create("heft", ressources2, nres2, "sched_ctx2");
 
 	struct starpu_sched_ctx_hypervisor_policy policy;
 	policy.custom = 0;

+ 3 - 3
sched_ctx_hypervisor/examples/sched_ctx_utils/sched_ctx_utils.c

@@ -107,7 +107,7 @@ void* start_bench(void *val)
 	/* 	pthread_mutex_lock(&mut); */
 	/* 	if(first){ */
 	/* 		sched_ctx_hypervisor_unregiser_ctx(p->ctx); */
-	/* 		starpu_delete_sched_ctx(p->ctx, p->the_other_ctx); */
+	/* 		starpu_sched_ctx_delete(p->ctx, p->the_other_ctx); */
 	/* 	} */
 
 	/* 	first = 0; */
@@ -266,7 +266,7 @@ void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 	for(i = 0; i < 12; i++)
 		p1.workers[i] = i;
 
-	p1.ctx = starpu_create_sched_ctx("heft", p1.workers, nworkers1, "sched_ctx1");
+	p1.ctx = starpu_sched_ctx_create("heft", p1.workers, nworkers1, "sched_ctx1");
 	starpu_set_perf_counters(p1.ctx, perf_counters);
 	p2.the_other_ctx = (int)p1.ctx;
 	p1.nworkers = nworkers1;
@@ -302,7 +302,7 @@ void construct_contexts(void (*bench)(float*, unsigned, unsigned))
 	/* for(i = n_all_gpus  + cpu1; i < n_all_gpus + cpu1 + cpu2; i++) */
 	/* 	p2.workers[k++] = i; */
 
-	p2.ctx = starpu_create_sched_ctx("heft", p2.workers, 0, "sched_ctx2");
+	p2.ctx = starpu_sched_ctx_create("heft", p2.workers, 0, "sched_ctx2");
 	starpu_set_perf_counters(p2.ctx, perf_counters);
 	p1.the_other_ctx = (int)p2.ctx;
 	p2.nworkers = 0;

+ 6 - 6
sched_ctx_hypervisor/src/sched_ctx_hypervisor.c

@@ -361,9 +361,9 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 		_get_cpus(workers_to_move, nworkers_to_move, cpus, &ncpus);
 
 //		if(ncpus != 0)
-//			starpu_remove_workers_from_sched_ctx(cpus, ncpus, sender_sched_ctx);
+//			starpu_sched_ctx_remove_workers(cpus, ncpus, sender_sched_ctx);
 
-		starpu_add_workers_to_sched_ctx(workers_to_move, nworkers_to_move, receiver_sched_ctx);
+		starpu_sched_ctx_add_workers(workers_to_move, nworkers_to_move, receiver_sched_ctx);
 
 		if(now)
 		{
@@ -373,7 +373,7 @@ void sched_ctx_hypervisor_move_workers(unsigned sender_sched_ctx, unsigned recei
 /* 				printf(" %d", workers_to_move[j]); */
 /* 			printf("\n"); */
 
-			starpu_remove_workers_from_sched_ctx(workers_to_move, nworkers_to_move, sender_sched_ctx);
+			starpu_sched_ctx_remove_workers(workers_to_move, nworkers_to_move, sender_sched_ctx);
 		}
 		else
 		{
@@ -417,7 +417,7 @@ void sched_ctx_hypervisor_add_workers_to_sched_ctx(int* workers_to_add, unsigned
 /* 		for(j = 0; j < nworkers_to_add; j++) */
 /* 			printf(" %d", workers_to_add[j]); */
 /* 		printf("\n"); */
-		starpu_add_workers_to_sched_ctx(workers_to_add, nworkers_to_add, sched_ctx);
+		starpu_sched_ctx_add_workers(workers_to_add, nworkers_to_add, sched_ctx);
 		struct starpu_sched_ctx_hypervisor_policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctx);
 		int i;
 		for(i = 0; i < nworkers_to_add; i++)
@@ -447,7 +447,7 @@ void sched_ctx_hypervisor_remove_workers_from_sched_ctx(int* workers_to_remove,
 /* 					printf(" %d", workers_to_remove[j]); */
 /* 				printf("\n"); */
 
-				starpu_remove_workers_from_sched_ctx(workers_to_remove, nworkers_to_remove, sched_ctx);
+				starpu_sched_ctx_remove_workers(workers_to_remove, nworkers_to_remove, sched_ctx);
 		}
 		else
 		{
@@ -588,7 +588,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 /* 					printf(" %d", moved_workers[j]); */
 /* 				printf("\n"); */
 
-				starpu_remove_workers_from_sched_ctx(moved_workers, nmoved_workers, sender_sched_ctx);
+				starpu_sched_ctx_remove_workers(moved_workers, nmoved_workers, sender_sched_ctx);
 
 				/* info concerning only the gflops_rate strategy */
 				struct starpu_sched_ctx_hypervisor_wrapper *sender_sc_w = &hypervisor.sched_ctx_w[sender_sched_ctx];

+ 11 - 11
src/core/sched_ctx.c

@@ -329,7 +329,7 @@ static void _get_workers(int min, int max, int *workers, int *nw, enum starpu_ar
 /*TODO: hierarchical ctxs: get n good workers: close to the other ones I already assigned to the ctx */
 						for(i = 0; i < n; i++)
 							workers[(*nw)++] = _pus[i];
-						starpu_remove_workers_from_sched_ctx(_pus, n, config->sched_ctxs[s].id);
+						starpu_sched_ctx_remove_workers(_pus, n, config->sched_ctxs[s].id);
 					}
 				}
 			}
@@ -382,7 +382,7 @@ static void _get_workers(int min, int max, int *workers, int *nw, enum starpu_ar
 							pus_to_remove[c++] = _pus[i];
 						}
 						if(!allow_overlap)
-							starpu_remove_workers_from_sched_ctx(pus_to_remove, npus_to_remove, config->sched_ctxs[s].id);
+							starpu_sched_ctx_remove_workers(pus_to_remove, npus_to_remove, config->sched_ctxs[s].id);
 					}
 
 				}
@@ -391,7 +391,7 @@ static void _get_workers(int min, int max, int *workers, int *nw, enum starpu_ar
 	}
 }
 
-unsigned starpu_create_sched_ctx_inside_interval(const char *policy_name, const char *sched_name, 
+unsigned starpu_sched_ctx_create_inside_interval(const char *policy_name, const char *sched_name, 
 						 int min_ncpus, int max_ncpus, int min_ngpus, int max_ngpus,
 						 unsigned allow_overlap)
 {
@@ -420,7 +420,7 @@ unsigned starpu_create_sched_ctx_inside_interval(const char *policy_name, const
 	return sched_ctx->id;
 	
 }
-unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids, 
+unsigned starpu_sched_ctx_create(const char *policy_name, int *workerids, 
 				 int nworkers, const char *sched_name)
 {
 	struct _starpu_sched_ctx *sched_ctx = NULL;
@@ -464,7 +464,7 @@ static void _starpu_delete_sched_ctx(struct _starpu_sched_ctx *sched_ctx)
 	_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx_manag);
 }
 
-void starpu_delete_sched_ctx(unsigned sched_ctx_id, unsigned inheritor_sched_ctx_id)
+void starpu_sched_ctx_delete(unsigned sched_ctx_id, unsigned inheritor_sched_ctx_id)
 {
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
 	struct _starpu_sched_ctx *inheritor_sched_ctx = _starpu_get_sched_ctx_struct(inheritor_sched_ctx_id);
@@ -480,7 +480,7 @@ void starpu_delete_sched_ctx(unsigned sched_ctx_id, unsigned inheritor_sched_ctx
 
 	if(!(sched_ctx->workers->nworkers == nworkers && sched_ctx->workers->nworkers == inheritor_sched_ctx->workers->nworkers) && sched_ctx->workers->nworkers > 0 && inheritor_sched_ctx_id != STARPU_NMAX_SCHED_CTXS)
 	{
-		starpu_add_workers_to_sched_ctx(sched_ctx->workers->workerids, sched_ctx->workers->nworkers, inheritor_sched_ctx_id);
+		starpu_sched_ctx_add_workers(sched_ctx->workers->workerids, sched_ctx->workers->nworkers, inheritor_sched_ctx_id);
 	}
 
 	if(!_starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx_id) && !_starpu_wait_for_all_tasks_of_sched_ctx(0))
@@ -545,7 +545,7 @@ void _starpu_fetch_tasks_from_empty_ctx_list(struct _starpu_sched_ctx *sched_ctx
 	return;
 
 }
-void starpu_add_workers_to_sched_ctx(int *workers_to_add, int nworkers_to_add, unsigned sched_ctx_id)
+void starpu_sched_ctx_add_workers(int *workers_to_add, int nworkers_to_add, unsigned sched_ctx_id)
 {
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
 	int added_workers[nworkers_to_add];
@@ -569,7 +569,7 @@ void starpu_add_workers_to_sched_ctx(int *workers_to_add, int nworkers_to_add, u
 	return;
 }
 
-void starpu_remove_workers_from_sched_ctx(int *workers_to_remove, int nworkers_to_remove, unsigned sched_ctx_id)
+void starpu_sched_ctx_remove_workers(int *workers_to_remove, int nworkers_to_remove, unsigned sched_ctx_id)
 {
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
 	int removed_workers[sched_ctx->workers->nworkers];
@@ -677,7 +677,7 @@ void _starpu_decrement_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)
 		if(sched_ctx->finished_submit)
 		{
 			_STARPU_PTHREAD_MUTEX_UNLOCK(&finished_submit_mutex);
-			starpu_delete_sched_ctx(sched_ctx_id, sched_ctx->inheritor);
+			starpu_sched_ctx_delete(sched_ctx_id, sched_ctx->inheritor);
 			return;
 		}
 		_STARPU_PTHREAD_MUTEX_UNLOCK(&finished_submit_mutex);
@@ -721,13 +721,13 @@ unsigned _starpu_get_nsched_ctxs()
 	return config->topology.nsched_ctxs;
 }
 
-void starpu_set_sched_ctx_policy_data(unsigned sched_ctx_id, void* policy_data)
+void starpu_sched_ctx_set_policy_data(unsigned sched_ctx_id, void* policy_data)
 {
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
 	sched_ctx->policy_data = policy_data;
 }
 
-void* starpu_get_sched_ctx_policy_data(unsigned sched_ctx_id)
+void* starpu_sched_ctx_get_policy_data(unsigned sched_ctx_id)
 {
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
 	return sched_ctx->policy_data;

+ 13 - 13
src/sched_policies/deque_modeling_policy_data_aware.c

@@ -153,7 +153,7 @@ static struct starpu_task *_starpu_fifo_pop_first_ready_task(struct _starpu_fifo
 
 static struct starpu_task *dmda_pop_ready_task(unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_task *task;
 
@@ -188,7 +188,7 @@ static struct starpu_task *dmda_pop_ready_task(unsigned sched_ctx_id)
 
 static struct starpu_task *dmda_pop_task(unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_task *task;
 
@@ -221,7 +221,7 @@ static struct starpu_task *dmda_pop_task(unsigned sched_ctx_id)
 
 static struct starpu_task *dmda_pop_every_task(unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_task *new_list;
 
@@ -251,7 +251,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 				    double predicted, double predicted_transfer,
 				    int prio, unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	/* make sure someone could execute that task! */
 	STARPU_ASSERT(best_workerid != -1);
 
@@ -326,7 +326,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 /* TODO: factorize with dmda!! */
 static int _dm_push_task(struct starpu_task *task, unsigned prio, unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned worker, worker_ctx = 0;
 	int best = -1;
 	
@@ -464,7 +464,7 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 	unsigned nimpl;
 
 	starpu_task_bundle_t bundle = task->bundle;
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
 		
 	while(workers->has_next(workers))
@@ -577,7 +577,7 @@ static int _dmda_push_task(struct starpu_task *task, unsigned prio, unsigned sch
 	int forced_best = -1;
 	int forced_impl = -1;
 
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
 	unsigned nworkers_ctx = workers->nworkers;
 	double local_task_length[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS];
@@ -746,7 +746,7 @@ static int dmda_push_task(struct starpu_task *task)
 
 static void dmda_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers) 
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid;
 	unsigned i;
@@ -760,7 +760,7 @@ static void dmda_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nwo
 
 static void dmda_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid;
 	unsigned i;
@@ -782,7 +782,7 @@ static void initialize_dmda_policy(unsigned sched_ctx_id)
 	dt->_gamma = _STARPU_DEFAULT_GAMMA;
 	dt->idle_power = 0.0;
 
-	starpu_set_sched_ctx_policy_data(sched_ctx_id, (void*)dt);
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)dt);
 
 	dt->queue_array = (struct _starpu_fifo_taskq**)malloc(STARPU_NMAXWORKERS*sizeof(struct _starpu_fifo_taskq*));
 
@@ -825,7 +825,7 @@ static void initialize_dmda_sorted_policy(unsigned sched_ctx_id)
 
 static void deinitialize_dmda_policy(unsigned sched_ctx_id) 
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	free(dt->queue_array);
 	free(dt);
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
@@ -840,7 +840,7 @@ static void dmda_pre_exec_hook(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
 	int workerid = starpu_worker_get_id();
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = dt->queue_array[workerid];
 	double model = task->predicted;
 	double transfer_model = task->predicted_transfer;
@@ -859,7 +859,7 @@ static void dmda_pre_exec_hook(struct starpu_task *task)
 
 static void dmda_push_task_notify(struct starpu_task *task, int workerid, unsigned sched_ctx_id)
 {
-	dmda_data *dt = (dmda_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	dmda_data *dt = (dmda_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = dt->queue_array[workerid];
 	/* Compute the expected penalty */
 	enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(workerid);

+ 6 - 6
src/sched_policies/eager_central_policy.c

@@ -32,7 +32,7 @@ typedef struct {
 
 static void eager_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers) 
 {
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned i;
 	int workerid;
 	for (i = 0; i < nworkers; i++)
@@ -67,14 +67,14 @@ static void initialize_eager_center_policy(unsigned sched_ctx_id)
 	_STARPU_PTHREAD_MUTEX_INIT(&data->sched_mutex, NULL);
 	_STARPU_PTHREAD_COND_INIT(&data->sched_cond, NULL);
 
-	starpu_set_sched_ctx_policy_data(sched_ctx_id, (void*)data);
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)data);
 }
 
 static void deinitialize_eager_center_policy(unsigned sched_ctx_id) 
 {
 	/* TODO check that there is no task left in the queue */
 
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	/* deallocate the job queue */
 	_starpu_destroy_fifo(data->fifo);
@@ -90,7 +90,7 @@ static void deinitialize_eager_center_policy(unsigned sched_ctx_id)
 static int push_task_eager_policy(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	pthread_mutex_t *changing_ctx_mutex = starpu_get_changing_ctx_mutex(sched_ctx_id);
 	unsigned nworkers;
 	int ret_val = -1;
@@ -110,14 +110,14 @@ static int push_task_eager_policy(struct starpu_task *task)
 
 static struct starpu_task *pop_every_task_eager_policy(unsigned sched_ctx_id)
 {
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	return _starpu_fifo_pop_every_task(data->fifo, &data->sched_mutex, starpu_worker_get_id());
 }
 
 static struct starpu_task *pop_task_eager_policy(unsigned sched_ctx_id)
 {
 	unsigned workerid = starpu_worker_get_id();
-	eager_center_policy_data *data = (eager_center_policy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_center_policy_data *data = (eager_center_policy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	
 	return _starpu_fifo_pop_task(data->fifo, workerid);
 }

+ 5 - 5
src/sched_policies/eager_central_priority_policy.c

@@ -76,7 +76,7 @@ static void _starpu_destroy_priority_taskq(struct _starpu_priority_taskq *priori
 
 static void eager_priority_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers) 
 {
-	eager_central_prio_data *data = (eager_central_prio_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_central_prio_data *data = (eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	unsigned i;
 	int workerid;
@@ -109,7 +109,7 @@ static void initialize_eager_center_priority_policy(unsigned sched_ctx_id)
 
 	/* only a single queue (even though there are several internally) */
 	data->taskq = _starpu_create_priority_taskq();
-	starpu_set_sched_ctx_policy_data(sched_ctx_id, (void*)data);
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)data);
 
 	_STARPU_PTHREAD_MUTEX_INIT(&data->sched_mutex, NULL);
 	_STARPU_PTHREAD_COND_INIT(&data->sched_cond, NULL);
@@ -119,7 +119,7 @@ static void initialize_eager_center_priority_policy(unsigned sched_ctx_id)
 static void deinitialize_eager_center_priority_policy(unsigned sched_ctx_id) 
 {
 	/* TODO check that there is no task left in the queue */
-	eager_central_prio_data *data = (eager_central_prio_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_central_prio_data *data = (eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	/* deallocate the task queue */
 	_starpu_destroy_priority_taskq(data->taskq);
@@ -135,7 +135,7 @@ static void deinitialize_eager_center_priority_policy(unsigned sched_ctx_id)
 static int _starpu_priority_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	eager_central_prio_data *data = (eager_central_prio_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_central_prio_data *data = (eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct _starpu_priority_taskq *taskq = data->taskq;
 
@@ -178,7 +178,7 @@ static struct starpu_task *_starpu_priority_pop_task(unsigned sched_ctx_id)
 	unsigned workerid = starpu_worker_get_id();
 	int skipped = 0;
 
-	eager_central_prio_data *data = (eager_central_prio_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	eager_central_prio_data *data = (eager_central_prio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	
 	struct _starpu_priority_taskq *taskq = data->taskq;
 

+ 10 - 10
src/sched_policies/heft.c

@@ -70,7 +70,7 @@ static void param_modified(struct starpu_top_param* d)
 
 static void heft_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	heft_data *hd = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid;
 	unsigned i;
@@ -86,7 +86,7 @@ static void heft_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nwo
 
 static void heft_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	heft_data *hd = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid;
 	unsigned i;
@@ -109,7 +109,7 @@ static void heft_init(unsigned sched_ctx_id)
 	hd->_gamma = _STARPU_DEFAULT_GAMMA;
 	hd->idle_power = 0.0;
 	
-	starpu_set_sched_ctx_policy_data(sched_ctx_id, (void*)hd);
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)hd);
 
 	hd->queue_array = (struct _starpu_fifo_taskq**)malloc(STARPU_NMAXWORKERS*sizeof(struct _starpu_fifo_taskq*));
 
@@ -143,7 +143,7 @@ static void heft_pre_exec_hook(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
 	int workerid = starpu_worker_get_id();
-	heft_data *hd = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = hd->queue_array[workerid];
 	double model = task->predicted;
 	double transfer_model = task->predicted_transfer;
@@ -163,7 +163,7 @@ static void heft_pre_exec_hook(struct starpu_task *task)
 static void heft_push_task_notify(struct starpu_task *task, int workerid)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	heft_data *hd = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = hd->queue_array[workerid];
 	/* Compute the expected penality */
 	enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(workerid);
@@ -222,7 +222,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	/* make sure someone coule execute that task ! */
 	STARPU_ASSERT(best_workerid != -1);
 
-	heft_data *hd = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = hd->queue_array[best_workerid];
 
 	pthread_mutex_t *sched_mutex;
@@ -318,7 +318,7 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 	int worker, worker_ctx = 0;
 	unsigned nimpl;
 
-	heft_data *hd = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	starpu_task_bundle_t bundle = task->bundle;
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
@@ -429,7 +429,7 @@ static void compute_all_performance_predictions(struct starpu_task *task,
 /* TODO: factorize with dmda */
 static int _heft_push_task(struct starpu_task *task, unsigned prio, unsigned sched_ctx_id)
 {
-	heft_data *hd = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	int worker, worker_ctx = 0;
 	unsigned nimpl;
 	int best = -1, best_in_ctx = -1;
@@ -602,7 +602,7 @@ static struct starpu_task *heft_pop_task(unsigned sched_ctx_id)
 	struct starpu_task *task;
 
 	int workerid = starpu_worker_get_id();
-	heft_data *hd = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *hd = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_fifo_taskq *fifo = hd->queue_array[workerid];
 
 	task = _starpu_fifo_pop_local_task(fifo);
@@ -619,7 +619,7 @@ static struct starpu_task *heft_pop_task(unsigned sched_ctx_id)
 
 static void heft_deinit(unsigned sched_ctx_id) 
 {
-	heft_data *ht = (heft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	heft_data *ht = (heft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	free(ht);
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
 }

+ 6 - 6
src/sched_policies/parallel_greedy.c

@@ -46,7 +46,7 @@ static int possible_combinations_size[STARPU_NMAXWORKERS][10];
 
 static void pgreedy_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	_starpu_sched_find_worker_combinations(workerids, nworkers);
 
@@ -130,7 +130,7 @@ static void pgreedy_add_workers(unsigned sched_ctx_id, int *workerids, unsigned
 
 static void pgreedy_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	int workerid;
 	unsigned i;
 	for(i = 0; i < nworkers; i++)
@@ -154,13 +154,13 @@ static void initialize_pgreedy_policy(unsigned sched_ctx_id)
 	_STARPU_PTHREAD_MUTEX_INIT(&data->sched_mutex, NULL);
 	_STARPU_PTHREAD_COND_INIT(&data->sched_cond, NULL);
 
-	starpu_set_sched_ctx_policy_data(sched_ctx_id, (void*)data);
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)data);
 }
 
 static void deinitialize_pgreedy_policy(unsigned sched_ctx_id) 
 {
 	/* TODO check that there is no task left in the queue */
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	/* deallocate the job queue */
 	_starpu_destroy_fifo(data->fifo);
@@ -189,7 +189,7 @@ static int push_task_pgreedy_policy(struct starpu_task *task)
    		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		return ret_val;
 	}
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	ret_val = _starpu_fifo_push_task(data->fifo, &data->sched_mutex, &data->sched_cond, task);
 	_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 	
@@ -198,7 +198,7 @@ static int push_task_pgreedy_policy(struct starpu_task *task)
 
 static struct starpu_task *pop_task_pgreedy_policy(unsigned sched_ctx_id)
 {
-	struct pgreedy_data *data = (struct pgreedy_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	struct pgreedy_data *data = (struct pgreedy_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	int workerid = starpu_worker_get_id();
 

+ 4 - 4
src/sched_policies/parallel_heft.c

@@ -90,7 +90,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 	/* make sure someone coule execute that task ! */
 	STARPU_ASSERT(best_workerid != -1);
 	
-	pheft_data *hd = (pheft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	pheft_data *hd = (pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	/* Is this a basic worker or a combined worker ? */
 	unsigned memory_node;
@@ -242,7 +242,7 @@ static double compute_ntasks_end(int workerid)
 
 static int _parallel_heft_push_task(struct starpu_task *task, unsigned prio, unsigned sched_ctx_id)
 {
-	pheft_data *hd = (pheft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	pheft_data *hd = (pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_sched_ctx_worker_collection *workers = starpu_get_worker_collection_of_sched_ctx(sched_ctx_id);
 	unsigned nworkers_ctx = workers->nworkers;
@@ -555,7 +555,7 @@ static void initialize_parallel_heft_policy(unsigned sched_ctx_id)
 	hd->_gamma = _STARPU_DEFAULT_GAMMA;
 	hd->idle_power = 0.0;
 	
-	starpu_set_sched_ctx_policy_data(sched_ctx_id, (void*)hd);
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)hd);
 
 	const char *strval_alpha = getenv("STARPU_SCHED_ALPHA");
 	if (strval_alpha)
@@ -579,7 +579,7 @@ static void initialize_parallel_heft_policy(unsigned sched_ctx_id)
 
 static void parallel_heft_deinit(unsigned sched_ctx_id) 
 {
-	pheft_data *hd = (pheft_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	pheft_data *hd = (pheft_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	starpu_delete_worker_collection_for_sched_ctx(sched_ctx_id);
 	_STARPU_PTHREAD_MUTEX_DESTROY(&hd->global_push_mutex);
 	free(hd);

+ 9 - 9
src/sched_policies/work_stealing_policy.c

@@ -56,7 +56,7 @@ static int calibration_value = 0;
  */
 static unsigned select_victim_round_robin(unsigned sched_ctx_id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned worker = ws->last_pop_worker;
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
 
@@ -84,7 +84,7 @@ static unsigned select_victim_round_robin(unsigned sched_ctx_id)
  */
 static unsigned select_worker_round_robin(unsigned sched_ctx_id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned worker = ws->last_push_worker;
 	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
 
@@ -105,7 +105,7 @@ static unsigned select_worker_round_robin(unsigned sched_ctx_id)
  */
 static float overload_metric(unsigned sched_ctx_id, unsigned id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	float execution_ratio = 0.0f;
 	float current_ratio = 0.0f;
 
@@ -250,7 +250,7 @@ static inline unsigned select_worker(unsigned sched_ctx_id)
 #endif
 static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct starpu_task *task;
 	struct _starpu_deque_jobq *q;
@@ -293,7 +293,7 @@ static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
 int ws_push_task(struct starpu_task *task)
 {
 	unsigned sched_ctx_id = task->sched_ctx;
-	work_stealing_data *ws = (work_stealing_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	struct _starpu_deque_jobq *deque_queue;
 	struct _starpu_job *j = _starpu_get_job_associated_to_task(task); 
@@ -342,7 +342,7 @@ int ws_push_task(struct starpu_task *task)
 
 static void ws_add_workers(unsigned sched_ctx_id, int *workerids,unsigned nworkers) 
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	unsigned i;
 	int workerid;
@@ -364,7 +364,7 @@ static void ws_add_workers(unsigned sched_ctx_id, int *workerids,unsigned nworke
 
 static void ws_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	unsigned i;
 	int workerid;
@@ -382,7 +382,7 @@ static void initialize_ws_policy(unsigned sched_ctx_id)
 	starpu_create_worker_collection_for_sched_ctx(sched_ctx_id, WORKER_LIST);
 
 	work_stealing_data *ws = (work_stealing_data*)malloc(sizeof(work_stealing_data));
-	starpu_set_sched_ctx_policy_data(sched_ctx_id, (void*)ws);
+	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)ws);
 	
 	ws->last_pop_worker = 0;
 	ws->last_push_worker = 0;
@@ -401,7 +401,7 @@ static void initialize_ws_policy(unsigned sched_ctx_id)
 
 static void deinit_ws_policy(unsigned sched_ctx_id)
 {
-	work_stealing_data *ws = (work_stealing_data*)starpu_get_sched_ctx_policy_data(sched_ctx_id);
+	work_stealing_data *ws = (work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 
 	free(ws->queue_array);
 	_STARPU_PTHREAD_MUTEX_DESTROY(&ws->sched_mutex);