浏览代码

sched_ctx: renaming in include/starpu_sched_ctx.h

Nathalie Furmento 12 年之前
父节点
当前提交
21cfa998e0

+ 2 - 2
doc/chapters/advanced-api.texi

@@ -610,12 +610,12 @@ Sets the scheduling context the task will be submitted to
 Returns the scheduling contexts the tasks are currently submitted to
 Returns the scheduling contexts the tasks are currently submitted to
 @end deftypefun
 @end deftypefun
 
 
-@deftypefun unsigned starpu_get_nworkers_of_sched_ctx (unsigned @var{sched_ctx})
+@deftypefun unsigned starpu_sched_ctx_get_nworkers (unsigned @var{sched_ctx})
 Returns the number of workers managed by the specified contexts
 Returns the number of workers managed by the specified contexts
 (Usually needed to verify if it manages any workers or if it should be blocked)
 (Usually needed to verify if it manages any workers or if it should be blocked)
 @end deftypefun
 @end deftypefun
 
 
-@deftypefun unsigned starpu_get_nshared_workers (unsigned @var{sched_ctx_id}, unsigned @var{sched_ctx_id2})
+@deftypefun unsigned starpu_sched_ctx_get_nshared_workers (unsigned @var{sched_ctx_id}, unsigned @var{sched_ctx_id2})
 Returns the number of workers shared by two contexts
 Returns the number of workers shared by two contexts
 @end deftypefun
 @end deftypefun
 
 

+ 4 - 4
include/starpu_sched_ctx.h

@@ -131,13 +131,13 @@ void starpu_notify_hypervisor_exists(void);
 
 
 unsigned starpu_check_if_hypervisor_exists(void);
 unsigned starpu_check_if_hypervisor_exists(void);
 
 
-unsigned starpu_get_nworkers_of_sched_ctx(unsigned sched_ctx);
+unsigned starpu_sched_ctx_get_nworkers(unsigned sched_ctx);
 
 
-unsigned starpu_get_nshared_workers(unsigned sched_ctx_id, unsigned sched_ctx_id2);
+unsigned starpu_sched_ctx_get_nshared_workers(unsigned sched_ctx_id, unsigned sched_ctx_id2);
 
 
-unsigned starpu_worker_belongs_to_sched_ctx(int workerid, unsigned sched_ctx_id);
+unsigned starpu_sched_ctx_contains_worker(int workerid, unsigned sched_ctx_id);
 
 
-unsigned starpu_are_overlapping_ctxs_on_worker(int workerid);
+unsigned starpu_sched_ctx_overlapping_ctxs_on_worker(int workerid);
 
 
 unsigned starpu_is_ctxs_turn(int workerid, unsigned sched_ctx_id);
 unsigned starpu_is_ctxs_turn(int workerid, unsigned sched_ctx_id);
 
 

+ 3 - 3
sched_ctx_hypervisor/src/hypervisor_policies/gflops_rate_policy.c

@@ -72,9 +72,9 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
                 struct starpu_sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
                 struct starpu_sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
                 unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
                 unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
                 unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
                 unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
-                unsigned sender_nworkers = starpu_get_nworkers_of_sched_ctx(sender_sched_ctx);
+                unsigned sender_nworkers = starpu_sched_ctx_get_nworkers(sender_sched_ctx);
                 struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                 struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
-                unsigned nworkers_ctx = starpu_get_nworkers_of_sched_ctx(receiver_sched_ctx);
+                unsigned nworkers_ctx = starpu_sched_ctx_get_nworkers(receiver_sched_ctx);
 
 
                 if(nworkers_needed < (potential_moving_cpus + 5 * potential_moving_gpus))
                 if(nworkers_needed < (potential_moving_cpus + 5 * potential_moving_gpus))
                 {
                 {
@@ -119,7 +119,7 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
 
 
                         if(sender_nworkers - nworkers_to_move >= sender_config->min_nworkers)
                         if(sender_nworkers - nworkers_to_move >= sender_config->min_nworkers)
                         {
                         {
-                                unsigned nshared_workers = starpu_get_nshared_workers(sender_sched_ctx, receiver_sched_ctx);
+                                unsigned nshared_workers = starpu_sched_ctx_get_nshared_workers(sender_sched_ctx, receiver_sched_ctx);
                                 if((nworkers_ctx + nworkers_to_move - nshared_workers) > config->max_nworkers)
                                 if((nworkers_ctx + nworkers_to_move - nshared_workers) > config->max_nworkers)
                                         nworkers_to_move = nworkers_ctx > config->max_nworkers ? 0 : (config->max_nworkers - nworkers_ctx + nshared_workers);
                                         nworkers_to_move = nworkers_ctx > config->max_nworkers ? 0 : (config->max_nworkers - nworkers_ctx + nshared_workers);
 
 

+ 1 - 1
sched_ctx_hypervisor/src/hypervisor_policies/idle_policy.c

@@ -23,7 +23,7 @@ unsigned worker_belong_to_other_sched_ctx(unsigned sched_ctx, int worker)
 
 
 	int i;
 	int i;
 	for(i = 0; i < nsched_ctxs; i++)
 	for(i = 0; i < nsched_ctxs; i++)
-		if(sched_ctxs[i] != sched_ctx && starpu_worker_belongs_to_sched_ctx(worker, sched_ctxs[i]))
+		if(sched_ctxs[i] != sched_ctx && starpu_sched_ctx_contains_worker(worker, sched_ctxs[i]))
 			return 1;
 			return 1;
 	return 0;
 	return 0;
 }
 }

+ 5 - 5
sched_ctx_hypervisor/src/hypervisor_policies/policy_tools.c

@@ -59,7 +59,7 @@ unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_move)
 	{
 	{
 		if(sched_ctxs[i] != STARPU_NMAX_SCHED_CTXS && sched_ctxs[i] != req_sched_ctx)
 		if(sched_ctxs[i] != STARPU_NMAX_SCHED_CTXS && sched_ctxs[i] != req_sched_ctx)
 		{
 		{
-			unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctxs[i]);
+			unsigned nworkers = starpu_sched_ctx_get_nworkers(sched_ctxs[i]);
 			config  = sched_ctx_hypervisor_get_config(sched_ctxs[i]);
 			config  = sched_ctx_hypervisor_get_config(sched_ctxs[i]);
 			if((nworkers + nworkers_to_move) <= config->max_nworkers)
 			if((nworkers + nworkers_to_move) <= config->max_nworkers)
 			{
 			{
@@ -207,7 +207,7 @@ unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_policy_confi
 int _get_nworkers_to_move(unsigned req_sched_ctx)
 int _get_nworkers_to_move(unsigned req_sched_ctx)
 {
 {
        	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
        	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
-	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(req_sched_ctx);
+	unsigned nworkers = starpu_sched_ctx_get_nworkers(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
 	unsigned nworkers_to_move = 0;
 
 
 	unsigned potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx, STARPU_ANY_WORKER);
 	unsigned potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx, STARPU_ANY_WORKER);
@@ -269,8 +269,8 @@ unsigned _resize(unsigned sender_sched_ctx, unsigned receiver_sched_ctx, unsigne
 			{
 			{
 				poor_sched_ctx = receiver_sched_ctx;
 				poor_sched_ctx = receiver_sched_ctx;
 				struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
-				unsigned nworkers = starpu_get_nworkers_of_sched_ctx(poor_sched_ctx);
-				unsigned nshared_workers = starpu_get_nshared_workers(sender_sched_ctx, poor_sched_ctx);
+				unsigned nworkers = starpu_sched_ctx_get_nworkers(poor_sched_ctx);
+				unsigned nshared_workers = starpu_sched_ctx_get_nshared_workers(sender_sched_ctx, poor_sched_ctx);
 				if((nworkers+nworkers_to_move-nshared_workers) > config->max_nworkers)
 				if((nworkers+nworkers_to_move-nshared_workers) > config->max_nworkers)
 					nworkers_to_move = nworkers > config->max_nworkers ? 0 : (config->max_nworkers - nworkers+nshared_workers);
 					nworkers_to_move = nworkers > config->max_nworkers ? 0 : (config->max_nworkers - nworkers+nshared_workers);
 				if(nworkers_to_move == 0) poor_sched_ctx = STARPU_NMAX_SCHED_CTXS;
 				if(nworkers_to_move == 0) poor_sched_ctx = STARPU_NMAX_SCHED_CTXS;
@@ -332,7 +332,7 @@ double _get_ctx_velocity(struct starpu_sched_ctx_hypervisor_wrapper* sc_w)
         double elapsed_flops = sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
         double elapsed_flops = sched_ctx_hypervisor_get_elapsed_flops_per_sched_ctx(sc_w);
 	double total_elapsed_flops = sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(sc_w);
 	double total_elapsed_flops = sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(sc_w);
 	double prc = elapsed_flops/sc_w->total_flops;
 	double prc = elapsed_flops/sc_w->total_flops;
-	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sc_w->sched_ctx);
+	unsigned nworkers = starpu_sched_ctx_get_nworkers(sc_w->sched_ctx);
 	double redim_sample = elapsed_flops == total_elapsed_flops ? HYPERVISOR_START_REDIM_SAMPLE*nworkers : HYPERVISOR_REDIM_SAMPLE*nworkers;
 	double redim_sample = elapsed_flops == total_elapsed_flops ? HYPERVISOR_START_REDIM_SAMPLE*nworkers : HYPERVISOR_REDIM_SAMPLE*nworkers;
 	if(prc >= redim_sample)
 	if(prc >= redim_sample)
         {
         {

+ 7 - 7
sched_ctx_hypervisor/src/hypervisor_policies/simple_policy.c

@@ -56,7 +56,7 @@ static unsigned _find_poor_sched_ctx(unsigned req_sched_ctx, int nworkers_to_mov
 	{
 	{
 		if(sched_ctxs[i] != STARPU_NMAX_SCHED_CTXS && sched_ctxs[i] != req_sched_ctx)
 		if(sched_ctxs[i] != STARPU_NMAX_SCHED_CTXS && sched_ctxs[i] != req_sched_ctx)
 		{
 		{
-			unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctxs[i]);
+			unsigned nworkers = starpu_sched_ctx_get_nworkers(sched_ctxs[i]);
 			config  = sched_ctx_hypervisor_get_config(sched_ctxs[i]);
 			config  = sched_ctx_hypervisor_get_config(sched_ctxs[i]);
 			if((nworkers + nworkers_to_move) <= config->max_nworkers)
 			if((nworkers + nworkers_to_move) <= config->max_nworkers)
 			{
 			{
@@ -176,7 +176,7 @@ static unsigned _get_potential_nworkers(struct starpu_sched_ctx_hypervisor_polic
 static unsigned _get_nworkers_to_move(unsigned req_sched_ctx)
 static unsigned _get_nworkers_to_move(unsigned req_sched_ctx)
 {
 {
        	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
        	struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(req_sched_ctx);
-	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(req_sched_ctx);
+	unsigned nworkers = starpu_sched_ctx_get_nworkers(req_sched_ctx);
 	unsigned nworkers_to_move = 0;
 	unsigned nworkers_to_move = 0;
 
 
 	unsigned potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx, 0);
 	unsigned potential_moving_workers = _get_potential_nworkers(config, req_sched_ctx, 0);
@@ -237,8 +237,8 @@ static unsigned _simple_resize(unsigned sender_sched_ctx, unsigned receiver_sche
 			{
 			{
 				poor_sched_ctx = receiver_sched_ctx;
 				poor_sched_ctx = receiver_sched_ctx;
 				struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
 				struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(poor_sched_ctx);
-				unsigned nworkers = starpu_get_nworkers_of_sched_ctx(poor_sched_ctx);
-				unsigned nshared_workers = starpu_get_nshared_workers(sender_sched_ctx, poor_sched_ctx);
+				unsigned nworkers = starpu_sched_ctx_get_nworkers(poor_sched_ctx);
+				unsigned nshared_workers = starpu_sched_ctx_get_nshared_workers(sender_sched_ctx, poor_sched_ctx);
 				if((nworkers+nworkers_to_move-nshared_workers) > config->max_nworkers)
 				if((nworkers+nworkers_to_move-nshared_workers) > config->max_nworkers)
 					nworkers_to_move = nworkers > config->max_nworkers ? 0 : (config->max_nworkers - nworkers+nshared_workers);
 					nworkers_to_move = nworkers > config->max_nworkers ? 0 : (config->max_nworkers - nworkers+nshared_workers);
 				if(nworkers_to_move == 0) poor_sched_ctx = STARPU_NMAX_SCHED_CTXS;
 				if(nworkers_to_move == 0) poor_sched_ctx = STARPU_NMAX_SCHED_CTXS;
@@ -285,9 +285,9 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
                 struct starpu_sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
                 struct starpu_sched_ctx_hypervisor_policy_config *sender_config = sched_ctx_hypervisor_get_config(sender_sched_ctx);
                 unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
                 unsigned potential_moving_cpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CPU_WORKER);
                 unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
                 unsigned potential_moving_gpus = _get_potential_nworkers(sender_config, sender_sched_ctx, STARPU_CUDA_WORKER);
-                unsigned sender_nworkers = starpu_get_nworkers_of_sched_ctx(sender_sched_ctx);
+                unsigned sender_nworkers = starpu_sched_ctx_get_nworkers(sender_sched_ctx);
                 struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
                 struct starpu_sched_ctx_hypervisor_policy_config *config = sched_ctx_hypervisor_get_config(receiver_sched_ctx);
-                unsigned nworkers_ctx = starpu_get_nworkers_of_sched_ctx(receiver_sched_ctx);
+                unsigned nworkers_ctx = starpu_sched_ctx_get_nworkers(receiver_sched_ctx);
 
 
                 if(nworkers_needed < (potential_moving_cpus + 5 * potential_moving_gpus))
                 if(nworkers_needed < (potential_moving_cpus + 5 * potential_moving_gpus))
                 {
                 {
@@ -323,7 +323,7 @@ static int* _get_workers_to_move(unsigned sender_sched_ctx, unsigned receiver_sc
 
 
                         if(sender_nworkers - nworkers_to_move >= sender_config->min_nworkers)
                         if(sender_nworkers - nworkers_to_move >= sender_config->min_nworkers)
                         {
                         {
-                                unsigned nshared_workers = starpu_get_nshared_workers(sender_sched_ctx, receiver_sched_ctx);
+                                unsigned nshared_workers = starpu_sched_ctx_get_nshared_workers(sender_sched_ctx, receiver_sched_ctx);
                                 if((nworkers_ctx + nworkers_to_move - nshared_workers) > config->max_nworkers)
                                 if((nworkers_ctx + nworkers_to_move - nshared_workers) > config->max_nworkers)
                                         nworkers_to_move = nworkers_ctx > config->max_nworkers ? 0 : (config->max_nworkers - nworkers_ctx + nshared_workers);
                                         nworkers_to_move = nworkers_ctx > config->max_nworkers ? 0 : (config->max_nworkers - nworkers_ctx + nshared_workers);
 
 

+ 3 - 3
sched_ctx_hypervisor/src/sched_ctx_hypervisor.c

@@ -457,7 +457,7 @@ void sched_ctx_hypervisor_remove_workers_from_sched_ctx(int* workers_to_remove,
 
 
 				int i;
 				int i;
 				for(i = 0; i < nworkers_to_remove; i++)
 				for(i = 0; i < nworkers_to_remove; i++)
-					if(starpu_worker_belongs_to_sched_ctx(workers_to_remove[i], sched_ctx))
+					if(starpu_sched_ctx_contains_worker(workers_to_remove[i], sched_ctx))
 						workers[nworkers++] = workers_to_remove[i];
 						workers[nworkers++] = workers_to_remove[i];
 
 
 				hypervisor.sched_ctx_w[sched_ctx].resize_ack.receiver_sched_ctx = -1;
 				hypervisor.sched_ctx_w[sched_ctx].resize_ack.receiver_sched_ctx = -1;
@@ -508,7 +508,7 @@ double sched_ctx_hypervisor_get_total_elapsed_flops_per_sched_ctx(struct starpu_
 
 
 static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 {
 {
-	if(worker != -1 && !starpu_worker_belongs_to_sched_ctx(worker, sched_ctx))
+	if(worker != -1 && !starpu_sched_ctx_contains_worker(worker, sched_ctx))
 		return 0;
 		return 0;
 
 
 	struct starpu_sched_ctx_hypervisor_resize_ack *resize_ack = NULL;
 	struct starpu_sched_ctx_hypervisor_resize_ack *resize_ack = NULL;
@@ -523,7 +523,7 @@ static unsigned _ack_resize_completed(unsigned sched_ctx, int worker)
 			pthread_mutex_lock(&sc_w->mutex);
 			pthread_mutex_lock(&sc_w->mutex);
 			unsigned only_remove = 0;
 			unsigned only_remove = 0;
 			if(sc_w->resize_ack.receiver_sched_ctx == -1 && hypervisor.sched_ctxs[i] != sched_ctx &&
 			if(sc_w->resize_ack.receiver_sched_ctx == -1 && hypervisor.sched_ctxs[i] != sched_ctx &&
-			   sc_w->resize_ack.nmoved_workers > 0 && starpu_worker_belongs_to_sched_ctx(worker, hypervisor.sched_ctxs[i]))
+			   sc_w->resize_ack.nmoved_workers > 0 && starpu_sched_ctx_contains_worker(worker, hypervisor.sched_ctxs[i]))
 			{
 			{
 				int j;
 				int j;
 				for(j = 0; j < sc_w->resize_ack.nmoved_workers; j++)
 				for(j = 0; j < sc_w->resize_ack.nmoved_workers; j++)

+ 4 - 4
src/core/sched_ctx.c

@@ -854,7 +854,7 @@ _starpu_pthread_mutex_t* starpu_get_changing_ctx_mutex(unsigned sched_ctx_id)
 	return &sched_ctx->changing_ctx_mutex;
 	return &sched_ctx->changing_ctx_mutex;
 }
 }
 
 
-unsigned starpu_get_nworkers_of_sched_ctx(unsigned sched_ctx_id)
+unsigned starpu_sched_ctx_get_nworkers(unsigned sched_ctx_id)
 {
 {
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
 	if(sched_ctx != NULL)
 	if(sched_ctx != NULL)
@@ -864,7 +864,7 @@ unsigned starpu_get_nworkers_of_sched_ctx(unsigned sched_ctx_id)
 
 
 }
 }
 
 
-unsigned starpu_get_nshared_workers(unsigned sched_ctx_id, unsigned sched_ctx_id2)
+unsigned starpu_sched_ctx_get_nshared_workers(unsigned sched_ctx_id, unsigned sched_ctx_id2)
 {
 {
         struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
         struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
         struct _starpu_sched_ctx *sched_ctx2 = _starpu_get_sched_ctx_struct(sched_ctx_id2);
         struct _starpu_sched_ctx *sched_ctx2 = _starpu_get_sched_ctx_struct(sched_ctx_id2);
@@ -900,7 +900,7 @@ unsigned starpu_get_nshared_workers(unsigned sched_ctx_id, unsigned sched_ctx_id
 	return shared_workers;
 	return shared_workers;
 }
 }
 
 
-unsigned starpu_worker_belongs_to_sched_ctx(int workerid, unsigned sched_ctx_id)
+unsigned starpu_sched_ctx_contains_worker(int workerid, unsigned sched_ctx_id)
 {
 {
 	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 	unsigned i;
 	unsigned i;
@@ -912,7 +912,7 @@ unsigned starpu_worker_belongs_to_sched_ctx(int workerid, unsigned sched_ctx_id)
 	return 0;
 	return 0;
 }
 }
 
 
-unsigned starpu_are_overlapping_ctxs_on_worker(int workerid)
+unsigned starpu_sched_ctx_overlapping_ctxs_on_worker(int workerid)
 {
 {
 	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
 	return worker->nsched_ctxs > 1;
 	return worker->nsched_ctxs > 1;

+ 3 - 3
src/sched_policies/deque_modeling_policy_data_aware.c

@@ -692,7 +692,7 @@ static int dmda_push_sorted_task(struct starpu_task *task)
         int ret_val = -1;
         int ret_val = -1;
 
 
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 	if(nworkers == 0)
 	if(nworkers == 0)
 	{
 	{
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
@@ -713,7 +713,7 @@ static int dm_push_task(struct starpu_task *task)
         int ret_val = -1;
         int ret_val = -1;
 
 
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 	if(nworkers == 0)
 	if(nworkers == 0)
 	{
 	{
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
@@ -733,7 +733,7 @@ static int dmda_push_task(struct starpu_task *task)
         int ret_val = -1;
         int ret_val = -1;
 
 
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 	if(nworkers == 0)
 	if(nworkers == 0)
 	{
 	{
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);

+ 1 - 1
src/sched_policies/eager_central_policy.c

@@ -97,7 +97,7 @@ static int push_task_eager_policy(struct starpu_task *task)
 	int ret_val = -1;
 	int ret_val = -1;
 
 
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 	if(nworkers == 0)
 	if(nworkers == 0)
 	{
 	{
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);

+ 1 - 1
src/sched_policies/eager_central_priority_policy.c

@@ -144,7 +144,7 @@ static int _starpu_priority_push_task(struct starpu_task *task)
         int ret_val = -1;
         int ret_val = -1;
 
 
         _STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
         _STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-        nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+        nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
         if(nworkers == 0)
         if(nworkers == 0)
         {
         {
                 _STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
                 _STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);

+ 3 - 3
src/sched_policies/heft.c

@@ -275,7 +275,7 @@ static int push_task_on_best_worker(struct starpu_task *task, int best_workerid,
 
 
 
 
 	double max_time_on_ctx = starpu_get_max_time_worker_on_ctx();
 	double max_time_on_ctx = starpu_get_max_time_worker_on_ctx();
-	if(max_time_on_ctx != -1.0 && starpu_are_overlapping_ctxs_on_worker(best_workerid) && starpu_is_ctxs_turn(best_workerid, sched_ctx_id))
+	if(max_time_on_ctx != -1.0 && starpu_sched_ctx_overlapping_ctxs_on_worker(best_workerid) && starpu_is_ctxs_turn(best_workerid, sched_ctx_id))
 	{
 	{
 		current_time[best_workerid][sched_ctx_id] += predicted;
 		current_time[best_workerid][sched_ctx_id] += predicted;
 		
 		
@@ -571,7 +571,7 @@ static int heft_push_task(struct starpu_task *task)
 	if (task->priority > 0)
 	if (task->priority > 0)
 	{
 	{
 		_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-		nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+		nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 		if(nworkers == 0)
 		if(nworkers == 0)
 		{
 		{
 			_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 			_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
@@ -584,7 +584,7 @@ static int heft_push_task(struct starpu_task *task)
 	}
 	}
 
 
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 	if(nworkers == 0)
 	if(nworkers == 0)
 	{
 	{
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);

+ 1 - 1
src/sched_policies/parallel_greedy.c

@@ -183,7 +183,7 @@ static int push_task_pgreedy_policy(struct starpu_task *task)
 
 
 	/* if the context has no workers return */
 	/* if the context has no workers return */
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 
 
    	if(nworkers == 0)
    	if(nworkers == 0)
 	{
 	{

+ 2 - 2
src/sched_policies/parallel_heft.c

@@ -461,7 +461,7 @@ static int parallel_heft_push_task(struct starpu_task *task)
 	if (task->priority == STARPU_MAX_PRIO)
 	if (task->priority == STARPU_MAX_PRIO)
 	{
 	{
 		_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-                nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+                nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
                 if(nworkers == 0)
                 if(nworkers == 0)
                 {
                 {
                         _STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
                         _STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
@@ -475,7 +475,7 @@ static int parallel_heft_push_task(struct starpu_task *task)
 
 
 
 
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
 	_STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
         if(nworkers == 0)
         if(nworkers == 0)
 	{
 	{
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);

+ 1 - 1
src/sched_policies/random_policy.c

@@ -90,7 +90,7 @@ static int random_push_task(struct starpu_task *task)
         int ret_val = -1;
         int ret_val = -1;
 
 
         _STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
         _STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-	nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
         if(nworkers == 0)
         if(nworkers == 0)
         {
         {
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
 		_STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);

+ 3 - 3
src/sched_policies/work_stealing_policy.c

@@ -59,7 +59,7 @@ static unsigned select_victim_round_robin(unsigned sched_ctx_id)
 {
 {
 	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned worker = ws->last_pop_worker;
 	unsigned worker = ws->last_pop_worker;
-	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	unsigned nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 
 
 	/* If the worker's queue is empty, let's try
 	/* If the worker's queue is empty, let's try
 	 * the next ones */
 	 * the next ones */
@@ -87,7 +87,7 @@ static unsigned select_worker_round_robin(unsigned sched_ctx_id)
 {
 {
 	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
 	unsigned worker = ws->last_push_worker;
 	unsigned worker = ws->last_push_worker;
-	unsigned nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+	unsigned nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
 
 
 	ws->last_push_worker = (ws->last_push_worker + 1) % nworkers;
 	ws->last_push_worker = (ws->last_push_worker + 1) % nworkers;
 
 
@@ -306,7 +306,7 @@ int ws_push_task(struct starpu_task *task)
 
 
 	/* if the context has no workers return */
 	/* if the context has no workers return */
         _STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
         _STARPU_PTHREAD_MUTEX_LOCK(changing_ctx_mutex);
-        nworkers = starpu_get_nworkers_of_sched_ctx(sched_ctx_id);
+        nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
         if(nworkers == 0)
         if(nworkers == 0)
         {
         {
                 _STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);
                 _STARPU_PTHREAD_MUTEX_UNLOCK(changing_ctx_mutex);