Browse code

Remove some compiler warnings

Andra Hugo, 11 years ago
Parent
Current commit
9d2ba463c5

+ 1 - 1
sc_hypervisor/examples/lp_test/lp_resize_test.c

@@ -73,7 +73,7 @@ void* submit_tasks_thread(void *arg)
 	}
 
 	starpu_task_wait_for_all();
-	return;
+	return NULL;
 }
 
 int main()

+ 1 - 1
sc_hypervisor/examples/lp_test/lp_test.c

@@ -72,7 +72,7 @@ void* submit_tasks_thread(void *arg)
 	}
 
 	starpu_task_wait_for_all();
-	return;
+	return NULL;
 }
 
 int main()

+ 1 - 1
sc_hypervisor/src/hypervisor_policies/app_driven_policy.c

@@ -15,7 +15,7 @@
  */
 #include <sc_hypervisor_policy.h>
 
-static void app_driven_handle_post_exec_hook(unsigned sched_ctx, int task_tag)
+static void app_driven_handle_post_exec_hook(unsigned sched_ctx, __attribute__((unused)) int task_tag)
 {
 	sc_hypervisor_policy_resize_to_unknown_receiver(sched_ctx, 1);
 }

+ 1 - 1
sc_hypervisor/src/hypervisor_policies/debit_lp_policy.c

@@ -56,7 +56,7 @@ static unsigned _compute_max_speed(int ns, int nw, double w_in_s[ns][nw], unsign
 	long diff_s = end_time.tv_sec  - start_time.tv_sec;
 	long diff_us = end_time.tv_usec  - start_time.tv_usec;
 
-	float timing = (float)(diff_s*1000000 + diff_us)/1000;
+	__attribute__((unused)) float timing = (float)(diff_s*1000000 + diff_us)/1000;
 
 	if(res > 0.0)
 		return 1;

+ 2 - 1
sc_hypervisor/src/hypervisor_policies/gflops_rate_policy.c

@@ -289,7 +289,8 @@ static void gflops_rate_resize(unsigned sched_ctx)
 	}
 }
 
-static void gflops_rate_handle_poped_task(unsigned sched_ctx, int worker)
+static void gflops_rate_handle_poped_task(unsigned sched_ctx, __attribute__((unused)) int worker, 
+					  __attribute__((unused))struct starpu_task *task, __attribute__((unused))uint32_t footprint)
 {
 	gflops_rate_resize(sched_ctx);
 }

+ 1 - 1
sc_hypervisor/src/hypervisor_policies/ispeed_lp_policy.c

@@ -219,7 +219,7 @@ static void ispeed_lp_resize_ctxs(unsigned *sched_ctxs, int nsched_ctxs , int *w
 	}
 }
 
-static void ispeed_lp_end_ctx(unsigned sched_ctx)
+static void ispeed_lp_end_ctx(__attribute__((unused))unsigned sched_ctx)
 {
 /* 	struct sc_hypervisor_wrapper* sc_w = sc_hypervisor_get_wrapper(sched_ctx); */
 /* 	int worker; */

+ 1 - 1
sc_hypervisor/src/policies_utils/dichotomy.c

@@ -82,7 +82,7 @@ unsigned sc_hypervisor_lp_execute_dichotomy(int ns, int nw, double w_in_s[ns][nw
 	long diff_s = end_time.tv_sec  - start_time.tv_sec;
 	long diff_us = end_time.tv_usec  - start_time.tv_usec;
 
-	float timing = (float)(diff_s*1000000 + diff_us)/1000;
+	__attribute__((unused)) float timing = (float)(diff_s*1000000 + diff_us)/1000;
 
 	return found_sol;
 }

+ 5 - 3
sc_hypervisor/src/policies_utils/policy_tools.c

@@ -352,7 +352,7 @@ double sc_hypervisor_get_fastest_ctx_exec_time(void)
 
 void sc_hypervisor_group_workers_by_type(struct types_of_workers *tw, int *total_nw)
 {
-	int w;
+	unsigned w;
 	for(w = 0; w < tw->nw; w++)
 		total_nw[w] = 0;
 
@@ -382,8 +382,9 @@ enum starpu_worker_archtype sc_hypervisor_get_arch_for_index(unsigned w, struct
 	else
 		if(tw->ncuda != 0)
 			return STARPU_CUDA_WORKER;
-}
 
+	return STARPU_CPU_WORKER;
+}
 
 unsigned sc_hypervisor_get_index_for_arch(enum starpu_worker_archtype arch, struct types_of_workers *tw)
 {
@@ -403,6 +404,7 @@ unsigned sc_hypervisor_get_index_for_arch(enum starpu_worker_archtype arch, stru
 				return 0;
 		}
 	}
+	return 0;
 }
 
 void sc_hypervisor_get_tasks_times(int nw, int nt, double times[nw][nt], int *workers, unsigned size_ctxs, struct sc_hypervisor_policy_task_pool *task_pools)
@@ -521,7 +523,7 @@ unsigned sc_hypervisor_check_speed_gap_btw_ctxs(void)
 				{
 					v[w] = sc_hypervisor_get_speed(sc_w, sc_hypervisor_get_arch_for_index(w, tw));
 					
-					optimal_v[i] += nworkers_per_ctx[i][w];
+					optimal_v[i] += nworkers_per_ctx[i][w]*v[w];
 				}
 				_set_optimal_v(i, optimal_v[i]);
 			}

+ 2 - 2
sc_hypervisor/src/sc_hypervisor.c

@@ -797,7 +797,7 @@ static void notify_poped_task(unsigned sched_ctx, int worker, struct starpu_task
 	hypervisor.sched_ctx_w[sched_ctx].remaining_flops -= task->flops;
 /* 	if(hypervisor.sched_ctx_w[sched_ctx].remaining_flops < 0.0) */
 /* 		hypervisor.sched_ctx_w[sched_ctx].remaining_flops = 0.0; */
-	double ctx_elapsed_flops = sc_hypervisor_get_elapsed_flops_per_sched_ctx(&hypervisor.sched_ctx_w[sched_ctx]);
+//	double ctx_elapsed_flops = sc_hypervisor_get_elapsed_flops_per_sched_ctx(&hypervisor.sched_ctx_w[sched_ctx]);
 /* 	printf("*****************STARPU_STARPU_STARPU: decrement %lf flops  remaining flops %lf total flops %lf elapseed flops %lf in ctx %d \n", */
 /* 	       task->flops, hypervisor.sched_ctx_w[sched_ctx].remaining_flops,  hypervisor.sched_ctx_w[sched_ctx].total_flops, ctx_elapsed_flops, sched_ctx); */
 	starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
@@ -891,7 +891,7 @@ static void notify_delete_context(unsigned sched_ctx)
 void sc_hypervisor_size_ctxs(unsigned *sched_ctxs, int nsched_ctxs, int *workers, int nworkers)
 {
 	starpu_pthread_mutex_lock(&act_hypervisor_mutex);
-	int curr_nsched_ctxs = sched_ctxs == NULL ? hypervisor.nsched_ctxs : nsched_ctxs;
+	unsigned curr_nsched_ctxs = sched_ctxs == NULL ? hypervisor.nsched_ctxs : (unsigned)nsched_ctxs;
 	unsigned *curr_sched_ctxs = sched_ctxs == NULL ? hypervisor.sched_ctxs : sched_ctxs;
 	starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
 	unsigned s;

+ 1 - 2
src/core/sched_ctx.c

@@ -1280,7 +1280,6 @@ void _starpu_sched_ctx_rebind_thread_to_its_cpu(unsigned cpuid)
 static void _starpu_sched_ctx_get_workers_to_sleep(unsigned sched_ctx_id)
 {
 	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
-
 	struct starpu_worker_collection *workers = sched_ctx->workers;
 	struct starpu_sched_ctx_iterator it;
 	struct _starpu_worker *worker = NULL;
@@ -1297,7 +1296,7 @@ static void _starpu_sched_ctx_get_workers_to_sleep(unsigned sched_ctx_id)
 
 	while(workers->has_next(workers, &it))
 	{
-		int w = workers->get_next(workers, &it);
+		workers->get_next(workers, &it);
 		sem_wait(&sched_ctx->parallel_code_sem);
 	}
 	return;

+ 0 - 2
src/sched_policies/deque_modeling_policy_data_aware.c

@@ -53,8 +53,6 @@ struct _starpu_dmda_data
 	long int ready_task_cnt;
 };
 
-static double idle_power = 0.0;
-
 /* The dmda scheduling policy uses
  *
  * alpha * T_computation + beta * T_communication + gamma * Consumption

+ 1 - 1
src/sched_policies/parallel_heft.c

@@ -502,7 +502,7 @@ static int parallel_heft_push_task(struct starpu_task *task)
 	return ret_val;
 }
 
-static void parallel_heft_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
+static void parallel_heft_add_workers(__attribute__((unused)) unsigned sched_ctx_id, int *workerids, unsigned nworkers)
 {
 	int workerid;
 	unsigned i;