
merge trunk

Corentin Salingue, 7 years ago
commit 0c5c0295bb

+ 6 - 0
doc/doxygen/chapters/410_mpi_support.doxy

@@ -542,6 +542,12 @@ Here we have disabled the kernel function call to skip the actual computation
 time and only keep submission time, and we have asked StarPU to fake running on
 MPI node 2 out of 1024 nodes.
 
+To tune the placement of tasks among MPI nodes, one can use
+::STARPU_EXECUTE_ON_NODE or ::STARPU_EXECUTE_ON_DATA to specify an explicit
+node, or the node of a given data (e.g. one of the parameters), or use
+starpu_mpi_node_selection_register_policy() and ::STARPU_NODE_SELECTION_POLICY
+to provide a dynamic policy.
+
 A function starpu_mpi_task_build() is also provided with the aim to
 only construct the task structure. All MPI nodes need to call the
 function, only the node which is to execute the task will return a
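
The documentation paragraph added in this hunk describes the task-placement hooks. As a minimal sketch (not part of this commit), forcing the execution node at insertion time could look as follows, assuming a codelet cl and a registered data handle already exist:

	/* Sketch only: run this task on MPI node 2, regardless of where the data lives. */
	starpu_mpi_task_insert(MPI_COMM_WORLD, &cl,
	                       STARPU_RW, handle,
	                       STARPU_EXECUTE_ON_NODE, 2,
	                       0);

	/* Or run it on whichever node currently owns 'handle'. */
	starpu_mpi_task_insert(MPI_COMM_WORLD, &cl,
	                       STARPU_RW, handle,
	                       STARPU_EXECUTE_ON_DATA, handle,
	                       0);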

+ 2 - 2
src/common/list.h

@@ -156,7 +156,7 @@
     struct ENAME *_tail; /**< @internal tail of the list */ \
   }; \
   /** @internal */LIST_INLINE struct ENAME *ENAME##_new(void) \
-    { struct ENAME *e; _STARPU_MALLOC(e, sizeof(struct ENAME)); \
+    { struct ENAME *e; _STARPU_MALLOC_CAST(e, sizeof(struct ENAME), struct ENAME *); \
       e->_next = NULL; e->_prev = NULL; return e; } \
   /** @internal */LIST_INLINE void ENAME##_delete(struct ENAME *e) \
     { free(e); } \
@@ -185,7 +185,7 @@
   /** @internal */LIST_INLINE void ENAME##_list_init(struct ENAME##_list *l) \
     { l->_head=NULL; l->_tail=l->_head; } \
   /** @internal */LIST_INLINE struct ENAME##_list *ENAME##_list_new(void) \
-    { struct ENAME##_list *l; _STARPU_MALLOC(l, sizeof(struct ENAME##_list)); \
+    { struct ENAME##_list *l; _STARPU_MALLOC_CAST(l, sizeof(struct ENAME##_list), struct ENAME##_list *); \
       ENAME##_list_init(l); return l; } \
   /** @internal */LIST_INLINE int ENAME##_list_empty(const struct ENAME##_list *l) \
     { return (l->_head == NULL); } \
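
For readers unfamiliar with this header: a single generator macro expands into a typed element/list API, and the two hunks above only change how the element and list containers are allocated. A rough usage sketch, assuming a hypothetical LIST_TYPE(my_elt, int value;) expansion (the macro name and any helper not visible in these hunks are assumptions):

	struct my_elt_list *l = my_elt_list_new();  /* allocates and runs my_elt_list_init() */
	struct my_elt *e = my_elt_new();            /* fresh element, _next and _prev are NULL */
	e->value = 42;
	/* link e into l with the push/pop helpers generated elsewhere in this header, then: */
	if (!my_elt_list_empty(l))
	        my_elt_delete(e);                   /* plain free() once unlinked */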

+ 3 - 2
src/common/utils.h

@@ -116,9 +116,10 @@
 	} while (0)
 
 
-#define _STARPU_MALLOC(ptr, size) do { ptr = malloc(size); STARPU_ASSERT_MSG(ptr != NULL, "Cannot allocate %ld bytes\n", (long) size); } while (0)
+#define _STARPU_MALLOC(ptr, size) do { ptr = malloc(size); STARPU_ASSERT_MSG(ptr != NULL, "Cannot allocate %ld bytes\n", (long) (size)); } while (0)
 #define _STARPU_CALLOC(ptr, nmemb, size) do { ptr = calloc(nmemb, size); STARPU_ASSERT_MSG(ptr != NULL, "Cannot allocate %ld bytes\n", (long) (nmemb*size)); } while (0)
-#define _STARPU_REALLOC(ptr, size) do { void *_new_ptr = realloc(ptr, size); STARPU_ASSERT_MSG(_new_ptr != NULL, "Cannot reallocate %ld bytes\n", (long) size); ptr = _new_ptr;} while (0)
+#define _STARPU_REALLOC(ptr, size) do { void *_new_ptr = realloc(ptr, size); STARPU_ASSERT_MSG(_new_ptr != NULL, "Cannot reallocate %ld bytes\n", (long) (size)); ptr = _new_ptr;} while (0)
+#define _STARPU_MALLOC_CAST(ptr, size, type) do { ptr = (type) malloc(size); STARPU_ASSERT_MSG(ptr != NULL, "Cannot allocate %ld bytes\n", (long) (size)); } while (0)
 
 #ifdef _MSC_VER
 #define _STARPU_IS_ZERO(a) (a == 0.0)
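
The extra parentheses around (size) are standard macro hygiene, so that an expression such as n + 1 passed as the size argument is cast as a whole. The new _STARPU_MALLOC_CAST variant is presumably there because C++ (unlike C) rejects the implicit conversion of the void * returned by malloc(), and the list.h hunks above now use it. A hypothetical illustration (my_item is made up for the example):

	struct my_item *p;
	_STARPU_MALLOC(p, sizeof(*p));                        /* fine in C, rejected by a C++ compiler */
	_STARPU_MALLOC_CAST(p, sizeof(*p), struct my_item *); /* accepted by both C and C++ */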

+ 1 - 1
src/core/disk.c

@@ -76,7 +76,7 @@ static void add_async_event(struct _starpu_async_channel * channel, void * event
 
 int starpu_disk_register(struct starpu_disk_ops *func, void *parameter, starpu_ssize_t size)
 {
-	STARPU_ASSERT_MSG(size < 0 || size >= STARPU_DISK_SIZE_MIN,"Minimum disk size is %u Bytes ! (Here %u) \n", (int) STARPU_DISK_SIZE_MIN, (int) size);
+	STARPU_ASSERT_MSG(size < 0 || size >= STARPU_DISK_SIZE_MIN, "Minimum disk size is %d Bytes ! (Here %d) \n", (int) STARPU_DISK_SIZE_MIN, (int) size);
 	/* register disk */
 	unsigned disk_memnode = _starpu_memory_node_register(STARPU_DISK_RAM, 0);
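
For context, starpu_disk_register() is the public entry point whose error message is reworded here: it takes a set of disk operations, a backend-specific parameter, and a size that, per the assertion, must be either negative or at least STARPU_DISK_SIZE_MIN. A minimal sketch, assuming the stock unistd backend and a path of your choice:

	/* Register 1 GiB of disk storage backed by plain POSIX I/O under /tmp/starpu-disk;
	 * the returned value identifies the new disk memory node. */
	int disk_node = starpu_disk_register(&starpu_disk_unistd_ops,
	                                     (void *) "/tmp/starpu-disk",
	                                     (starpu_ssize_t) 1024*1024*1024);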
 

+ 10 - 2
src/core/disk_ops/disk_hdf5.c

@@ -38,6 +38,7 @@
 static int nb_disk_open = 0;
 #endif
 
+/* TODO: support disk-to-disk copy with HD5Ocopy */
 
 /* ------------------- use HDF5 to write on disk -------------------  */
 
@@ -373,7 +374,10 @@ static void *starpu_hdf5_plug(void *parameter, starpu_ssize_t size STARPU_ATTRIB
 #ifndef H5_HAVE_THREADSAFE
         int nb_disk = STARPU_ATOMIC_ADD(&nb_disk_open, 1);
         if (nb_disk != 1)
-                _STARPU_ERROR("HDF5 library is not compiled with --enable-threadsafe. You can't open more than one HDF5 file in the same time !\n");
+	{
+                _STARPU_ERROR("HDF5 library is not compiled with --enable-threadsafe. You can't open more than one HDF5 file at the same time !\n");
+		return NULL;
+	}
 #endif
 
         struct starpu_hdf5_base * base;
@@ -392,6 +396,7 @@ static void *starpu_hdf5_plug(void *parameter, starpu_ssize_t size STARPU_ATTRIB
                 {
                         free(base);
                         _STARPU_ERROR("Can not create the HDF5 file (%s)", (char *) parameter);
+			return NULL;
                 }
 
                 /* just use _starpu_mktemp_many to create a file, close the file descriptor */
@@ -403,6 +408,7 @@ static void *starpu_hdf5_plug(void *parameter, starpu_ssize_t size STARPU_ATTRIB
                 {
                         free(base); 
                         _STARPU_ERROR("Can not create the HDF5 file (%s)", (char *) parameter);
+			return NULL;
                 }
                 base->created = 1;
         } 
@@ -416,8 +422,10 @@ static void *starpu_hdf5_plug(void *parameter, starpu_ssize_t size STARPU_ATTRIB
                 base->fileID = H5Fopen((char *)parameter, H5F_ACC_RDWR, H5P_DEFAULT);
                 if (base->fileID < 0) 
                 {
-                        free(base); 
+                        free(base);
+			free(path);
                         _STARPU_ERROR("Can not open the HDF5 file (%s)", (char *) parameter);
+			return NULL;
                 }
                 base->created = 0;
                 base->path = path;

+ 1 - 1
src/core/jobs.c

@@ -713,7 +713,7 @@ int _starpu_push_local_task(struct _starpu_worker *worker, struct starpu_task *t
 
 	if (task->execute_on_a_specific_worker && task->workerorder)
 	{
-		STARPU_ASSERT_MSG(task->workerorder >= worker->current_ordered_task_order, "worker order values must not have duplicates (%u pushed to worker %d, but %d already passed)", task->workerorder, worker->workerid, worker->current_ordered_task_order);
+		STARPU_ASSERT_MSG(task->workerorder >= worker->current_ordered_task_order, "worker order values must not have duplicates (%u pushed to worker %d, but %u already passed)", task->workerorder, worker->workerid, worker->current_ordered_task_order);
 		/* Put it in the ordered task ring */
 		unsigned needed = task->workerorder - worker->current_ordered_task_order + 1;
 		if (worker->local_ordered_tasks_size < needed)

+ 2 - 2
src/core/perfmodel/perfmodel_history.c

@@ -1383,7 +1383,7 @@ void starpu_perfmodel_get_arch_name(struct starpu_perfmodel_arch* arch, char *ar
 	strcpy(devices, "");
 	for(i=0 ; i<arch->ndevices ; i++)
 	{
-		written += snprintf(devices + written, sizeof(devices)-written, "%s%u%s", starpu_perfmodel_get_archtype_name(arch->devices[i].type), arch->devices[i].devid, i != arch->ndevices-1 ? "_":"");
+		written += snprintf(devices + written, sizeof(devices)-written, "%s%d%s", starpu_perfmodel_get_archtype_name(arch->devices[i].type), arch->devices[i].devid, i != arch->ndevices-1 ? "_":"");
 	}
 	snprintf(archname, maxlen, "%s_impl%u (Comb%d)", devices, impl, comb);
 }
@@ -1822,7 +1822,7 @@ void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfm
 
 		STARPU_ASSERT(j->footprint_is_computed);
 
-		fprintf(f, "0x%x\t%lu\t%f\t%f\t%f\t%d\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(model, arch, impl, j), measured, task->predicted, task->predicted_transfer, cpuid);
+		fprintf(f, "0x%x\t%lu\t%f\t%f\t%f\t%u\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(model, arch, impl, j), measured, task->predicted, task->predicted_transfer, cpuid);
 		unsigned i;
 		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
 

+ 4 - 4
src/core/sched_ctx.c

@@ -575,7 +575,7 @@ struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *p
 		sched_ctx->sms_start_idx = occupied_sms;
 		sched_ctx->sms_end_idx = occupied_sms+nsms;
 		occupied_sms += nsms;
-		_STARPU_DEBUG("ctx %d: stream worker %d nsms %d ocupied sms %d\n", sched_ctx->id, workerids[0], nsms, occupied_sms);
+		_STARPU_DEBUG("ctx %u: stream worker %d nsms %d ocupied sms %d\n", sched_ctx->id, workerids[0], nsms, occupied_sms);
 		STARPU_ASSERT_MSG(occupied_sms <= STARPU_NMAXSMS , "STARPU:requested more sms than available");
 		_starpu_worker_set_stream_ctx(workerids[0], sched_ctx);
 		sched_ctx->stream_worker = workerids[0];
@@ -584,7 +584,7 @@ struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *p
 	sched_ctx->nsub_ctxs = 0;
 	sched_ctx->parallel_view = 0;
 
-  /*init the strategy structs and the worker_collection of the ressources of the context */
+	/*init the strategy structs and the worker_collection of the ressources of the context */
 	if(policy)
 	{
 		_starpu_init_sched_policy(config, sched_ctx, policy);
@@ -1092,7 +1092,7 @@ unsigned _starpu_can_push_task(struct _starpu_sched_ctx *sched_ctx, struct starp
 			expected_len = expected_end - hyp_actual_start_sample[sched_ctx->id] ;
 		else
 		{
-			_STARPU_MSG("%d: sc start is 0.0\n", sched_ctx->id);
+			_STARPU_MSG("%u: sc start is 0.0\n", sched_ctx->id);
 			expected_len = expected_end - starpu_timing_now();
 		}
 		if(expected_len < 0.0)
@@ -1355,7 +1355,7 @@ int _starpu_nworkers_able_to_execute_task(struct starpu_task *task, struct _star
 	while(workers->has_next(workers, &it))
 	{
 		unsigned worker = workers->get_next(workers, &it);
-		STARPU_ASSERT_MSG(worker < STARPU_NMAXWORKERS, "worker id %d", worker);
+		STARPU_ASSERT_MSG(worker < STARPU_NMAXWORKERS, "worker id %u", worker);
 		if (starpu_worker_can_execute_task_first_impl(worker, task, NULL))
 			nworkers++;
 	}

+ 2 - 2
src/core/simgrid.c

@@ -110,7 +110,7 @@ int _starpu_simgrid_get_nbhosts(const char *prefix)
 	{
 		char name[32];
 		STARPU_ASSERT(starpu_mpi_world_rank);
-		snprintf(name, sizeof(name), STARPU_MPI_AS_PREFIX"%u", starpu_mpi_world_rank());
+		snprintf(name, sizeof(name), STARPU_MPI_AS_PREFIX"%d", starpu_mpi_world_rank());
 		hosts = MSG_environment_as_get_hosts(_starpu_simgrid_get_as_by_name(name));
 		snprintf(new_prefix, sizeof(new_prefix), "%s-%s", name, prefix);
 		prefix = new_prefix;
@@ -187,7 +187,7 @@ msg_host_t _starpu_simgrid_get_host_by_worker(struct _starpu_worker *worker)
 		default:
 			STARPU_ASSERT(0);
 	}
-	snprintf(name, sizeof(name), "%s%d", prefix, worker->devid);
+	snprintf(name, sizeof(name), "%s%u", prefix, worker->devid);
 	host =  _starpu_simgrid_get_host_by_name(name);
 	STARPU_ASSERT_MSG(host, "Could not find host %s!", name);
 	return host;

+ 3 - 3
src/core/topology.c

@@ -1084,7 +1084,7 @@ _starpu_init_mic_config (struct _starpu_machine_config *config,
 
 	topology->nmiccores[mic_idx] = nmiccores;
 	STARPU_ASSERT_MSG(topology->nmiccores[mic_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
-			  "topology->nmiccores[mic_idx(%d)] (%d) + topology->nworkers (%d) <= STARPU_NMAXWORKERS (%d)",
+			  "topology->nmiccores[mic_idx(%u)] (%u) + topology->nworkers (%u) <= STARPU_NMAXWORKERS (%d)",
 			  mic_idx, topology->nmiccores[mic_idx], topology->nworkers, STARPU_NMAXWORKERS);
 
 	/* _starpu_initialize_workers_mic_deviceid (config); */
@@ -1149,7 +1149,7 @@ _starpu_init_mpi_config (struct _starpu_machine_config *config,
 
         topology->nmpicores[mpi_idx] = nmpicores;
         STARPU_ASSERT_MSG(topology->nmpicores[mpi_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
-                        "topology->nmpicores[mpi_idx(%d)] (%d) + topology->nworkers (%d) <= STARPU_NMAXWORKERS (%d)",
+                        "topology->nmpicores[mpi_idx(%u)] (%u) + topology->nworkers (%u) <= STARPU_NMAXWORKERS (%d)",
                         mpi_idx, topology->nmpicores[mpi_idx], topology->nworkers, STARPU_NMAXWORKERS);
 
         mpi_worker_set[mpi_idx].workers = &config->workers[topology->nworkers];
@@ -2564,7 +2564,7 @@ _starpu_init_workers_binding_and_memory (struct _starpu_machine_config *config,
 
 		workerarg->memory_node = memory_node;
 
-		_STARPU_DEBUG("worker %d type %d devid %d bound to cpu %d, STARPU memory node %d\n", worker, workerarg->arch, devid, workerarg->bindid, memory_node);
+		_STARPU_DEBUG("worker %u type %d devid %u bound to cpu %d, STARPU memory node %u\n", worker, workerarg->arch, devid, workerarg->bindid, memory_node);
 
 #ifdef __GLIBC__
 		if (workerarg->bindid != -1)

+ 1 - 1
src/datawizard/filters.c

@@ -139,7 +139,7 @@ starpu_data_handle_t fstarpu_data_get_sub_data(starpu_data_handle_t root_handle,
 		STARPU_ASSERT(next_child >= 0);
 
 		STARPU_ASSERT_MSG(current_handle->nchildren != 0, "Data %p has to be partitioned before accessing children", current_handle);
-		STARPU_ASSERT_MSG((unsigned) next_child < current_handle->nchildren, "Bogus child number %u, data %p only has %u children", next_child, current_handle, current_handle->nchildren);
+		STARPU_ASSERT_MSG((unsigned) next_child < current_handle->nchildren, "Bogus child number %d, data %p only has %u children", next_child, current_handle, current_handle->nchildren);
 
 		current_handle = &current_handle->children[next_child];
 	}

+ 1 - 1
src/datawizard/interfaces/data_interface.c

@@ -977,7 +977,7 @@ static void _starpu_data_invalidate(void *data)
 		unsigned i, j, nnodes = starpu_memory_nodes_get_count();
 		for (i = 0; i < nnodes; i++)
 			for (j = 0; j < nnodes; j++)
-				STARPU_ASSERT_MSG(!handle->per_node[i].request[j], "request for handle %p pending from %d to %d while invalidating data!", handle, j, i);
+				STARPU_ASSERT_MSG(!handle->per_node[i].request[j], "request for handle %p pending from %u to %u while invalidating data!", handle, j, i);
 	}
 #endif
 

+ 1 - 1
src/datawizard/malloc.c

@@ -132,7 +132,7 @@ int _starpu_malloc_flags_on_node(unsigned dst_node, void **A, size_t dim, int fl
 			{
 				size_t freed;
 				size_t reclaim = 2 * dim;
-				_STARPU_DEBUG("There is not enough memory left, we are going to reclaim %ld\n", reclaim);
+				_STARPU_DEBUG("There is not enough memory left, we are going to reclaim %ld\n", (long)reclaim);
 				_STARPU_TRACE_START_MEMRECLAIM(dst_node,0);
 				freed = _starpu_memory_reclaim_generic(dst_node, 0, reclaim);
 				_STARPU_TRACE_END_MEMRECLAIM(dst_node,0);

+ 2 - 2
src/datawizard/memory_manager.c

@@ -1,6 +1,6 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
- * Copyright (C) 2012-2013, 2015, 2016  CNRS
+ * Copyright (C) 2012-2013, 2015, 2016, 2017  CNRS
  *
  * StarPU is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
@@ -57,7 +57,7 @@ void _starpu_memory_manager_set_global_memory_size(unsigned node, size_t size)
 	if (!global_size[node])
 	{
 		global_size[node] = size;
-		_STARPU_DEBUG("Global size for node %d is %ld\n", node, (long)global_size[node]);
+		_STARPU_DEBUG("Global size for node %u is %ld\n", node, (long)global_size[node]);
 	}
 	else
 	{

+ 2 - 2
src/datawizard/memory_nodes.c

@@ -1,7 +1,7 @@
 /* StarPU --- Runtime system for heterogeneous multicore architectures.
  *
  * Copyright (C) 2009-2017  Université de Bordeaux
- * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015  CNRS
+ * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2017  CNRS
  * Copyright (C) 2017  Inria
  *
  * StarPU is free software; you can redistribute it and/or modify
@@ -105,7 +105,7 @@ void _starpu_memory_node_get_name(unsigned node, char *name, int size)
 		prefix = "unknown";
 		STARPU_ASSERT(0);
 	}
-	snprintf(name, size, "%s %u", prefix, _starpu_descr.devid[node]);
+	snprintf(name, size, "%s %d", prefix, _starpu_descr.devid[node]);
 }
 
 unsigned _starpu_memory_node_register(enum starpu_node_kind kind, int devid)

+ 5 - 5
src/datawizard/memstats.c

@@ -65,13 +65,13 @@ void _starpu_memory_display_handle_stats(FILE *stream, starpu_data_handle_t hand
 		    +handle->memory_stats->invalidated[node]+handle->memory_stats->loaded_owner[node])
 		{
 			fprintf(stream, "Node #%u\n", node);
-			fprintf(stream, "\tDirect access : %d\n", handle->memory_stats->direct_access[node]);
+			fprintf(stream, "\tDirect access : %u\n", handle->memory_stats->direct_access[node]);
 			/* XXX Not Working yet. */
 			if (handle->memory_stats->shared_to_owner[node])
-				fprintf(stream, "\t\tShared to Owner : %d\n", handle->memory_stats->shared_to_owner[node]);
-			fprintf(stream, "\tLoaded (Owner) : %d\n", handle->memory_stats->loaded_owner[node]);
-			fprintf(stream, "\tLoaded (Shared) : %d\n", handle->memory_stats->loaded_shared[node]);
-			fprintf(stream, "\tInvalidated (was Owner) : %d\n\n", handle->memory_stats->invalidated[node]);
+				fprintf(stream, "\t\tShared to Owner : %u\n", handle->memory_stats->shared_to_owner[node]);
+			fprintf(stream, "\tLoaded (Owner) : %u\n", handle->memory_stats->loaded_owner[node]);
+			fprintf(stream, "\tLoaded (Shared) : %u\n", handle->memory_stats->loaded_shared[node]);
+			fprintf(stream, "\tInvalidated (was Owner) : %u\n\n", handle->memory_stats->invalidated[node]);
 		}
 	}
 }

+ 1 - 1
src/drivers/opencl/driver_opencl.c

@@ -111,7 +111,7 @@ static void _starpu_opencl_limit_gpu_mem_if_needed(unsigned devid)
 	to_waste = totalGlobalMem - global_mem[devid];
 #endif
 
-	_STARPU_DEBUG("OpenCL device %d: Wasting %ld MB / Limit %ld MB / Total %ld MB / Remains %ld MB\n",
+	_STARPU_DEBUG("OpenCL device %u: Wasting %ld MB / Limit %ld MB / Total %ld MB / Remains %ld MB\n",
 			devid, (long)to_waste/(1024*1024), (long) limit, (long)totalGlobalMem/(1024*1024),
 			(long)(totalGlobalMem - to_waste)/(1024*1024));
 

+ 1 - 1
src/sched_policies/heteroprio.c

@@ -128,7 +128,7 @@ inline void starpu_heteroprio_set_mapping(unsigned sched_ctx_id, enum starpu_het
 	hp->prio_mapping_per_arch_index[arch][source_prio] = dest_bucket_id;
 
 	hp->buckets[dest_bucket_id].valid_archs |= starpu_heteroprio_types_to_arch[arch];
-	_STARPU_DEBUG("Adding arch %d to bucket %d\n", arch, dest_bucket_id);
+	_STARPU_DEBUG("Adding arch %d to bucket %u\n", arch, dest_bucket_id);
 }
 
 /** Tell which arch is the faster for the tasks of a bucket (optional) */

+ 1 - 1
src/util/openmp_runtime_support.c

@@ -1474,7 +1474,6 @@ void starpu_omp_critical_inline_end(const char *name)
 	{
 		_starpu_spin_lock(&_global_state.named_criticals_lock);
 		HASH_FIND_STR(_global_state.named_criticals, name, critical);
-		STARPU_ASSERT(critical != NULL);
 		_starpu_spin_unlock(&_global_state.named_criticals_lock);
 	}
 	else
@@ -1482,6 +1481,7 @@ void starpu_omp_critical_inline_end(const char *name)
 		critical = _global_state.default_critical;
 	}
 
+	STARPU_ASSERT(critical != NULL);
 	_starpu_spin_lock(&critical->lock);
 	STARPU_ASSERT(critical->state == 1);
 	critical->state = 0;

+ 1 - 1
src/util/starpu_clusters_create.c

@@ -277,7 +277,7 @@ int starpu_cluster_print(struct starpu_cluster_machine *clusters)
 	struct _starpu_cluster_group *group;
 	struct _starpu_cluster *cluster;
 
-	printf("Number of clusters created: %d\n", clusters->nclusters);
+	printf("Number of clusters created: %u\n", clusters->nclusters);
 	cnt=0;
 	for (group = _starpu_cluster_group_list_begin(clusters->groups);
 	     group != _starpu_cluster_group_list_end(clusters->groups);

+ 1 - 1
tests/disk/mem_reclaim.c

@@ -115,7 +115,7 @@ static void check(void *buffers[], void *args)
 	unsigned *val = (unsigned*) STARPU_VECTOR_GET_PTR(vector);
 	unsigned i;
 	starpu_codelet_unpack_args(args, &i);
-	STARPU_ASSERT_MSG(*val == values[i], "Incorrect value. Value %u should be %u (index %d)", *val, values[i], i);
+	STARPU_ASSERT_MSG(*val == values[i], "Incorrect value. Value %u should be %u (index %u)", *val, values[i], i);
 }
 
 static struct starpu_codelet zero_cl =

+ 4 - 2
tests/main/combined_workers/bfs/bfs.cpp

@@ -29,7 +29,8 @@
 
 extern void omp_bfs_func(void *buffers[], void *_args);
 
-void Usage(int argc, char**argv){
+void Usage(int argc, char**argv)
+{
 	fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
 }
 
@@ -146,7 +147,8 @@ int main( int argc, char** argv)
 	starpu_data_handle_t graph_visited_handle;
 	starpu_data_handle_t cost_handle;
 
-	if(argc != 2){
+	if(argc != 2)
+	{
 		Usage(argc, argv);
 		exit(1);
 	}

+ 2 - 1
tests/main/hwloc_cpuset.c

@@ -76,10 +76,11 @@ int main(int argc, char **argv)
 	{
 		status = STARPU_TEST_SKIPPED;
 	}
+
+	starpu_shutdown();	
 #else
 	status = STARPU_TEST_SKIPPED;
 #endif
-	starpu_shutdown();	
 
 	return status;
 }

+ 5 - 0
tools/starpu_tasks_rec_complete.c

@@ -174,6 +174,11 @@ int main(int argc, char *argv[])
 		fprintf(output, "%s", s);
 	}
 
+	if (fclose(input))
+	{
+		fprintf(stderr, "couldn't close input: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
 	if (fclose(output))
 	{
 		fprintf(stderr, "couldn't close output: %s\n", strerror(errno));