
Renaming sometimes does not make sense

Cédric Augonnet, 15 years ago
commit b412800174

+ 1 - 1
ChangeLog

@@ -15,7 +15,7 @@ The asynchronous heterogeneous multi-accelerator release
     - All data transfers use data requests now
     - Implement asynchronous data transfers
     - Implement prefetch mechanism
-    - Chain data requests to support GPU->STARPU_RAM->GPU transfers 
+    - Chain data requests to support GPU->RAM->GPU transfers 
   * Make it possible to bypass the scheduler and to assign a task to a specific
     worker
   * Support restartable tasks to reinstanciate dependencies task graphs

+ 1 - 1
examples/audio/starpu-audio-processing.c

@@ -433,7 +433,7 @@ int main(int argc, char **argv)
 	if (outputfilename)
 		fprintf(stderr, "Writing output data\n");
 
-	/* make sure that the output is in STARPU_RAM before quitting StarPU */
+	/* make sure that the output is in RAM before quitting StarPU */
 	starpu_unpartition_data(A_handle, 0);
 	starpu_delete_data(A_handle);
 

+ 2 - 2
examples/basic-examples/mult.c

@@ -103,7 +103,7 @@ static void callback_func(void *arg)
 
 /*
  * The codelet is passed 3 matrices, the "descr" union-type field gives a
- * description of the layout of those 3 matrices in the local memory (ie. STARPU_RAM
+ * description of the layout of those 3 matrices in the local memory (ie. RAM
  * in the case of CPU, GPU frame buffer in the case of GPU etc.). Since we have
  * registered data with the "blas" data interface, we manipulate the .blas
  * field of the descr[x] elements which are union types.
@@ -392,7 +392,7 @@ int main(__attribute__ ((unused)) int argc,
  	 * it's not possible to manipulate a subset of C using get_sub_data until
 	 * starpu_map_filters is called again on C_handle.
 	 * The second argument is the memory node where the different subsets
-	 * should be reassembled, 0 = main memory (STARPU_RAM) */
+	 * should be reassembled, 0 = main memory (RAM) */
 	starpu_unpartition_data(C_handle, 0);
 
 	/* stop monitoring matrix C : after this, it is not possible to pass C 
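
The two comments above spell out how a codelet sees its operands: each entry of "descr" is a union describing one matrix as laid out in the local memory (plain RAM for a CPU worker, the GPU frame buffer for a GPU worker), and data registered with the "blas" interface is read through the .blas field. A hypothetical sketch of a CPU implementation reading one operand that way is given below; the function name is illustrative and the exact field spellings may differ slightly in this revision:

#include <starpu.h>

/* illustrative sketch, not part of this commit */
static void cpu_mult_sketch(starpu_data_interface_t *descr, void *arg)
{
	(void)arg;

	/* the .blas field describes the sub-matrix as it is laid out in the
	 * local memory, i.e. RAM when this runs on a CPU worker */
	float *subC = (float *)descr[2].blas.ptr;
	unsigned nx = descr[2].blas.nx;	/* sub-matrix dimensions */
	unsigned ny = descr[2].blas.ny;
	unsigned ld = descr[2].blas.ld;	/* leading dimension */

	/* ... accumulate descr[0] x descr[1] into subC using nx, ny and ld ... */
}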

+ 1 - 1
examples/basic-examples/vector-scal.c

@@ -84,7 +84,7 @@ int main(int argc, char **argv)
 	 *  - the second argument is the memory node where the data (ie. "tab")
 	 *    resides initially: 0 stands for an address in main memory, as
 	 *    opposed to an adress on a GPU for instance.
-	 *  - the third argument is the adress of the vector in STARPU_RAM
+	 *  - the third argument is the adress of the vector in RAM
 	 *  - the fourth argument is the number of elements in the vector
 	 *  - the fifth argument is the size of each element.
 	 */
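
The comment above enumerates the arguments of the vector registration call used by this example (memory node, address in RAM, element count, element size). A minimal sketch of such a registration, assuming the pre-1.0 entry point starpu_register_vector_data and an ordinary float array in main memory (array and handle names are illustrative):

#include <starpu.h>

#define NX 1024

static float tab[NX];
static starpu_data_handle tab_handle;

/* illustrative sketch, not part of this commit */
static void register_tab(void)
{
	starpu_register_vector_data(&tab_handle,
			0,			/* node 0: the data currently lives in RAM */
			(uintptr_t)tab,		/* address of the vector in RAM */
			NX,			/* number of elements */
			sizeof(tab[0]));	/* size of each element */
}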

+ 1 - 1
examples/incrementer/incrementer.c

@@ -83,7 +83,7 @@ int main(int argc, char **argv)
 
 	starpu_wait_all_tasks();
 
-	/* update the array in STARPU_RAM */
+	/* update the array in RAM */
 	starpu_sync_data_with_mem(float_array_handle, STARPU_R);
 	
 	gettimeofday(&end, NULL);
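
The two lines above show the usual way to read results back on the host: wait for the submitted tasks, then bring the data into RAM with starpu_sync_data_with_mem(). Below is a small sketch of that pattern with an illustrative handle name; note that, as the errorcheck test further down insists, this blocking call may only be issued outside codelets (inside one it fails with -EDEADLK):

#include <stdio.h>
#include <starpu.h>

/* illustrative sketch, not part of this commit */
static void read_back_results(starpu_data_handle handle)
{
	int ret;

	/* wait for every submitted task to complete */
	starpu_wait_all_tasks();

	/* blocking call: put the current value of the data back in RAM;
	 * STARPU_R suffices when the host only reads the result */
	ret = starpu_sync_data_with_mem(handle, STARPU_R);
	if (ret)
		fprintf(stderr, "starpu_sync_data_with_mem failed (%d)\n", ret);
}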

+ 1 - 1
src/core/topology.c

@@ -464,7 +464,7 @@ static void _starpu_init_workers_binding(struct starpu_machine_config_s *config)
 	/* a single cpu is dedicated for the accelerators */
 	int accelerator_bindid = -1;
 
-	/* note that even if the CPU cpu are not used, we always have a STARPU_RAM node */
+	/* note that even if the CPU cpu are not used, we always have a RAM node */
 	/* TODO : support NUMA  ;) */
 	ram_memory_node = _starpu_register_memory_node(STARPU_RAM);
 

+ 1 - 1
src/datawizard/coherency.c

@@ -190,7 +190,7 @@ int _starpu_fetch_data_on_node(starpu_data_handle handle, uint32_t requesting_no
 			starpu_data_request_t r_src_to_ram;
 			starpu_data_request_t r_ram_to_dst;
 
-			/* XXX we hardcore 0 as the STARPU_RAM node ... */
+			/* XXX we hardcore 0 as the RAM node ... */
 			r_ram_to_dst = _starpu_create_data_request(handle, 0, requesting_node, requesting_node, read, write, is_prefetch);
 
 			if (!is_prefetch)

+ 1 - 1
src/datawizard/hierarchy.c

@@ -248,7 +248,7 @@ void starpu_unpartition_data(starpu_data_handle root_handle, uint32_t gathering_
 
 		int ret;
 		ret = _starpu_fetch_data_on_node(&root_handle->children[child], gathering_node, 1, 0, 0);
-		/* for now we pretend that the STARPU_RAM is almost unlimited and that gathering 
+		/* for now we pretend that the RAM is almost unlimited and that gathering 
 		 * data should be possible from the node that does the unpartionning ... we
 		 * don't want to have the programming deal with memory shortage at that time,
 		 * really */

+ 1 - 1
src/datawizard/memory_nodes.c

@@ -62,7 +62,7 @@ unsigned _starpu_get_local_memory_node(void)
 	unsigned *memory_node;
 	memory_node = pthread_getspecific(memory_node_key);
 	
-	/* in case this is called by the programmer, we assume the STARPU_RAM node 
+	/* in case this is called by the programmer, we assume the RAM node 
 	   is the appropriate memory node ... so we return 0 XXX */
 	if (STARPU_UNLIKELY(!memory_node))
 		return 0;

+ 1 - 1
src/datawizard/user_interactions.c

@@ -49,7 +49,7 @@ struct state_and_node {
 	void *callback_arg;
 };
 
-/* put the current value of the data into STARPU_RAM */
+/* put the current value of the data into RAM */
 static inline void _starpu_sync_data_with_mem_continuation(void *arg)
 {
 	int ret;

+ 1 - 1
tests/datawizard/sync_and_notify_data.c

@@ -115,7 +115,7 @@ int main(int argc, char **argv)
 				goto enodev;
 		}
 
-		/* synchronize v in STARPU_RAM */
+		/* synchronize v in RAM */
 		starpu_sync_data_with_mem(v_handle, STARPU_RW);
 
 		/* increment b */

+ 1 - 1
tests/errorcheck/invalid_blocking_calls.c

@@ -25,7 +25,7 @@ static void wrong_func(void *descr[], void *arg)
 {
 	int ret;
 
-	/* try to fetch data in the STARPU_RAM while we are in a codelet, such a
+	/* try to fetch data in the RAM while we are in a codelet, such a
 	 * blocking call is forbidden */
 	ret = starpu_sync_data_with_mem(handle, STARPU_RW);
 	if (ret != -EDEADLK)

+ 3 - 3
tests/experiments/latency/cuda-latency.c

@@ -52,7 +52,7 @@ void send_data(unsigned src, unsigned dst)
 {
 	cudaError_t cures;
 
-	/* Copy data from GPU to STARPU_RAM */
+	/* Copy data from GPU to RAM */
 #ifdef DO_TRANSFER_GPU_TO_RAM
 #ifdef ASYNC
 	cures = cudaMemcpyAsync(cpu_buffer, gpu_buffer[src], buffer_size, cudaMemcpyDeviceToHost, stream[src]);
@@ -69,7 +69,7 @@ void send_data(unsigned src, unsigned dst)
 #endif
 #endif
 
-	/* Tell the other GPU that data is in STARPU_RAM */
+	/* Tell the other GPU that data is in RAM */
 	pthread_mutex_lock(&mutex_gpu);
 	data_is_available[src] = 0;
 	data_is_available[dst] = 1;
@@ -82,7 +82,7 @@ void recv_data(unsigned src, unsigned dst)
 {
 	cudaError_t cures;
 
-	/* Wait for the data to be in STARPU_RAM */
+	/* Wait for the data to be in RAM */
 	pthread_mutex_lock(&mutex_gpu);
 	while (!data_is_available[dst])
 	{
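
The comments above describe the GPU->RAM->GPU staging that this latency test times: the sender copies its device buffer into a host buffer, flags it as available under mutex_gpu, and the receiver waits for that flag before copying the buffer onto its own device. A condensed sketch of the send side under the same assumptions; cpu_buffer, gpu_buffer[], stream[], buffer_size, mutex_gpu and data_is_available[] are the test's own globals, while cond_gpu stands in for whatever wakeup mechanism the test really uses:

#include <pthread.h>
#include <cuda_runtime.h>

extern void *cpu_buffer;
extern void *gpu_buffer[2];
extern cudaStream_t stream[2];
extern size_t buffer_size;
extern pthread_mutex_t mutex_gpu;
extern pthread_cond_t cond_gpu;		/* illustrative */
extern unsigned data_is_available[2];

/* illustrative sketch, not part of this commit */
static void send_data_sketch(unsigned src, unsigned dst)
{
	/* stage the source GPU's buffer into RAM */
	cudaMemcpyAsync(cpu_buffer, gpu_buffer[src], buffer_size,
			cudaMemcpyDeviceToHost, stream[src]);
	cudaStreamSynchronize(stream[src]);

	/* tell the receiving side that the data is now in RAM */
	pthread_mutex_lock(&mutex_gpu);
	data_is_available[src] = 0;
	data_is_available[dst] = 1;
	pthread_cond_broadcast(&cond_gpu);
	pthread_mutex_unlock(&mutex_gpu);
}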