Browse Source

Remove trailing whitespace in the whole documentation.

Ludovic Stordeur 12 years ago
parent
commit
30bd2b7429

+ 7 - 8
doc/chapters/advanced-api.texi

@@ -7,14 +7,14 @@
 @c See the file starpu.texi for copying conditions.
 @c See the file starpu.texi for copying conditions.
 
 
 @menu
 @menu
-* Defining a new data interface::  
+* Defining a new data interface::
-* Multiformat Data Interface::  
+* Multiformat Data Interface::
-* Task Bundles::                
+* Task Bundles::
-* Task Lists::                  
+* Task Lists::
-* Using Parallel Tasks::        
+* Using Parallel Tasks::
-* Defining a new scheduling policy::  
+* Defining a new scheduling policy::
 * Running drivers::
 * Running drivers::
-* Expert mode::                 
+* Expert mode::
 @end menu
 @end menu
 
 
 @node Defining a new data interface
 @node Defining a new data interface
@@ -783,4 +783,3 @@ Register a progression hook, to be called when workers are idle.
 @deftypefun void starpu_progression_hook_deregister (int @var{hook_id})
 @deftypefun void starpu_progression_hook_deregister (int @var{hook_id})
 Unregister a given progression hook.
 Unregister a given progression hook.
 @end deftypefun
 @end deftypefun
-

+ 7 - 7
doc/chapters/advanced-examples.texi

@@ -9,13 +9,13 @@
 @menu
 @menu
 * Using multiple implementations of a codelet::
 * Using multiple implementations of a codelet::
 * Enabling implementation according to capabilities::
 * Enabling implementation according to capabilities::
-* Task and Worker Profiling::   
+* Task and Worker Profiling::
 * Partitioning Data::
 * Partitioning Data::
-* Performance model example::   
+* Performance model example::
-* Theoretical lower bound on execution time::  
+* Theoretical lower bound on execution time::
-* Insert Task Utility::          
+* Insert Task Utility::
-* Data reduction::  
+* Data reduction::
-* Temporary buffers::  
+* Temporary buffers::
 * Parallel Tasks::
 * Parallel Tasks::
 * Debugging::
 * Debugging::
 * The multiformat interface::
 * The multiformat interface::
@@ -45,7 +45,7 @@ void scal_sse_func(void *buffers[], void *cl_arg)
     __m128 factor __attribute__((aligned(16)));
     __m128 factor __attribute__((aligned(16)));
     factor = _mm_set1_ps(*(float *) cl_arg);
     factor = _mm_set1_ps(*(float *) cl_arg);
 
 
-    unsigned int i;    
+    unsigned int i;
     for (i = 0; i < n_iterations; i++)
     for (i = 0; i < n_iterations; i++)
         VECTOR[i] = _mm_mul_ps(factor, VECTOR[i]);
         VECTOR[i] = _mm_mul_ps(factor, VECTOR[i]);
 @}
 @}

+ 4 - 5
doc/chapters/basic-api.texi

@@ -1029,7 +1029,7 @@ Return a pointer to the non-zero values of the matrix designated by @var{interfa
 
 
 @defmac STARPU_BCSR_GET_NZVAL_DEV_HANDLE ({void *}@var{interface})
 @defmac STARPU_BCSR_GET_NZVAL_DEV_HANDLE ({void *}@var{interface})
 Return a device handle for the array of non-zero values in the matrix designated
 Return a device handle for the array of non-zero values in the matrix designated
-by @var{interface}. The offset documented below has to be used in addition to 
+by @var{interface}. The offset documented below has to be used in addition to
 this.
 this.
 @end defmac
 @end defmac
 
 
@@ -1104,7 +1104,7 @@ Return a pointer to the non-zero values of the matrix designated by @var{interfa
 
 
 @defmac STARPU_CSR_GET_NZVAL_DEV_HANDLE ({void *}@var{interface})
 @defmac STARPU_CSR_GET_NZVAL_DEV_HANDLE ({void *}@var{interface})
 Return a device handle for the array of non-zero values in the matrix designated
 Return a device handle for the array of non-zero values in the matrix designated
-by @var{interface}. The offset documented below has to be used in addition to 
+by @var{interface}. The offset documented below has to be used in addition to
 this.
 this.
 @end defmac
 @end defmac
 
 
@@ -1344,7 +1344,7 @@ vector represented by @var{father_interface} once partitioned in
 @deftypefun void starpu_block_shadow_filter_func_vector (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
 @deftypefun void starpu_block_shadow_filter_func_vector (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
 Return in @code{*@var{child_interface}} the @var{id}th element of the
 Return in @code{*@var{child_interface}} the @var{id}th element of the
 vector represented by @var{father_interface} once partitioned in
 vector represented by @var{father_interface} once partitioned in
-@var{nparts} chunks of equal size with a shadow border @code{filter_arg_ptr}, thus getting a vector of size (n-2*shadow)/nparts+2*shadow 
+@var{nparts} chunks of equal size with a shadow border @code{filter_arg_ptr}, thus getting a vector of size (n-2*shadow)/nparts+2*shadow
 
 
 The @code{filter_arg_ptr} field must be the shadow size casted into @code{void*}.
 The @code{filter_arg_ptr} field must be the shadow size casted into @code{void*}.
 
 
@@ -1544,7 +1544,7 @@ e.g. static storage case.
 @item @code{uint32_t where} (optional)
 @item @code{uint32_t where} (optional)
 Indicates which types of processing units are able to execute the
 Indicates which types of processing units are able to execute the
 codelet. The different values
 codelet. The different values
-@code{STARPU_CPU}, @code{STARPU_CUDA}, 
+@code{STARPU_CPU}, @code{STARPU_CUDA},
 @code{STARPU_OPENCL} can be combined to specify
 @code{STARPU_OPENCL} can be combined to specify
 on which types of processing units the codelet can be executed.
 on which types of processing units the codelet can be executed.
 @code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
 @code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
@@ -2772,4 +2772,3 @@ This function blocks until the function has been executed on every appropriate
 processing units, so that it may not be called from a callback function for
 processing units, so that it may not be called from a callback function for
 instance.
 instance.
 @end deftypefun
 @end deftypefun
-

+ 16 - 16
doc/chapters/basic-examples.texi

@@ -7,10 +7,10 @@
 @c See the file starpu.texi for copying conditions.
 @c See the file starpu.texi for copying conditions.
 
 
 @menu
 @menu
-* Compiling and linking options::  
+* Compiling and linking options::
 * Hello World::                 Submitting Tasks
 * Hello World::                 Submitting Tasks
-* Vector Scaling Using the C Extension::  
+* Vector Scaling Using the C Extension::
-* Vector Scaling Using StarPu's API::  
+* Vector Scaling Using StarPu's API::
 * Vector Scaling on an Hybrid CPU/GPU Machine::  Handling Heterogeneous Architectures
 * Vector Scaling on an Hybrid CPU/GPU Machine::  Handling Heterogeneous Architectures
 @end menu
 @end menu
 
 
@@ -52,8 +52,8 @@ to StarPU. You can either use the StarPU C extension (@pxref{C
 Extensions}) or directly use the StarPU's API.
 Extensions}) or directly use the StarPU's API.
 
 
 @menu
 @menu
-* Hello World using the C Extension::  
+* Hello World using the C Extension::
-* Hello World using StarPU's API::  
+* Hello World using StarPU's API::
 @end menu
 @end menu
 
 
 @node Hello World using the C Extension
 @node Hello World using the C Extension
@@ -116,10 +116,10 @@ The remainder of this section shows how to achieve the same result using
 StarPU's standard C API.
 StarPU's standard C API.
 
 
 @menu
 @menu
-* Required Headers::            
+* Required Headers::
-* Defining a Codelet::          
+* Defining a Codelet::
-* Submitting a Task::           
+* Submitting a Task::
-* Execution of Hello World::    
+* Execution of Hello World::
 @end menu
 @end menu
 
 
 @node Required Headers
 @node Required Headers
@@ -306,8 +306,8 @@ example using StarPU's API is given in the next sections.
 
 
 
 
 @menu
 @menu
-* Adding an OpenCL Task Implementation::  
+* Adding an OpenCL Task Implementation::
-* Adding a CUDA Task Implementation::  
+* Adding a CUDA Task Implementation::
 @end menu
 @end menu
 
 
 The simplest way to get started writing StarPU programs is using the C
 The simplest way to get started writing StarPU programs is using the C
@@ -576,7 +576,7 @@ this example is given in @ref{Full source code for the 'Scaling a
 Vector' example}.
 Vector' example}.
 
 
 @menu
 @menu
-* Source Code of Vector Scaling::  
+* Source Code of Vector Scaling::
 * Execution of Vector Scaling::  Running the program
 * Execution of Vector Scaling::  Running the program
 @end menu
 @end menu
 
 
@@ -701,10 +701,10 @@ Contrary to the previous examples, the task submitted in this example may not
 only be executed by the CPUs, but also by a CUDA device.
 only be executed by the CPUs, but also by a CUDA device.
 
 
 @menu
 @menu
-* Definition of the CUDA Kernel::  
+* Definition of the CUDA Kernel::
-* Definition of the OpenCL Kernel::  
+* Definition of the OpenCL Kernel::
-* Definition of the Main Code::  
+* Definition of the Main Code::
-* Execution of Hybrid Vector Scaling::  
+* Execution of Hybrid Vector Scaling::
 @end menu
 @end menu
 
 
 @node Definition of the CUDA Kernel
 @node Definition of the CUDA Kernel

+ 1 - 1
doc/chapters/benchmarks.texi

@@ -6,7 +6,7 @@
 
 
 @menu
 @menu
 * Task size overhead::           Overhead of tasks depending on their size
 * Task size overhead::           Overhead of tasks depending on their size
-* Data transfer latency::        Latency of data transfers 
+* Data transfer latency::        Latency of data transfers
 * Gemm::                         Matrix-matrix multiplication
 * Gemm::                         Matrix-matrix multiplication
 * Cholesky::                     Cholesky factorization
 * Cholesky::                     Cholesky factorization
 * LU::                           LU factorization
 * LU::                           LU factorization

+ 6 - 6
doc/chapters/configuration.texi

@@ -7,8 +7,8 @@
 @c See the file starpu.texi for copying conditions.
 @c See the file starpu.texi for copying conditions.
 
 
 @menu
 @menu
-* Compilation configuration::   
+* Compilation configuration::
-* Execution configuration through environment variables::  
+* Execution configuration through environment variables::
 @end menu
 @end menu
 
 
 @node Compilation configuration
 @node Compilation configuration
@@ -17,10 +17,10 @@
 The following arguments can be given to the @code{configure} script.
 The following arguments can be given to the @code{configure} script.
 
 
 @menu
 @menu
-* Common configuration::        
+* Common configuration::
-* Configuring workers::         
+* Configuring workers::
-* Extension configuration::     
+* Extension configuration::
-* Advanced configuration::      
+* Advanced configuration::
 @end menu
 @end menu
 
 
 @node Common configuration
 @node Common configuration

+ 0 - 1
doc/chapters/fdl-1.3.texi

@@ -505,4 +505,3 @@ to permit their use in free software.
 @c Local Variables:
 @c Local Variables:
 @c ispell-local-pdict: "ispell-dict"
 @c ispell-local-pdict: "ispell-dict"
 @c End:
 @c End:
-

+ 10 - 10
doc/chapters/installing.texi

@@ -7,9 +7,9 @@
 @c See the file starpu.texi for copying conditions.
 @c See the file starpu.texi for copying conditions.
 
 
 @menu
 @menu
-* Downloading StarPU::          
+* Downloading StarPU::
-* Configuration of StarPU::     
+* Configuration of StarPU::
-* Building and Installing StarPU::  
+* Building and Installing StarPU::
 @end menu
 @end menu
 
 
 StarPU can be built and installed by the standard means of the GNU
 StarPU can be built and installed by the standard means of the GNU
@@ -20,8 +20,8 @@ can be used to install StarPU.
 @section Downloading StarPU
 @section Downloading StarPU
 
 
 @menu
 @menu
-* Getting Sources::             
+* Getting Sources::
-* Optional dependencies::       
+* Optional dependencies::
 @end menu
 @end menu
 
 
 @node Getting Sources
 @node Getting Sources
@@ -69,8 +69,8 @@ of hwloc.
 @section Configuration of StarPU
 @section Configuration of StarPU
 
 
 @menu
 @menu
-* Generating Makefiles and configuration scripts::  
+* Generating Makefiles and configuration scripts::
-* Running the configuration::   
+* Running the configuration::
 @end menu
 @end menu
 
 
 @node Generating Makefiles and configuration scripts
 @node Generating Makefiles and configuration scripts
@@ -99,9 +99,9 @@ Details about options that are useful to give to @code{./configure} are given in
 @section Building and Installing StarPU
 @section Building and Installing StarPU
 
 
 @menu
 @menu
-* Building::                    
+* Building::
-* Sanity Checks::               
+* Sanity Checks::
-* Installing::                  
+* Installing::
 @end menu
 @end menu
 
 
 @node Building
 @node Building

+ 2 - 2
doc/chapters/introduction.texi

@@ -70,8 +70,8 @@ policies in a portable fashion (@pxref{Scheduling Policy API}).
 The remainder of this section describes the main concepts used in StarPU.
 The remainder of this section describes the main concepts used in StarPU.
 
 
 @menu
 @menu
-* Codelet and Tasks::           
+* Codelet and Tasks::
-* StarPU Data Management Library::  
+* StarPU Data Management Library::
 * Glossary::
 * Glossary::
 * Research Papers::
 * Research Papers::
 @end menu
 @end menu

+ 5 - 7
doc/chapters/mpi-support.texi

@@ -20,11 +20,11 @@ distributed application, by automatically issuing all required data transfers
 according to the task graph and an application-provided distribution.
 according to the task graph and an application-provided distribution.
 
 
 @menu
 @menu
-* The API::                     
+* The API::
-* Simple Example::              
+* Simple Example::
-* Exchanging User Defined Data Interface::  
+* Exchanging User Defined Data Interface::
-* MPI Insert Task Utility::     
+* MPI Insert Task Utility::
-* MPI Collective Operations::   
+* MPI Collective Operations::
 @end menu
 @end menu
 
 
 @node The API
 @node The API
@@ -592,5 +592,3 @@ for(x = 0; x < nblocks ;  x++) @{
 starpu_mpi_gather_detached(data_handles, nblocks, 0, MPI_COMM_WORLD);
 starpu_mpi_gather_detached(data_handles, nblocks, 0, MPI_COMM_WORLD);
 @end smallexample
 @end smallexample
 @end cartouche
 @end cartouche
-
-

+ 5 - 11
doc/chapters/perf-feedback.texi

@@ -11,9 +11,9 @@
 * On-line::                     On-line performance feedback
 * On-line::                     On-line performance feedback
 * Off-line::                    Off-line performance feedback
 * Off-line::                    Off-line performance feedback
 * Codelet performance::         Performance of codelets
 * Codelet performance::         Performance of codelets
-* Theoretical lower bound on execution time API::  
+* Theoretical lower bound on execution time API::
-* Memory feedback::             
+* Memory feedback::
-* Data statistics::        
+* Data statistics::
 @end menu
 @end menu
 
 
 @node Task debugger
 @node Task debugger
@@ -108,7 +108,7 @@ because there is no task to execute at all (@code{sleeping_time}), and the
 number of tasks that were executed while profiling was enabled.
 number of tasks that were executed while profiling was enabled.
 These values give an estimation of the proportion of time spent do real work,
 These values give an estimation of the proportion of time spent do real work,
 and the time spent either sleeping because there are not enough executable
 and the time spent either sleeping because there are not enough executable
-tasks or simply wasted in pure StarPU overhead. 
+tasks or simply wasted in pure StarPU overhead.
 
 
 Calling @code{starpu_worker_get_profiling_info} resets the profiling
 Calling @code{starpu_worker_get_profiling_info} resets the profiling
 information associated to a worker.
 information associated to a worker.
@@ -119,7 +119,7 @@ generate a graphic showing the evolution of these values during the time, for
 the different workers.
 the different workers.
 
 
 @node Bus feedback
 @node Bus feedback
-@subsection Bus-related feedback 
+@subsection Bus-related feedback
 
 
 TODO: ajouter STARPU_BUS_STATS
 TODO: ajouter STARPU_BUS_STATS
 
 
@@ -559,9 +559,3 @@ Synthetic GFlops : 44.21
 
 
 @c TODO: data transfer stats are similar to the ones displayed when
 @c TODO: data transfer stats are similar to the ones displayed when
 @c setting STARPU_BUS_STATS
 @c setting STARPU_BUS_STATS
-
-
-
-
-
-

+ 3 - 3
doc/chapters/perf-optimization.texi

@@ -232,7 +232,7 @@ A graph can be drawn by using the @code{starpu_perfmodel_plot}:
 
 
 @example
 @example
 $ starpu_perfmodel_plot -s starpu_dlu_lu_model_22
 $ starpu_perfmodel_plot -s starpu_dlu_lu_model_22
-98304 393216 1572864 
+98304 393216 1572864
 $ gnuplot starpu_starpu_dlu_lu_model_22.gp
 $ gnuplot starpu_starpu_dlu_lu_model_22.gp
 $ gv starpu_starpu_dlu_lu_model_22.eps
 $ gv starpu_starpu_dlu_lu_model_22.eps
 @end example
 @end example
@@ -361,12 +361,12 @@ with these manual measurements through @code{starpu_perfmodel_update_history}.
 @node Profiling
 @node Profiling
 @section Profiling
 @section Profiling
 
 
-A quick view of how many tasks each worker has executed can be obtained by setting 
+A quick view of how many tasks each worker has executed can be obtained by setting
 @code{export STARPU_WORKER_STATS=1} This is a convenient way to check that
 @code{export STARPU_WORKER_STATS=1} This is a convenient way to check that
 execution did happen on accelerators without penalizing performance with
 execution did happen on accelerators without penalizing performance with
 the profiling overhead.
 the profiling overhead.
 
 
-A quick view of how much data transfers have been issued can be obtained by setting 
+A quick view of how much data transfers have been issued can be obtained by setting
 @code{export STARPU_BUS_STATS=1} .
 @code{export STARPU_BUS_STATS=1} .
 
 
 More detailed profiling information can be enabled by using @code{export STARPU_PROFILING=1} or by
 More detailed profiling information can be enabled by using @code{export STARPU_PROFILING=1} or by

+ 6 - 7
doc/chapters/scaling-vector-example.texi

@@ -7,10 +7,10 @@
 @c See the file starpu.texi for copying conditions.
 @c See the file starpu.texi for copying conditions.
 
 
 @menu
 @menu
-* Main application::            
+* Main application::
-* CPU Kernel::                 
+* CPU Kernel::
-* CUDA Kernel::                
+* CUDA Kernel::
-* OpenCL Kernel::              
+* OpenCL Kernel::
 @end menu
 @end menu
 
 
 @node Main application
 @node Main application
@@ -32,8 +32,8 @@
 @section OpenCL Kernel
 @section OpenCL Kernel
 
 
 @menu
 @menu
-* Invoking the kernel::         
+* Invoking the kernel::
-* Source of the kernel::        
+* Source of the kernel::
 @end menu
 @end menu
 
 
 @node Invoking the kernel
 @node Invoking the kernel
@@ -45,4 +45,3 @@
 @subsection Source of the kernel
 @subsection Source of the kernel
 
 
 @include chapters/vector_scal_opencl_codelet.texi
 @include chapters/vector_scal_opencl_codelet.texi
-

+ 2 - 3
doc/chapters/using.texi

@@ -7,8 +7,8 @@
 @c See the file starpu.texi for copying conditions.
 @c See the file starpu.texi for copying conditions.
 
 
 @menu
 @menu
-* Setting flags for compiling and linking applications::  
+* Setting flags for compiling and linking applications::
-* Running a basic StarPU application::  
+* Running a basic StarPU application::
 * Kernel threads started by StarPU::
 * Kernel threads started by StarPU::
 * Enabling OpenCL::
 * Enabling OpenCL::
 @end menu
 @end menu
@@ -111,4 +111,3 @@ so:
 @example
 @example
 % STARPU_NCUDA=2 ./application
 % STARPU_NCUDA=2 ./application
 @end example
 @end example
-

+ 1 - 1
doc/chapters/vector_scal_cpu.texi

@@ -51,7 +51,7 @@ void scal_sse_func(void *buffers[], void *cl_arg)
     float factor = *(float *) cl_arg;
     float factor = *(float *) cl_arg;
     FACTOR = _mm_set1_ps(factor);
     FACTOR = _mm_set1_ps(factor);
 
 
-    unsigned int i;	
+    unsigned int i;
     for (i = 0; i < n_iterations; i++)
     for (i = 0; i < n_iterations; i++)
         VECTOR[i] = _mm_mul_ps(FACTOR, VECTOR[i]);
         VECTOR[i] = _mm_mul_ps(FACTOR, VECTOR[i]);
 
 

+ 1 - 1
doc/starpu.texi

@@ -81,7 +81,7 @@ was last updated on @value{UPDATED}.
 * StarPU Basic API::            The Basic API to use StarPU
 * StarPU Basic API::            The Basic API to use StarPU
 * StarPU Advanced API::         Advanced use of StarPU
 * StarPU Advanced API::         Advanced use of StarPU
 * Configuring StarPU::          How to configure StarPU
 * Configuring StarPU::          How to configure StarPU
-* Full source code for the 'Scaling a Vector' example::  
+* Full source code for the 'Scaling a Vector' example::
 * GNU Free Documentation License::  How you can copy and share this manual.
 * GNU Free Documentation License::  How you can copy and share this manual.
 
 
 * Concept Index::               Index of programming concepts.
 * Concept Index::               Index of programming concepts.