소스 검색

doc: minor fixes (typos)

Nathalie Furmento 13 년 전
부모
커밋
6b6c36d5fe

+ 9 - 9
doc/chapters/advanced-examples.texi

@@ -25,7 +25,7 @@
 @section Using multiple implementations of a codelet
 One may want to write multiple implementations of a codelet for a single type of
 device and let StarPU choose which one to run. As an example, we will show how
-to use SSE to scale a vector. The codelet can be written as follows :
+to use SSE to scale a vector. The codelet can be written as follows:
 
 @cartouche
 @smallexample
@@ -202,10 +202,10 @@ for (worker = 0; worker < starpu_worker_get_count(); worker++)
         char workername[128];
         starpu_worker_get_name(worker, workername, 128);
         fprintf(stderr, "Worker %s:\n", workername);
-        fprintf(stderr, "\ttotal time : %.2lf ms\n", total_time*1e-3);
-        fprintf(stderr, "\texec time  : %.2lf ms (%.2f %%)\n", executing_time*1e-3,
+        fprintf(stderr, "\ttotal time: %.2lf ms\n", total_time*1e-3);
+        fprintf(stderr, "\texec time: %.2lf ms (%.2f %%)\n", executing_time*1e-3,
                 executing_ratio);
-        fprintf(stderr, "\tblocked time  : %.2lf ms (%.2f %%)\n", sleeping_time*1e-3,
+        fprintf(stderr, "\tblocked time: %.2lf ms (%.2f %%)\n", sleeping_time*1e-3,
                 sleeping_ratio);
 @}
 @end smallexample
@@ -606,7 +606,7 @@ static struct starpu_codelet cl =
 @end example
 
 Other examples include for instance calling a BLAS parallel CPU implementation
-(see examples/mult/xgemm.c).
+(see @code{examples/mult/xgemm.c}).
 
 @subsection SPMD-mode parallel tasks
 
@@ -635,7 +635,7 @@ static void func(void *buffers[], void *args)
         val[i] *= *factor;
 @}
 
-statuc struct starpu_codelet cl =
+static struct starpu_codelet cl =
 @{
     .modes = @{ STARPU_RW @},
     .where = STARP_CPU,
@@ -664,7 +664,7 @@ combined worker if the codelet does not actually scale so much.
 @subsection Combined worker sizes
 
 By default, StarPU creates combined workers according to the architecture
-structure as detected by HwLoc. It means that for each object of the Hwloc
+structure as detected by hwloc. It means that for each object of the hwloc
 topology (NUMA node, socket, cache, ...) a combined worker will be created. If
 some nodes of the hierarchy have a big arity (e.g. many cores in a socket
 without a hierarchy of shared caches), StarPU will create combined workers of
@@ -705,11 +705,11 @@ gdb helpers are also provided to show the whole StarPU state:
 @node The multiformat interface
 @section The multiformat interface
 It may be interesting to represent the same piece of data using two different
-data structures : one that would only be used on CPUs, and one that would only
+data structures: one that would only be used on CPUs, and one that would only
 be used on GPUs. This can be done by using the multiformat interface. StarPU
 will be able to convert data from one data structure to the other when needed.
 Note that the heft scheduler is the only one optimized for this interface. The
-user must provide StarPU with conversion codelets :
+user must provide StarPU with conversion codelets:
 
 @cartouche
 @smallexample

+ 4 - 4
doc/chapters/basic-api.texi

@@ -544,7 +544,7 @@ The different values are:
 @item @code{STARPU_VARIABLE_INTERFACE_ID}
 @item @code{STARPU_VOID_INTERFACE_ID}
 @item @code{STARPU_MULTIFORMAT_INTERFACE_ID}
-@item @code{STARPU_NINTERFACES_ID} : number of data interfaces
+@item @code{STARPU_NINTERFACES_ID}: number of data interfaces
 @end table
 @end deftp
 
@@ -1067,7 +1067,7 @@ unset, its value will be automatically set based on the availability
 of the @code{XXX_funcs} fields defined below.
 
 @item @code{can_execute} (optional)
-Function prototype : 
+Function prototype: 
 @code{int (*can_execute)(unsigned workerid, struct starpu_task *task, unsigned nimpl)}.
 Returns 1 if the worker designated by @var{workerid} can execute the @var{nimpl}th implementation of the given@var{task}, 0 otherwise.
 
@@ -1407,7 +1407,7 @@ Output on @code{stderr} some statistics on the codelet @var{cl}.
 This function waits until there is no more ready task.
 @end deftypefun
 
-@c Callbacks : what can we put in callbacks ?
+@c Callbacks: what can we put in callbacks ?
 
 @node Explicit Dependencies
 @section Explicit Dependencies
@@ -1555,7 +1555,7 @@ OpenCL types range within STARPU_OPENCL_DEFAULT (GPU number 0), STARPU_OPENCL_DE
 @end deftp
 
 @deftp {Data Type} {enum starpu_perfmodel_type}
-The possible values are :
+The possible values are:
 @table @asis
 @item @code{STARPU_PER_ARCH} for application-provided per-arch cost model functions.
 @item @code{STARPU_COMMON} for application-provided common cost model function, with per-arch factor.

+ 1 - 1
doc/chapters/introduction.texi

@@ -65,7 +65,7 @@ One of the StarPU primary data structures is the @b{codelet}. A codelet describe
 computational kernel that can possibly be implemented on multiple architectures
 such as a CPU, a CUDA device or a Cell's SPU.
 
-@c TODO insert illustration f : f_spu, f_cpu, ...
+@c TODO insert illustration f: f_spu, f_cpu, ...
 
 Another important data structure is the @b{task}. Executing a StarPU task
 consists in applying a codelet on a data set, on one of the architectures on

+ 1 - 1
doc/chapters/mpi-support.texi

@@ -167,7 +167,7 @@ int main(int argc, char **argv)
         if (loop == last_loop && rank == last_rank)
         @{
             starpu_data_acquire(token_handle, STARPU_R);
-            fprintf(stdout, "Finished : token value %d\n", token);
+            fprintf(stdout, "Finished: token value %d\n", token);
             starpu_data_release(token_handle);
         @}
         else

+ 1 - 1
doc/chapters/perf-feedback.texi

@@ -110,7 +110,7 @@ The bus speed measured by StarPU can be displayed by using the
 @code{starpu_machine_display} tool, for instance:
 
 @example
-StarPU has found :
+StarPU has found:
         3 CUDA devices
                 CUDA 0 (Tesla C2050 02:00.0)
                 CUDA 1 (Tesla C2050 03:00.0)

+ 2 - 2
doc/chapters/using.texi

@@ -53,8 +53,8 @@ Basic examples using StarPU are built in the directory
 
 @example
 % ./examples/basic_examples/vector_scal
-BEFORE : First element was 1.000000
-AFTER First element is 3.140000
+BEFORE: First element was 1.000000
+AFTER: First element is 3.140000
 %
 @end example
 

+ 1 - 1
doc/chapters/vector_scal_c.texi

@@ -52,7 +52,7 @@ int main(int argc, char **argv)
     for (i = 0; i < NX; i++)
         vector[i] = 1.0f;
 
-    fprintf(stderr, "BEFORE : First element was %f\n", vector[0]);
+    fprintf(stderr, "BEFORE: First element was %f\n", vector[0]);
 
     /* Initialize StarPU with default configuration */
     starpu_init(NULL);

+ 1 - 1
doc/chapters/vector_scal_cpu.texi

@@ -30,7 +30,7 @@ void scal_cpu_func(void *buffers[], void *cl_arg)
     /* length of the vector */
     unsigned n = STARPU_VECTOR_GET_NX(vector);
 
-    /* get a pointer to the local copy of the vector : note that we have to
+    /* get a pointer to the local copy of the vector: note that we have to
      * cast it in (float *) since a vector could contain any type of
      * elements so that the .ptr field is actually a uintptr_t */
     float *val = (float *)STARPU_VECTOR_GET_PTR(vector);