
/*
 * This file is part of the StarPU Handbook.
 * Copyright (C) 2009--2011  Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012, 2013  Centre National de la Recherche Scientifique
 * Copyright (C) 2011, 2012  Institut National de Recherche en Informatique et Automatique
 * See the file version.doxy for copying conditions.
 */
/*! \page basicExamples Basic Examples

\section Hello_World_using_the_C_Extension Hello World using the C Extension

This section shows how to implement a simple program that submits a task
to StarPU using the StarPU C extension (\ref cExtensions). The complete
example, along with additional examples, is available in the
<c>gcc-plugin/examples</c> directory of the StarPU distribution. A similar
example showing how to use StarPU's API directly is given in
\ref Hello_World_using_StarPU_API.

Starting with version 4.5, GCC can use the StarPU GCC plug-in
(\ref cExtensions), which makes writing a task both simpler and less
error-prone. In a nutshell, all it takes is to declare a task, declare and
define its implementations (for CPU, OpenCL, and/or CUDA), and invoke the
task like a regular C function. The example below defines <c>my_task</c>,
which has a single implementation for CPU:
\code{.c}
#include <stdio.h>

/* Task declaration. */
static void my_task (int x) __attribute__ ((task));

/* Definition of the CPU implementation of `my_task'. */
static void my_task (int x)
{
  printf ("Hello, world! With x = %d\n", x);
}

int main ()
{
  /* Initialize StarPU. */
#pragma starpu initialize

  /* Do an asynchronous call to `my_task'. */
  my_task (42);

  /* Wait for the call to complete. */
#pragma starpu wait

  /* Terminate. */
#pragma starpu shutdown

  return 0;
}
\endcode
The code can then be compiled and linked with GCC and the <c>-fplugin</c> flag:

\verbatim
$ gcc `pkg-config starpu-1.1 --cflags` hello-starpu.c \
    -fplugin=`pkg-config starpu-1.1 --variable=gccplugin` \
    `pkg-config starpu-1.1 --libs`
\endverbatim

The code can also be compiled without the StarPU C extension, in which case
it behaves as normal sequential code:

\verbatim
$ gcc hello-starpu.c
hello-starpu.c:33:1: warning: ‘task’ attribute directive ignored [-Wattributes]
$ ./a.out
Hello, world! With x = 42
\endverbatim
As can be seen above, the C extensions allow programmers to use StarPU tasks
by essentially annotating "regular" C code.

\section Hello_World_using_StarPU_API Hello World using StarPU's API

This section shows how to achieve the same result as in the previous
section using StarPU's standard C API.

\subsection Required_Headers Required Headers

The <c>starpu.h</c> header should be included in any code using StarPU.

\code{.c}
#include <starpu.h>
\endcode
\subsection Defining_a_Codelet Defining a Codelet

\code{.c}
struct params
{
  int i;
  float f;
};

void cpu_func(void *buffers[], void *cl_arg)
{
  struct params *params = cl_arg;

  printf("Hello world (params = {%i, %f} )\n", params->i, params->f);
}

struct starpu_codelet cl =
{
  .where = STARPU_CPU,
  .cpu_funcs = { cpu_func, NULL },
  .nbuffers = 0
};
\endcode
A codelet is a structure that represents a computational kernel. Such a codelet
may contain implementations of the same kernel for different architectures
(e.g. CUDA, x86, ...). For compatibility, make sure that the whole structure is
properly initialized to zero, either by using the function
starpu_codelet_init(), or by letting the compiler implicitly do it as
exemplified above.

The <c>nbuffers</c> field specifies the number of data buffers that are
manipulated by the codelet: here the codelet does not access or modify any data
that is controlled by our data management library. Note that the argument
passed to the codelet (the <c>cl_arg</c> field of the <c>starpu_task</c>
structure) does not count as a buffer since it is not managed by our data
management library, but just contains trivial parameters.
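As a minimal sketch of the first, explicit approach, the same codelet could be
set up with starpu_codelet_init() instead of the designated-initializer syntax
used above:

\code{.c}
struct starpu_codelet cl;

/* Zero all fields, then fill in only the ones we use. */
starpu_codelet_init(&cl);
cl.where = STARPU_CPU;
cl.cpu_funcs[0] = cpu_func;
cl.nbuffers = 0;
\endcode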
\internal
TODO need a crossref to the proper description of "where" see bla for more ...
\endinternal

We create a codelet which may only be executed on the CPUs. The <c>where</c>
field is a bitmask that defines where the codelet may be executed. Here, the
<c>STARPU_CPU</c> value means that only CPUs can execute this codelet (see the
chapter Codelets and Tasks for more details on this field). Note that the
<c>where</c> field is optional: when it is unset, its value is automatically
set based on the availability of the different <c>XXX_funcs</c> fields.
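For instance, the following sketch is equivalent to the codelet above but omits
<c>where</c>; since only <c>cpu_funcs</c> is set, StarPU deduces that the
codelet may only run on CPUs:

\code{.c}
struct starpu_codelet cl =
{
  /* No `where' field: its value is deduced from the
     fact that only `cpu_funcs' is provided. */
  .cpu_funcs = { cpu_func, NULL },
  .nbuffers = 0
};
\endcode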
When a CPU core executes a codelet, it calls the <c>cpu_func</c> function,
which \em must have the following prototype:

\code{.c}
void (*cpu_func)(void *buffers[], void *cl_arg);
\endcode

In this example, we can ignore the first argument of this function, which gives
a description of the input and output buffers (e.g. the size and the location
of the matrices), since there is none. The second argument is a pointer to a
buffer passed as an argument to the codelet by means of the <c>cl_arg</c>
field of the <c>starpu_task</c> structure.

\internal
TODO rewrite so that it is a little clearer ?
\endinternal

Be aware that this may be a pointer to a \em copy of the actual buffer, and not
the pointer given by the programmer: if the codelet modifies this buffer, there
is no guarantee that the initial buffer will be modified as well. This for
instance implies that the buffer cannot be used as a synchronization medium. If
synchronization is needed, data has to be registered to StarPU, see
\ref Vector_Scaling_Using_StarPU_API.
\subsection Submitting_a_Task Submitting a Task

\code{.c}
void callback_func(void *callback_arg)
{
  printf("Callback function (arg %x)\n", (unsigned) (uintptr_t) callback_arg);
}

int main(int argc, char **argv)
{
  /* initialize StarPU */
  starpu_init(NULL);

  struct starpu_task *task = starpu_task_create();

  task->cl = &cl; /* Pointer to the codelet defined above */

  struct params params = { 1, 2.0f };
  task->cl_arg = &params;
  task->cl_arg_size = sizeof(params);

  task->callback_func = callback_func;
  task->callback_arg = (void *) 0x42;

  /* starpu_task_submit will be a blocking call */
  task->synchronous = 1;

  /* submit the task to StarPU */
  starpu_task_submit(task);

  /* terminate StarPU */
  starpu_shutdown();

  return 0;
}
\endcode
Before submitting any tasks to StarPU, starpu_init() must be called. The
<c>NULL</c> argument specifies that we use the default configuration. Tasks
cannot be submitted after the termination of StarPU by a call to
starpu_shutdown().
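Since initialization may fail, for instance when no worker at all can be
started, a more defensive sketch checks the value returned by starpu_init():

\code{.c}
int ret = starpu_init(NULL);
if (ret == -ENODEV)
{
  /* StarPU could not initialize any worker: fall back to plain
     sequential code. */
  fprintf(stderr, "no StarPU worker available\n");
  return 1;
}
STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
\endcode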
In the example above, a task structure is allocated by a call to
starpu_task_create(). This function only allocates and fills the corresponding
structure with the default settings (see the chapter Codelets and Tasks), but
it does not submit the task to StarPU.

\internal
not really clear ;)
\endinternal
The <c>cl</c> field is a pointer to the codelet which the task will execute: in
other words, the codelet structure describes which computational kernel should
be offloaded on the different architectures, and the task structure is a
wrapper containing a codelet and the piece of data on which the codelet should
operate.

The optional <c>cl_arg</c> field is a pointer to a buffer (of size
<c>cl_arg_size</c>) with some parameters for the kernel described by the
codelet. For instance, if a codelet implements a computational kernel that
multiplies its input vector by a constant, the constant could be specified by
means of this buffer, instead of registering it as StarPU data. It must however
be noted that StarPU avoids making copies whenever possible and rather passes
the pointer as such, so the buffer which is pointed to must be kept allocated
until the task terminates, and if several tasks are submitted with various
parameters, each of them must be given a pointer to its own buffer.
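A minimal sketch of this last point, reusing the <c>params</c> structure and
the codelet <c>cl</c> defined earlier (the <c>free_params_callback</c> helper
is ours, not part of StarPU): each task gets its own heap-allocated parameter
buffer, which is freed from the callback once the task has terminated.

\code{.c}
void free_params_callback(void *callback_arg)
{
  /* The task has terminated, so its parameter buffer may be freed. */
  free(callback_arg);
}

/* In main(): submit several tasks, each with its own buffer. */
int i;
for (i = 0; i < 10; i++)
{
  struct params *p = malloc(sizeof(*p));
  p->i = i;
  p->f = 2.0f * i;

  struct starpu_task *task = starpu_task_create();
  task->cl = &cl;
  task->cl_arg = p;           /* each task points to its own buffer... */
  task->cl_arg_size = sizeof(*p);
  task->callback_func = free_params_callback;
  task->callback_arg = p;     /* ...which is freed from the callback */
  starpu_task_submit(task);
}
\endcode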
Once a task has been executed, an optional callback function is called. While
the computational kernel could be offloaded on various architectures, the
callback function is always executed on a CPU. The <c>callback_arg</c> pointer
is passed as an argument to the callback. The prototype of a callback function
must be:

\code{.c}
void (*callback_function)(void *);
\endcode

If the <c>synchronous</c> field is non-zero, task submission will be
synchronous: the starpu_task_submit() function will not return until the task
has been executed. Note that the starpu_shutdown() function does not guarantee
that asynchronous tasks have been executed before it returns:
starpu_task_wait_for_all() can be used to that effect, or data can be
unregistered (starpu_data_unregister()), which will implicitly wait for all the
tasks scheduled to work on it, unless explicitly disabled thanks to
starpu_data_set_default_sequential_consistency_flag() or
starpu_data_set_sequential_consistency_flag().
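As a sketch, the asynchronous variant of the example above simply leaves
<c>synchronous</c> at its default value of 0 and waits once before shutting
down:

\code{.c}
task->synchronous = 0;  /* starpu_task_submit() returns immediately */
starpu_task_submit(task);

/* ... possibly submit more tasks ... */

/* Block until all submitted tasks have been executed. */
starpu_task_wait_for_all();
starpu_shutdown();
\endcode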
\subsection Execution_of_Hello_World Execution of Hello World

\verbatim
$ make hello_world
cc $(pkg-config --cflags starpu-1.1) $(pkg-config --libs starpu-1.1) hello_world.c -o hello_world
$ ./hello_world
Hello world (params = {1, 2.000000} )
Callback function (arg 42)
\endverbatim
\section Vector_Scaling_Using_the_C_Extension Vector Scaling Using the C Extension

The previous example has shown how to submit tasks. In this section, we show
how StarPU tasks can manipulate data.

We will first show how to use the C language extensions provided by the GCC
plug-in (\ref cExtensions). The complete example, along with additional
examples, is available in the <c>gcc-plugin/examples</c> directory of the
StarPU distribution. These extensions map directly to StarPU's main concepts:
tasks, task implementations for CPU, OpenCL, or CUDA, and registered data
buffers. The standard C version that uses StarPU's standard C programming
interface is given in the next section (\ref Vector_Scaling_Using_StarPU_API).

First of all, the vector-scaling task and its simple CPU implementation have
to be defined:
\code{.c}
/* Declare the `vector_scal' task. */
static void vector_scal (unsigned size, float vector[size],
                         float factor)
  __attribute__ ((task));

/* Define the standard CPU implementation. */
static void
vector_scal (unsigned size, float vector[size], float factor)
{
  unsigned i;
  for (i = 0; i < size; i++)
    vector[i] *= factor;
}
\endcode
Next, the body of the program, which uses the task defined above, can be
implemented:

\code{.c}
int
main (void)
{
#pragma starpu initialize

#define NX     0x100000
#define FACTOR 3.14

  {
    float vector[NX]
      __attribute__ ((heap_allocated, registered));

    size_t i;
    for (i = 0; i < NX; i++)
      vector[i] = (float) i;

    vector_scal (NX, vector, FACTOR);

#pragma starpu wait
  } /* VECTOR is automatically freed here. */

#pragma starpu shutdown

  return 0;
}
\endcode
The <c>main</c> function above does several things:

<ul>
<li>
It initializes StarPU.
</li>
<li>
It allocates <c>vector</c> in the heap; it will automatically be freed when its
scope is left. Alternatively, good old <c>malloc</c> and <c>free</c> could have
been used, but they are more error-prone and require more typing.
</li>
<li>
It registers the memory pointed to by <c>vector</c>. Eventually, when OpenCL or
CUDA task implementations are added, this will allow StarPU to transfer that
memory region between GPUs and the main memory. Removing this attribute is an
error.
</li>
<li>
It invokes the <c>vector_scal</c> task. The invocation looks the same as a
standard C function call. However, it is an asynchronous invocation, meaning
that the actual call is performed in parallel with the caller's continuation.
</li>
<li>
It waits for the termination of the <c>vector_scal</c> asynchronous call.
</li>
<li>
Finally, StarPU is shut down.
</li>
</ul>
The program can be compiled and linked with GCC and the <c>-fplugin</c> flag:

\verbatim
$ gcc `pkg-config starpu-1.1 --cflags` vector_scal.c \
    -fplugin=`pkg-config starpu-1.1 --variable=gccplugin` \
    `pkg-config starpu-1.1 --libs`
\endverbatim

And voilà!
\subsection Adding_an_OpenCL_Task_Implementation Adding an OpenCL Task Implementation

Now, this is all fine and great, but you certainly want to take advantage of
these newfangled GPUs that your lab just bought, don't you? So, let's add an
OpenCL implementation of the <c>vector_scal</c> task. We assume that the
OpenCL kernel is available in a file, <c>vector_scal_opencl_kernel.cl</c>, not
shown here. The OpenCL task implementation is similar to that used with the
standard C API (\ref Definition_of_the_OpenCL_Kernel). It is declared and
defined in our C file like this:
\code{.c}
/* The OpenCL programs, loaded from `main' (see below). */
static struct starpu_opencl_program cl_programs;

static void vector_scal_opencl (unsigned size, float vector[size],
                                float factor)
  __attribute__ ((task_implementation ("opencl", vector_scal)));

static void
vector_scal_opencl (unsigned size, float vector[size], float factor)
{
  int id, devid, err;
  cl_kernel kernel;
  cl_command_queue queue;
  cl_event event;

  /* VECTOR is a GPU memory pointer, not a main memory pointer. */
  cl_mem val = (cl_mem) vector;

  id = starpu_worker_get_id ();
  devid = starpu_worker_get_devid (id);

  /* Prepare to invoke the kernel.  In the future, this will be largely
     automated. */
  err = starpu_opencl_load_kernel (&kernel, &queue, &cl_programs,
                                   "vector_mult_opencl", devid);
  if (err != CL_SUCCESS)
    STARPU_OPENCL_REPORT_ERROR (err);

  err = clSetKernelArg (kernel, 0, sizeof (size), &size);
  err |= clSetKernelArg (kernel, 1, sizeof (val), &val);
  err |= clSetKernelArg (kernel, 2, sizeof (factor), &factor);
  if (err)
    STARPU_OPENCL_REPORT_ERROR (err);

  size_t global = 1, local = 1;
  err = clEnqueueNDRangeKernel (queue, kernel, 1, NULL, &global,
                                &local, 0, NULL, &event);
  if (err != CL_SUCCESS)
    STARPU_OPENCL_REPORT_ERROR (err);

  clFinish (queue);
  starpu_opencl_collect_stats (event);
  clReleaseEvent (event);

  /* Done with KERNEL. */
  starpu_opencl_release_kernel (kernel);
}
\endcode
The OpenCL kernel itself must be loaded from <c>main</c>, sometime after the
<c>initialize</c> pragma:

\code{.c}
starpu_opencl_load_opencl_from_file ("vector_scal_opencl_kernel.cl",
                                     &cl_programs, "");
\endcode
And that's it. The <c>vector_scal</c> task now has an additional
implementation, for OpenCL, which StarPU's scheduler may choose to use at
run-time. Unfortunately, the <c>vector_scal_opencl</c> function above still has
to go through the common OpenCL boilerplate; in the future, additional
extensions will automate most of it.
\subsection Adding_a_CUDA_Task_Implementation Adding a CUDA Task Implementation

Adding a CUDA implementation of the task is very similar, except that the
implementation itself is typically written in CUDA, and compiled with
<c>nvcc</c>. Thus, the C file only needs to contain an external declaration for
the task implementation:

\code{.c}
extern void vector_scal_cuda (unsigned size, float vector[size],
                              float factor)
  __attribute__ ((task_implementation ("cuda", vector_scal)));
\endcode
The actual implementation of the CUDA task goes into a separate compilation
unit, in a <c>.cu</c> file. It is very close to the implementation when using
StarPU's standard C API (\ref Definition_of_the_CUDA_Kernel).

\code{.c}
/* CUDA implementation of the `vector_scal' task, to be compiled with `nvcc'. */

#include <starpu.h>
#include <stdlib.h>

static __global__ void
vector_mult_cuda (unsigned n, float *val, float factor)
{
  unsigned i = blockIdx.x * blockDim.x + threadIdx.x;

  if (i < n)
    val[i] *= factor;
}

/* Definition of the task implementation declared in the C file. */
extern "C" void
vector_scal_cuda (unsigned size, float vector[], float factor)
{
  unsigned threads_per_block = 64;
  unsigned nblocks = (size + threads_per_block - 1) / threads_per_block;

  vector_mult_cuda <<< nblocks, threads_per_block, 0,
    starpu_cuda_get_local_stream () >>> (size, vector, factor);

  cudaStreamSynchronize (starpu_cuda_get_local_stream ());
}
\endcode
The complete source code, in the <c>gcc-plugin/examples/vector_scal</c>
directory of the StarPU distribution, also shows how an SSE-specialized CPU
task implementation can be added.

For more details on the C extensions provided by StarPU's GCC plug-in, see
\ref cExtensions.

\section Vector_Scaling_Using_StarPU_API Vector Scaling Using StarPU's API

This section shows how to achieve the same result as explained in the previous
section using StarPU's standard C API. The full source code for this example is
given in the section "Full source code for the 'Scaling a Vector' example".
\subsection Source_Code_of_Vector_Scaling Source Code of Vector Scaling

Programmers can describe the data layout of their application so that StarPU is
responsible for enforcing data coherency and availability across the machine.
Instead of handling complex (and non-portable) mechanisms to perform data
movements, programmers only declare which piece of data is accessed and/or
modified by a task, and StarPU makes sure that when a computational kernel
starts somewhere (e.g. on a GPU), its data are available locally.

Before submitting such tasks, the programmer first needs to declare the
different pieces of data to StarPU using the <c>starpu_*_data_register</c>
functions. To ease the development of applications for StarPU, it is possible
to describe multiple types of data layout. A type of data layout is called an
<b>interface</b>. There are different predefined interfaces available in
StarPU: here we will consider the <b>vector interface</b>.

The following lines show how to declare an array of <c>NX</c> elements of type
<c>float</c> using the vector interface:
\code{.c}
float vector[NX];

starpu_data_handle_t vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
\endcode

The first argument, called the <b>data handle</b>, is an opaque pointer which
designates the array within StarPU. This is also the structure which is used to
describe which data is used by a task. The second argument is the number of the
node where the data originally resides. Here it is 0 since the <c>vector</c>
array is in the main memory. Then come the pointer <c>vector</c> where the data
can be found in main memory, the number of elements in the vector, and the size
of each element.
The following shows how to construct a StarPU task that will manipulate the
vector and a constant factor.

\code{.c}
float factor = 3.14;
struct starpu_task *task = starpu_task_create();

task->cl = &cl;                   /* Pointer to the codelet defined below */
task->handles[0] = vector_handle; /* First parameter of the codelet */
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->synchronous = 1;

starpu_task_submit(task);
\endcode
Since the factor is a mere constant float value parameter, it does not need a
preliminary registration, and can just be passed through the <c>cl_arg</c>
pointer like in the previous example. The vector parameter is described by its
handle: the handle is given in the <c>task->handles</c> array, and the
<c>modes</c> field of the codelet specifies how the kernel will access the data
(<c>STARPU_R</c> for read-only, <c>STARPU_W</c> for write-only and
<c>STARPU_RW</c> for read and write access).
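Alternatively, although it is not needed here, the factor could itself be
registered as StarPU data with the <b>variable interface</b> and passed as a
second, read-only buffer of the codelet. A sketch:

\code{.c}
starpu_data_handle_t factor_handle;
starpu_variable_data_register(&factor_handle, 0,
                              (uintptr_t)&factor, sizeof(factor));

/* The codelet would then declare .nbuffers = 2 and
   .modes = { STARPU_RW, STARPU_R }. */
task->handles[1] = factor_handle;
\endcode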
The definition of the codelet can be written as follows:

\code{.c}
void scal_cpu_func(void *buffers[], void *cl_arg)
{
  unsigned i;
  float *factor = cl_arg;

  /* length of the vector */
  unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  /* CPU copy of the vector pointer */
  float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);

  for (i = 0; i < n; i++)
    val[i] *= *factor;
}

struct starpu_codelet cl =
{
  .cpu_funcs = { scal_cpu_func, NULL },
  .nbuffers = 1,
  .modes = { STARPU_RW }
};
\endcode
The first argument is an array that gives a description of all the buffers
passed in the <c>task->handles</c> array. The size of this array is given by
the <c>nbuffers</c> field of the codelet structure. For the sake of genericity,
this array contains pointers to the different interfaces describing each
buffer. In the case of the <b>vector interface</b>, the location of the vector
(resp. its length) is accessible through the <c>ptr</c> (resp. <c>nx</c>) field
of this interface. Since the vector is accessed in a read-write fashion, any
modification will automatically affect future accesses to this vector made by
other tasks.

The second argument of the <c>scal_cpu_func</c> function contains a pointer to
the parameters of the codelet (given in <c>task->cl_arg</c>), so that we read
the constant factor from this pointer.
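As a sketch of this last point: if two such tasks are submitted asynchronously
on the same handle, StarPU's default sequential consistency makes the second
one wait for the first, without any explicit synchronization:

\code{.c}
struct starpu_task *task1 = starpu_task_create();
task1->cl = &cl;
task1->handles[0] = vector_handle;
task1->cl_arg = &factor;
task1->cl_arg_size = sizeof(factor);
starpu_task_submit(task1);

/* task2 accesses the same handle in STARPU_RW mode, so StarPU
   automatically serializes it after task1. */
struct starpu_task *task2 = starpu_task_create();
task2->cl = &cl;
task2->handles[0] = vector_handle;
task2->cl_arg = &factor;
task2->cl_arg_size = sizeof(factor);
starpu_task_submit(task2);

starpu_task_wait_for_all();
\endcode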
\subsection Execution_of_Vector_Scaling Execution of Vector Scaling

\verbatim
$ make vector_scal
cc $(pkg-config --cflags starpu-1.1) $(pkg-config --libs starpu-1.1) vector_scal.c -o vector_scal
$ ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
\endverbatim
\section Vector_Scaling_on_an_Hybrid_CPU_GPU_Machine Vector Scaling on a Hybrid CPU/GPU Machine

Contrary to the previous examples, the task submitted in this example may not
only be executed by the CPUs, but also by a CUDA device.

\subsection Definition_of_the_CUDA_Kernel Definition of the CUDA Kernel

The CUDA implementation can be written as follows. It needs to be compiled with
a CUDA compiler such as nvcc, the NVIDIA CUDA compiler driver. It must be noted
that the vector pointer returned by STARPU_VECTOR_GET_PTR is here a pointer in
GPU memory, so that it can be passed as such to the <c>vector_mult_cuda</c>
kernel call.
\code{.c}
#include <starpu.h>

static __global__ void vector_mult_cuda(unsigned n, float *val,
                                        float factor)
{
  unsigned i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n)
    val[i] *= factor;
}

extern "C" void scal_cuda_func(void *buffers[], void *_args)
{
  float *factor = (float *)_args;

  /* length of the vector */
  unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  /* CUDA copy of the vector pointer */
  float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
  unsigned threads_per_block = 64;
  unsigned nblocks = (n + threads_per_block - 1) / threads_per_block;

  vector_mult_cuda<<<nblocks, threads_per_block, 0, starpu_cuda_get_local_stream()>>>
    (n, val, *factor);

  cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
\endcode
\subsection Definition_of_the_OpenCL_Kernel Definition of the OpenCL Kernel

The OpenCL implementation can be written as follows. StarPU provides tools to
compile an OpenCL kernel stored in a file.

\code{.c}
__kernel void vector_mult_opencl(int nx, __global float* val, float factor)
{
  const int i = get_global_id(0);
  if (i < nx) {
    val[i] *= factor;
  }
}
\endcode

Contrary to CUDA and CPU, <c>STARPU_VECTOR_GET_DEV_HANDLE</c> has to be used:
it returns a <c>cl_mem</c> (an OpenCL handle rather than a device pointer),
which can be passed as such to the OpenCL kernel. The difference is important
when using partitioning, see the section Partitioning Data.
\code{.c}
#include <starpu.h>

extern struct starpu_opencl_program programs;

void scal_opencl_func(void *buffers[], void *_args)
{
  float *factor = _args;
  int id, devid, err;      /* OpenCL specific code */
  cl_kernel kernel;        /* OpenCL specific code */
  cl_command_queue queue;  /* OpenCL specific code */
  cl_event event;          /* OpenCL specific code */

  /* length of the vector */
  unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  /* OpenCL copy of the vector pointer */
  cl_mem val = (cl_mem) STARPU_VECTOR_GET_DEV_HANDLE(buffers[0]);

  { /* OpenCL specific code */
    id = starpu_worker_get_id();
    devid = starpu_worker_get_devid(id);

    err = starpu_opencl_load_kernel(&kernel, &queue, &programs,
                                    "vector_mult_opencl", devid); /* Name of the kernel defined above */
    if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

    err = clSetKernelArg(kernel, 0, sizeof(n), &n);
    err |= clSetKernelArg(kernel, 1, sizeof(val), &val);
    err |= clSetKernelArg(kernel, 2, sizeof(*factor), factor);
    if (err) STARPU_OPENCL_REPORT_ERROR(err);
  }

  { /* OpenCL specific code */
    size_t global = n;
    size_t local = 1;
    err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 0, NULL, &event);
    if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
  }

  { /* OpenCL specific code */
    clFinish(queue);
    starpu_opencl_collect_stats(event);
    clReleaseEvent(event);

    starpu_opencl_release_kernel(kernel);
  }
}
\endcode
\subsection Definition_of_the_Main_Code Definition of the Main Code

The CPU implementation is the same as in the previous section.

Here is the source of the main application. You can notice that the fields
<c>cuda_funcs</c> and <c>opencl_funcs</c> of the codelet are set to define the
pointers to the CUDA and OpenCL implementations of the task.
\code{.c}
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <starpu.h>

#define NX 2048

extern void scal_cuda_func(void *buffers[], void *_args);
extern void scal_cpu_func(void *buffers[], void *_args);
extern void scal_opencl_func(void *buffers[], void *_args);

/* Definition of the codelet */
static struct starpu_codelet cl =
{
  .cuda_funcs = { scal_cuda_func, NULL },
  .cpu_funcs = { scal_cpu_func, NULL },
  .opencl_funcs = { scal_opencl_func, NULL },
  .nbuffers = 1,
  .modes = { STARPU_RW }
};

#ifdef STARPU_USE_OPENCL
/* The compiled version of the OpenCL program */
struct starpu_opencl_program programs;
#endif

int main(int argc, char **argv)
{
  float *vector;
  int i, ret;
  float factor = 3.0;
  struct starpu_task *task;
  starpu_data_handle_t vector_handle;

  starpu_init(NULL); /* Initialising StarPU */

#ifdef STARPU_USE_OPENCL
  starpu_opencl_load_opencl_from_file(
    "examples/basic_examples/vector_scal_opencl_codelet.cl",
    &programs, NULL);
#endif

  vector = malloc(NX*sizeof(vector[0]));
  assert(vector);
  for (i = 0; i < NX; i++) vector[i] = i;

  /* Registering data within StarPU */
  starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector,
                              NX, sizeof(vector[0]));

  /* Definition of the task */
  task = starpu_task_create();
  task->cl = &cl;
  task->handles[0] = vector_handle;
  task->cl_arg = &factor;
  task->cl_arg_size = sizeof(factor);

  /* Submitting the task */
  ret = starpu_task_submit(task);
  if (ret == -ENODEV)
  {
    fprintf(stderr, "No worker may execute this task\n");
    return 1;
  }

  /* Waiting for its termination */
  starpu_task_wait_for_all();

  /* Update the vector in RAM */
  starpu_data_acquire(vector_handle, STARPU_R);

  /* Access the data */
  for (i = 0; i < NX; i++)
    fprintf(stderr, "%f ", vector[i]);
  fprintf(stderr, "\n");

  /* Release the RAM view of the data before unregistering it and
     shutting down StarPU */
  starpu_data_release(vector_handle);
  starpu_data_unregister(vector_handle);
  starpu_shutdown();

  return 0;
}
\endcode
\subsection Execution_of_Hybrid_Vector_Scaling Execution of Hybrid Vector Scaling

The Makefile given at the beginning of the section must be extended to give the
rules to compile the CUDA source code. Note that the source file of the OpenCL
kernel does not need to be compiled now: it will be compiled at run-time when
the function starpu_opencl_load_opencl_from_file() is called.

\verbatim
CFLAGS  += $(shell pkg-config --cflags starpu-1.1)
LDFLAGS += $(shell pkg-config --libs starpu-1.1)
CC       = gcc

vector_scal: vector_scal.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o

%.o: %.cu
	nvcc $(CFLAGS) $< -c -o $@

clean:
	rm -f vector_scal *.o
\endverbatim
\verbatim
$ make
\endverbatim

and to execute it, with the default configuration:

\verbatim
$ ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
\endverbatim

or for example, by disabling CPU devices:

\verbatim
$ STARPU_NCPU=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
\endverbatim

or by disabling CUDA devices (which may enable the use of OpenCL, see
\ref Enabling_OpenCL):

\verbatim
$ STARPU_NCUDA=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
\endverbatim
*/