
@c -*-texinfo-*-
@c This file is part of the StarPU Handbook.
@c Copyright (C) 2009--2011 Universit@'e de Bordeaux 1
@c Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
@c Copyright (C) 2011, 2012 Institut National de Recherche en Informatique et Automatique
@c See the file starpu.texi for copying conditions.
@menu
* Defining a new data interface::
* Multiformat Data Interface::
* Task Bundles::
* Task Lists::
* Using Parallel Tasks::
* Defining a new scheduling policy::
* Expert mode::
@end menu
@node Defining a new data interface
@section Defining a new data interface
@menu
* Data Interface API:: Data Interface API
* An example of data interface:: An example of data interface
@end menu
@node Data Interface API
@subsection Data Interface API
@deftp {Data Type} {struct starpu_data_interface_ops}
@anchor{struct starpu_data_interface_ops}
Per-interface data transfer methods.
@table @asis
@item @code{void (*register_data_handle)(starpu_data_handle_t handle, uint32_t home_node, void *data_interface)}
Register an existing interface into a data handle.
@item @code{starpu_ssize_t (*allocate_data_on_node)(void *data_interface, uint32_t node)}
Allocate data for the interface on a given node.
@item @code{void (*free_data_on_node)(void *data_interface, uint32_t node)}
Free the data of the interface on a given node.
@item @code{const struct starpu_data_copy_methods *copy_methods}
RAM/CUDA/SPU/OpenCL synchronous and asynchronous transfer methods.
@item @code{void * (*handle_to_pointer)(starpu_data_handle_t handle, uint32_t node)}
Return the current pointer (if any) for the handle on the given node.
@item @code{size_t (*get_size)(starpu_data_handle_t handle)}
Return an estimation of the size of the data, for performance models.
@item @code{uint32_t (*footprint)(starpu_data_handle_t handle)}
Return a 32-bit footprint which characterizes the data size.
@item @code{int (*compare)(void *data_interface_a, void *data_interface_b)}
Compare the data size of two interfaces.
@item @code{void (*display)(starpu_data_handle_t handle, FILE *f)}
Dump the sizes of a handle to a file.
@item @code{int (*convert_to_gordon)(void *data_interface, uint64_t *ptr, gordon_strideSize_t *ss)}
Convert the data size to the SPU size format. If no SPUs are used, this field can be set to NULL.
@item @code{enum starpu_data_interface_id interfaceid}
An identifier that is unique to each interface.
@item @code{size_t interface_size}
The size of the interface data descriptor.
@item @code{void (*allocate_new_data)(starpu_data_handle_t handle, void **data_interface)}
Create a new data interface of the given type based on the handle @var{handle}.
@end table
@end deftp
@deftp {Data Type} {struct starpu_data_copy_methods}
Defines the per-interface copy methods.
@table @asis
@item @code{int @{ram,cuda,opencl,spu@}_to_@{ram,cuda,opencl,spu@}(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node)}
These 16 functions define how to copy data from the @var{src_interface}
interface on the @var{src_node} node to the @var{dst_interface} interface
on the @var{dst_node} node. They return 0 on success.
@item @code{int (*ram_to_cuda_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cudaStream_t stream)}
Define how to copy data from the @var{src_interface} interface on the
@var{src_node} node (in RAM) to the @var{dst_interface} interface on the
@var{dst_node} node (on a CUDA device), using the given @var{stream}. Return 0
on success.
@item @code{int (*cuda_to_ram_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cudaStream_t stream)}
Define how to copy data from the @var{src_interface} interface on the
@var{src_node} node (on a CUDA device) to the @var{dst_interface} interface on the
@var{dst_node} node (in RAM), using the given @var{stream}. Return 0
on success.
@item @code{int (*cuda_to_cuda_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cudaStream_t stream)}
Define how to copy data from the @var{src_interface} interface on the
@var{src_node} node (on a CUDA device) to the @var{dst_interface} interface on
the @var{dst_node} node (on another CUDA device), using the given @var{stream}.
Return 0 on success.
@item @code{int (*ram_to_opencl_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, /* cl_event * */ void *event)}
Define how to copy data from the @var{src_interface} interface on the
@var{src_node} node (in RAM) to the @var{dst_interface} interface on the
@var{dst_node} node (on an OpenCL device), using @var{event}, a pointer to a
cl_event. Return 0 on success.
@item @code{int (*opencl_to_ram_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, /* cl_event * */ void *event)}
Define how to copy data from the @var{src_interface} interface on the
@var{src_node} node (on an OpenCL device) to the @var{dst_interface} interface
on the @var{dst_node} node (in RAM), using the given @var{event}, a pointer to
a cl_event. Return 0 on success.
@item @code{int (*opencl_to_opencl_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, /* cl_event * */ void *event)}
Define how to copy data from the @var{src_interface} interface on the
@var{src_node} node (on an OpenCL device) to the @var{dst_interface} interface
on the @var{dst_node} node (on another OpenCL device), using the given
@var{event}, a pointer to a cl_event. Return 0 on success.
@end table
@end deftp
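As an illustration, here is a minimal sketch of a RAM-to-RAM copy method for a hypothetical interface holding a plain buffer. The structure @code{my_interface}, its fields and the @code{my_} names are assumptions made for this example; only the @code{ram_to_ram} field and its prototype come from the structure described above.
@cartouche
@smallexample
#include <string.h>
#include <starpu.h>

/* Hypothetical interface descriptor: a raw buffer of nx bytes. */
struct my_interface @{
    uintptr_t ptr;
    size_t nx;
@};

/* Both interfaces live in main memory: a plain memcpy is enough. */
static int my_copy_ram_to_ram(void *src_interface, unsigned src_node,
                              void *dst_interface, unsigned dst_node)
@{
    struct my_interface *src = (struct my_interface *) src_interface;
    struct my_interface *dst = (struct my_interface *) dst_interface;

    (void) src_node; (void) dst_node; /* both nodes are main memory here */

    memcpy((void *) dst->ptr, (void *) src->ptr, src->nx);
    return 0; /* 0 means success */
@}

static const struct starpu_data_copy_methods my_copy_methods = @{
    .ram_to_ram = my_copy_ram_to_ram,
@};
@end smallexample
@end cartouche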
@deftypefun uint32_t starpu_crc32_be_n ({void *}@var{input}, size_t @var{n}, uint32_t @var{inputcrc})
Compute the CRC of a byte buffer seeded by the @var{inputcrc} "current
state". The return value should be considered as the new "current
state" for future CRC computation. This is used for computing data size
footprints.
@end deftypefun
@deftypefun uint32_t starpu_crc32_be (uint32_t @var{input}, uint32_t @var{inputcrc})
Compute the CRC of a 32-bit number seeded by the @var{inputcrc} "current
state". The return value should be considered as the new "current
state" for future CRC computation. This is used for computing data size
footprints.
@end deftypefun
@deftypefun uint32_t starpu_crc32_string ({char *}@var{str}, uint32_t @var{inputcrc})
Compute the CRC of a string seeded by the @var{inputcrc} "current state".
The return value should be considered as the new "current state" for
future CRC computation. This is used for computing data size footprints.
@end deftypefun
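As an illustration of how these helpers are typically chained, here is a sketch of a @code{footprint} method for the hypothetical @code{my_interface} used above; retrieving the interface descriptor through @code{starpu_data_get_interface_on_node} and the fields being hashed are assumptions of the example.
@cartouche
@smallexample
/* Hash the characteristics that determine the data size: each CRC call
   takes the previous "current state" and returns the new one. */
static uint32_t my_footprint(starpu_data_handle_t handle)
@{
    struct my_interface *i =
        (struct my_interface *) starpu_data_get_interface_on_node(handle, 0);

    uint32_t crc = 0;
    crc = starpu_crc32_string("my_interface", crc);
    crc = starpu_crc32_be((uint32_t) i->nx, crc);
    return crc;
@}
@end smallexample
@end cartouche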
@node An example of data interface
@subsection An example of data interface
@deftypefun int starpu_data_interface_get_next_id ()
Returns the next available id for a newly created data interface.
@end deftypefun
TODO
See @code{src/datawizard/interfaces/vector_interface.c} for now.
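In the meantime, the following sketch shows how the pieces above could be wired together for the hypothetical @code{my_interface}. The @code{my_} methods not shown in the earlier snippets (registration, allocation, size) are assumed to exist; @code{starpu_data_register} is the usual entry point for registering a handle with custom operations.
@cartouche
@smallexample
static struct starpu_data_interface_ops my_interface_ops = @{
    .register_data_handle  = my_register_data_handle, /* assumed helpers */
    .allocate_data_on_node = my_allocate_data_on_node,
    .free_data_on_node     = my_free_data_on_node,
    .copy_methods          = &my_copy_methods,
    .get_size              = my_get_size,
    .footprint             = my_footprint,
    .interface_size        = sizeof(struct my_interface),
@};

void my_data_register(starpu_data_handle_t *handle, uint32_t home_node,
                      uintptr_t ptr, size_t nx)
@{
    struct my_interface my_interface = @{ .ptr = ptr, .nx = nx @};

    /* Pick a fresh interface identifier the first time around. */
    static int id_assigned = 0;
    if (!id_assigned)
    @{
        my_interface_ops.interfaceid = starpu_data_interface_get_next_id();
        id_assigned = 1;
    @}

    starpu_data_register(handle, home_node, &my_interface, &my_interface_ops);
@}
@end smallexample
@end cartouche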
@node Multiformat Data Interface
@section Multiformat Data Interface
@deftp {Data Type} {struct starpu_multiformat_data_interface_ops}
Describes a multiformat data interface. The different fields are:
@table @asis
@item @code{size_t cpu_elemsize}
the size of each element on CPUs,
@item @code{size_t opencl_elemsize}
the size of each element on OpenCL devices,
@item @code{struct starpu_codelet *cpu_to_opencl_cl}
pointer to a codelet which converts from CPU to OpenCL,
@item @code{struct starpu_codelet *opencl_to_cpu_cl}
pointer to a codelet which converts from OpenCL to CPU,
@item @code{size_t cuda_elemsize}
the size of each element on CUDA devices,
@item @code{struct starpu_codelet *cpu_to_cuda_cl}
pointer to a codelet which converts from CPU to CUDA,
@item @code{struct starpu_codelet *cuda_to_cpu_cl}
pointer to a codelet which converts from CUDA to CPU.
@end table
@end deftp
@deftypefun void starpu_multiformat_data_register (starpu_data_handle_t *@var{handle}, uint32_t @var{home_node}, void *@var{ptr}, uint32_t @var{nobjects}, struct starpu_multiformat_data_interface_ops *@var{format_ops})
Register a piece of data that can be represented in different ways, depending upon
the processing unit that manipulates it. It allows the programmer, for instance, to
use an array of structures when working on a CPU, and a structure of arrays when
working on a GPU.
@var{nobjects} is the number of elements in the data. @var{format_ops} describes
the format.
@end deftypefun
@defmac STARPU_MULTIFORMAT_GET_CPU_PTR ({void *}@var{interface})
Returns the local pointer to the data with CPU format.
@end defmac
@defmac STARPU_MULTIFORMAT_GET_CUDA_PTR ({void *}@var{interface})
Returns the local pointer to the data with CUDA format.
@end defmac
@defmac STARPU_MULTIFORMAT_GET_OPENCL_PTR ({void *}@var{interface})
Returns the local pointer to the data with OpenCL format.
@end defmac
@defmac STARPU_MULTIFORMAT_GET_NX ({void *}@var{interface})
Returns the number of elements in the data.
@end defmac
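For instance, here is a sketch of registering data seen as an array of structures on CPUs and as a structure of arrays on CUDA devices. The conversion codelets @code{cpu_to_cuda_cl} and @code{cuda_to_cpu_cl}, the element type @code{struct point} and the CPU kernel are assumptions made for this example.
@cartouche
@smallexample
#include <starpu.h>

struct point @{ float x; float y; @}; /* CPU-side layout (array of structures) */

#define NX 1024
static struct point array[NX];

extern struct starpu_codelet cpu_to_cuda_cl, cuda_to_cpu_cl; /* conversion codelets */

static struct starpu_multiformat_data_interface_ops format_ops = @{
    .cpu_elemsize   = sizeof(struct point),
    .cuda_elemsize  = 2 * sizeof(float), /* structure-of-arrays layout on CUDA */
    .cpu_to_cuda_cl = &cpu_to_cuda_cl,
    .cuda_to_cpu_cl = &cuda_to_cpu_cl,
@};

static starpu_data_handle_t handle;

void register_points(void)
@{
    starpu_multiformat_data_register(&handle, 0, array, NX, &format_ops);
@}

/* A CPU implementation simply retrieves the CPU-format pointer. */
void scale_points_cpu(void *buffers[], void *arg)
@{
    struct point *aos = STARPU_MULTIFORMAT_GET_CPU_PTR(buffers[0]);
    unsigned n = STARPU_MULTIFORMAT_GET_NX(buffers[0]);
    unsigned i;
    (void) arg;
    for (i = 0; i < n; i++)
    @{
        aos[i].x *= 2.0f;
        aos[i].y *= 2.0f;
    @}
@}
@end smallexample
@end cartouche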
@node Task Bundles
@section Task Bundles
@deftp {Data Type} {starpu_task_bundle_t}
Opaque structure describing a list of tasks that should be scheduled
on the same worker whenever possible. It must be considered as a
hint given to the scheduler, as there is no guarantee that the tasks will be
executed on the same worker.
@end deftp
@deftypefun void starpu_task_bundle_create ({starpu_task_bundle_t *}@var{bundle})
Factory function creating and initializing @var{bundle}. When the call returns, the needed memory is allocated and @var{bundle} is ready to use.
@end deftypefun
@deftypefun int starpu_task_bundle_insert (starpu_task_bundle_t @var{bundle}, {struct starpu_task *}@var{task})
Insert @var{task} into @var{bundle}. Until @var{task} is removed from @var{bundle}, its expected length and data transfer time will be considered along with those of the other tasks of @var{bundle}.
This function must not be called if @var{bundle} is already closed and/or @var{task} is already submitted.
@end deftypefun
@deftypefun int starpu_task_bundle_remove (starpu_task_bundle_t @var{bundle}, {struct starpu_task *}@var{task})
Remove @var{task} from @var{bundle}.
Of course, @var{task} must have been previously inserted into @var{bundle}.
This function must not be called if @var{bundle} is already closed and/or @var{task} is already submitted. Doing so would result in undefined behaviour.
@end deftypefun
@deftypefun void starpu_task_bundle_close (starpu_task_bundle_t @var{bundle})
Inform the runtime that the user will not modify @var{bundle} anymore, i.e. no more tasks will be inserted into or removed from it. The runtime can then destroy it when possible.
@end deftypefun
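A usage sketch, assuming @code{task1} and @code{task2} are two already-created, not yet submitted tasks that would benefit from running on the same worker:
@cartouche
@smallexample
void submit_bundled(struct starpu_task *task1, struct starpu_task *task2)
@{
    starpu_task_bundle_t bundle;
    starpu_task_bundle_create(&bundle);

    /* Tasks must not be submitted yet when they are inserted. */
    starpu_task_bundle_insert(bundle, task1);
    starpu_task_bundle_insert(bundle, task2);

    /* No more insertions or removals: StarPU may destroy the bundle
       once all its tasks have been dealt with. */
    starpu_task_bundle_close(bundle);

    starpu_task_submit(task1);
    starpu_task_submit(task2);
@}
@end smallexample
@end cartouche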
@node Task Lists
@section Task Lists
@deftp {Data Type} {struct starpu_task_list}
Stores a doubly-linked list of tasks.
@end deftp
@deftypefun void starpu_task_list_init ({struct starpu_task_list *}@var{list})
Initialize a list structure.
@end deftypefun
@deftypefun void starpu_task_list_push_front ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Push a task at the front of a list.
@end deftypefun
@deftypefun void starpu_task_list_push_back ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Push a task at the back of a list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_front ({struct starpu_task_list *}@var{list})
Get the task at the front of the list (without removing it).
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_back ({struct starpu_task_list *}@var{list})
Get the task at the back of the list (without removing it).
@end deftypefun
@deftypefun int starpu_task_list_empty ({struct starpu_task_list *}@var{list})
Test whether a list is empty.
@end deftypefun
@deftypefun void starpu_task_list_erase ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Remove an element from the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_pop_front ({struct starpu_task_list *}@var{list})
Remove and return the element at the front of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_pop_back ({struct starpu_task_list *}@var{list})
Remove and return the element at the back of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_begin ({struct starpu_task_list *}@var{list})
Get the first task of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_end ({struct starpu_task_list *}@var{list})
Get the end of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_next ({struct starpu_task *}@var{task})
Get the task following @var{task} in the list. This is not erase-safe.
@end deftypefun
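A sketch of typical list manipulation, assuming @code{task_a} and @code{task_b} have been created elsewhere:
@cartouche
@smallexample
unsigned enqueue_and_drain(struct starpu_task *task_a, struct starpu_task *task_b)
@{
    struct starpu_task_list list;
    struct starpu_task *task;
    unsigned n = 0;

    starpu_task_list_init(&list);
    starpu_task_list_push_back(&list, task_a);
    starpu_task_list_push_front(&list, task_b);

    /* Iterate without removing anything (not erase-safe). */
    for (task = starpu_task_list_begin(&list);
         task != starpu_task_list_end(&list);
         task = starpu_task_list_next(task))
        n++;

    /* Drain the list from the front. */
    while (!starpu_task_list_empty(&list))
        task = starpu_task_list_pop_front(&list);

    return n;
@}
@end smallexample
@end cartouche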
@node Using Parallel Tasks
@section Using Parallel Tasks
These functions are used by parallel tasks:
@deftypefun int starpu_combined_worker_get_size (void)
Return the size of the current combined worker, i.e. the total number of CPUs
running the same task in the case of SPMD parallel tasks, or the total number
of threads that the task is allowed to start in the case of FORKJOIN parallel
tasks.
@end deftypefun
@deftypefun int starpu_combined_worker_get_rank (void)
Return the rank of the current thread within the combined worker. This is
typically used in SPMD parallel tasks, to know which part of the task to work on.
@end deftypefun
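For instance, a sketch of the CPU implementation of an SPMD parallel task: each worker taking part in the combined worker calls the same function and uses its rank to pick a contiguous chunk of a vector. The kernel name and the doubling operation are illustrative; the accessors come from the standard vector interface.
@cartouche
@smallexample
void scal_spmd_cpu_func(void *buffers[], void *args)
@{
    int rank = starpu_combined_worker_get_rank();
    int size = starpu_combined_worker_get_size();

    float *v = (float *) STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);

    /* Split [0, n) into `size' contiguous chunks, one per thread. */
    unsigned chunk = (n + size - 1) / size;
    unsigned begin = rank * chunk;
    unsigned end = begin + chunk > n ? n : begin + chunk;
    unsigned i;

    (void) args;
    for (i = begin; i < end; i++)
        v[i] *= 2.0f;
@}
@end smallexample
@end cartouche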
The following functions are mostly used by schedulers which support parallel tasks.
@deftypefun unsigned starpu_combined_worker_get_count (void)
Return the number of different combined workers.
@end deftypefun
@deftypefun int starpu_combined_worker_get_id (void)
Return the identifier of the current combined worker.
@end deftypefun
@deftypefun int starpu_combined_worker_assign_workerid (int @var{nworkers}, int @var{workerid_array}[])
Register a new combined worker and get its identifier.
@end deftypefun
@deftypefun int starpu_combined_worker_get_description (int @var{workerid}, {int *}@var{worker_size}, {int **}@var{combined_workerid})
Get the description of a combined worker.
@end deftypefun
@deftypefun int starpu_combined_worker_can_execute_task (unsigned @var{workerid}, {struct starpu_task *}@var{task}, unsigned @var{nimpl})
Variant of @code{starpu_worker_can_execute_task} compatible with combined workers.
@end deftypefun
@node Defining a new scheduling policy
@section Defining a new scheduling policy
TODO
A full example showing how to define a new scheduling policy is available in
the StarPU sources in the directory @code{examples/scheduler/}.
@menu
* Scheduling Policy API:: Scheduling Policy API
* Source code::
@end menu
@node Scheduling Policy API
@subsection Scheduling Policy API
While StarPU comes with a variety of scheduling policies (@pxref{Task
scheduling policy}), it may sometimes be desirable to implement custom
policies to address specific problems. The API described below allows
users to write their own scheduling policy.
@deftp {Data Type} {struct starpu_machine_topology}
@table @asis
@item @code{unsigned nworkers}
Total number of workers.
@item @code{unsigned ncombinedworkers}
Total number of combined workers.
@item @code{hwloc_topology_t hwtopology}
Topology as detected by hwloc.
To maintain ABI compatibility when hwloc is not available, the field
is replaced with @code{void *dummy}.
@item @code{unsigned nhwcpus}
Total number of CPUs, as detected by the topology code. May be different from
the actual number of CPU workers.
@item @code{unsigned nhwcudagpus}
Total number of CUDA devices, as detected. May be different from the actual
number of CUDA workers.
@item @code{unsigned nhwopenclgpus}
Total number of OpenCL devices, as detected. May be different from the actual
number of OpenCL workers.
@item @code{unsigned ncpus}
Actual number of CPU workers used by StarPU.
@item @code{unsigned ncudagpus}
Actual number of CUDA workers used by StarPU.
@item @code{unsigned nopenclgpus}
Actual number of OpenCL workers used by StarPU.
@item @code{unsigned ngordon_spus}
Actual number of Gordon workers used by StarPU.
@item @code{unsigned workers_bindid[STARPU_NMAXWORKERS]}
Indicates the successive CPU identifiers that should be used to bind the
workers. It is either filled according to the user's explicit
parameters (from @code{starpu_conf}) or according to the @code{STARPU_WORKERS_CPUID}
environment variable. Otherwise, a round-robin policy is used to distribute the workers
over the CPUs.
@item @code{unsigned workers_cuda_gpuid[STARPU_NMAXWORKERS]}
Indicates the successive CUDA device identifiers that should be used by the CUDA
driver. It is either filled according to the user's explicit parameters (from
@code{starpu_conf}) or according to the @code{STARPU_WORKERS_CUDAID} environment variable. Otherwise,
they are taken in ID order.
@item @code{unsigned workers_opencl_gpuid[STARPU_NMAXWORKERS]}
Indicates the successive OpenCL device identifiers that should be used by the OpenCL
driver. It is either filled according to the user's explicit parameters (from
@code{starpu_conf}) or according to the @code{STARPU_WORKERS_OPENCLID} environment variable. Otherwise,
they are taken in ID order.
@end table
@end deftp
@deftp {Data Type} {struct starpu_sched_policy}
This structure contains all the methods that implement a scheduling policy. An
application may specify which scheduling strategy to use in the @code{sched_policy}
field of the @code{starpu_conf} structure passed to the @code{starpu_init}
function. The different fields are:
@table @asis
@item @code{void (*init_sched)(struct starpu_machine_topology *, struct starpu_sched_policy *)}
Initialize the scheduling policy.
@item @code{void (*deinit_sched)(struct starpu_machine_topology *, struct starpu_sched_policy *)}
Clean up the scheduling policy.
@item @code{int (*push_task)(struct starpu_task *)}
Insert a task into the scheduler.
@item @code{void (*push_task_notify)(struct starpu_task *, int workerid)}
Notify the scheduler that a task was pushed on a given worker. This method is
called when a task that was explicitly assigned to a worker becomes ready and
is about to be executed by the worker. This method therefore makes it possible to keep
the state of the scheduler coherent even when StarPU bypasses the scheduling
strategy.
@item @code{struct starpu_task *(*pop_task)(void)} (optional)
Get a task from the scheduler. The mutex associated to the worker is already
taken when this method is called. If this method is defined as @code{NULL}, the
worker will only execute tasks from its local queue. In this case, the
@code{push_task} method should use the @code{starpu_push_local_task} method to
assign tasks to the different workers.
@item @code{struct starpu_task *(*pop_every_task)(void)}
Remove all available tasks from the scheduler (tasks are chained by means
of the @code{prev} and @code{next} fields of the @code{starpu_task} structure). The mutex associated
to the worker is already taken when this method is called. This is currently
only used by the Gordon driver.
@item @code{void (*pre_exec_hook)(struct starpu_task *)} (optional)
This method is called every time a task is starting.
@item @code{void (*post_exec_hook)(struct starpu_task *)} (optional)
This method is called every time a task has been executed.
@item @code{const char *policy_name} (optional)
Name of the policy.
@item @code{const char *policy_description} (optional)
Description of the policy.
@end table
@end deftp
@deftypefun void starpu_worker_set_sched_condition (int @var{workerid}, pthread_cond_t *@var{sched_cond}, pthread_mutex_t *@var{sched_mutex})
This function specifies the condition variable associated to a worker.
When there is no available task for a worker, StarPU blocks this worker on a
condition variable. This function specifies which condition variable (and the
associated mutex) should be used to block (and to wake up) a worker. Note that
multiple workers may use the same condition variable. For instance, in the case
of a scheduling strategy with a single task queue, the same condition variable
would be used to block and wake up all workers.
The initialization method of a scheduling strategy (@code{init_sched}) must
call this function once per worker.
@end deftypefun
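For example, a sketch of an initialization method for a strategy with a single shared task queue, where every worker blocks on the same condition variable; the @code{dummy_} names are assumptions reused in the source code example below.
@cartouche
@smallexample
#include <pthread.h>
#include <starpu.h>

static pthread_cond_t dummy_sched_cond;
static pthread_mutex_t dummy_sched_mutex;

static void init_dummy_sched(struct starpu_machine_topology *topology,
                             struct starpu_sched_policy *policy)
@{
    unsigned workerid;
    (void) policy;

    pthread_cond_init(&dummy_sched_cond, NULL);
    pthread_mutex_init(&dummy_sched_mutex, NULL);

    /* The same condition variable/mutex pair is registered for every
       worker, which is allowed by the function described above. */
    for (workerid = 0; workerid < topology->nworkers; workerid++)
        starpu_worker_set_sched_condition(workerid,
                                          &dummy_sched_cond,
                                          &dummy_sched_mutex);
@}
@end smallexample
@end cartouche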
@deftypefun void starpu_sched_set_min_priority (int @var{min_prio})
Defines the minimum priority level supported by the scheduling policy. The
default minimum priority level is the same as the default priority level, which
is 0 by convention. The application may access that value by calling the
@code{starpu_sched_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly by the application.
@end deftypefun
@deftypefun void starpu_sched_set_max_priority (int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly by the application.
@end deftypefun
@deftypefun int starpu_sched_get_min_priority (void)
Returns the current minimum priority level supported by the
scheduling policy.
@end deftypefun
@deftypefun int starpu_sched_get_max_priority (void)
Returns the current maximum priority level supported by the
scheduling policy.
@end deftypefun
@deftypefun int starpu_push_local_task (int @var{workerid}, {struct starpu_task} *@var{task}, int @var{back})
The scheduling policy may put tasks directly into a worker's local queue so
that it is not always necessary to create its own queue when the local queue
is sufficient. If @var{back} is not zero, @var{task} is put at the back of the queue,
where the worker will pop tasks first. Setting @var{back} to 0 therefore ensures
a FIFO ordering.
@end deftypefun
@deftypefun int starpu_worker_can_execute_task (unsigned @var{workerid}, {struct starpu_task *}@var{task}, unsigned @var{nimpl})
Check if the worker specified by @var{workerid} can execute the codelet. Schedulers need to call it before assigning a task to a worker, otherwise the task may fail to execute.
@end deftypefun
@deftypefun double starpu_timing_now (void)
Return the current date in µs.
@end deftypefun
@deftypefun double starpu_task_expected_length ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the expected task duration in µs.
@end deftypefun
@deftypefun double starpu_worker_get_relative_speedup ({enum starpu_perf_archtype} @var{perf_archtype})
Returns an estimated speedup factor relative to CPU speed.
@end deftypefun
@deftypefun double starpu_task_expected_data_transfer_time (uint32_t @var{memory_node}, {struct starpu_task *}@var{task})
Returns the expected data transfer time in µs.
@end deftypefun
@deftypefun double starpu_data_expected_transfer_time (starpu_data_handle_t @var{handle}, unsigned @var{memory_node}, {enum starpu_access_mode} @var{mode})
Predict the transfer time (in µs) to move @var{handle} to a memory node.
@end deftypefun
@deftypefun double starpu_task_expected_power ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the expected power consumption in J.
@end deftypefun
@deftypefun double starpu_task_expected_conversion_time ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the expected conversion time in ms (multiformat interface only).
@end deftypefun
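A scheduler will typically combine these estimations to compare candidate workers. The following sketch assumes that @code{starpu_worker_get_perf_archtype} and @code{starpu_worker_get_memory_node} are available from the main worker API; the helper name and the simple additive model are illustrative.
@cartouche
@smallexample
/* Rough cost (in µs) of running `task' on `workerid': expected
   execution time plus expected data transfer time to that worker. */
static double estimated_cost(struct starpu_task *task, int workerid, unsigned nimpl)
@{
    enum starpu_perf_archtype arch = starpu_worker_get_perf_archtype(workerid);
    unsigned memory_node = starpu_worker_get_memory_node(workerid);

    double length   = starpu_task_expected_length(task, arch, nimpl);
    double transfer = starpu_task_expected_data_transfer_time(memory_node, task);

    return length + transfer;
@}
@end smallexample
@end cartouche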
@node Source code
@subsection Source code
@cartouche
@smallexample
static struct starpu_sched_policy dummy_sched_policy = @{
    .init_sched = init_dummy_sched,
    .deinit_sched = deinit_dummy_sched,
    .push_task = push_task_dummy,
    .push_prio_task = NULL,
    .pop_task = pop_task_dummy,
    .post_exec_hook = NULL,
    .pop_every_task = NULL,
    .policy_name = "dummy",
    .policy_description = "dummy scheduling strategy"
@};
@end smallexample
@end cartouche
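The scheduling methods referenced by this structure could, for instance, keep every task in a single shared @code{struct starpu_task_list}. The following sketch of @code{push_task_dummy} and @code{pop_task_dummy} assumes that the shared queue is initialized with @code{starpu_task_list_init} in @code{init_dummy_sched}, and that the shared mutex and condition variable are set up there as in the earlier sketch.
@cartouche
@smallexample
static struct starpu_task_list dummy_queue; /* initialized in init_dummy_sched */

static int push_task_dummy(struct starpu_task *task)
@{
    pthread_mutex_lock(&dummy_sched_mutex);
    starpu_task_list_push_back(&dummy_queue, task);
    /* Wake up the workers blocked on the shared condition variable. */
    pthread_cond_broadcast(&dummy_sched_cond);
    pthread_mutex_unlock(&dummy_sched_mutex);
    return 0;
@}

static struct starpu_task *pop_task_dummy(void)
@{
    /* The worker's scheduling mutex (here the shared one) is already
       held when this method is called. */
    return starpu_task_list_pop_front(&dummy_queue);
@}
@end smallexample
@end cartouche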
@node Expert mode
@section Expert mode
@deftypefun void starpu_wake_all_blocked_workers (void)
Wake all the workers, so they can inspect data requests and task submissions
again.
@end deftypefun
@deftypefun int starpu_progression_hook_register (unsigned (*@var{func})(void *arg), void *@var{arg})
Register a progression hook, to be called when workers are idle.
@end deftypefun
@deftypefun void starpu_progression_hook_deregister (int @var{hook_id})
Unregister a given progression hook.
@end deftypefun
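For instance, a sketch of a hook polling an external communication library while workers are otherwise idle; @code{my_poll_network} is an assumption of the example, and the exact meaning of the hook's return value is not detailed here.
@cartouche
@smallexample
static int hook_id;

static unsigned my_hook(void *arg)
@{
    (void) arg;
    my_poll_network(); /* assumed external polling routine */
    return 0;          /* return value semantics not detailed here */
@}

void start_polling(void)
@{
    hook_id = starpu_progression_hook_register(my_hook, NULL);
@}

void stop_polling(void)
@{
    starpu_progression_hook_deregister(hook_id);
@}
@end smallexample
@end cartouche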