advanced-api.texi 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575
  1. @c -*-texinfo-*-
  2. @c This file is part of the StarPU Handbook.
  3. @c Copyright (C) 2009--2011 Universit@'e de Bordeaux 1
  4. @c Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. @c Copyright (C) 2011, 2012 Institut National de Recherche en Informatique et Automatique
  6. @c See the file starpu.texi for copying conditions.
  7. @menu
  8. * Defining a new data interface::
  9. * Multiformat Data Interface::
  10. * Task Bundles::
  11. * Task Lists::
  12. * Using Parallel Tasks::
  13. * Defining a new scheduling policy::
  14. * Expert mode::
  15. @end menu
  16. @node Defining a new data interface
  17. @section Defining a new data interface
  18. @menu
  19. * Data Interface API:: Data Interface API
  20. * An example of data interface:: An example of data interface
  21. @end menu
  22. @node Data Interface API
  23. @subsection Data Interface API
  24. @deftp {Data Type} {struct starpu_data_interface_ops}
  25. @anchor{struct starpu_data_interface_ops}
  26. Per-interface data transfer methods.
  27. @table @asis
  28. @item @code{void (*register_data_handle)(starpu_data_handle_t handle, uint32_t home_node, void *data_interface)}
  29. Register an existing interface into a data handle.
  30. @item @code{starpu_ssize_t (*allocate_data_on_node)(void *data_interface, uint32_t node)}
  31. Allocate data for the interface on a given node.
  32. @item @code{ void (*free_data_on_node)(void *data_interface, uint32_t node)}
  33. Free data of the interface on a given node.
  34. @item @code{ const struct starpu_data_copy_methods *copy_methods}
  35. ram/cuda/spu/opencl synchronous and asynchronous transfer methods.
  36. @item @code{ void * (*handle_to_pointer)(starpu_data_handle_t handle, uint32_t node)}
  37. Return the current pointer (if any) for the handle on the given node.
  38. @item @code{ size_t (*get_size)(starpu_data_handle_t handle)}
  39. Return an estimation of the size of data, for performance models.
  40. @item @code{ uint32_t (*footprint)(starpu_data_handle_t handle)}
  41. Return a 32bit footprint which characterizes the data size.
  42. @item @code{ int (*compare)(void *data_interface_a, void *data_interface_b)}
  43. Compare the data size of two interfaces.
  44. @item @code{ void (*display)(starpu_data_handle_t handle, FILE *f)}
  45. Dump the sizes of a handle to a file.
  46. @item @code{ int (*convert_to_gordon)(void *data_interface, uint64_t *ptr, gordon_strideSize_t *ss)}
  47. Convert the data size to the spu size format. If no SPUs are used, this field can be set to NULL.
  48. @item @code{enum starpu_data_interface_id interfaceid}
  49. An identifier that is unique to each interface.
  50. @item @code{size_t interface_size}
  51. The size of the interface data descriptor.
  52. @item @code{ void (*allocate_new_data)(starpu_data_handle_t handle, void **data_interface)}
  53. Create a new data interface of the given type based on the handle @var{handle}.
  54. @end table
  55. @end deftp
  56. @deftp {Data Type} {struct starpu_data_copy_methods}
  57. Defines the per-interface methods.
  58. @table @asis
  59. @item @code{int @{ram,cuda,opencl,spu@}_to_@{ram,cuda,opencl,spu@}(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node)}
  60. These 16 functions define how to copy data from the @var{src_interface}
  61. interface on the @var{src_node} node to the @var{dst_interface} interface
  62. on the @var{dst_node} node. They return 0 on success.
  63. @item @code{int (*ram_to_cuda_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cudaStream_t stream)}
  64. Define how to copy data from the @var{src_interface} interface on the
  65. @var{src_node} node (in RAM) to the @var{dst_interface} interface on the
  66. @var{dst_node} node (on a CUDA device), using the given @var{stream}. Return 0
  67. on success.
  68. @item @code{int (*cuda_to_ram_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cudaStream_t stream)}
  69. Define how to copy data from the @var{src_interface} interface on the
  70. @var{src_node} node (on a CUDA device) to the @var{dst_interface} interface on the
  71. @var{dst_node} node (in RAM), using the given @var{stream}. Return 0
  72. on success.
  73. @item @code{int (*cuda_to_cuda_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cudaStream_t stream)}
  74. Define how to copy data from the @var{src_interface} interface on the
  75. @var{src_node} node (on a CUDA device) to the @var{dst_interface} interface on
  76. the @var{dst_node} node (on another CUDA device), using the given @var{stream}.
  77. Return 0 on success.
  78. @item @code{int (*ram_to_opencl_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, /* cl_event * */ void *event)}
  79. Define how to copy data from the @var{src_interface} interface on the
  80. @var{src_node} node (in RAM) to the @var{dst_interface} interface on the
  81. @var{dst_node} node (on an OpenCL device), using @var{event}, a pointer to a
  82. cl_event. Return 0 on success.
  83. @item @code{int (*opencl_to_ram_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, /* cl_event * */ void *event)}
  84. Define how to copy data from the @var{src_interface} interface on the
  85. @var{src_node} node (on an OpenCL device) to the @var{dst_interface} interface
  86. on the @var{dst_node} node (in RAM), using the given @var{event}, a pointer to
  87. a cl_event. Return 0 on success.
  88. @item @code{int (*opencl_to_opencl_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, /* cl_event * */ void *event)}
  89. Define how to copy data from the @var{src_interface} interface on the
  90. @var{src_node} node (on an OpenCL device) to the @var{dst_interface} interface
  91. on the @var{dst_node} node (on another OpenCL device), using the given
  92. @var{event}, a pointer to a cl_event. Return 0 on success.
  93. @end table
  94. @end deftp
  95. @deftypefun uint32_t starpu_crc32_be_n ({void *}@var{input}, size_t @var{n}, uint32_t @var{inputcrc})
  96. Compute the CRC of a byte buffer seeded by the inputcrc "current
  97. state". The return value should be considered as the new "current
  98. state" for future CRC computation. This is used for computing data size
  99. footprint.
  100. @end deftypefun
  101. @deftypefun uint32_t starpu_crc32_be (uint32_t @var{input}, uint32_t @var{inputcrc})
  102. Compute the CRC of a 32bit number seeded by the inputcrc "current
  103. state". The return value should be considered as the new "current
  104. state" for future CRC computation. This is used for computing data size
  105. footprint.
  106. @end deftypefun
  107. @deftypefun uint32_t starpu_crc32_string ({char *}@var{str}, uint32_t @var{inputcrc})
  108. Compute the CRC of a string seeded by the inputcrc "current state".
  109. The return value should be considered as the new "current state" for
  110. future CRC computation. This is used for computing data size footprint.
  111. @end deftypefun
  112. @node An example of data interface
  113. @subsection An example of data interface
  114. TODO
  115. See @code{src/datawizard/interfaces/vector_interface.c} for now.
  116. @node Multiformat Data Interface
  117. @section Multiformat Data Interface
  118. @deftp {Data Type} {struct starpu_multiformat_data_interface_ops}
  119. todo. The different fields are:
  120. @table @asis
  121. @item @code{size_t cpu_elemsize}
  122. the size of each element on CPUs,
  123. @item @code{size_t opencl_elemsize}
  124. the size of each element on OpenCL devices,
  125. @item @code{struct starpu_codelet *cpu_to_opencl_cl}
  126. pointer to a codelet which converts from CPU to OpenCL
  127. @item @code{struct starpu_codelet *opencl_to_cpu_cl}
  128. pointer to a codelet which converts from OpenCL to CPU
  129. @item @code{size_t cuda_elemsize}
  130. the size of each element on CUDA devices,
  131. @item @code{struct starpu_codelet *cpu_to_cuda_cl}
  132. pointer to a codelet which converts from CPU to CUDA
  133. @item @code{struct starpu_codelet *cuda_to_cpu_cl}
  134. pointer to a codelet which converts from CUDA to CPU
  135. @end table
  136. @end deftp
  137. @deftypefun void starpu_multiformat_data_register (starpu_data_handle_t *@var{handle}, uint32_t @var{home_node}, void *@var{ptr}, uint32_t @var{nobjects}, struct starpu_multiformat_data_interface_ops *@var{format_ops})
  138. Register a piece of data that can be represented in different ways, depending upon
  139. the processing unit that manipulates it. It allows the programmer, for instance, to
  140. use an array of structures when working on a CPU, and a structure of arrays when
  141. working on a GPU.
  142. @var{nobjects} is the number of elements in the data. @var{format_ops} describes
  143. the format.
  144. @end deftypefun
  145. @defmac STARPU_MULTIFORMAT_GET_CPU_PTR ({void *}@var{interface})
  146. returns the local pointer to the data with CPU format.
  147. @end defmac
  148. @defmac STARPU_MULTIFORMAT_GET_CUDA_PTR ({void *}@var{interface})
  149. returns the local pointer to the data with CUDA format.
  150. @end defmac
  151. @defmac STARPU_MULTIFORMAT_GET_OPENCL_PTR ({void *}@var{interface})
  152. returns the local pointer to the data with OpenCL format.
  153. @end defmac
  154. @defmac STARPU_MULTIFORMAT_GET_NX ({void *}@var{interface})
  155. returns the number of elements in the data.
  156. @end defmac
  157. @node Task Bundles
  158. @section Task Bundles
  159. @deftp {Data Type} {starpu_task_bundle_t}
  160. Opaque structure describing a list of tasks that should be scheduled
  161. on the same worker whenever it's possible. It must be considered as a
  162. hint given to the scheduler as there is no guarantee that they will be
  163. executed on the same worker.
  164. @end deftp
  165. @deftypefun void starpu_task_bundle_create ({starpu_task_bundle_t *}@var{bundle})
  166. Factory function creating and initializing @var{bundle}, when the call returns, memory needed is allocated and @var{bundle} is ready to use.
  167. @end deftypefun
  168. @deftypefun int starpu_task_bundle_insert (starpu_task_bundle_t @var{bundle}, {struct starpu_task *}@var{task})
  169. Insert @var{task} in @var{bundle}. Until @var{task} is removed from @var{bundle} its expected length and data transfer time will be considered along those of the other tasks of @var{bundle}.
  170. This function mustn't be called if @var{bundle} is already closed and/or @var{task} is already submitted.
  171. @end deftypefun
  172. @deftypefun int starpu_task_bundle_remove (starpu_task_bundle_t @var{bundle}, {struct starpu_task *}@var{task})
  173. Remove @var{task} from @var{bundle}.
  174. Of course @var{task} must have been previously inserted in @var{bundle}.
  175. This function mustn't be called if @var{bundle} is already closed and/or @var{task} is already submitted. Doing so would result in undefined behaviour.
  176. @end deftypefun
  177. @deftypefun void starpu_task_bundle_close (starpu_task_bundle_t @var{bundle})
  178. Inform the runtime that the user won't modify @var{bundle} anymore, it means no more inserting or removing task. Thus the runtime can destroy it when possible.
  179. @end deftypefun
  180. @node Task Lists
  181. @section Task Lists
  182. @deftp {Data Type} {struct starpu_task_list}
  183. Stores a double-chained list of tasks
  184. @end deftp
  185. @deftypefun void starpu_task_list_init ({struct starpu_task_list *}@var{list})
  186. Initialize a list structure
  187. @end deftypefun
  188. @deftypefun void starpu_task_list_push_front ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
  189. Push a task at the front of a list
  190. @end deftypefun
  191. @deftypefun void starpu_task_list_push_back ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
  192. Push a task at the back of a list
  193. @end deftypefun
  194. @deftypefun {struct starpu_task *} starpu_task_list_front ({struct starpu_task_list *}@var{list})
  195. Get the front of the list (without removing it)
  196. @end deftypefun
  197. @deftypefun {struct starpu_task *} starpu_task_list_back ({struct starpu_task_list *}@var{list})
  198. Get the back of the list (without removing it)
  199. @end deftypefun
  200. @deftypefun int starpu_task_list_empty ({struct starpu_task_list *}@var{list})
  201. Test if a list is empty
  202. @end deftypefun
  203. @deftypefun void starpu_task_list_erase ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
  204. Remove an element from the list
  205. @end deftypefun
  206. @deftypefun {struct starpu_task *} starpu_task_list_pop_front ({struct starpu_task_list *}@var{list})
  207. Remove the element at the front of the list
  208. @end deftypefun
  209. @deftypefun {struct starpu_task *} starpu_task_list_pop_back ({struct starpu_task_list *}@var{list})
  210. Remove the element at the back of the list
  211. @end deftypefun
  212. @deftypefun {struct starpu_task *} starpu_task_list_begin ({struct starpu_task_list *}@var{list})
  213. Get the first task of the list.
  214. @end deftypefun
  215. @deftypefun {struct starpu_task *} starpu_task_list_end ({struct starpu_task_list *}@var{list})
  216. Get the end of the list.
  217. @end deftypefun
  218. @deftypefun {struct starpu_task *} starpu_task_list_next ({struct starpu_task *}@var{task})
  219. Get the next task of the list. This is not erase-safe.
  220. @end deftypefun
  221. @node Using Parallel Tasks
  222. @section Using Parallel Tasks
  223. These are used by parallel tasks:
  224. @deftypefun int starpu_combined_worker_get_size (void)
  225. Return the size of the current combined worker, i.e. the total number of cpus
  226. running the same task in the case of SPMD parallel tasks, or the total number
  227. of threads that the task is allowed to start in the case of FORKJOIN parallel
  228. tasks.
  229. @end deftypefun
  230. @deftypefun int starpu_combined_worker_get_rank (void)
  231. Return the rank of the current thread within the combined worker. Can only be
  232. used in FORKJOIN parallel tasks, to know which part of the task to work on.
  233. @end deftypefun
  234. Most of these are used for schedulers which support parallel tasks.
  235. @deftypefun unsigned starpu_combined_worker_get_count (void)
  236. Return the number of different combined workers.
  237. @end deftypefun
  238. @deftypefun int starpu_combined_worker_get_id (void)
  239. Return the identifier of the current combined worker.
  240. @end deftypefun
  241. @deftypefun int starpu_combined_worker_assign_workerid (int @var{nworkers}, int @var{workerid_array}[])
  242. Register a new combined worker and get its identifier
  243. @end deftypefun
  244. @deftypefun int starpu_combined_worker_get_description (int @var{workerid}, {int *}@var{worker_size}, {int **}@var{combined_workerid})
  245. Get the description of a combined worker
  246. @end deftypefun
  247. @deftypefun int starpu_combined_worker_can_execute_task (unsigned @var{workerid}, {struct starpu_task *}@var{task}, unsigned @var{nimpl})
  248. Variant of starpu_worker_can_execute_task compatible with combined workers
  249. @end deftypefun
  250. @node Defining a new scheduling policy
  251. @section Defining a new scheduling policy
  252. TODO
  253. A full example showing how to define a new scheduling policy is available in
  254. the StarPU sources in the directory @code{examples/scheduler/}.
  255. @menu
  256. * Scheduling Policy API:: Scheduling Policy API
  257. * Source code::
  258. @end menu
  259. @node Scheduling Policy API
  260. @subsection Scheduling Policy API
  261. While StarPU comes with a variety of scheduling policies (@pxref{Task
  262. scheduling policy}), it may sometimes be desirable to implement custom
  263. policies to address specific problems. The API described below allows
  264. users to write their own scheduling policy.
  265. @deftp {Data Type} {struct starpu_machine_topology}
  266. @table @asis
  267. @item @code{unsigned nworkers}
  268. Total number of workers.
  269. @item @code{unsigned ncombinedworkers}
  270. Total number of combined workers.
  271. @item @code{hwloc_topology_t hwtopology}
  272. Topology as detected by hwloc.
  273. To maintain ABI compatibility when hwloc is not available, the field
  274. is replaced with @code{void *dummy}
  275. @item @code{unsigned nhwcpus}
  276. Total number of CPUs, as detected by the topology code. May be different from
  277. the actual number of CPU workers.
  278. @item @code{unsigned nhwcudagpus}
  279. Total number of CUDA devices, as detected. May be different from the actual
  280. number of CUDA workers.
  281. @item @code{unsigned nhwopenclgpus}
  282. Total number of OpenCL devices, as detected. May be different from the actual
  283. number of OpenCL workers.
  284. @item @code{unsigned ncpus}
  285. Actual number of CPU workers used by StarPU.
  286. @item @code{unsigned ncudagpus}
  287. Actual number of CUDA workers used by StarPU.
  288. @item @code{unsigned nopenclgpus}
  289. Actual number of OpenCL workers used by StarPU.
  290. @item @code{unsigned ngordon_spus}
  291. Actual number of Gordon workers used by StarPU.
  292. @item @code{unsigned workers_bindid[STARPU_NMAXWORKERS]}
  293. Indicates the successive cpu identifier that should be used to bind the
  294. workers. It is either filled according to the user's explicit
  295. parameters (from starpu_conf) or according to the STARPU_WORKERS_CPUID env.
  296. variable. Otherwise, a round-robin policy is used to distribute the workers
  297. over the cpus.
  298. @item @code{unsigned workers_cuda_gpuid[STARPU_NMAXWORKERS]}
  299. Indicates the successive GPU identifier that should be used by the CUDA
  300. driver. It is either filled according to the user's explicit parameters (from
  301. starpu_conf) or according to the STARPU_WORKERS_CUDAID env. variable. Otherwise,
  302. they are taken in ID order.
  303. @item @code{unsigned workers_opencl_gpuid[STARPU_NMAXWORKERS]}
  304. Indicates the successive GPU identifier that should be used by the OpenCL
  305. driver. It is either filled according to the user's explicit parameters (from
  306. starpu_conf) or according to the STARPU_WORKERS_OPENCLID env. variable. Otherwise,
  307. they are taken in ID order.
  308. @end table
  309. @end deftp
  310. @deftp {Data Type} {struct starpu_sched_policy}
  311. This structure contains all the methods that implement a scheduling policy. An
  312. application may specify which scheduling strategy in the @code{sched_policy}
  313. field of the @code{starpu_conf} structure passed to the @code{starpu_init}
  314. function. The different fields are:
  315. @table @asis
  316. @item @code{void (*init_sched)(struct starpu_machine_topology *, struct starpu_sched_policy *)}
  317. Initialize the scheduling policy.
  318. @item @code{void (*deinit_sched)(struct starpu_machine_topology *, struct starpu_sched_policy *)}
  319. Cleanup the scheduling policy.
  320. @item @code{int (*push_task)(struct starpu_task *)}
  321. Insert a task into the scheduler.
  322. @item @code{void (*push_task_notify)(struct starpu_task *, int workerid)}
  323. Notify the scheduler that a task was pushed on a given worker. This method is
  324. called when a task that was explicitly assigned to a worker becomes ready and
  325. is about to be executed by the worker. This method therefore permits to keep
  326. the state of the scheduler coherent even when StarPU bypasses the scheduling
  327. strategy.
  328. @item @code{struct starpu_task *(*pop_task)(void)} (optional)
  329. Get a task from the scheduler. The mutex associated to the worker is already
  330. taken when this method is called. If this method is defined as @code{NULL}, the
  331. worker will only execute tasks from its local queue. In this case, the
  332. @code{push_task} method should use the @code{starpu_push_local_task} method to
  333. assign tasks to the different workers.
  334. @item @code{struct starpu_task *(*pop_every_task)(void)}
  335. Remove all available tasks from the scheduler (tasks are chained by the means
  336. of the prev and next fields of the starpu_task structure). The mutex associated
  337. to the worker is already taken when this method is called. This is currently
  338. only used by the Gordon driver.
  339. @item @code{void (*pre_exec_hook)(struct starpu_task *)} (optional)
  340. This method is called every time a task is starting.
  341. @item @code{void (*post_exec_hook)(struct starpu_task *)} (optional)
  342. This method is called every time a task has been executed.
  343. @item @code{const char *policy_name} (optional)
  344. Name of the policy.
  345. @item @code{const char *policy_description} (optional)
  346. Description of the policy.
  347. @end table
  348. @end deftp
  349. @deftypefun void starpu_worker_set_sched_condition (int @var{workerid}, pthread_cond_t *@var{sched_cond}, pthread_mutex_t *@var{sched_mutex})
  350. This function specifies the condition variable associated to a worker.
  351. When there is no available task for a worker, StarPU blocks this worker on a
  352. condition variable. This function specifies which condition variable (and the
  353. associated mutex) should be used to block (and to wake up) a worker. Note that
  354. multiple workers may use the same condition variable. For instance, in the case
  355. of a scheduling strategy with a single task queue, the same condition variable
  356. would be used to block and wake up all workers.
  357. The initialization method of a scheduling strategy (@code{init_sched}) must
  358. call this function once per worker.
  359. @end deftypefun
  360. @deftypefun void starpu_sched_set_min_priority (int @var{min_prio})
  361. Defines the minimum priority level supported by the scheduling policy. The
  362. default minimum priority level is the same as the default priority level which
  363. is 0 by convention. The application may access that value by calling the
  364. @code{starpu_sched_get_min_priority} function. This function should only be
  365. called from the initialization method of the scheduling policy, and should not
  366. be used directly from the application.
  367. @end deftypefun
  368. @deftypefun void starpu_sched_set_max_priority (int @var{max_prio})
  369. Defines the maximum priority level supported by the scheduling policy. The
  370. default maximum priority level is 1. The application may access that value by
  371. calling the @code{starpu_sched_get_max_priority} function. This function should
  372. only be called from the initialization method of the scheduling policy, and
  373. should not be used directly from the application.
  374. @end deftypefun
  375. @deftypefun int starpu_sched_get_min_priority (void)
  376. Returns the current minimum priority level supported by the
  377. scheduling policy
  378. @end deftypefun
  379. @deftypefun int starpu_sched_get_max_priority (void)
  380. Returns the current maximum priority level supported by the
  381. scheduling policy
  382. @end deftypefun
  383. @deftypefun int starpu_push_local_task (int @var{workerid}, {struct starpu_task} *@var{task}, int @var{back})
  384. The scheduling policy may put tasks directly into a worker's local queue so
  385. that it is not always necessary to create its own queue when the local queue
  386. is sufficient. If @var{back} is not null, @var{task} is put at the back of the queue
  387. where the worker will pop tasks first. Setting @var{back} to 0 therefore ensures
  388. a FIFO ordering.
  389. @end deftypefun
  390. @deftypefun int starpu_worker_can_execute_task (unsigned @var{workerid}, {struct starpu_task *}@var{task}, unsigned @var{nimpl})
  391. Check if the worker specified by workerid can execute the codelet. Schedulers need to call it before assigning a task to a worker, otherwise the task may fail to execute.
  392. @end deftypefun
  393. @deftypefun double starpu_timing_now (void)
  394. Return the current date in µs
  395. @end deftypefun
  396. @deftypefun double starpu_task_expected_length ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
  397. Returns expected task duration in µs
  398. @end deftypefun
  399. @deftypefun double starpu_worker_get_relative_speedup ({enum starpu_perf_archtype} @var{perf_archtype})
  400. Returns an estimated speedup factor relative to CPU speed
  401. @end deftypefun
  402. @deftypefun double starpu_task_expected_data_transfer_time (uint32_t @var{memory_node}, {struct starpu_task *}@var{task})
  403. Returns expected data transfer time in µs
  404. @end deftypefun
  405. @deftypefun double starpu_data_expected_transfer_time (starpu_data_handle_t @var{handle}, unsigned @var{memory_node}, {enum starpu_access_mode} @var{mode})
  406. Predict the transfer time (in µs) to move a handle to a memory node
  407. @end deftypefun
  408. @deftypefun double starpu_task_expected_power ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
  409. Returns expected power consumption in J
  410. @end deftypefun
  411. @deftypefun double starpu_task_expected_conversion_time ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
  412. Returns expected conversion time in ms (multiformat interface only)
  413. @end deftypefun
  414. @node Source code
  415. @subsection Source code
  416. @cartouche
  417. @smallexample
  418. static struct starpu_sched_policy dummy_sched_policy = @{
  419. .init_sched = init_dummy_sched,
  420. .deinit_sched = deinit_dummy_sched,
  421. .push_task = push_task_dummy,
  422. .push_prio_task = NULL,
  423. .pop_task = pop_task_dummy,
  424. .post_exec_hook = NULL,
  425. .pop_every_task = NULL,
  426. .policy_name = "dummy",
  427. .policy_description = "dummy scheduling strategy"
  428. @};
  429. @end smallexample
  430. @end cartouche
  431. @node Expert mode
  432. @section Expert mode
  433. @deftypefun void starpu_wake_all_blocked_workers (void)
  434. Wake all the workers, so they can inspect data requests and task submissions
  435. again.
  436. @end deftypefun
  437. @deftypefun int starpu_progression_hook_register (unsigned (*@var{func})(void *arg), void *@var{arg})
  438. Register a progression hook, to be called when workers are idle.
  439. @end deftypefun
  440. @deftypefun void starpu_progression_hook_deregister (int @var{hook_id})
  441. Unregister a given progression hook.
  442. @end deftypefun