/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2013 Simon Archipoff
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#ifndef __STARPU_SCHED_NODE_H__
#define __STARPU_SCHED_NODE_H__
#include <starpu.h>
#include <common/starpu_spinlock.h>
#include <starpu_bitmap.h>
#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#endif
/* A struct starpu_sched_node is a scheduler module; a scheduler is a tree-like
 * structure of such nodes. Some parts of a scheduler can be shared by several
 * contexts to perform local optimisations, so, for every node, an array of
 * fathers is kept, indexed by sched_ctx_id.
 *
 * Nodes embed their specialised methods in a pseudo object style, so calls
 * look like node->push_task(node, task).
 */
struct starpu_sched_node
{
	/* node->push_task(node, task)
	 * This function is called to push a task on the node's subtree. It can
	 * either perform a recursive call on a child or store the task in the
	 * node, in which case the task will be returned by a later pop_task call.
	 *
	 * The caller must ensure that the node is able to execute the task.
	 */
	int (*push_task)(struct starpu_sched_node *,
			 struct starpu_task *);
	/* This function is called by workers to get a task from their fathers.
	 * It should first return a locally stored task, or otherwise perform a
	 * recursive call on the father.
	 *
	 * The default implementation simply performs a recursive call on the father.
	 */
	struct starpu_task * (*pop_task)(struct starpu_sched_node *,
					 unsigned sched_ctx_id);
	/* node->push_back_task(node, task)
	 * This function can be called by room functions so that the user can
	 * provide a particular push function which pushes the task back if the
	 * push attempted by the room function fails.
	 */
	int (*push_back_task)(struct starpu_sched_node *,
			      struct starpu_task *);
	/* This function is a heuristic that computes the load of the subtree.
	 * Basically it computes
	 * estimated_load(node) = sum(estimated_load(node->childs)) +
	 *	nb_local_tasks / average(relative_speedup(underlying_worker))
	 */
	double (*estimated_load)(struct starpu_sched_node * node);
	double (*estimated_end)(struct starpu_sched_node * node);
	/* the number of the node's children */
	int nchilds;
	/* the vector of the node's children */
	struct starpu_sched_node ** childs;
	/* a node may be shared by several contexts,
	 * so we need several fathers
	 */
	struct starpu_sched_node * fathers[STARPU_NMAX_SCHED_CTXS];
	/* the set of workers in the node's subtree */
	struct starpu_bitmap * workers;
	/* the workers available in the context;
	 * this member is set to:
	 * node->workers UNION tree->workers UNION
	 * node->childs[i]->workers_in_ctx iff there exists x such that node->childs[i]->fathers[x] == node
	 */
	struct starpu_bitmap * workers_in_ctx;
	/* the node's private data, no restriction on use */
	void * data;
	void (*add_child)(struct starpu_sched_node * node, struct starpu_sched_node * child);
	void (*remove_child)(struct starpu_sched_node * node, struct starpu_sched_node * child);
	/* this function is called on each node when workers are added to or removed from a context */
	void (*notify_change_workers)(struct starpu_sched_node * node);
	/* this function is called by starpu_sched_node_destroy just before freeing the node */
	void (*deinit_data)(struct starpu_sched_node * node);
	/* the homogeneous property is 0 iff the workers in the node's subtree are
	 * heterogeneous; this field is set and updated automatically, you should
	 * not write to it
	 */
	int properties;
	/* This function is called by a node which implements a queue, allowing it
	 * to notify its fathers that an empty slot is available in its queue.
	 * The basic implementation of this function is a recursive call to the
	 * fathers; the user has to provide a custom function to catch those calls.
	 */
	void (*room)(struct starpu_sched_node * node, unsigned sched_ctx_id);
	/* This function allows a node to wake up a worker.
	 * It is currently called by nodes which implement a queue, to notify
	 * their children that a task has been pushed into the local queue and is
	 * available to be popped by a worker, for example.
	 * The basic implementation of this function is a recursive call to the
	 * children, until at least one worker has been woken up.
	 */
	int (*avail)(struct starpu_sched_node * node);
#ifdef STARPU_HAVE_HWLOC
	/* in the case of a hierarchical scheduler, this is set to the part of the
	 * topology that is bound to this node, e.g. a NUMA node for a
	 * work-stealing node that balances load between the underlying sockets
	 */
	hwloc_obj_t obj;
#endif
};
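
/* Illustrative sketch (not part of this header): nodes are driven through their
 * function pointers in the pseudo object style described above. "node", "task"
 * and "sched_ctx_id" are assumed to already exist, the caller is assumed to
 * have checked that node can execute task, and a non-zero return from push_task
 * is assumed to denote a failed push:
 *
 *	if (node->push_task(node, task))
 *		... handle the failed push, e.g. keep the task elsewhere ...
 *	struct starpu_task *next = node->pop_task(node, sched_ctx_id);
 */
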
enum starpu_sched_node_properties
{
	STARPU_SCHED_NODE_HOMOGENEOUS = (1<<0),
	STARPU_SCHED_NODE_SINGLE_MEMORY_NODE = (1<<1)
};
#define STARPU_SCHED_NODE_IS_HOMOGENEOUS(node) ((node)->properties & STARPU_SCHED_NODE_HOMOGENEOUS)
#define STARPU_SCHED_NODE_IS_SINGLE_MEMORY_NODE(node) ((node)->properties & STARPU_SCHED_NODE_SINGLE_MEMORY_NODE)
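
/* Illustrative sketch: these macros test the bits of the properties field,
 * e.g. to check whether all workers below a node share the same architecture
 * ("node" is assumed to exist):
 *
 *	if (STARPU_SCHED_NODE_IS_HOMOGENEOUS(node))
 *		... every worker below node has the same architecture ...
 */
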
struct starpu_sched_tree
{
	struct starpu_sched_node * root;
	struct starpu_bitmap * workers;
	unsigned sched_ctx_id;
	/* this lock is used to protect the scheduler;
	 * it is taken in read mode when pushing a task
	 * and in write mode when adding or removing workers
	 */
	starpu_pthread_mutex_t lock;
};
struct starpu_sched_node * starpu_sched_node_create(void);
void starpu_sched_node_destroy(struct starpu_sched_node * node);
void starpu_sched_node_set_father(struct starpu_sched_node *node, struct starpu_sched_node *father_node, unsigned sched_ctx_id);
void starpu_sched_node_add_child(struct starpu_sched_node * node, struct starpu_sched_node * child);
void starpu_sched_node_remove_child(struct starpu_sched_node * node, struct starpu_sched_node * child);
int starpu_sched_node_can_execute_task(struct starpu_sched_node * node, struct starpu_task * task);
int STARPU_WARN_UNUSED_RESULT starpu_sched_node_execute_preds(struct starpu_sched_node * node, struct starpu_task * task, double * length);
double starpu_sched_node_transfer_length(struct starpu_sched_node * node, struct starpu_task * task);
void starpu_sched_node_prefetch_on_node(struct starpu_sched_node * node, struct starpu_task * task);
/* no public create function for worker nodes because we don't want several
 * node_worker instances for a single workerid */
struct starpu_sched_node * starpu_sched_node_worker_get(int workerid);
/* this function compares the node's available function with the standard one
 * for worker nodes */
int starpu_sched_node_is_worker(struct starpu_sched_node * node);
int starpu_sched_node_is_simple_worker(struct starpu_sched_node * node);
int starpu_sched_node_is_combined_worker(struct starpu_sched_node * node);
int starpu_sched_node_worker_get_workerid(struct starpu_sched_node * worker_node);
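
/* Illustrative sketch (not part of this header): building a small subtree with
 * two worker children, using only functions declared above; workerids 0 and 1
 * and sched_ctx_id are assumed to be valid:
 *
 *	struct starpu_sched_node *n = starpu_sched_node_create();
 *	struct starpu_sched_node *w0 = starpu_sched_node_worker_get(0);
 *	struct starpu_sched_node *w1 = starpu_sched_node_worker_get(1);
 *	starpu_sched_node_add_child(n, w0);
 *	starpu_sched_node_add_child(n, w1);
 *	starpu_sched_node_set_father(w0, n, sched_ctx_id);
 *	starpu_sched_node_set_father(w1, n, sched_ctx_id);
 */
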
struct starpu_fifo_data
{
	unsigned ntasks_threshold;
	double exp_len_threshold;
};
struct starpu_sched_node * starpu_sched_node_fifo_create(struct starpu_fifo_data * fifo_data);
int starpu_sched_node_is_fifo(struct starpu_sched_node * node);
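
/* Illustrative sketch: creating a FIFO node; the threshold values below are
 * arbitrary examples, their exact semantics are not documented in this header:
 *
 *	struct starpu_fifo_data fifo_data = { .ntasks_threshold = 16, .exp_len_threshold = 1e6 };
 *	struct starpu_sched_node *fifo = starpu_sched_node_fifo_create(&fifo_data);
 */
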
struct starpu_prio_data
{
	unsigned ntasks_threshold;
	double exp_len_threshold;
};
struct starpu_sched_node * starpu_sched_node_prio_create(struct starpu_prio_data * prio_data);
int starpu_sched_node_is_prio(struct starpu_sched_node * node);
struct starpu_sched_node * starpu_sched_node_work_stealing_create(void * arg STARPU_ATTRIBUTE_UNUSED);
int starpu_sched_node_is_work_stealing(struct starpu_sched_node * node);
int starpu_sched_tree_work_stealing_push_task(struct starpu_task *task);
struct starpu_sched_node * starpu_sched_node_random_create(void * arg STARPU_ATTRIBUTE_UNUSED);
int starpu_sched_node_is_random(struct starpu_sched_node *);
struct starpu_sched_node * starpu_sched_node_eager_create(void * arg STARPU_ATTRIBUTE_UNUSED);
int starpu_sched_node_is_eager(struct starpu_sched_node *);
struct starpu_mct_data
{
	double alpha;
	double beta;
	double gamma;
	double idle_power;
};
/* create a node with the given mct_data parameters;
 * a copy of the given struct starpu_mct_data is made during the init_data call;
 * the mct node does nothing but push tasks on no_perf_model_node and
 * calibrating_node
 */
struct starpu_sched_node * starpu_sched_node_mct_create(struct starpu_mct_data * mct_data);
int starpu_sched_node_is_mct(struct starpu_sched_node * node);
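
/* Illustrative sketch: since the given struct starpu_mct_data is copied during
 * init_data, it can live on the stack; the coefficient values below are
 * arbitrary examples:
 *
 *	struct starpu_mct_data mct_data = { .alpha = 1.0, .beta = 1.0, .gamma = 0.0, .idle_power = 0.0 };
 *	struct starpu_sched_node *mct = starpu_sched_node_mct_create(&mct_data);
 */
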
/* this node selects the best implementation for the first worker in the context
 * that can execute the task, and fills task->predicted and task->predicted_transfer;
 * it cannot have several children if push_task is called
 */
struct starpu_sched_node * starpu_sched_node_best_implementation_create(void * arg STARPU_ATTRIBUTE_UNUSED);
struct starpu_perfmodel_select_data
{
	struct starpu_sched_node * calibrator_node;
	struct starpu_sched_node * no_perfmodel_node;
	struct starpu_sched_node * perfmodel_node;
};
struct starpu_sched_node * starpu_sched_node_perfmodel_select_create(struct starpu_perfmodel_select_data * perfmodel_select_data);
int starpu_sched_node_is_perfmodel_select(struct starpu_sched_node * node);
int starpu_sched_node_perfmodel_select_room(struct starpu_sched_node * node, unsigned sched_ctx_id);
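
/* Illustrative sketch: wiring a perfmodel_select node; which node types fill
 * the three slots is an assumption made for this example (eager nodes for the
 * calibrating and no-perfmodel cases, an MCT node otherwise, with mct_data as
 * above):
 *
 *	struct starpu_perfmodel_select_data psd = {
 *		.calibrator_node   = starpu_sched_node_eager_create(NULL),
 *		.no_perfmodel_node = starpu_sched_node_eager_create(NULL),
 *		.perfmodel_node    = starpu_sched_node_mct_create(&mct_data),
 *	};
 *	struct starpu_sched_node *sel = starpu_sched_node_perfmodel_select_create(&psd);
 */
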
/* create an empty tree */
struct starpu_sched_tree * starpu_sched_tree_create(unsigned sched_ctx_id);
void starpu_sched_tree_destroy(struct starpu_sched_tree * tree);
/* destroy the node and all its children,
 * except those shared between several contexts
 */
void starpu_sched_node_destroy_rec(struct starpu_sched_node * node, unsigned sched_ctx_id);
/* update all the node->workers members recursively */
void starpu_sched_tree_update_workers(struct starpu_sched_tree * t);
/* same for workers_in_ctx */
void starpu_sched_tree_update_workers_in_ctx(struct starpu_sched_tree * t);
/* wake up one underlying worker of the node that can execute the task */
void starpu_sched_node_wake_available_worker(struct starpu_sched_node * node, struct starpu_task * task);
/* wake up the underlying workers of the node */
void starpu_sched_node_available(struct starpu_sched_node * node);
int starpu_sched_tree_push_task(struct starpu_task * task);
struct starpu_task * starpu_sched_tree_pop_task(unsigned sched_ctx_id);
void starpu_sched_tree_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers);
void starpu_sched_tree_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers);
void starpu_sched_node_worker_pre_exec_hook(struct starpu_task * task);
void starpu_sched_node_worker_post_exec_hook(struct starpu_task * task);
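
/* Illustrative sketch: the entry points above have the signatures of the usual
 * struct starpu_sched_policy callbacks (declared in starpu.h, not in this
 * header), so a tree-based scheduler can presumably be registered along these
 * lines; the policy field names are assumptions based on that interface:
 *
 *	struct starpu_sched_policy tree_policy = {
 *		.push_task      = starpu_sched_tree_push_task,
 *		.pop_task       = starpu_sched_tree_pop_task,
 *		.add_workers    = starpu_sched_tree_add_workers,
 *		.remove_workers = starpu_sched_tree_remove_workers,
 *		.pre_exec_hook  = starpu_sched_node_worker_pre_exec_hook,
 *		.post_exec_hook = starpu_sched_node_worker_post_exec_hook,
 *		.policy_name    = "tree-example",
 *	};
 */
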
struct starpu_sched_node_composed_recipe;
/* create an empty recipe */
struct starpu_sched_node_composed_recipe * starpu_sched_node_create_recipe(void);
struct starpu_sched_node_composed_recipe * starpu_sched_node_create_recipe_singleton(struct starpu_sched_node *(*create_node)(void * arg), void * arg);
/* add a node creation function to the recipe */
void starpu_sched_recipe_add_node(struct starpu_sched_node_composed_recipe * recipe, struct starpu_sched_node *(*create_node)(void * arg), void * arg);
void starpu_destroy_composed_sched_node_recipe(struct starpu_sched_node_composed_recipe *);
struct starpu_sched_node * starpu_sched_node_composed_node_create(struct starpu_sched_node_composed_recipe * recipe);
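
/* Illustrative sketch: a composed node is built from the node creation
 * functions listed in a recipe; the particular combination below (a
 * best-implementation node followed by a work-stealing node) is an arbitrary
 * example:
 *
 *	struct starpu_sched_node_composed_recipe *r = starpu_sched_node_create_recipe();
 *	starpu_sched_recipe_add_node(r, starpu_sched_node_best_implementation_create, NULL);
 *	starpu_sched_recipe_add_node(r, starpu_sched_node_work_stealing_create, NULL);
 *	struct starpu_sched_node *composed = starpu_sched_node_composed_node_create(r);
 */
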
#ifdef STARPU_HAVE_HWLOC
/* a NULL pointer means that level L of the hierarchy is ignored; nodes of
 * levels > L then become children of level L - 1 */
struct starpu_sched_specs
{
	/* hwloc_machine_composed_sched_node must be set, as it is the root of the topology */
	struct starpu_sched_node_composed_recipe * hwloc_machine_composed_sched_node;
	struct starpu_sched_node_composed_recipe * hwloc_node_composed_sched_node;
	struct starpu_sched_node_composed_recipe * hwloc_socket_composed_sched_node;
	struct starpu_sched_node_composed_recipe * hwloc_cache_composed_sched_node;
	/* this member should return a newly allocated starpu_sched_node_composed_recipe or NULL;
	 * the returned recipe must not include the worker node
	 */
	struct starpu_sched_node_composed_recipe * (*worker_composed_sched_node)(enum starpu_worker_archtype archtype);
	/* this flag indicates whether heterogeneous workers should be brothers or
	 * cousins, i.e. whether a GPU and a CPU should share their NUMA node or not
	 */
	int mix_heterogeneous_workers;
};
struct starpu_sched_tree * starpu_sched_node_make_scheduler(unsigned sched_ctx_id, struct starpu_sched_specs);
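
/* Illustrative sketch: filling a struct starpu_sched_specs for
 * starpu_sched_node_make_scheduler; the recipes chosen per level are arbitrary
 * examples, worker_recipe is a hypothetical helper, and sched_ctx_id is assumed
 * to exist (levels left NULL are skipped, as described above):
 *
 *	static struct starpu_sched_node_composed_recipe *
 *	worker_recipe(enum starpu_worker_archtype archtype STARPU_ATTRIBUTE_UNUSED)
 *	{
 *		return starpu_sched_node_create_recipe_singleton(starpu_sched_node_best_implementation_create, NULL);
 *	}
 *
 *	struct starpu_sched_specs specs = {
 *		.hwloc_machine_composed_sched_node = starpu_sched_node_create_recipe_singleton(starpu_sched_node_eager_create, NULL),
 *		.hwloc_node_composed_sched_node    = NULL,
 *		.hwloc_socket_composed_sched_node  = starpu_sched_node_create_recipe_singleton(starpu_sched_node_work_stealing_create, NULL),
 *		.hwloc_cache_composed_sched_node   = NULL,
 *		.worker_composed_sched_node        = worker_recipe,
 *		.mix_heterogeneous_workers         = 1,
 *	};
 *	struct starpu_sched_tree *t = starpu_sched_node_make_scheduler(sched_ctx_id, specs);
 */
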
#endif /* STARPU_HAVE_HWLOC */
#endif