starpu_sched_node.h

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2013 Simon Archipoff
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#ifndef __STARPU_SCHED_NODE_H__
#define __STARPU_SCHED_NODE_H__
#include <starpu.h>
#include <common/starpu_spinlock.h>
#include <starpu_bitmap.h>
#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#endif
/* A struct starpu_sched_node is a scheduler module; a scheduler is a tree-like
 * structure built from them. Some parts of a scheduler can be shared by several
 * contexts to perform local optimisations, so, for every node, an array of
 * fathers is defined, indexed by sched_ctx_id.
 *
 * Nodes embed their specialised methods in a pseudo object-oriented style, so
 * calls look like node->push_task(node, task). A minimal custom node is
 * sketched in the comment following the struct definition.
 */
struct starpu_sched_node
{
	/* node->push_task(node, task)
	 * this function is called to push a task on the node's subtree; it can either
	 * perform a recursive call on a child or store the task in the node, in which
	 * case the task will be returned by a later pop_task call
	 *
	 * the caller must ensure that the node is able to execute the task
	 */
	int (*push_task)(struct starpu_sched_node *,
			 struct starpu_task *);
	/* this function is called by workers to get a task from their fathers;
	 * it should first return a locally stored task, or otherwise perform
	 * a recursive call on the father
	 *
	 * the default implementation simply does a recursive call on the father
	 */
	struct starpu_task * (*pop_task)(struct starpu_sched_node *,
					 unsigned sched_ctx_id);
	/* node->push_back_task(node, task)
	 * this function is called when a push fails after a pop has been done,
	 * to push the task back correctly to the node it was popped from
	 */
	int (*push_back_task)(struct starpu_sched_node *,
			      struct starpu_task *);
	/* this function is a heuristic that computes the load of the subtree; basically
	 * it computes
	 * estimated_load(node) = sum(estimated_load(node->childs[i])) +
	 *                        nb_local_tasks / average(relative_speedup(underlying workers))
	 */
	double (*estimated_load)(struct starpu_sched_node * node);
	double (*estimated_end)(struct starpu_sched_node * node);
	/* the number of the node's children
	 */
	int nchilds;
	/* the vector of the node's children
	 */
	struct starpu_sched_node ** childs;
	/* a node may be shared by several contexts,
	 * so we need several fathers
	 */
	struct starpu_sched_node * fathers[STARPU_NMAX_SCHED_CTXS];
	/* the set of workers in the node's subtree
	 */
	struct starpu_bitmap * workers;
	/* the workers available in the context;
	 * this member is set to:
	 * node->workers UNION tree->workers UNION
	 * node->childs[i]->workers_in_ctx iff there exists x such that node->childs[i]->fathers[x] == node
	 */
	struct starpu_bitmap * workers_in_ctx;
	/* the node's private data, no restriction on use
	 */
	void * data;
	void (*add_child)(struct starpu_sched_node * node, struct starpu_sched_node * child);
	void (*remove_child)(struct starpu_sched_node * node, struct starpu_sched_node * child);
	/* this function is called on each node when workers are added to or removed from a context
	 */
	void (*notify_change_workers)(struct starpu_sched_node * node);
	/* this function is called by starpu_sched_node_destroy just before freeing the node
	 */
	void (*deinit_data)(struct starpu_sched_node * node);
	/* the STARPU_SCHED_NODE_HOMOGENEOUS bit is cleared iff the workers in the node's
	 * subtree are heterogeneous; this field is set and updated automatically, you
	 * should not write to it
	 */
	int properties;
	void (*room)(struct starpu_sched_node * node, unsigned sched_ctx_id);
	int (*avail)(struct starpu_sched_node * node);
#ifdef STARPU_HAVE_HWLOC
	/* in case of a hierarchical scheduler, this is set to the part of the
	 * topology that is bound to this node, e.g. a NUMA node for a work-stealing
	 * node that would balance load between the underlying sockets
	 */
	hwloc_obj_t obj;
#endif
};
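/* Example (illustrative sketch, not taken from StarPU itself): a minimal custom
 * node that simply forwards pushed tasks to its first child, written in the
 * pseudo object-oriented style described above. The names my_forward_push and
 * my_node_create are hypothetical.
 *
 *   static int my_forward_push(struct starpu_sched_node *node, struct starpu_task *task)
 *   {
 *           STARPU_ASSERT(node->nchilds >= 1);
 *           return node->childs[0]->push_task(node->childs[0], task);
 *   }
 *
 *   static struct starpu_sched_node *my_node_create(void *arg STARPU_ATTRIBUTE_UNUSED)
 *   {
 *           struct starpu_sched_node *node = starpu_sched_node_create();
 *           node->push_task = my_forward_push;
 *           return node;
 *   }
 *
 * The other methods are assumed to keep the defaults installed by
 * starpu_sched_node_create().
 */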
enum starpu_sched_node_properties
{
	STARPU_SCHED_NODE_HOMOGENEOUS = (1<<0),
	STARPU_SCHED_NODE_SINGLE_MEMORY_NODE = (1<<1)
};
#define STARPU_SCHED_NODE_IS_HOMOGENEOUS(node) ((node)->properties & STARPU_SCHED_NODE_HOMOGENEOUS)
#define STARPU_SCHED_NODE_IS_SINGLE_MEMORY_NODE(node) ((node)->properties & STARPU_SCHED_NODE_SINGLE_MEMORY_NODE)
struct starpu_sched_tree
{
	struct starpu_sched_node * root;
	struct starpu_bitmap * workers;
	unsigned sched_ctx_id;
	/* this lock is used to protect the scheduler;
	 * it is taken in read mode when pushing a task
	 * and in write mode when adding or removing workers
	 */
	starpu_pthread_mutex_t lock;
};
struct starpu_sched_node * starpu_sched_node_create(void);
void starpu_sched_node_destroy(struct starpu_sched_node * node);
void starpu_sched_node_set_father(struct starpu_sched_node *node, struct starpu_sched_node *father_node, unsigned sched_ctx_id);
void starpu_sched_node_add_child(struct starpu_sched_node * node, struct starpu_sched_node * child);
void starpu_sched_node_remove_child(struct starpu_sched_node * node, struct starpu_sched_node * child);
int starpu_sched_node_can_execute_task(struct starpu_sched_node * node, struct starpu_task * task);
int STARPU_WARN_UNUSED_RESULT starpu_sched_node_execute_preds(struct starpu_sched_node * node, struct starpu_task * task, double * length);
double starpu_sched_node_transfer_length(struct starpu_sched_node * node, struct starpu_task * task);
void starpu_sched_node_prefetch_on_node(struct starpu_sched_node * node, struct starpu_task * task);
/* there is no public create function for worker nodes because we don't want several worker nodes for a single workerid */
struct starpu_sched_node * starpu_sched_node_worker_get(int workerid);
/* this function compares the node's available function with the standard one for worker nodes */
int starpu_sched_node_is_worker(struct starpu_sched_node * node);
int starpu_sched_node_is_simple_worker(struct starpu_sched_node * node);
int starpu_sched_node_is_combined_worker(struct starpu_sched_node * node);
int starpu_sched_node_worker_get_workerid(struct starpu_sched_node * worker_node);
struct starpu_fifo_data
{
	unsigned ntasks_threshold;
	double exp_len_threshold;
};
struct starpu_sched_node * starpu_sched_node_fifo_create(struct starpu_fifo_data * fifo_data);
int starpu_sched_node_is_fifo(struct starpu_sched_node * node);
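/* Example (hedged sketch): creating a fifo node with thresholds on the number
 * of queued tasks and on the expected queue length; the values are arbitrary,
 * the expected length is assumed to be in µs like other StarPU predictions,
 * and the structure is assumed to be copied at creation time.
 *
 *   struct starpu_fifo_data fifo_data = { .ntasks_threshold = 16, .exp_len_threshold = 1e6 };
 *   struct starpu_sched_node *fifo = starpu_sched_node_fifo_create(&fifo_data);
 */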
struct starpu_prio_data
{
	unsigned ntasks_threshold;
	double exp_len_threshold;
};
struct starpu_sched_node * starpu_sched_node_prio_create(struct starpu_prio_data * prio_data);
int starpu_sched_node_is_prio(struct starpu_sched_node * node);
struct starpu_sched_node * starpu_sched_node_work_stealing_create(void * arg STARPU_ATTRIBUTE_UNUSED);
int starpu_sched_node_is_work_stealing(struct starpu_sched_node * node);
int starpu_sched_tree_work_stealing_push_task(struct starpu_task *task);
struct starpu_sched_node * starpu_sched_node_random_create(void * arg STARPU_ATTRIBUTE_UNUSED);
int starpu_sched_node_is_random(struct starpu_sched_node *);
struct starpu_heft_data
{
	double alpha;
	double beta;
	double gamma;
	double idle_power;
};
/* create a node with the heft_data parameters;
 * a copy of the given struct starpu_heft_data is made during the init_data call
 * the heft node doesn't do anything but push tasks on the no_perf_model node and the calibrating node
 */
struct starpu_sched_node * starpu_sched_node_heft_create(struct starpu_heft_data * heft_data);
int starpu_sched_node_is_heft(struct starpu_sched_node * node);
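/* Example (hedged sketch): creating a heft node with dmda-style alpha/beta/
 * gamma/idle_power knobs; the values below are illustrative only. Since the
 * structure is copied during init_data, it can safely live on the stack.
 *
 *   struct starpu_heft_data heft_data = { .alpha = 1.0, .beta = 1.0, .gamma = 1000.0, .idle_power = 0.0 };
 *   struct starpu_sched_node *heft = starpu_sched_node_heft_create(&heft_data);
 */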
/* this node selects the best implementation for the first worker in the context that can execute the task,
 * and fills task->predicted and task->predicted_transfer;
 * it cannot have several children if push_task is called
 */
struct starpu_sched_node * starpu_sched_node_best_implementation_create(void * arg STARPU_ATTRIBUTE_UNUSED);
struct starpu_calibrator_data
{
	struct starpu_sched_node * (*no_perf_model_node_create)(void * arg_no_perf_model);
	void * arg_no_perf_model;
	struct starpu_sched_node * next_node;
};
struct starpu_sched_node * starpu_sched_node_calibrator_create(struct starpu_calibrator_data * calibrator_data);
int starpu_sched_node_is_calibrator(struct starpu_sched_node * node);
int starpu_sched_node_calibrator_room(struct starpu_sched_node * node, unsigned sched_ctx_id);
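/* Example (hedged sketch): a calibrator that builds a random node for tasks
 * without a performance model (via no_perf_model_node_create) and, judging
 * from the field name, hands other tasks to next_node (here the heft node
 * from the sketch above).
 *
 *   struct starpu_calibrator_data calib_data = {
 *           .no_perf_model_node_create = starpu_sched_node_random_create,
 *           .arg_no_perf_model = NULL,
 *           .next_node = heft,
 *   };
 *   struct starpu_sched_node *calibrator = starpu_sched_node_calibrator_create(&calib_data);
 */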
/* create an empty tree
 */
struct starpu_sched_tree * starpu_sched_tree_create(unsigned sched_ctx_id);
void starpu_sched_tree_destroy(struct starpu_sched_tree * tree);
/* destroy the node and all its children,
 * except those shared between several contexts
 */
void starpu_sched_node_destroy_rec(struct starpu_sched_node * node, unsigned sched_ctx_id);
/* recursively update all the node->workers members
 */
void starpu_sched_tree_update_workers(struct starpu_sched_tree * t);
/* same for workers_in_ctx
 */
void starpu_sched_tree_update_workers_in_ctx(struct starpu_sched_tree * t);
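/* Example (hedged sketch of an init_sched-style routine): building a flat tree
 * with one fifo node above every worker. starpu_worker_get_count() enumerates
 * all workers; restricting the loop to the context's own workers is omitted
 * for brevity.
 *
 *   struct starpu_sched_tree *t = starpu_sched_tree_create(sched_ctx_id);
 *   struct starpu_fifo_data fifo_data = { .ntasks_threshold = 16, .exp_len_threshold = 1e6 };
 *   t->root = starpu_sched_node_fifo_create(&fifo_data);
 *   int i;
 *   for (i = 0; i < (int) starpu_worker_get_count(); i++)
 *   {
 *           struct starpu_sched_node *w = starpu_sched_node_worker_get(i);
 *           starpu_sched_node_add_child(t->root, w);
 *           starpu_sched_node_set_father(w, t->root, sched_ctx_id);
 *   }
 *   starpu_sched_tree_update_workers(t);
 *   starpu_sched_tree_update_workers_in_ctx(t);
 */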
/* wake up one of the node's underlying workers that can execute the task
 */
void starpu_sched_node_wake_available_worker(struct starpu_sched_node * node, struct starpu_task * task);
/* wake up the node's underlying workers
 */
void starpu_sched_node_available(struct starpu_sched_node * node);
int starpu_sched_tree_push_task(struct starpu_task * task);
struct starpu_task * starpu_sched_tree_pop_task(unsigned sched_ctx_id);
void starpu_sched_tree_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers);
void starpu_sched_tree_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers);
void starpu_sched_node_worker_pre_exec_hook(struct starpu_task * task);
void starpu_sched_node_worker_post_exec_hook(struct starpu_task * task);
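/* Example (hedged sketch): the entry points above follow the classic
 * starpu_sched_policy hooks, so a tree scheduler is typically exposed as a
 * policy; init_modular_sched and deinit_modular_sched are hypothetical
 * functions that build (as sketched earlier) and destroy the tree. Check the
 * exact hook signatures of your StarPU version.
 *
 *   struct starpu_sched_policy modular_policy = {
 *           .init_sched = init_modular_sched,
 *           .deinit_sched = deinit_modular_sched,
 *           .push_task = starpu_sched_tree_push_task,
 *           .pop_task = starpu_sched_tree_pop_task,
 *           .pre_exec_hook = starpu_sched_node_worker_pre_exec_hook,
 *           .post_exec_hook = starpu_sched_node_worker_post_exec_hook,
 *           .add_workers = starpu_sched_tree_add_workers,
 *           .remove_workers = starpu_sched_tree_remove_workers,
 *           .policy_name = "tree-example",
 *           .policy_description = "example modular tree scheduler",
 *   };
 */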
struct starpu_sched_node_composed_recipe;
/* create an empty recipe */
struct starpu_sched_node_composed_recipe * starpu_sched_node_create_recipe(void);
struct starpu_sched_node_composed_recipe * starpu_sched_node_create_recipe_singleton(struct starpu_sched_node *(*create_node)(void * arg), void * arg);
/* add a node-creation function to the recipe */
void starpu_sched_recipe_add_node(struct starpu_sched_node_composed_recipe * recipe, struct starpu_sched_node *(*create_node)(void * arg), void * arg);
void starpu_destroy_composed_sched_node_recipe(struct starpu_sched_node_composed_recipe *);
struct starpu_sched_node * starpu_sched_node_composed_node_create(struct starpu_sched_node_composed_recipe * recipe);
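/* Example (hedged sketch): composing a best-implementation selector above a
 * work-stealing node. Both creators take a void * argument, so they can be
 * registered in the recipe directly; the recipe should eventually be released
 * with starpu_destroy_composed_sched_node_recipe().
 *
 *   struct starpu_sched_node_composed_recipe *recipe = starpu_sched_node_create_recipe();
 *   starpu_sched_recipe_add_node(recipe, starpu_sched_node_best_implementation_create, NULL);
 *   starpu_sched_recipe_add_node(recipe, starpu_sched_node_work_stealing_create, NULL);
 *   struct starpu_sched_node *composed = starpu_sched_node_composed_node_create(recipe);
 */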
#ifdef STARPU_HAVE_HWLOC
/* a NULL pointer means that hierarchy level L is ignored; nodes of levels > L then become children of level L - 1 */
struct starpu_sched_specs
{
	/* hwloc_machine_composed_sched_node must be set, as it is the root of the topology */
	struct starpu_sched_node_composed_recipe * hwloc_machine_composed_sched_node;
	struct starpu_sched_node_composed_recipe * hwloc_node_composed_sched_node;
	struct starpu_sched_node_composed_recipe * hwloc_socket_composed_sched_node;
	struct starpu_sched_node_composed_recipe * hwloc_cache_composed_sched_node;
	/* this member should return a newly allocated starpu_sched_node_composed_recipe or NULL;
	 * the returned recipe must not include the worker node
	 */
	struct starpu_sched_node_composed_recipe * (*worker_composed_sched_node)(enum starpu_worker_archtype archtype);
	/* this flag indicates whether heterogeneous workers should be brothers or cousins,
	 * for example whether a GPU and a CPU should share their NUMA node or not
	 */
	int mix_heterogeneous_workers;
};
struct starpu_sched_tree * starpu_sched_node_make_scheduler(unsigned sched_ctx_id, struct starpu_sched_specs);
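/* Example (hedged sketch): a specification that only sets a machine-level
 * recipe and ignores the lower hwloc levels (NULL). machine_recipe and
 * per_worker_recipe are hypothetical: the first is a recipe built as above,
 * the second a callback returning a recipe (without the worker node) for a
 * given worker architecture.
 *
 *   struct starpu_sched_specs specs = {
 *           .hwloc_machine_composed_sched_node = machine_recipe,
 *           .hwloc_node_composed_sched_node = NULL,
 *           .hwloc_socket_composed_sched_node = NULL,
 *           .hwloc_cache_composed_sched_node = NULL,
 *           .worker_composed_sched_node = per_worker_recipe,
 *           .mix_heterogeneous_workers = 1,
 *   };
 *   struct starpu_sched_tree *t = starpu_sched_node_make_scheduler(sched_ctx_id, specs);
 */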
#endif /* STARPU_HAVE_HWLOC */
#endif