modularized_scheduler.doxy 4.8 KB

  1. /*
  2. * This file is part of the StarPU Handbook.
  3. * Copyright (C) 2013 Simon Archipoff
  4. * See the file version.doxy for copying conditions.
  5. */
  6. /*! \page ModularizedScheduler Modularized Scheduler
  7. \section Introduction
  8. Schedulers are tree-like structures of homogeneous nodes that each
  9. provide push and pop primitives. Each node may have one father per
  10. context; this matters especially for worker nodes, as they are shared between all contexts.
  11. Tasks make a top-to-bottom traversal of the tree.
  12. A push call on a node either makes a recursive call on one of its
  13. children or stores the task in the node, making it available to a
  14. pop; in the latter case the node should call starpu_sched_node_available to wake workers
  15. up. Push must be called on a child, and only if this child can execute
  16. the task.
  17. A pop call on a node can either return a locally stored task or perform
  18. a recursive call on its father in its current context. Only workers
  19. should call pop.
  20. \section Initialization
  21. Scheduler nodes are created with the starpu_sched_node_foo_create() functions
  22. and then must be assembled using the starpu_sched_node::add_child and
  23. starpu_sched_node::remove_child functions.
  24. A father can be set on a node to make it reachable by a starpu_sched_node::pop_task
  25. call.
  26. Underlying workers are memoized in starpu_sched_node::workers. Hence the
  27. function starpu_sched_tree_update_workers should be called once the scheduler is
  28. fully assembled, and again whenever it is modified.
  29. \section Push
  30. All scheduler nodes must define a starpu_sched_node::push_task
  31. function. The caller ensures that the node can actually execute the task.
  32. \section Pop
  33. starpu_sched_node::pop_task should either return a local task or
  34. perform a recursive call on
  35. starpu_sched_node::fathers[sched_ctx_id], or return \c NULL if it is a root
  36. node.
  37. \section WorkersAndCombinedWorkers Workers and Combined workers
  38. Leaves are either worker nodes, each bound to a single StarPU worker, or
  39. combined worker nodes, each bound to several worker nodes.
  40. Pushing a task on a combined worker node will in fact push a copy of
  41. that task on each worker node of the combined worker.
  42. A push call simply enqueues the task in the worker queue; no sorting is performed
  43. here.
  44. If a worker calls pop and gets a parallel task, it will execute it with the
  45. combined worker it belongs to.
  46. \section Example
  47. Here we build a simple scheduler with a heft node on top, a work-stealing node per memory node, and a best_impl node per worker,
  48. which uses a random node to push uncalibrated tasks and tasks with no performance model; this is probably not a sensible policy in practice.
  49. \code{.c}
  50. static void initialize_scheduler(unsigned sched_ctx_id)
  51. {
  52. starpu_sched_ctx_create_worker_collection(sched_ctx_id, STARPU_WORKER_LIST);
  53. struct starpu_sched_tree * t = starpu_sched_tree_create(sched_ctx_id);
  54. struct starpu_sched_node * ws_nodes[STARPU_NMAXWORKERS] = {0};
  55. struct starpu_heft_data data =
  56. {
  57. .alpha = 1,
  58. .beta = 2,
  59. .gamma = 0,
  60. .idle_power = 0,
  61. .no_perf_model_node_create = starpu_sched_node_random_create,
  62. .arg_no_perf_model = NULL,
  63. .calibrating_node_create = starpu_sched_node_random_create,
  64. .arg_calibrating_node = NULL
  65. };
  66. struct starpu_sched_node * heft = starpu_sched_node_heft_create(&data);
  67. unsigned i;
  68. for(i = 0; i < starpu_worker_get_count() + starpu_combined_worker_get_count(); i++)
  69. {
  70. struct starpu_sched_node * worker_node = starpu_sched_node_worker_get(i);
  71. struct starpu_sched_node * best_impl = starpu_sched_node_best_implementation_create(NULL);
  72. best_impl->add_child(best_impl, worker_node);
  73. starpu_sched_node_set_father(worker_node, best_impl, sched_ctx_id);
  74. int memory_node = starpu_worker_get_memory_node(i);
  75. if(!ws_nodes[memory_node])
  76. {
  77. ws_nodes[memory_node] = starpu_sched_node_work_stealing_create(NULL);
  78. heft->add_child(heft, ws_nodes[memory_node]);
  79. starpu_sched_node_set_father(ws_nodes[memory_node], heft, sched_ctx_id);
  80. }
  81. struct starpu_sched_node * ws = ws_nodes[memory_node];
  82. ws->add_child(ws,best_impl);
  83. starpu_sched_node_set_father(best_impl, ws, sched_ctx_id);
  84. }
  85. t->root = heft;
  86. starpu_sched_tree_update_workers(t);
  87. starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)t);
  88. }
  89. static void deinitialize_scheduler(unsigned sched_ctx_id)
  90. {
  91. struct starpu_sched_tree *t = (struct starpu_sched_tree*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
  92. starpu_sched_tree_destroy(t);
  93. starpu_sched_ctx_delete_worker_collection(sched_ctx_id);
  94. }
  95. struct starpu_sched_policy scheduling_policy =
  96. {
  97. .init_sched = initialize_scheduler,
  98. .deinit_sched = deinitialize_scheduler,
  99. .add_workers = starpu_sched_tree_add_workers,
  100. .remove_workers = starpu_sched_tree_remove_workers,
  101. .push_task = starpu_sched_tree_push_task,
  102. .pop_task = starpu_sched_tree_pop_task,
  103. .pre_exec_hook = starpu_sched_node_worker_pre_exec_hook,
  104. .post_exec_hook = starpu_sched_node_worker_post_exec_hook,
  105. .pop_every_task = NULL,
  106. .policy_name = "tree-heft",
  107. .policy_description = "heft tree policy"
  108. };
  109. \endcode
  110. */