sched_ctx.h

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2013 INRIA
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#ifndef __SCHED_CONTEXT_H__
#define __SCHED_CONTEXT_H__

#include <starpu.h>
#include <starpu_sched_ctx.h>
#include <starpu_sched_ctx_hypervisor.h>
#include <starpu_scheduler.h>
#include <common/config.h>
#include <common/barrier_counter.h>
#include <profiling/profiling.h>
#include <semaphore.h>
#include "sched_ctx_list.h"
#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#endif
#define NO_RESIZE -1
#define REQ_RESIZE 0
#define DO_RESIZE 1
#define STARPU_GLOBAL_SCHED_CTX 0
#define STARPU_NMAXSMS 13
struct _starpu_sched_ctx
{
	/* id of the context used in user mode */
	unsigned id;
	/* name of the context */
	const char *name;
	/* policy of the context */
	struct starpu_sched_policy *sched_policy;
	/* data necessary for the policy */
	void *policy_data;
	/* pointer for application use */
	void *user_data;
	/* collection of workers currently assigned to this context */
	struct starpu_worker_collection *workers;
	/* we keep an initial sched which we never delete */
	unsigned is_initial_sched;
	/* wait for the tasks submitted to the context to be executed */
	struct _starpu_barrier_counter tasks_barrier;
	/* wait for the ready tasks of the context to be executed */
	struct _starpu_barrier_counter ready_tasks_barrier;
	/* amount of ready flops in the context */
	double ready_flops;
	/* cond to block push when there are no workers in the ctx */
	starpu_pthread_cond_t no_workers_cond;
	/* mutex to block push when there are no workers in the ctx */
	starpu_pthread_mutex_t no_workers_mutex;
	/* ready tasks that couldn't be pushed because the ctx has no workers */
	struct starpu_task_list empty_ctx_tasks;
	/* mutex protecting the empty_ctx_tasks list */
	starpu_pthread_mutex_t empty_ctx_mutex;
	/* ready tasks that couldn't be pushed because the window of tasks was already full */
	struct starpu_task_list waiting_tasks;
	/* mutex protecting the waiting_tasks list */
	starpu_pthread_mutex_t waiting_tasks_mutex;
	/* mutex protecting writes to each worker's sched_ctx_list structure for this sched_ctx */
	starpu_pthread_mutex_t sched_ctx_list_mutex;
	/* min CPUs to execute */
	int min_ncpus;
	/* max CPUs to execute */
	int max_ncpus;
	/* min GPUs to execute */
	int min_ngpus;
	/* max GPUs to execute */
	int max_ngpus;
	/* in case we delete the context, leave the resources to the inheritor */
	unsigned inheritor;
	/* indicates whether the application finished submitting tasks
	 * to this context */
	unsigned finished_submit;
	/* By default we have a binary type of priority: either a task is a priority
	 * task (level 1) or it is not (level 0). */
	int min_priority;
	int max_priority;
	int min_priority_is_set;
	int max_priority_is_set;
	/* hwloc tree structure of workers */
#ifdef STARPU_HAVE_HWLOC
	hwloc_bitmap_t hwloc_workers_set;
#endif
#ifdef STARPU_USE_SC_HYPERVISOR
	/* a structure containing a series of performance counters determining the resize procedure */
	struct starpu_sched_ctx_performance_counters *perf_counters;
#endif //STARPU_USE_SC_HYPERVISOR
	/* callback called when the context has finished executing its submitted tasks */
	void (*close_callback)(unsigned sched_ctx_id, void *args);
	/* argument passed to close_callback */
	void *close_args;
	/* value placing the context in the hierarchy of contexts */
	unsigned hierarchy_level;
	/* if we execute non-StarPU code inside the context
	 * we have a single master worker that stays awake;
	 * otherwise main_master is -1 */
	int main_master;
	/* condition variables used when parallel sections are executed in contexts */
	starpu_pthread_cond_t parallel_sect_cond[STARPU_NMAXWORKERS];
	starpu_pthread_mutex_t parallel_sect_mutex[STARPU_NMAXWORKERS];
	starpu_pthread_cond_t parallel_sect_cond_busy[STARPU_NMAXWORKERS];
	int busy[STARPU_NMAXWORKERS];
	/* boolean indicating that workers should block in order to allow
	 * parallel sections to be executed on their allocated resources */
	unsigned parallel_sect[STARPU_NMAXWORKERS];
	/* semaphores that block the application thread until the StarPU workers are
	 * all blocked and ready to execute the parallel code */
	sem_t fall_asleep_sem[STARPU_NMAXWORKERS];
	/* semaphores that block the application thread until the StarPU workers have
	 * all woken up and are ready to continue the application */
	sem_t wake_up_sem[STARPU_NMAXWORKERS];
	/* bool indicating whether the worker is sleeping in this ctx */
	unsigned sleeping[STARPU_NMAXWORKERS];
	/* ctx nesting the current ctx */
	unsigned nesting_sched_ctx;
	/* perf model for the device combination of the ctx */
	struct starpu_perfmodel_arch perf_arch;
	/* For parallel workers, says whether the worker is viewed as sequential or not. This
	 * is a helper for the prologue code. */
	unsigned parallel_view;
	/* for ctxs without policy: flag to indicate that we want to get
	 * the threads to sleep in order to replace them with other threads, or leave
	 * them awake & use them in the parallel code */
	unsigned awake_workers;
	/* function called when initializing the scheduler */
	void (*init_sched)(unsigned);
	/* ids of the sub-contexts of this ctx, and their number */
	int sub_ctxs[STARPU_NMAXWORKERS];
	int nsub_ctxs;
	/* nr of SMs assigned to this ctx if we partition GPUs */
	int nsms;
	int sms_start_idx;
	int sms_end_idx;
	int stream_worker;
};
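
/* Illustrative sketch of how this internal structure is typically reached from
 * application code, assuming the public API declared in starpu_sched_ctx.h
 * (starpu_sched_ctx_create(), starpu_sched_ctx_delete()) and the sched_ctx
 * field of struct starpu_task; treat the exact option list as an assumption:
 *
 *   int workers[2] = {1, 2};
 *   unsigned ctx = starpu_sched_ctx_create(workers, 2, "my_ctx",
 *                                          STARPU_SCHED_CTX_POLICY_NAME, "eager",
 *                                          0);
 *   struct starpu_task *task = starpu_task_create();
 *   task->sched_ctx = ctx;           // submit the task to this context
 *   starpu_task_submit(task);
 *   starpu_sched_ctx_delete(ctx);    // resources go back to the inheritor ctx
 */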

struct _starpu_machine_config;

/* init sched_ctx_id of all contexts */
void _starpu_init_all_sched_ctxs(struct _starpu_machine_config *config);
/* allocate all structures belonging to a context */
struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *policy, int *workerid, int nworkerids, unsigned is_init_sched, const char *sched_name,
						   int min_prio_set, int min_prio,
						   int max_prio_set, int max_prio, unsigned awake_workers, void (*sched_policy_init)(unsigned), void *user_data,
						   int nsub_ctxs, int *sub_ctxs, int nsms);
/* delete all sched_ctxs */
void _starpu_delete_all_sched_ctxs();
/* This function waits until all the tasks that were already submitted to a specific
 * context have been executed. */
int _starpu_wait_for_all_tasks_of_sched_ctx(unsigned sched_ctx_id);
/* This function waits until at most n tasks are still submitted. */
int _starpu_wait_for_n_submitted_tasks_of_sched_ctx(unsigned sched_ctx_id, unsigned n);
/* In order to implement starpu_wait_for_all_tasks_of_ctx, we keep track of the number of
 * tasks currently submitted to the context */
void _starpu_decrement_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
void _starpu_increment_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
int _starpu_get_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
int _starpu_check_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
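/* Illustrative sketch of how these counters are meant to pair up; the actual
 * call sites live in the task submission/termination paths, so treat the exact
 * sequencing as an assumption rather than a reference:
 *
 *   _starpu_increment_nsubmitted_tasks_of_sched_ctx(ctx_id);   // task enters the ctx
 *   ... task gets scheduled and executed ...
 *   _starpu_decrement_nsubmitted_tasks_of_sched_ctx(ctx_id);   // task terminates
 *
 * _starpu_wait_for_all_tasks_of_sched_ctx(ctx_id) can then block on the
 * tasks_barrier counter until it drops back to zero. */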
void _starpu_decrement_nready_tasks_of_sched_ctx(unsigned sched_ctx_id, double ready_flops);
unsigned _starpu_increment_nready_tasks_of_sched_ctx(unsigned sched_ctx_id, double ready_flops, struct starpu_task *task);
int _starpu_wait_for_no_ready_of_sched_ctx(unsigned sched_ctx_id);
/* Return the corresponding index of the workerid in the ctx table */
int _starpu_get_index_in_ctx_of_workerid(unsigned sched_ctx, unsigned workerid);
/* Get the mutex corresponding to the global workerid */
starpu_pthread_mutex_t *_starpu_get_sched_mutex(struct _starpu_sched_ctx *sched_ctx, int worker);
/* Get the workers belonging to a certain context; returns the number of workers.
 * Take care: no mutex is taken, the list of workers might not be up to date */
int _starpu_get_workers_of_sched_ctx(unsigned sched_ctx_id, int *pus, enum starpu_worker_archtype arch);
/* Let the worker know it does not belong to the context and that
 * it should stop popping from it */
void _starpu_worker_gets_out_of_ctx(unsigned sched_ctx_id, struct _starpu_worker *worker);
/* Check if the worker belongs to another sched_ctx */
unsigned _starpu_worker_belongs_to_a_sched_ctx(int workerid, unsigned sched_ctx_id);
/* lock synchronising several simultaneous modifications of a context */
starpu_pthread_rwlock_t* _starpu_sched_ctx_get_changing_ctx_mutex(unsigned sched_ctx_id);
/* indicates whether this worker should go to sleep or not
 * (if it is the last one awake in a context it had better stay awake) */
unsigned _starpu_sched_ctx_last_worker_awake(struct _starpu_worker *worker);
/* let the application know that the worker blocked to execute parallel code */
void _starpu_sched_ctx_signal_worker_blocked(unsigned sched_ctx_id, int workerid);
/* let the application know that the worker woke up */
void _starpu_sched_ctx_signal_worker_woke_up(unsigned sched_ctx_id, int workerid);
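/* Hedged sketch of the block/wake handshake used for parallel sections; the
 * semaphores involved are fall_asleep_sem/wake_up_sem from the structure above,
 * and the exact sequencing is an assumption (the real call sites are in the
 * worker loop and in the public API code):
 *
 *   // worker side, once it has blocked for the parallel section:
 *   _starpu_sched_ctx_signal_worker_blocked(ctx_id, workerid);
 *   // application side: wait until the workers are blocked, run the
 *   // non-StarPU parallel code on their resources, then wake them up;
 *   // each worker finally reports back with:
 *   _starpu_sched_ctx_signal_worker_woke_up(ctx_id, workerid);
 */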
/* If starpu_sched_ctx_set_context() has been called, returns the context
 * id set by its last call, or the id of the initial context */
unsigned _starpu_sched_ctx_get_current_context();
/* verify how many workers can execute a certain task */
int _starpu_nworkers_able_to_execute_task(struct starpu_task *task, struct _starpu_sched_ctx *sched_ctx);
/* push the tasks parked in the empty_ctx_tasks list back to the context's scheduler */
void _starpu_fetch_tasks_from_empty_ctx_list(struct _starpu_sched_ctx *sched_ctx);
unsigned _starpu_sched_ctx_allow_hypervisor(unsigned sched_ctx_id);
struct starpu_perfmodel_arch * _starpu_sched_ctx_get_perf_archtype(unsigned sched_ctx);
#ifdef STARPU_USE_SC_HYPERVISOR
/* Notifies the hypervisor that a task was popped from the worker's list */
void _starpu_sched_ctx_post_exec_task_cb(int workerid, struct starpu_task *task, size_t data_size, uint32_t footprint);
#endif //STARPU_USE_SC_HYPERVISOR
/* if the worker is the master of a parallel context, and the job is meant to be executed on this parallel context, return a pointer to the context */
struct _starpu_sched_ctx *__starpu_sched_ctx_get_sched_ctx_for_worker_and_job(struct _starpu_worker *worker, struct _starpu_job *j);
#define _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(w,j) \
	(_starpu_get_nsched_ctxs() <= 1 ? _starpu_get_sched_ctx_struct(0) : __starpu_sched_ctx_get_sched_ctx_for_worker_and_job((w),(j)))
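/* The macro above is a fast path: when at most one context exists there is no
 * ambiguity, so the per-worker/per-job lookup is skipped and the global context
 * (id STARPU_GLOBAL_SCHED_CTX == 0) is returned directly. Illustrative use,
 * assuming only the global context exists:
 *
 *   struct _starpu_sched_ctx *ctx =
 *       _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, job);
 *   // with a single context this evaluates to _starpu_get_sched_ctx_struct(0)
 */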
#endif // __SCHED_CONTEXT_H__