sched_ctx.c 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2011 INRIA
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
#include <stdint.h>

#include <core/sched_policy.h>
#include <core/sched_ctx.h>
#include <common/utils.h>
  19. extern struct worker_collection worker_list;
  20. pthread_key_t sched_ctx_key;
  21. unsigned with_hypervisor = 0;
  22. static unsigned _starpu_get_first_free_sched_ctx(struct _starpu_machine_config *config);
  23. static unsigned _starpu_worker_get_first_free_sched_ctx(struct _starpu_worker *worker);
  24. static unsigned _starpu_worker_get_sched_ctx_id(struct _starpu_worker *worker, unsigned sched_ctx_id);
/* Toggle the calling worker's membership in context sched_ctx_id: if the
 * worker does not yet reference the context, attach it in the first free
 * slot; otherwise detach it, notifying the scheduling policy first.
 * Must run on the worker itself (uses starpu_worker_get_id()). */
static void change_worker_sched_ctx(unsigned sched_ctx_id)
{
	int workerid = starpu_worker_get_id();
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);

	int worker_sched_ctx_id = _starpu_worker_get_sched_ctx_id(worker, sched_ctx_id);
	/* if the worker is not in the ctx's list it means the update concerns the addition of ctxs*/
	if(worker_sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		worker_sched_ctx_id = _starpu_worker_get_first_free_sched_ctx(worker);
		struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
		/* add context to worker */
		worker->sched_ctx[worker_sched_ctx_id] = sched_ctx;
		worker->nsched_ctxs++;
	}
	else
	{
		/* remove context from worker; tell the policy to drop this worker
		 * before clearing the slot (the slot may also hold a context already
		 * marked deleted — see _starpu_worker_get_sched_ctx_id) */
		if(worker->sched_ctx[worker_sched_ctx_id]->sched_policy)
			worker->sched_ctx[worker_sched_ctx_id]->sched_policy->remove_workers(sched_ctx_id, &worker->workerid, 1);
		worker->sched_ctx[worker_sched_ctx_id] = NULL;
		worker->nsched_ctxs--;
	}
}
  48. static void update_workers_func(void *buffers[] __attribute__ ((unused)), void *_args)
  49. {
  50. int sched_ctx_id = (int)_args;
  51. change_worker_sched_ctx(sched_ctx_id);
  52. }
/* Codelet used to ping each worker so it updates its own sched_ctx
 * bookkeeping (see update_workers_func); runnable on every architecture,
 * carries no data buffers — the ctx id goes through cl_arg. */
struct starpu_codelet sched_ctx_info_cl = {
	.where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
	.cuda_func = update_workers_func,
	.cpu_func = update_workers_func,
	.opencl_func = update_workers_func,
	.nbuffers = 0
};
  60. static void _starpu_update_workers(int *workerids, int nworkers, int sched_ctx_id)
  61. {
  62. int i;
  63. struct _starpu_worker *worker[nworkers];
  64. struct _starpu_worker *curr_worker = _starpu_get_local_worker_key();
  65. for(i = 0; i < nworkers; i++)
  66. {
  67. worker[i] = _starpu_get_worker_struct(workerids[i]);
  68. /* if the current thread requires resize it's no need
  69. to send itsefl a message in order to change its
  70. sched_ctx info */
  71. if(curr_worker && curr_worker == worker[i])
  72. change_worker_sched_ctx(sched_ctx_id);
  73. else
  74. {
  75. worker[i]->tasks[sched_ctx_id] = starpu_task_create();
  76. worker[i]->tasks[sched_ctx_id]->cl = &sched_ctx_info_cl;
  77. worker[i]->tasks[sched_ctx_id]->cl_arg = (void*)sched_ctx_id;
  78. worker[i]->tasks[sched_ctx_id]->execute_on_a_specific_worker = 1;
  79. worker[i]->tasks[sched_ctx_id]->workerid = workerids[i];
  80. worker[i]->tasks[sched_ctx_id]->destroy = 1;
  81. int worker_sched_ctx_id = _starpu_worker_get_sched_ctx_id(worker[i], sched_ctx_id);
  82. /* if the ctx is not in the worker's list it means the update concerns the addition of ctxs*/
  83. if(worker_sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
  84. worker[i]->tasks[sched_ctx_id]->priority = 1;
  85. _starpu_exclude_task_from_dag(worker[i]->tasks[sched_ctx_id]);
  86. _starpu_task_submit_internally(worker[i]->tasks[sched_ctx_id]);
  87. }
  88. }
  89. }
/* Register workers into the context's collection and notify the scheduling
 * policy. workerids == NULL means "workers 0..n-1"; nworkers == -1 means
 * "all configured workers". When added_workers/n_added_workers are non-NULL
 * (context already running), only the ids actually accepted by the
 * collection are reported back through them. */
static void _starpu_add_workers_to_sched_ctx(struct _starpu_sched_ctx *sched_ctx, int *workerids, int nworkers,
		       int *added_workers, int *n_added_workers)
{
	struct worker_collection *workers = sched_ctx->workers;
	struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();

	/* -1 stands for "every worker of the machine" */
	int nworkers_to_add = nworkers == -1 ? config->topology.nworkers : nworkers;
	int workers_to_add[nworkers_to_add];

	int i = 0;
	for(i = 0; i < nworkers_to_add; i++)
	{
		/* added_workers is NULL for the call of this func at the creation of the context*/
		/* if the function is called at the creation of the context it's no need to do this verif */
		if(added_workers)
		{
			/* collection->add returns <0 when the worker was already there */
			int worker = workers->add(workers, (workerids == NULL ? i : workerids[i]));
			if(worker >= 0)
				added_workers[(*n_added_workers)++] = worker;
		}
		else
		{
			int worker = (workerids == NULL ? i : workerids[i]);
			workers->add(workers, worker);
			workers_to_add[i] = worker;
		}
	}

	if(added_workers)
	{
		if(*n_added_workers > 0)
			sched_ctx->sched_policy->add_workers(sched_ctx->id, added_workers, *n_added_workers);
	}
	else
		sched_ctx->sched_policy->add_workers(sched_ctx->id, workers_to_add, nworkers_to_add);
	return;
}
/* Drop the given workers from the context's collection. Ids actually
 * removed are reported through removed_workers/n_removed_workers, and the
 * scheduling policy is notified only when at least one was removed. */
static void _starpu_remove_workers_from_sched_ctx(struct _starpu_sched_ctx *sched_ctx, int *workerids,
		  int nworkers, int *removed_workers, int *n_removed_workers)
{
	struct worker_collection *workers = sched_ctx->workers;

	int i = 0;
	for(i = 0; i < nworkers; i++)
	{
		/* stop early once the collection is empty */
		if(workers->nworkers > 0)
		{
			/* collection->remove returns <0 when the worker was absent */
			int worker = workers->remove(workers, workerids[i]);
			if(worker >= 0)
				removed_workers[(*n_removed_workers)++] = worker;
		}
	}

	if(*n_removed_workers)
		sched_ctx->sched_policy->remove_workers(sched_ctx->id, removed_workers, *n_removed_workers);
	return;
}
/* Allocate and initialise a scheduling context in the first free slot:
 * mutexes/conditions, task barrier, policy and worker collection, then
 * register the requested workers. For the implicit global context created
 * at init time (is_initial_sched), the workers' own tables are filled in
 * directly since the worker threads have not been launched yet.
 * Returns a pointer into config->sched_ctxs[]. */
struct _starpu_sched_ctx* _starpu_create_sched_ctx(const char *policy_name, int *workerids,
			   int nworkers_ctx, unsigned is_initial_sched,
			   const char *sched_name)
{
	struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();
	STARPU_ASSERT(config->topology.nsched_ctxs < STARPU_NMAX_SCHED_CTXS);

	unsigned id = _starpu_get_first_free_sched_ctx(config);

	struct _starpu_sched_ctx *sched_ctx = &config->sched_ctxs[id];
	sched_ctx->id = id;

	int nworkers = config->topology.nworkers;
	STARPU_ASSERT(nworkers_ctx <= nworkers);

	_STARPU_PTHREAD_MUTEX_INIT(&sched_ctx->changing_ctx_mutex, NULL);
	_STARPU_PTHREAD_MUTEX_INIT(&sched_ctx->no_workers_mutex, NULL);
	_STARPU_PTHREAD_COND_INIT(&sched_ctx->no_workers_cond, NULL);
	_STARPU_PTHREAD_MUTEX_INIT(&sched_ctx->empty_ctx_mutex, NULL);
	starpu_task_list_init(&sched_ctx->empty_ctx_tasks);

	/* NOTE(review): malloc results are not checked here or below —
	 * presumably OOM is considered fatal elsewhere; confirm project policy */
	sched_ctx->sched_policy = (struct starpu_sched_policy*)malloc(sizeof(struct starpu_sched_policy));
	sched_ctx->is_initial_sched = is_initial_sched;
	sched_ctx->name = sched_name;

	_starpu_barrier_counter_init(&sched_ctx->tasks_barrier, 0);

	/* initialise all sync structures bc the number of workers can modify */
	sched_ctx->sched_mutex = (pthread_mutex_t**)malloc(STARPU_NMAXWORKERS * sizeof(pthread_mutex_t*));
	sched_ctx->sched_cond = (pthread_cond_t**)malloc(STARPU_NMAXWORKERS * sizeof(pthread_cond_t*));

	/*init the strategy structs and the worker_collection of the ressources of the context */
	_starpu_init_sched_policy(config, sched_ctx, policy_name);

	/* construct the collection of workers(list/tree/etc.) */
	sched_ctx->workers->workerids = sched_ctx->workers->init(sched_ctx->workers);
	sched_ctx->workers->nworkers = 0;

	/* after having an worker_collection on the ressources add them */
	_starpu_add_workers_to_sched_ctx(sched_ctx, workerids, nworkers_ctx, NULL, NULL);

	config->topology.nsched_ctxs++;

	/* if we create the initial big sched ctx we can update workers' status here
	   because they haven't been launched yet */
	if(is_initial_sched)
	{
		int i;
		for(i = 0; i < nworkers; i++)
		{
			struct _starpu_worker *worker = _starpu_get_worker_struct(i);
			worker->sched_ctx[_starpu_worker_get_first_free_sched_ctx(worker)] = sched_ctx;
			worker->nsched_ctxs++;
		}
	}
	return sched_ctx;
}
/* Public entry point: create a context over the given workers, then ping
 * each of those workers so it updates its own bookkeeping.
 * Returns the id of the new context. */
unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids,
			 int nworkers_ctx, const char *sched_name)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_create_sched_ctx(policy_name, workerids, nworkers_ctx, 0, sched_name);

	_starpu_update_workers(sched_ctx->workers->workerids, sched_ctx->workers->nworkers, sched_ctx->id);
#ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
	/* the hypervisor attaches its counters later, via starpu_set_perf_counters */
	sched_ctx->perf_counters = NULL;
#endif
	return sched_ctx->id;
}
  197. #ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
  198. void starpu_set_perf_counters(unsigned sched_ctx_id, struct starpu_performance_counters *perf_counters)
  199. {
  200. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  201. sched_ctx->perf_counters = perf_counters;
  202. return sched_ctx_id;
  203. }
  204. #endif
/* Free all structures of the context, destroy its synchronisation objects,
 * decrement the global context count and mark the slot free by setting
 * id back to STARPU_NMAX_SCHED_CTXS. */
static void _starpu_delete_sched_ctx(struct _starpu_sched_ctx *sched_ctx)
{
	_starpu_deinit_sched_policy(sched_ctx);
	free(sched_ctx->sched_policy);
	free(sched_ctx->sched_mutex);
	free(sched_ctx->sched_cond);
	/* clear the pointers so a stale context is detectable (see
	 * _starpu_get_sched_mutex / starpu_worker_set_sched_condition) */
	sched_ctx->sched_policy = NULL;
	sched_ctx->sched_mutex = NULL;
	sched_ctx->sched_cond = NULL;

	_STARPU_PTHREAD_MUTEX_DESTROY(&sched_ctx->changing_ctx_mutex);
	_STARPU_PTHREAD_MUTEX_DESTROY(&sched_ctx->empty_ctx_mutex);
	_STARPU_PTHREAD_MUTEX_DESTROY(&sched_ctx->no_workers_mutex);
	_STARPU_PTHREAD_COND_DESTROY(&sched_ctx->no_workers_cond);

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	config->topology.nsched_ctxs--;
	sched_ctx->id = STARPU_NMAX_SCHED_CTXS;
}
/* Delete a context: detach its workers, transfer its resources to the
 * inheritor context when that makes sense, then free the structures once
 * every task of the context has completed. */
void starpu_delete_sched_ctx(unsigned sched_ctx_id, unsigned inheritor_sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	struct _starpu_sched_ctx *inheritor_sched_ctx = _starpu_get_sched_ctx_struct(inheritor_sched_ctx_id);

	/* detaching workers races with concurrent add/remove — serialise */
	_STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
	_starpu_update_workers(sched_ctx->workers->workerids, sched_ctx->workers->nworkers, sched_ctx->id);
	_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);

	/*if both of them have all the ressources is pointless*/
	/*trying to transfer ressources from one ctx to the other*/
	struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();
	int nworkers = config->topology.nworkers;

	if(!(sched_ctx->workers->nworkers == nworkers && sched_ctx->workers->nworkers == inheritor_sched_ctx->workers->nworkers) && sched_ctx->workers->nworkers > 0 && inheritor_sched_ctx_id != STARPU_NMAX_SCHED_CTXS)
	{
		starpu_add_workers_to_sched_ctx(sched_ctx->workers->workerids, sched_ctx->workers->nworkers, inheritor_sched_ctx_id);
	}

	/* only free once no task of the context is in flight; the wait returns
	 * non-zero (-EDEADLK) when blocking is not allowed, in which case the
	 * context is deliberately left allocated */
	if(!_starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx_id))
		_starpu_delete_sched_ctx(sched_ctx);
	return;
}
  242. /* called after the workers are terminated so we don't have anything else to do but free the memory*/
  243. void _starpu_delete_all_sched_ctxs()
  244. {
  245. unsigned i;
  246. for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
  247. {
  248. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(i);
  249. if(sched_ctx->id != STARPU_NMAX_SCHED_CTXS)
  250. {
  251. _starpu_barrier_counter_destroy(&sched_ctx->tasks_barrier);
  252. _starpu_delete_sched_ctx(sched_ctx);
  253. }
  254. }
  255. return;
  256. }
  257. static void _starpu_check_workers(int *workerids, int nworkers)
  258. {
  259. struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();
  260. int nworkers_conf = config->topology.nworkers;
  261. int i;
  262. for(i = 0; i < nworkers; i++)
  263. {
  264. /* take care the user does not ask for a resource that does not exist */
  265. STARPU_ASSERT(workerids[i] >= 0 && workerids[i] <= nworkers_conf);
  266. }
  267. }
/* Add workers to an existing context (under changing_ctx_mutex), wake any
 * thread blocked because the context had no workers, then re-push the tasks
 * that were parked while the context was empty. */
void starpu_add_workers_to_sched_ctx(int *workers_to_add, int nworkers_to_add, unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	int added_workers[nworkers_to_add];
	int n_added_workers = 0;

	STARPU_ASSERT(workers_to_add != NULL && nworkers_to_add > 0);
	_starpu_check_workers(workers_to_add, nworkers_to_add);

	_STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
	_starpu_add_workers_to_sched_ctx(sched_ctx, workers_to_add, nworkers_to_add, added_workers, &n_added_workers);
	if(n_added_workers > 0)
		_starpu_update_workers(added_workers, n_added_workers, sched_ctx->id);
	_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);

	/* the context now has workers: wake whoever was waiting for some */
	if(n_added_workers > 0)
	{
		_STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->no_workers_mutex);
		_STARPU_PTHREAD_COND_BROADCAST(&sched_ctx->no_workers_cond);
		_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->no_workers_mutex);
	}

	/* flush tasks parked while the context was empty; the mutex is dropped
	 * before pushing (push may take other locks). NOTE(review): after the
	 * first pop the list is re-tested without the lock — presumably safe
	 * because concurrent parkers re-check, but worth confirming */
	unsigned unlocked = 0;
	_STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->empty_ctx_mutex);
	while(!starpu_task_list_empty(&sched_ctx->empty_ctx_tasks))
	{
		struct starpu_task *old_task = starpu_task_list_pop_back(&sched_ctx->empty_ctx_tasks);
		_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
		unlocked = 1;
		struct _starpu_job *old_j = _starpu_get_job_associated_to_task(old_task);
		_starpu_push_task(old_j);
	}
	/* the loop body unlocks; only unlock here if the loop never ran */
	if(!unlocked)
		_STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
	return;
}
  300. void starpu_remove_workers_from_sched_ctx(int *workers_to_remove, int nworkers_to_remove, unsigned sched_ctx_id)
  301. {
  302. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  303. int removed_workers[sched_ctx->workers->nworkers];
  304. int n_removed_workers = 0;
  305. _starpu_check_workers(workers_to_remove, nworkers_to_remove);
  306. _STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
  307. _starpu_remove_workers_from_sched_ctx(sched_ctx, workers_to_remove, nworkers_to_remove, removed_workers, &n_removed_workers);
  308. if(n_removed_workers > 0)
  309. _starpu_update_workers(removed_workers, n_removed_workers, sched_ctx->id);
  310. _STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
  311. return;
  312. }
  313. /* unused sched_ctx have the id STARPU_NMAX_SCHED_CTXS */
  314. void _starpu_init_all_sched_ctxs(struct _starpu_machine_config *config)
  315. {
  316. pthread_key_create(&sched_ctx_key, NULL);
  317. unsigned i;
  318. for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
  319. config->sched_ctxs[i].id = STARPU_NMAX_SCHED_CTXS;
  320. return;
  321. }
  322. /* unused sched_ctx pointers of a worker are NULL */
  323. void _starpu_init_sched_ctx_for_worker(unsigned workerid)
  324. {
  325. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  326. worker->sched_ctx = (struct _starpu_sched_ctx**)malloc(STARPU_NMAX_SCHED_CTXS * sizeof(struct _starpu_sched_ctx*));
  327. unsigned i;
  328. for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
  329. worker->sched_ctx[i] = NULL;
  330. return;
  331. }
  332. /* sched_ctx aren't necessarly one next to another */
  333. /* for eg when we remove one its place is free */
  334. /* when we add new one we reuse its place */
  335. static unsigned _starpu_get_first_free_sched_ctx(struct _starpu_machine_config *config)
  336. {
  337. unsigned i;
  338. for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
  339. if(config->sched_ctxs[i].id == STARPU_NMAX_SCHED_CTXS)
  340. return i;
  341. STARPU_ASSERT(0);
  342. return STARPU_NMAX_SCHED_CTXS;
  343. }
  344. static unsigned _starpu_worker_get_first_free_sched_ctx(struct _starpu_worker *worker)
  345. {
  346. unsigned i;
  347. for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
  348. if(worker->sched_ctx[i] == NULL)
  349. return i;
  350. STARPU_ASSERT(0);
  351. return STARPU_NMAX_SCHED_CTXS;
  352. }
  353. static unsigned _starpu_worker_get_sched_ctx_id(struct _starpu_worker *worker, unsigned sched_ctx_id)
  354. {
  355. unsigned to_be_deleted = STARPU_NMAX_SCHED_CTXS;
  356. unsigned i;
  357. for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
  358. if(worker->sched_ctx[i] != NULL)
  359. if(worker->sched_ctx[i]->id == sched_ctx_id)
  360. return i;
  361. else if(worker->sched_ctx[i]->id == STARPU_NMAX_SCHED_CTXS)
  362. to_be_deleted = i;
  363. /* little bit of a hack be carefull */
  364. if(to_be_deleted != STARPU_NMAX_SCHED_CTXS)
  365. return to_be_deleted;
  366. return STARPU_NMAX_SCHED_CTXS;
  367. }
  368. int _starpu_wait_for_all_tasks_of_sched_ctx(unsigned sched_ctx_id)
  369. {
  370. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  371. if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
  372. return -EDEADLK;
  373. return _starpu_barrier_counter_wait_for_empty_counter(&sched_ctx->tasks_barrier);
  374. }
  375. void _starpu_decrement_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)
  376. {
  377. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  378. _starpu_barrier_counter_decrement_until_empty_counter(&sched_ctx->tasks_barrier);
  379. }
  380. void _starpu_increment_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)
  381. {
  382. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  383. _starpu_barrier_counter_increment(&sched_ctx->tasks_barrier);
  384. }
  385. void starpu_set_sched_ctx(unsigned *sched_ctx)
  386. {
  387. pthread_setspecific(sched_ctx_key, (void*)sched_ctx);
  388. }
  389. unsigned starpu_get_sched_ctx()
  390. {
  391. unsigned *sched_ctx = (unsigned*)pthread_getspecific(sched_ctx_key);
  392. if(sched_ctx == NULL)
  393. return STARPU_NMAX_SCHED_CTXS;
  394. STARPU_ASSERT(*sched_ctx < STARPU_NMAX_SCHED_CTXS);
  395. return *sched_ctx;
  396. }
  397. void starpu_notify_hypervisor_exists()
  398. {
  399. with_hypervisor = 1;
  400. }
  401. unsigned starpu_check_if_hypervisor_exists()
  402. {
  403. return with_hypervisor;
  404. }
  405. unsigned _starpu_get_nsched_ctxs()
  406. {
  407. struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();
  408. return config->topology.nsched_ctxs;
  409. }
  410. void starpu_set_sched_ctx_policy_data(unsigned sched_ctx_id, void* policy_data)
  411. {
  412. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  413. sched_ctx->policy_data = policy_data;
  414. }
  415. void* starpu_get_sched_ctx_policy_data(unsigned sched_ctx_id)
  416. {
  417. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  418. return sched_ctx->policy_data;
  419. }
  420. pthread_mutex_t *_starpu_get_sched_mutex(struct _starpu_sched_ctx *sched_ctx, int workerid)
  421. {
  422. if(sched_ctx->sched_mutex)
  423. return sched_ctx->sched_mutex[workerid];
  424. else
  425. return NULL;
  426. }
  427. void starpu_worker_set_sched_condition(unsigned sched_ctx_id, int workerid, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond)
  428. {
  429. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  430. if(sched_ctx->sched_mutex && sched_ctx->sched_cond)
  431. {
  432. sched_ctx->sched_mutex[workerid] = sched_mutex;
  433. sched_ctx->sched_cond[workerid] = sched_cond;
  434. }
  435. }
/* Fetch the mutex/condition pair the worker uses inside this context.
 * The tasks that propagate context changes are not guaranteed to execute
 * in order, so the pair may not have been installed yet; in that case fall
 * back to the worker's own mutex/cond and cache that choice in the ctx. */
void starpu_worker_get_sched_condition(unsigned sched_ctx_id, int workerid, pthread_mutex_t **sched_mutex, pthread_cond_t **sched_cond)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	*sched_mutex = sched_ctx->sched_mutex[workerid];
	*sched_cond = sched_ctx->sched_cond[workerid];

	/* the tasks concerning changings of the the ctxs were not executed in order */
	if(!*sched_mutex)
	{
		struct _starpu_worker *workerarg = _starpu_get_worker_struct(workerid);
		*sched_mutex = &workerarg->sched_mutex;
		*sched_cond = &workerarg->sched_cond;
		/* remember the fallback so subsequent lookups are consistent */
		starpu_worker_set_sched_condition(sched_ctx_id, workerid, *sched_mutex, *sched_cond);
	}
}
  450. void starpu_worker_init_sched_condition(unsigned sched_ctx_id, int workerid)
  451. {
  452. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  453. sched_ctx->sched_mutex[workerid] = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));
  454. sched_ctx->sched_cond[workerid] = (pthread_cond_t*)malloc(sizeof(pthread_cond_t));
  455. _STARPU_PTHREAD_MUTEX_INIT(sched_ctx->sched_mutex[workerid], NULL);
  456. _STARPU_PTHREAD_COND_INIT(sched_ctx->sched_cond[workerid], NULL);
  457. }
  458. void starpu_worker_deinit_sched_condition(unsigned sched_ctx_id, int workerid)
  459. {
  460. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  461. _STARPU_PTHREAD_MUTEX_DESTROY(sched_ctx->sched_mutex[workerid]);
  462. _STARPU_PTHREAD_COND_DESTROY(sched_ctx->sched_cond[workerid]);
  463. free(sched_ctx->sched_mutex[workerid]);
  464. free(sched_ctx->sched_cond[workerid]);
  465. }
  466. struct worker_collection* starpu_create_worker_collection_for_sched_ctx(unsigned sched_ctx_id, int worker_collection_type)
  467. {
  468. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  469. sched_ctx->workers = (struct worker_collection*)malloc(sizeof(struct worker_collection));
  470. switch(worker_collection_type)
  471. {
  472. case WORKER_LIST:
  473. sched_ctx->workers->has_next = worker_list.has_next;
  474. sched_ctx->workers->get_next = worker_list.get_next;
  475. sched_ctx->workers->add = worker_list.add;
  476. sched_ctx->workers->remove = worker_list.remove;
  477. sched_ctx->workers->init = worker_list.init;
  478. sched_ctx->workers->deinit = worker_list.deinit;
  479. sched_ctx->workers->init_cursor = worker_list.init_cursor;
  480. sched_ctx->workers->deinit_cursor = worker_list.deinit_cursor;
  481. sched_ctx->workers->type = WORKER_LIST;
  482. break;
  483. }
  484. return sched_ctx->workers;
  485. }
  486. void starpu_delete_worker_collection_for_sched_ctx(unsigned sched_ctx_id)
  487. {
  488. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  489. sched_ctx->workers->deinit(sched_ctx->workers);
  490. free(sched_ctx->workers);
  491. }
  492. struct worker_collection* starpu_get_worker_collection_of_sched_ctx(unsigned sched_ctx_id)
  493. {
  494. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  495. return sched_ctx->workers;
  496. }
  497. pthread_mutex_t* starpu_get_changing_ctx_mutex(unsigned sched_ctx_id)
  498. {
  499. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  500. return &sched_ctx->changing_ctx_mutex;
  501. }
  502. unsigned starpu_get_nworkers_of_sched_ctx(unsigned sched_ctx_id)
  503. {
  504. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  505. if(sched_ctx != NULL)
  506. return sched_ctx->workers->nworkers;
  507. else
  508. return 0;
  509. }
/* Count the workers present in both contexts.
 * NOTE(review): the cursor of workers2 is initialised only once, before the
 * outer loop; after the first outer iteration exhausts it, has_next() on
 * workers2 presumably stays false, so later workers of the first context
 * would not be compared against anything — confirm the cursor semantics of
 * worker_collection and, if so, re-init the inner cursor per iteration. */
unsigned starpu_get_nshared_workers(unsigned sched_ctx_id, unsigned sched_ctx_id2)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	struct _starpu_sched_ctx *sched_ctx2 = _starpu_get_sched_ctx_struct(sched_ctx_id2);

	struct worker_collection *workers = sched_ctx->workers;
	struct worker_collection *workers2 = sched_ctx2->workers;
	int worker, worker2;
	int shared_workers = 0;

	/* init_cursor may be absent for some collection types */
	if(workers->init_cursor)
		workers->init_cursor(workers);
	if(workers2->init_cursor)
		workers2->init_cursor(workers2);

	while(workers->has_next(workers))
	{
		worker = workers->get_next(workers);
		while(workers2->has_next(workers2))
		{
			worker2 = workers2->get_next(workers2);
			if(worker == worker2)
				shared_workers++;
		}
	}

	if(workers->init_cursor)
		workers->deinit_cursor(workers);
	if(workers2->init_cursor)
		workers2->deinit_cursor(workers2);

	return shared_workers;
}
  538. #ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
  539. void starpu_call_poped_task_cb(int workerid, unsigned sched_ctx_id, double flops)
  540. {
  541. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  542. if(sched_ctx != NULL && sched_ctx_id != 0 && sched_ctx_id != STARPU_NMAX_SCHED_CTXS
  543. && sched_ctx->perf_counters != NULL)
  544. sched_ctx->perf_counters->notify_poped_task(sched_ctx_id, workerid, flops);
  545. }
  546. void starpu_call_pushed_task_cb(int workerid, unsigned sched_ctx_id)
  547. {
  548. struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
  549. if(sched_ctx != NULL && sched_ctx_id != 0 && sched_ctx_id != STARPU_NMAX_SCHED_CTXS
  550. && sched_ctx->perf_counters != NULL)
  551. sched_ctx->perf_counters->notify_pushed_task(sched_ctx_id, workerid);
  552. }
  553. #endif //STARPU_USE_SCHED_CTX_HYPERVISOR