sched_ctx.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <core/sched_policy.h>
#include <core/sched_ctx.h>
#include <common/utils.h>
#include <stdint.h> /* intptr_t, for passing the ctx id through cl_arg */

extern struct worker_collection worker_list;
pthread_key_t sched_ctx_key;

static unsigned _starpu_get_first_free_sched_ctx(struct starpu_machine_config_s *config);
static unsigned _starpu_worker_get_first_free_sched_ctx(struct starpu_worker_s *worker);
static unsigned _starpu_worker_get_sched_ctx_id(struct starpu_worker_s *worker, unsigned sched_ctx_id);
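
/* Each worker keeps in worker->sched_ctx the array of contexts it currently
 * takes part in.  The helpers below keep that array up to date when contexts
 * are created, deleted, or resized. */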
static void change_worker_sched_ctx(unsigned sched_ctx_id)
{
	int workerid = starpu_worker_get_id();
	struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);

	int worker_sched_ctx_id = _starpu_worker_get_sched_ctx_id(worker, sched_ctx_id);
	/* if the ctx is not in the worker's list, the update is the addition of a ctx */
	if(worker_sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		worker_sched_ctx_id = _starpu_worker_get_first_free_sched_ctx(worker);
		struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);

		/* add context to worker */
		worker->sched_ctx[worker_sched_ctx_id] = sched_ctx;
		worker->nsched_ctxs++;
	}
	else
	{
		/* remove context from worker */
		if(worker->sched_ctx[worker_sched_ctx_id]->sched_policy)
			worker->sched_ctx[worker_sched_ctx_id]->sched_policy->remove_workers(sched_ctx_id, &worker->workerid, 1);
		worker->sched_ctx[worker_sched_ctx_id] = NULL;
		worker->nsched_ctxs--;
	}
}
static void update_workers_func(void *buffers[] __attribute__ ((unused)), void *_args)
{
	int sched_ctx_id = (int)(intptr_t)_args;
	change_worker_sched_ctx(sched_ctx_id);
}

struct starpu_codelet_t sched_ctx_info_cl = {
	.where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
	.cuda_func = update_workers_func,
	.cpu_func = update_workers_func,
	.opencl_func = update_workers_func,
	.nbuffers = 0
};
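
/* To add or remove a context on a worker that is already running, a small
 * zero-buffer task using sched_ctx_info_cl is submitted and pinned to that
 * worker; its body, update_workers_func(), simply calls
 * change_worker_sched_ctx() from the worker's own thread.  The task is
 * excluded from the task DAG so it does not interfere with the application's
 * dependencies. */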
static void _starpu_update_workers(int *workerids, int nworkers, int sched_ctx_id)
{
	int i, ret;
	struct starpu_worker_s *worker[nworkers];
	struct starpu_worker_s *curr_worker = _starpu_get_local_worker_key();

	for(i = 0; i < nworkers; i++)
	{
		worker[i] = _starpu_get_worker_struct(workerids[i]);

		/* if the current thread is itself one of the workers to update,
		   there is no need to send it a message: its sched_ctx info can
		   be changed directly */
		if(curr_worker && curr_worker == worker[i])
			change_worker_sched_ctx(sched_ctx_id);
		else
		{
			/* otherwise submit a notification task pinned to that worker */
			worker[i]->tasks[sched_ctx_id] = starpu_task_create();
			worker[i]->tasks[sched_ctx_id]->cl = &sched_ctx_info_cl;
			worker[i]->tasks[sched_ctx_id]->cl_arg = (void*)(intptr_t)sched_ctx_id;
			worker[i]->tasks[sched_ctx_id]->execute_on_a_specific_worker = 1;
			worker[i]->tasks[sched_ctx_id]->workerid = workerids[i];
			worker[i]->tasks[sched_ctx_id]->destroy = 1;

			int worker_sched_ctx_id = _starpu_worker_get_sched_ctx_id(worker[i], sched_ctx_id);
			/* if the ctx is not yet in the worker's list this is an
			   addition: give the notification task a high priority */
			if(worker_sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
				worker[i]->tasks[sched_ctx_id]->priority = 1;

			_starpu_exclude_task_from_dag(worker[i]->tasks[sched_ctx_id]);

			ret = _starpu_task_submit_internal(worker[i]->tasks[sched_ctx_id]);
			(void)ret; /* the return code is not checked here */
		}
	}
}
static void _starpu_add_workers_to_sched_ctx(struct starpu_sched_ctx *sched_ctx, int *workerids, int nworkers,
					     int *added_workers, int *n_added_workers)
{
	struct worker_collection *workers = sched_ctx->workers;
	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();

	int nworkers_to_add = nworkers == -1 ? config->topology.nworkers : nworkers;
	int workers_to_add[nworkers_to_add];

	int i = 0;
	for(i = 0; i < nworkers_to_add; i++)
	{
		/* added_workers is NULL when this function is called at the creation
		   of the context: in that case there is no need to report which
		   workers were actually added */
		if(added_workers)
		{
			int worker = workers->add(workers, (workerids == NULL ? i : workerids[i]));
			if(worker >= 0)
				added_workers[(*n_added_workers)++] = worker;
		}
		else
		{
			int worker = (workerids == NULL ? i : workerids[i]);
			workers->add(workers, worker);
			workers_to_add[i] = worker;
		}
	}

	if(added_workers)
	{
		if(*n_added_workers > 0)
			sched_ctx->sched_policy->add_workers(sched_ctx->id, added_workers, *n_added_workers);
	}
	else
		sched_ctx->sched_policy->add_workers(sched_ctx->id, workers_to_add, nworkers_to_add);

	return;
}
static void _starpu_remove_workers_from_sched_ctx(struct starpu_sched_ctx *sched_ctx, int *workerids, unsigned nworkers,
						  int *removed_workers, int *n_removed_workers)
{
	struct worker_collection *workers = sched_ctx->workers;

	unsigned i = 0;
	for(i = 0; i < nworkers; i++)
	{
		if(workers->nworkers > 0)
		{
			int worker = workers->remove(workers, workerids[i]);
			if(worker >= 0)
				removed_workers[(*n_removed_workers)++] = worker;
		}
	}

	return;
}
struct starpu_sched_ctx* _starpu_create_sched_ctx(const char *policy_name, int *workerids,
						  int nworkers_ctx, unsigned is_initial_sched,
						  const char *sched_name)
{
	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
	STARPU_ASSERT(config->topology.nsched_ctxs < STARPU_NMAX_SCHED_CTXS - 1);

	unsigned id = _starpu_get_first_free_sched_ctx(config);

	struct starpu_sched_ctx *sched_ctx = &config->sched_ctxs[id];
	sched_ctx->id = id;

	int nworkers = config->topology.nworkers;
	STARPU_ASSERT(nworkers_ctx <= nworkers);

	PTHREAD_MUTEX_INIT(&sched_ctx->changing_ctx_mutex, NULL);
	PTHREAD_MUTEX_INIT(&sched_ctx->no_workers_mutex, NULL);
	PTHREAD_COND_INIT(&sched_ctx->no_workers_cond, NULL);
	PTHREAD_MUTEX_INIT(&sched_ctx->empty_ctx_mutex, NULL);
	starpu_task_list_init(&sched_ctx->empty_ctx_tasks);

	sched_ctx->sched_policy = (struct starpu_sched_policy_s*)malloc(sizeof(struct starpu_sched_policy_s));
	sched_ctx->is_initial_sched = is_initial_sched;
	sched_ctx->name = sched_name;

	_starpu_barrier_counter_init(&sched_ctx->tasks_barrier, 0);

	/* initialise all the sync structures up to STARPU_NMAXWORKERS because
	   the number of workers of the context may change over time */
	sched_ctx->sched_mutex = (pthread_mutex_t**)malloc(STARPU_NMAXWORKERS * sizeof(pthread_mutex_t*));
	sched_ctx->sched_cond = (pthread_cond_t**)malloc(STARPU_NMAXWORKERS * sizeof(pthread_cond_t*));

	/* init the strategy structs and the worker_collection of the resources of the context */
	_starpu_init_sched_policy(config, sched_ctx, policy_name);

	/* construct the collection of workers (list/tree/etc.) */
	sched_ctx->workers->workerids = sched_ctx->workers->init(sched_ctx->workers);
	sched_ctx->workers->nworkers = 0;

	/* now that the worker_collection exists, add the resources to it */
	_starpu_add_workers_to_sched_ctx(sched_ctx, workerids, nworkers_ctx, NULL, NULL);

	config->topology.nsched_ctxs++;

	/* if we create the initial big sched ctx we can update the workers' status
	   here because they have not been launched yet */
	if(is_initial_sched)
	{
		int i;
		for(i = 0; i < nworkers; i++)
		{
			struct starpu_worker_s *worker = _starpu_get_worker_struct(i);
			worker->sched_ctx[_starpu_worker_get_first_free_sched_ctx(worker)] = sched_ctx;
			worker->nsched_ctxs++;
		}
	}

	return sched_ctx;
}
unsigned starpu_create_sched_ctx(const char *policy_name, int *workerids,
				 int nworkers_ctx, const char *sched_name)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_create_sched_ctx(policy_name, workerids, nworkers_ctx, 0, sched_name);
	/* the workers concerned by this context are already running: notify them */
	_starpu_update_workers(sched_ctx->workers->workerids, sched_ctx->workers->nworkers, sched_ctx->id);
	return sched_ctx->id;
}
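
/* Example (illustrative sketch, not part of this file): an application creates
 * a context over two workers, makes it the current context of the calling
 * thread, waits for its tasks and finally deletes it, handing its workers back
 * to the inheritor context.  The worker ids, the "eager" policy name and the
 * inheritor id 0 are assumptions, not values mandated by this file.
 *
 *	int workers[2] = {0, 1};
 *	unsigned ctx = starpu_create_sched_ctx("eager", workers, 2, "my_ctx");
 *	starpu_set_sched_ctx(&ctx);
 *	// ... submit tasks ...
 *	starpu_wait_for_all_tasks_of_sched_ctx(ctx);
 *	starpu_delete_sched_ctx(ctx, 0);
 */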
#ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
unsigned starpu_create_sched_ctx_with_criteria(const char *policy_name, int *workerids,
					       int nworkers_ctx, const char *sched_name,
					       struct starpu_sched_ctx_hypervisor_criteria **criteria)
{
	unsigned sched_ctx_id = starpu_create_sched_ctx(policy_name, workerids, nworkers_ctx, sched_name);
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	sched_ctx->criteria = criteria;
	return sched_ctx_id;
}
#endif
/* free all structures for the context */
static void free_sched_ctx_mem(struct starpu_sched_ctx *sched_ctx)
{
	sched_ctx->workers->deinit(sched_ctx->workers);

	free(sched_ctx->workers);
	free(sched_ctx->sched_policy);
	free(sched_ctx->sched_mutex);
	free(sched_ctx->sched_cond);

	sched_ctx->workers = NULL;
	sched_ctx->sched_policy = NULL;
	sched_ctx->sched_mutex = NULL;
	sched_ctx->sched_cond = NULL;

	PTHREAD_MUTEX_DESTROY(&sched_ctx->no_workers_mutex);
	PTHREAD_COND_DESTROY(&sched_ctx->no_workers_cond);

	struct starpu_machine_config_s *config = _starpu_get_machine_config();
	config->topology.nsched_ctxs--;
	sched_ctx->id = STARPU_NMAX_SCHED_CTXS;
}
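
/* Delete a context: notify its workers, hand its workers over to the inheritor
 * context (unless both contexts already own every worker of the machine), then
 * free the context once all of its tasks have completed. */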
void starpu_delete_sched_ctx(unsigned sched_ctx_id, unsigned inheritor_sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	struct starpu_sched_ctx *inheritor_sched_ctx = _starpu_get_sched_ctx_struct(inheritor_sched_ctx_id);

	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
	_starpu_update_workers(sched_ctx->workers->workerids, sched_ctx->workers->nworkers, sched_ctx->id);
	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);

	/* if both contexts already own all the resources of the machine,
	   transferring them from one to the other is pointless */
	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
	int nworkers = config->topology.nworkers;
	if(!(sched_ctx->workers->nworkers == nworkers && sched_ctx->workers->nworkers == inheritor_sched_ctx->workers->nworkers)
	   && sched_ctx->workers->nworkers > 0
	   && inheritor_sched_ctx_id != STARPU_NMAX_SCHED_CTXS)
	{
		starpu_add_workers_to_sched_ctx(sched_ctx->workers->workerids, sched_ctx->workers->nworkers, inheritor_sched_ctx_id);
	}

	if(!starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx_id))
	{
		PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
		free_sched_ctx_mem(sched_ctx);
		PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
	}

	return;
}
/* called after the workers have terminated, so nothing is left to do but free the memory */
void _starpu_delete_all_sched_ctxs()
{
	unsigned i;
	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
	{
		struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(i);
		if(sched_ctx->id != STARPU_NMAX_SCHED_CTXS)
		{
			_starpu_deinit_sched_policy(sched_ctx);
			_starpu_barrier_counter_destroy(&sched_ctx->tasks_barrier);
			free_sched_ctx_mem(sched_ctx);
		}
	}
	return;
}
static void _starpu_check_workers(int *workerids, int nworkers)
{
	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
	int nworkers_conf = config->topology.nworkers;

	int i;
	for(i = 0; i < nworkers; i++)
	{
		/* make sure the user does not ask for a resource that does not exist
		   (valid worker ids range from 0 to nworkers_conf - 1) */
		STARPU_ASSERT(workerids[i] >= 0 && workerids[i] < nworkers_conf);
	}
}
void starpu_add_workers_to_sched_ctx(int *workers_to_add, int nworkers_to_add, unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	int added_workers[nworkers_to_add];
	int n_added_workers = 0;

	STARPU_ASSERT(workers_to_add != NULL && nworkers_to_add > 0);
	_starpu_check_workers(workers_to_add, nworkers_to_add);

	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
	_starpu_add_workers_to_sched_ctx(sched_ctx, workers_to_add, nworkers_to_add, added_workers, &n_added_workers);
	if(n_added_workers > 0)
		_starpu_update_workers(added_workers, n_added_workers, sched_ctx->id);
	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);

	/* wake up any thread waiting for the context to get workers */
	if(n_added_workers > 0)
	{
		PTHREAD_MUTEX_LOCK(&sched_ctx->no_workers_mutex);
		PTHREAD_COND_BROADCAST(&sched_ctx->no_workers_cond);
		PTHREAD_MUTEX_UNLOCK(&sched_ctx->no_workers_mutex);
	}

	/* replay the tasks that were submitted while the context had no workers */
	unsigned unlocked = 0;
	PTHREAD_MUTEX_LOCK(&sched_ctx->empty_ctx_mutex);
	while(!starpu_task_list_empty(&sched_ctx->empty_ctx_tasks))
	{
		struct starpu_task *old_task = starpu_task_list_pop_back(&sched_ctx->empty_ctx_tasks);
		/* release the lock before re-submitting the task to the scheduler */
		PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
		unlocked = 1;
		starpu_job_t old_j = _starpu_get_job_associated_to_task(old_task);
		_starpu_push_task(old_j, 1);
	}
	if(!unlocked)
		PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);

	return;
}
void starpu_remove_workers_from_sched_ctx(int *workers_to_remove, int nworkers_to_remove, unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	int removed_workers[nworkers_to_remove];
	int n_removed_workers = 0;

	STARPU_ASSERT(workers_to_remove != NULL && nworkers_to_remove > 0);
	_starpu_check_workers(workers_to_remove, nworkers_to_remove);

	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
	_starpu_remove_workers_from_sched_ctx(sched_ctx, workers_to_remove, nworkers_to_remove, removed_workers, &n_removed_workers);
	if(n_removed_workers > 0)
		_starpu_update_workers(removed_workers, n_removed_workers, sched_ctx->id);
	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);

	return;
}
/* unused sched_ctxs have the id STARPU_NMAX_SCHED_CTXS */
void _starpu_init_all_sched_ctxs(struct starpu_machine_config_s *config)
{
	pthread_key_create(&sched_ctx_key, NULL);

	unsigned i;
	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
		config->sched_ctxs[i].id = STARPU_NMAX_SCHED_CTXS;

	return;
}

/* unused sched_ctx pointers of a worker are NULL */
void _starpu_init_sched_ctx_for_worker(unsigned workerid)
{
	struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);
	worker->sched_ctx = (struct starpu_sched_ctx**)malloc(STARPU_NMAX_SCHED_CTXS * sizeof(struct starpu_sched_ctx*));

	unsigned i;
	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
		worker->sched_ctx[i] = NULL;

	return;
}
/* sched_ctxs are not necessarily contiguous in the config array:
   when a ctx is deleted its slot becomes free again, and it is reused
   the next time a ctx is created */
static unsigned _starpu_get_first_free_sched_ctx(struct starpu_machine_config_s *config)
{
	unsigned i;
	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
		if(config->sched_ctxs[i].id == STARPU_NMAX_SCHED_CTXS)
			return i;

	STARPU_ASSERT(0);
	return STARPU_NMAX_SCHED_CTXS;
}

static unsigned _starpu_worker_get_first_free_sched_ctx(struct starpu_worker_s *worker)
{
	unsigned i;
	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
		if(worker->sched_ctx[i] == NULL)
			return i;

	STARPU_ASSERT(0);
	return STARPU_NMAX_SCHED_CTXS;
}

static unsigned _starpu_worker_get_sched_ctx_id(struct starpu_worker_s *worker, unsigned sched_ctx_id)
{
	unsigned to_be_deleted = STARPU_NMAX_SCHED_CTXS;
	unsigned i;
	for(i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
	{
		if(worker->sched_ctx[i] != NULL)
		{
			if(worker->sched_ctx[i]->id == sched_ctx_id)
				return i;
			else if(worker->sched_ctx[i]->id == STARPU_NMAX_SCHED_CTXS)
				to_be_deleted = i;
		}
	}

	/* a deleted ctx may still occupy a slot of the worker's list: return that
	   slot so the caller can clean it up (a little bit of a hack, be careful) */
	if(to_be_deleted != STARPU_NMAX_SCHED_CTXS)
		return to_be_deleted;

	return STARPU_NMAX_SCHED_CTXS;
}
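
/* Task counting: every worker and every context owns a barrier counter that
 * tracks the tasks currently submitted to it, through the increment/decrement
 * helpers below; the wait functions block until the counter becomes empty. */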
int starpu_wait_for_all_tasks_of_worker(int workerid)
{
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		return -EDEADLK;

	struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);

	_starpu_barrier_counter_wait_for_empty_counter(&worker->tasks_barrier);

	return 0;
}

int starpu_wait_for_all_tasks_of_workers(int *workerids, int nworkers_ctx)
{
	int ret_val = 0;

	struct starpu_machine_config_s *config = _starpu_get_machine_config();
	int nworkers = nworkers_ctx == -1 ? (int)config->topology.nworkers : nworkers_ctx;

	int workerid = -1;
	int i, n;

	for(i = 0; i < nworkers; i++)
	{
		workerid = workerids == NULL ? i : workerids[i];
		n = starpu_wait_for_all_tasks_of_worker(workerid);
		/* remember the error code if waiting failed for any of the workers */
		if(n)
			ret_val = n;
	}

	return ret_val;
}
void _starpu_decrement_nsubmitted_tasks_of_worker(int workerid)
{
	struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);
	_starpu_barrier_counter_decrement_until_empty_counter(&worker->tasks_barrier);
	return;
}

void _starpu_increment_nsubmitted_tasks_of_worker(int workerid)
{
	struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);
	_starpu_barrier_counter_increment(&worker->tasks_barrier);
	return;
}

int starpu_wait_for_all_tasks_of_sched_ctx(unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);

	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		return -EDEADLK;

	return _starpu_barrier_counter_wait_for_empty_counter(&sched_ctx->tasks_barrier);
}

void _starpu_decrement_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	_starpu_barrier_counter_decrement_until_empty_counter(&sched_ctx->tasks_barrier);
}

void _starpu_increment_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	_starpu_barrier_counter_increment(&sched_ctx->tasks_barrier);
}
pthread_mutex_t *_starpu_get_sched_mutex(struct starpu_sched_ctx *sched_ctx, int workerid)
{
	if(sched_ctx->sched_mutex)
		return sched_ctx->sched_mutex[workerid];
	else
		return NULL;
}

pthread_cond_t *_starpu_get_sched_cond(struct starpu_sched_ctx *sched_ctx, int workerid)
{
	return sched_ctx->sched_cond[workerid];
}

void starpu_set_sched_ctx(unsigned *sched_ctx)
{
	pthread_setspecific(sched_ctx_key, (void*)sched_ctx);
}

unsigned starpu_get_sched_ctx()
{
	unsigned *sched_ctx = (unsigned*)pthread_getspecific(sched_ctx_key);
	STARPU_ASSERT(sched_ctx != NULL);
	STARPU_ASSERT(*sched_ctx < STARPU_NMAX_SCHED_CTXS);
	return *sched_ctx;
}
unsigned _starpu_get_nsched_ctxs()
{
	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
	return config->topology.nsched_ctxs;
}

void starpu_set_sched_ctx_policy_data(unsigned sched_ctx_id, void* policy_data)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	sched_ctx->policy_data = policy_data;
}

void* starpu_get_sched_ctx_policy_data(unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	return sched_ctx->policy_data;
}
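
/* Per-worker synchronisation objects of a context: a scheduling policy can
 * either register mutex/condition pairs it already owns
 * (starpu_worker_set_sched_condition) or let the context allocate and
 * initialise them (starpu_worker_init_sched_condition /
 * starpu_worker_deinit_sched_condition). */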
void starpu_worker_set_sched_condition(unsigned sched_ctx_id, int workerid, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if(sched_ctx->sched_mutex && sched_ctx->sched_cond)
	{
		sched_ctx->sched_mutex[workerid] = sched_mutex;
		sched_ctx->sched_cond[workerid] = sched_cond;
	}
}

void starpu_worker_get_sched_condition(unsigned sched_ctx_id, int workerid, pthread_mutex_t **sched_mutex, pthread_cond_t **sched_cond)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	*sched_mutex = sched_ctx->sched_mutex[workerid];
	*sched_cond = sched_ctx->sched_cond[workerid];
}

void starpu_worker_init_sched_condition(unsigned sched_ctx_id, int workerid)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	sched_ctx->sched_mutex[workerid] = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));
	sched_ctx->sched_cond[workerid] = (pthread_cond_t*)malloc(sizeof(pthread_cond_t));
	PTHREAD_MUTEX_INIT(sched_ctx->sched_mutex[workerid], NULL);
	PTHREAD_COND_INIT(sched_ctx->sched_cond[workerid], NULL);
}

void starpu_worker_deinit_sched_condition(unsigned sched_ctx_id, int workerid)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	PTHREAD_MUTEX_DESTROY(sched_ctx->sched_mutex[workerid]);
	PTHREAD_COND_DESTROY(sched_ctx->sched_cond[workerid]);
	free(sched_ctx->sched_mutex[workerid]);
	free(sched_ctx->sched_cond[workerid]);
}
void starpu_create_worker_collection_for_sched_ctx(unsigned sched_ctx_id, int worker_collection_type)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	sched_ctx->workers = (struct worker_collection*)malloc(sizeof(struct worker_collection));

	switch(worker_collection_type)
	{
	case WORKER_LIST:
		sched_ctx->workers->has_next = worker_list.has_next;
		sched_ctx->workers->get_next = worker_list.get_next;
		sched_ctx->workers->add = worker_list.add;
		sched_ctx->workers->remove = worker_list.remove;
		sched_ctx->workers->init = worker_list.init;
		sched_ctx->workers->deinit = worker_list.deinit;
		sched_ctx->workers->init_cursor = worker_list.init_cursor;
		sched_ctx->workers->deinit_cursor = worker_list.deinit_cursor;
		sched_ctx->workers->type = WORKER_LIST;
		break;
	}

	return;
}

struct worker_collection* starpu_get_worker_collection_of_sched_ctx(unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	return sched_ctx->workers;
}
pthread_mutex_t* starpu_get_changing_ctx_mutex(unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	return &sched_ctx->changing_ctx_mutex;
}

unsigned starpu_get_nworkers_of_sched_ctx(unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	return sched_ctx->workers->nworkers;
}
unsigned starpu_get_nshared_workers(unsigned sched_ctx_id, unsigned sched_ctx_id2)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	struct starpu_sched_ctx *sched_ctx2 = _starpu_get_sched_ctx_struct(sched_ctx_id2);

	struct worker_collection *workers = sched_ctx->workers;
	struct worker_collection *workers2 = sched_ctx2->workers;

	int worker, worker2;
	int shared_workers = 0;

	if(workers->init_cursor)
		workers->init_cursor(workers);

	while(workers->has_next(workers))
	{
		worker = workers->get_next(workers);

		/* the second cursor must be rewound for each worker of the first
		   collection, otherwise only the first outer iteration would see
		   any worker of workers2 */
		if(workers2->init_cursor)
			workers2->init_cursor(workers2);

		while(workers2->has_next(workers2))
		{
			worker2 = workers2->get_next(workers2);
			if(worker == worker2)
				shared_workers++;
		}

		if(workers2->init_cursor)
			workers2->deinit_cursor(workers2);
	}

	if(workers->init_cursor)
		workers->deinit_cursor(workers);

	return shared_workers;
}
#ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
void starpu_call_poped_task_cb(int workerid, unsigned sched_ctx_id, double flops)
{
	struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);

	if(sched_ctx != NULL && sched_ctx_id != 0 && sched_ctx_id != STARPU_NMAX_SCHED_CTXS
	   && sched_ctx->criteria != NULL && *sched_ctx->criteria != NULL)
		(*sched_ctx->criteria)->poped_task_cb(sched_ctx_id, worker->workerid, flops);
}

void starpu_call_pushed_task_cb(int workerid, unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);

	if(sched_ctx != NULL && sched_ctx_id != 0
	   && sched_ctx->criteria != NULL && *sched_ctx->criteria != NULL)
		(*sched_ctx->criteria)->pushed_task_cb(sched_ctx_id, workerid);
}
#endif //STARPU_USE_SCHED_CTX_HYPERVISOR