/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2017  Inria
 * Copyright (C) 2013  Simon Archipoff
 * Copyright (C) 2008-2018  Université de Bordeaux
 * Copyright (C) 2010-2017  CNRS
 * Copyright (C) 2013  Thibaut Lambert
 * Copyright (C) 2016  Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/sched_policy.h>
#include <profiling/profiling.h>
#include <common/barrier.h>
#include <core/debug.h>
#include <core/task.h>

static int use_prefetch = 0;
static double idle[STARPU_NMAXWORKERS];
static double idle_start[STARPU_NMAXWORKERS];

long _starpu_task_break_on_push = -1;
long _starpu_task_break_on_sched = -1;
long _starpu_task_break_on_pop = -1;
long _starpu_task_break_on_exec = -1;
static const char *starpu_idle_file;

void _starpu_sched_init(void)
{
	_starpu_task_break_on_push = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_PUSH", -1);
	_starpu_task_break_on_sched = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_SCHED", -1);
	_starpu_task_break_on_pop = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_POP", -1);
	_starpu_task_break_on_exec = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_EXEC", -1);
	starpu_idle_file = starpu_getenv("STARPU_IDLE_FILE");
}
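
/* The _starpu_task_break_on_* values are debugging aids: when one of the
 * STARPU_TASK_BREAK_ON_* variables holds a job id, StarPU raises a trap at
 * the corresponding step (push/sched/pop/exec) of the task with that job id,
 * so a debugger can inspect the scheduler state at that precise moment.
 * Illustrative shell usage (the job id 42 is just an example):
 *
 *   STARPU_TASK_BREAK_ON_SCHED=42 gdb --args ./my_app
 */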

int starpu_get_prefetch_flag(void)
{
	return use_prefetch;
}

static struct starpu_sched_policy *predefined_policies[] =
{
	&_starpu_sched_modular_eager_policy,
	&_starpu_sched_modular_eager_prefetching_policy,
	&_starpu_sched_modular_prio_policy,
	&_starpu_sched_modular_prio_prefetching_policy,
	&_starpu_sched_modular_random_policy,
	&_starpu_sched_modular_random_prio_policy,
	&_starpu_sched_modular_random_prefetching_policy,
	&_starpu_sched_modular_random_prio_prefetching_policy,
	&_starpu_sched_modular_ws_policy,
	&_starpu_sched_modular_heft_policy,
	&_starpu_sched_modular_heft_prio_policy,
	&_starpu_sched_modular_heft2_policy,
	&_starpu_sched_eager_policy,
	&_starpu_sched_prio_policy,
	&_starpu_sched_random_policy,
	&_starpu_sched_lws_policy,
	&_starpu_sched_ws_policy,
	&_starpu_sched_dm_policy,
	&_starpu_sched_dmda_policy,
	&_starpu_sched_dmda_ready_policy,
	&_starpu_sched_dmda_sorted_policy,
	&_starpu_sched_dmda_sorted_decision_policy,
	&_starpu_sched_parallel_heft_policy,
	&_starpu_sched_peager_policy,
	&_starpu_sched_heteroprio_policy,
	&_starpu_sched_graph_test_policy,
	NULL
};

struct starpu_sched_policy **starpu_sched_get_predefined_policies()
{
	return predefined_policies;
}
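
/* A policy from this table is normally selected by name. A minimal sketch of
 * doing so from application code (assumes a standard starpu_init() sequence;
 * "dmda" is one of the names registered above):
 *
 *   struct starpu_conf conf;
 *   starpu_conf_init(&conf);
 *   conf.sched_policy_name = "dmda";
 *   if (starpu_init(&conf) != 0)
 *       return 1;
 *
 * Setting the STARPU_SCHED environment variable achieves the same effect
 * without recompiling.
 */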

struct starpu_sched_policy *_starpu_get_sched_policy(struct _starpu_sched_ctx *sched_ctx)
{
	return sched_ctx->sched_policy;
}

/*
 * Methods to initialize the scheduling policy
 */

static void load_sched_policy(struct starpu_sched_policy *sched_policy, struct _starpu_sched_ctx *sched_ctx)
{
	STARPU_ASSERT(sched_policy);

#ifdef STARPU_VERBOSE
	if (sched_policy->policy_name)
	{
		if (sched_policy->policy_description)
			_STARPU_DEBUG("Use %s scheduler (%s)\n", sched_policy->policy_name, sched_policy->policy_description);
		else
			_STARPU_DEBUG("Use %s scheduler \n", sched_policy->policy_name);
	}
#endif
	struct starpu_sched_policy *policy = sched_ctx->sched_policy;
	memcpy(policy, sched_policy, sizeof(*policy));
}

static struct starpu_sched_policy *find_sched_policy_from_name(const char *policy_name)
{
	if (!policy_name)
		return NULL;

	if (strcmp(policy_name, "") == 0)
		return NULL;

	if (strncmp(policy_name, "heft", 4) == 0)
	{
		_STARPU_MSG("Warning: heft is now called \"dmda\".\n");
		return &_starpu_sched_dmda_policy;
	}

	struct starpu_sched_policy **policy;
	for (policy = predefined_policies; *policy != NULL; policy++)
	{
		struct starpu_sched_policy *p = *policy;
		if (p->policy_name)
		{
			if (strcmp(policy_name, p->policy_name) == 0)
			{
				/* we found a policy with the requested name */
				return p;
			}
		}
	}
	if (strcmp(policy_name, "help") != 0)
		_STARPU_MSG("Warning: scheduling policy '%s' was not found, try 'help' to get a list\n", policy_name);

	/* nothing was found */
	return NULL;
}

static void display_sched_help_message(FILE *stream)
{
	const char *sched_env = starpu_getenv("STARPU_SCHED");
	if (sched_env && (strcmp(sched_env, "help") == 0))
	{
		/* display the description of all predefined policies */
		struct starpu_sched_policy **policy;
		fprintf(stream, "\nThe variable STARPU_SCHED can be set to one of the following strings:\n");
		for (policy = predefined_policies; *policy != NULL; policy++)
		{
			struct starpu_sched_policy *p = *policy;
			fprintf(stream, "%-30s\t-> %s\n", p->policy_name, p->policy_description);
		}
		fprintf(stream, "\n");
	}
}
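
/* Illustrative shell usage of the help message above:
 *
 *   STARPU_SCHED=help ./my_app
 *
 * prints one "name -> description" line per entry of predefined_policies.
 */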

struct starpu_sched_policy *_starpu_select_sched_policy(struct _starpu_machine_config *config, const char *required_policy)
{
	struct starpu_sched_policy *selected_policy = NULL;
	struct starpu_conf *user_conf = &config->conf;

	if (required_policy)
		selected_policy = find_sched_policy_from_name(required_policy);

	/* If there is a policy that matches the required name, return it */
	if (selected_policy)
		return selected_policy;

	/* First, we check whether the application explicitly provided a scheduling policy */
	if (user_conf && (user_conf->sched_policy))
		return user_conf->sched_policy;

	/* Otherwise, we check whether the application specified the name of a policy to load */
	const char *sched_pol_name;
	sched_pol_name = starpu_getenv("STARPU_SCHED");
	if (sched_pol_name == NULL && user_conf && user_conf->sched_policy_name)
		sched_pol_name = user_conf->sched_policy_name;
	if (sched_pol_name)
		selected_policy = find_sched_policy_from_name(sched_pol_name);

	/* If there is a policy that matches the name, return it */
	if (selected_policy)
		return selected_policy;

	/* If no policy was specified, we use the lws policy by default */
	return &_starpu_sched_lws_policy;
}
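
/* Selection precedence, summarized: the explicit required_policy argument
 * wins, then a policy structure passed in conf.sched_policy, then a policy
 * name taken from STARPU_SCHED or conf.sched_policy_name, and finally the
 * lws default above. */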

void _starpu_init_sched_policy(struct _starpu_machine_config *config, struct _starpu_sched_ctx *sched_ctx, struct starpu_sched_policy *selected_policy)
{
	/* Perhaps we have to display some help */
	display_sched_help_message(stderr);

	/* Prefetch is activated by default */
	use_prefetch = starpu_get_env_number("STARPU_PREFETCH");
	if (use_prefetch == -1)
		use_prefetch = 1;

	/* Set calibrate flag */
	_starpu_set_calibrate_flag(config->conf.calibrate);

	load_sched_policy(selected_policy, sched_ctx);

	if (starpu_get_env_number_default("STARPU_WORKER_TREE", 0))
	{
#ifdef STARPU_HAVE_HWLOC
		sched_ctx->sched_policy->worker_type = STARPU_WORKER_TREE;
#else
		_STARPU_DISP("STARPU_WORKER_TREE ignored, please rebuild StarPU with hwloc support to enable it.");
#endif
	}
	starpu_sched_ctx_create_worker_collection(sched_ctx->id, sched_ctx->sched_policy->worker_type);

	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->init_sched(sched_ctx->id);
	_STARPU_SCHED_END;
}

void _starpu_deinit_sched_policy(struct _starpu_sched_ctx *sched_ctx)
{
	struct starpu_sched_policy *policy = sched_ctx->sched_policy;
	if (policy->deinit_sched)
	{
		_STARPU_SCHED_BEGIN;
		policy->deinit_sched(sched_ctx->id);
		_STARPU_SCHED_END;
	}
	starpu_sched_ctx_delete_worker_collection(sched_ctx->id);
}
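
/* init_sched and deinit_sched above bracket the lifetime of a policy. A
 * user-defined policy fills the same structure as the predefined ones; a
 * hypothetical minimal sketch (my_init, my_deinit, my_push and my_pop are
 * placeholders to be supplied by the application):
 *
 *   static struct starpu_sched_policy my_policy =
 *   {
 *       .init_sched = my_init,
 *       .deinit_sched = my_deinit,
 *       .push_task = my_push,
 *       .pop_task = my_pop,
 *       .policy_name = "my-policy",
 *       .policy_description = "example user-defined policy",
 *   };
 *
 * which is then handed over through conf.sched_policy before starpu_init().
 */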

void _starpu_sched_task_submit(struct starpu_task *task)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	if (!sched_ctx->sched_policy)
		return;
	if (!sched_ctx->sched_policy->submit_hook)
		return;
	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->submit_hook(task);
	_STARPU_SCHED_END;
}

void _starpu_sched_do_schedule(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (!sched_ctx->sched_policy)
		return;
	if (!sched_ctx->sched_policy->do_schedule)
		return;
	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->do_schedule(sched_ctx_id);
	_STARPU_SCHED_END;
}

static void _starpu_push_task_on_specific_worker_notify_sched(struct starpu_task *task, struct _starpu_worker *worker, int workerid, int perf_workerid)
{
	/* if we push a task on a specific worker, notify all the sched_ctxs the worker belongs to */
	struct _starpu_sched_ctx_list_iterator list_it;

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		struct _starpu_sched_ctx_elt *e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
		if (sched_ctx->sched_policy != NULL && sched_ctx->sched_policy->push_task_notify)
		{
			_STARPU_SCHED_BEGIN;
			sched_ctx->sched_policy->push_task_notify(task, workerid, perf_workerid, sched_ctx->id);
			_STARPU_SCHED_END;
		}
	}
}

/* Enqueue a task into the list of tasks explicitly attached to a worker. In
 * case workerid identifies a combined worker, a task will be enqueued into
 * each worker of the combination. */
static int _starpu_push_task_on_specific_worker(struct starpu_task *task, int workerid)
{
	int nbasic_workers = (int)starpu_worker_get_count();

	/* Is this a basic worker or a combined worker ? */
	int is_basic_worker = (workerid < nbasic_workers);

	unsigned memory_node;
	struct _starpu_worker *worker = NULL;
	struct _starpu_combined_worker *combined_worker = NULL;

	if (is_basic_worker)
	{
		worker = _starpu_get_worker_struct(workerid);
		memory_node = worker->memory_node;
	}
	else
	{
		combined_worker = _starpu_get_combined_worker_struct(workerid);
		memory_node = combined_worker->memory_node;
	}

	if (use_prefetch)
		starpu_prefetch_task_input_on_node(task, memory_node);

	if (is_basic_worker)
		_starpu_push_task_on_specific_worker_notify_sched(task, worker, workerid, workerid);
	else
	{
		/* Notify all workers of the combined worker */
		int worker_size = combined_worker->worker_size;
		int *combined_workerid = combined_worker->combined_workerid;

		int j;
		for (j = 0; j < worker_size; j++)
		{
			int subworkerid = combined_workerid[j];
			_starpu_push_task_on_specific_worker_notify_sched(task, _starpu_get_worker_struct(subworkerid), subworkerid, workerid);
		}
	}

#ifdef STARPU_USE_SC_HYPERVISOR
	starpu_sched_ctx_call_pushed_task_cb(workerid, task->sched_ctx);
#endif //STARPU_USE_SC_HYPERVISOR

	if (is_basic_worker)
	{
		unsigned node = starpu_worker_get_memory_node(workerid);
		if (_starpu_task_uses_multiformat_handles(task))
		{
			unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
			unsigned i;
			for (i = 0; i < nbuffers; i++)
			{
				struct starpu_task *conversion_task;
				starpu_data_handle_t handle;

				handle = STARPU_TASK_GET_HANDLE(task, i);
				if (!_starpu_handle_needs_conversion_task(handle, node))
					continue;

				conversion_task = _starpu_create_conversion_task(handle, node);
				conversion_task->mf_skip = 1;
				conversion_task->execute_on_a_specific_worker = 1;
				conversion_task->workerid = workerid;
				_starpu_task_submit_conversion_task(conversion_task, workerid);
				//_STARPU_DEBUG("Pushing a conversion task\n");
			}

			for (i = 0; i < nbuffers; i++)
			{
				starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
				handle->mf_node = node;
			}
		}
		//	if(task->sched_ctx != _starpu_get_initial_sched_ctx()->id)
		if (task->priority > 0)
			return _starpu_push_local_task(worker, task, 1);
		else
			return _starpu_push_local_task(worker, task, 0);
	}
	else
	{
		/* This is a combined worker so we create task aliases */
		int worker_size = combined_worker->worker_size;
		int *combined_workerid = combined_worker->combined_workerid;

		int ret = 0;

		struct _starpu_job *job = _starpu_get_job_associated_to_task(task);
		job->task_size = worker_size;
		job->combined_workerid = workerid;
		job->active_task_alias_count = 0;

		STARPU_PTHREAD_BARRIER_INIT(&job->before_work_barrier, NULL, worker_size);
		STARPU_PTHREAD_BARRIER_INIT(&job->after_work_barrier, NULL, worker_size);
		job->after_work_busy_barrier = worker_size;

		/* Note: we have to call that early, or else the task may have
		 * disappeared already */
		starpu_push_task_end(task);

		int j;
		for (j = 0; j < worker_size; j++)
		{
			struct starpu_task *alias = starpu_task_dup(task);
			alias->destroy = 1;
			_STARPU_TRACE_JOB_PUSH(alias, alias->priority > 0);

			worker = _starpu_get_worker_struct(combined_workerid[j]);
			ret |= _starpu_push_local_task(worker, alias, 0);
		}

		return ret;
	}
}

/* the generic interface that calls the proper underlying implementation */
int _starpu_push_task(struct _starpu_job *j)
{
	if (j->task->prologue_callback_func)
		j->task->prologue_callback_func(j->task->prologue_callback_arg);

	return _starpu_repush_task(j);
}

int _starpu_repush_task(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	int ret;

	_STARPU_LOG_IN();

	unsigned can_push = _starpu_increment_nready_tasks_of_sched_ctx(task->sched_ctx, task->flops, task);
	task->status = STARPU_TASK_READY;
	STARPU_AYU_ADDTOTASKQUEUE(j->job_id, -1);
	/* if the context does not have any workers, save the task in a temp list */
	if ((task->cl != NULL && task->where != STARPU_NOWHERE) && (!sched_ctx->is_initial_sched))
	{
		/* if no worker in the ctx is able to execute the task, we
		 * consider the ctx empty */
		unsigned nworkers = _starpu_nworkers_able_to_execute_task(task, sched_ctx);

		if (nworkers == 0)
		{
			_starpu_sched_ctx_lock_write(sched_ctx->id);
			starpu_task_list_push_front(&sched_ctx->empty_ctx_tasks, task);
			_starpu_sched_ctx_unlock_write(sched_ctx->id);
#ifdef STARPU_USE_SC_HYPERVISOR
			if (sched_ctx->id != 0 && sched_ctx->perf_counters != NULL
			    && sched_ctx->perf_counters->notify_empty_ctx)
			{
				_STARPU_TRACE_HYPERVISOR_BEGIN();
				sched_ctx->perf_counters->notify_empty_ctx(sched_ctx->id, task);
				_STARPU_TRACE_HYPERVISOR_END();
			}
#endif
			return 0;
		}
	}

	if (!can_push)
		return 0;
	/* in case there is no codelet associated to the task (that's a control
	 * task), we directly execute its callback and enforce the
	 * corresponding dependencies */
	if (task->cl == NULL || task->where == STARPU_NOWHERE)
	{
		if (task->prologue_callback_pop_func)
			task->prologue_callback_pop_func(task->prologue_callback_pop_arg);

		if (task->cl && task->cl->specific_nodes)
		{
			/* Nothing to do, but we are asked to fetch data on some memory nodes */
			_starpu_fetch_nowhere_task_input(j);
		}
		else
		{
			if (task->cl)
				__starpu_push_task_output(j);
			_starpu_handle_job_termination(j);
			_STARPU_LOG_OUT_TAG("handle_job_termination");
		}
		return 0;
	}

	ret = _starpu_push_task_to_workers(task);
	if (ret == -EAGAIN)
		/* pushed to empty context, that's fine */
		ret = 0;
	return ret;
}

int _starpu_push_task_to_workers(struct starpu_task *task)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	unsigned nworkers = 0;

	_STARPU_TRACE_JOB_PUSH(task, task->priority > 0);

	/* if the context still does not have workers, put the task back in its
	 * place in the empty ctx list */
	if (!sched_ctx->is_initial_sched)
	{
		/* if no worker in the ctx is able to execute the task, we
		 * consider the ctx empty */
		nworkers = _starpu_nworkers_able_to_execute_task(task, sched_ctx);

		if (nworkers == 0)
		{
			_starpu_sched_ctx_lock_write(sched_ctx->id);
			starpu_task_list_push_back(&sched_ctx->empty_ctx_tasks, task);
			_starpu_sched_ctx_unlock_write(sched_ctx->id);
#ifdef STARPU_USE_SC_HYPERVISOR
			if (sched_ctx->id != 0 && sched_ctx->perf_counters != NULL
			    && sched_ctx->perf_counters->notify_empty_ctx)
			{
				_STARPU_TRACE_HYPERVISOR_BEGIN();
				sched_ctx->perf_counters->notify_empty_ctx(sched_ctx->id, task);
				_STARPU_TRACE_HYPERVISOR_END();
			}
#endif
			return -EAGAIN;
		}
	}

	_starpu_profiling_set_task_push_start_time(task);

	int ret = 0;
	if (STARPU_UNLIKELY(task->execute_on_a_specific_worker))
	{
		unsigned node = starpu_worker_get_memory_node(task->workerid);
		if (starpu_get_prefetch_flag())
			starpu_prefetch_task_input_on_node(task, node);

		ret = _starpu_push_task_on_specific_worker(task, task->workerid);
	}
	else
	{
		struct _starpu_machine_config *config = _starpu_get_machine_config();

		/* When a task can only be executed on a given arch and we have
		 * only one memory node for that arch, we can systematically
		 * prefetch before the scheduling decision. */
		if (starpu_get_prefetch_flag())
		{
			if (task->where == STARPU_CPU && config->cpus_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->cpus_nodeid);
			else if (task->where == STARPU_CUDA && config->cuda_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->cuda_nodeid);
			else if (task->where == STARPU_OPENCL && config->opencl_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->opencl_nodeid);
			else if (task->where == STARPU_MIC && config->mic_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->mic_nodeid);
			else if (task->where == STARPU_SCC && config->scc_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->scc_nodeid);
		}

		if (!sched_ctx->sched_policy)
		{
			/* Note: we have to call that early, or else the task may have
			 * disappeared already */
			starpu_push_task_end(task);
			if (!sched_ctx->awake_workers)
				ret = _starpu_push_task_on_specific_worker(task, sched_ctx->main_master);
			else
			{
				struct starpu_worker_collection *workers = sched_ctx->workers;

				struct _starpu_job *job = _starpu_get_job_associated_to_task(task);
				job->task_size = workers->nworkers;
				job->combined_workerid = -1; /* this is a ctx, not a combined worker */
				job->active_task_alias_count = 0;

				STARPU_PTHREAD_BARRIER_INIT(&job->before_work_barrier, NULL, workers->nworkers);
				STARPU_PTHREAD_BARRIER_INIT(&job->after_work_barrier, NULL, workers->nworkers);
				job->after_work_busy_barrier = workers->nworkers;

				struct starpu_sched_ctx_iterator it;
				if (workers->init_iterator)
					workers->init_iterator(workers, &it);

				while (workers->has_next(workers, &it))
				{
					unsigned workerid = workers->get_next(workers, &it);
					struct starpu_task *alias;
					if (job->task_size > 1)
					{
						alias = starpu_task_dup(task);
						_STARPU_TRACE_JOB_PUSH(alias, alias->priority > 0);
						alias->destroy = 1;
					}
					else
						alias = task;

					ret |= _starpu_push_task_on_specific_worker(alias, workerid);
				}
			}
		}
		else
		{
			STARPU_ASSERT(sched_ctx->sched_policy->push_task);

			/* check whether there are any workers in the context */
			nworkers = starpu_sched_ctx_get_nworkers(sched_ctx->id);
			if (nworkers == 0)
				ret = -1;
			else
			{
				struct _starpu_worker *worker = _starpu_get_local_worker_key();
				if (worker)
				{
					STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
					_starpu_worker_enter_sched_op(worker);
					STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
				}
				_STARPU_TASK_BREAK_ON(task, push);
				_STARPU_SCHED_BEGIN;
				ret = sched_ctx->sched_policy->push_task(task);
				_STARPU_SCHED_END;
				if (worker)
				{
					STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
					_starpu_worker_leave_sched_op(worker);
					STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
				}
			}
		}

		if (ret == -1)
		{
			_STARPU_MSG("repush task\n");
			_STARPU_TRACE_JOB_POP(task, task->priority > 0);
			ret = _starpu_push_task_to_workers(task);
		}
	}
	/* Note: from here, the task might have been destroyed already! */
	_STARPU_LOG_OUT();
	return ret;
}

/* This is called right after the scheduler has pushed a task to a queue
 * but just before releasing mutexes: we need the task to still be alive!
 */
int starpu_push_task_end(struct starpu_task *task)
{
	_starpu_profiling_set_task_push_end_time(task);
	task->scheduled = 1;
	return 0;
}

/* This is called right after a task has been popped from a queue, while the
 * task is still alive: we trace the pop event here.
 */
int _starpu_pop_task_end(struct starpu_task *task)
{
	if (!task)
		return 0;
	_STARPU_TRACE_JOB_POP(task, task->priority > 0);
	return 0;
}

/*
 * Given a handle that needs to be converted in order to be used on the given
 * node, returns a task that takes care of the conversion.
 */
struct starpu_task *_starpu_create_conversion_task(starpu_data_handle_t handle, unsigned int node)
{
	return _starpu_create_conversion_task_for_arch(handle, starpu_node_get_kind(node));
}

struct starpu_task *_starpu_create_conversion_task_for_arch(starpu_data_handle_t handle, enum starpu_node_kind node_kind)
{
	struct starpu_task *conversion_task;

#if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA) || defined(STARPU_USE_MIC) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID)
	struct starpu_multiformat_interface *format_interface;
#endif

	conversion_task = starpu_task_create();
	conversion_task->name = "conversion_task";
	conversion_task->synchronous = 0;
	STARPU_TASK_SET_HANDLE(conversion_task, handle, 0);

#if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA) || defined(STARPU_USE_MIC) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID)
	/* The node does not really matter here */
	format_interface = (struct starpu_multiformat_interface *) starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
#endif

	_starpu_spin_lock(&handle->header_lock);
	handle->refcnt++;
	handle->busy_count++;
	_starpu_spin_unlock(&handle->header_lock);

	switch (node_kind)
	{
	case STARPU_CPU_RAM:
	case STARPU_SCC_RAM:
	case STARPU_SCC_SHM:
		switch (starpu_node_get_kind(handle->mf_node))
		{
		case STARPU_CPU_RAM:
		case STARPU_SCC_RAM:
		case STARPU_SCC_SHM:
			STARPU_ABORT();
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
		case STARPU_CUDA_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->cuda_to_cpu_cl;
			break;
		}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
		case STARPU_OPENCL_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->opencl_to_cpu_cl;
			break;
		}
#endif
#ifdef STARPU_USE_MIC
		case STARPU_MIC_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->mic_to_cpu_cl;
			break;
		}
#endif
		default:
			_STARPU_ERROR("Oops: %u\n", handle->mf_node);
		}
		break;
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	case STARPU_CUDA_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_cuda_cl;
		break;
	}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	case STARPU_OPENCL_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_opencl_cl;
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_mic_cl;
		break;
	}
#endif
	default:
		STARPU_ABORT();
	}

	STARPU_TASK_SET_MODE(conversion_task, STARPU_RW, 0);
	return conversion_task;
}
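
/* The codelets selected above come from the multiformat ops that the
 * application registered with its data. A hypothetical registration sketch
 * (my_cpu_elem, my_cuda_elem and the two codelets are placeholders; the
 * *_cl fields are the same ones dereferenced in the switch above):
 *
 *   static struct starpu_multiformat_data_interface_ops my_mf_ops =
 *   {
 *       .cpu_elemsize   = sizeof(struct my_cpu_elem),
 *       .cuda_elemsize  = sizeof(struct my_cuda_elem),
 *       .cpu_to_cuda_cl = &my_cpu_to_cuda_cl,
 *       .cuda_to_cpu_cl = &my_cuda_to_cpu_cl,
 *   };
 *
 * starpu_multiformat_data_register() attaches these ops to a handle, and
 * handle->ops->get_mf_ops() retrieves them here when a conversion is needed.
 */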

static struct _starpu_sched_ctx *_get_next_sched_ctx_to_pop_into(struct _starpu_worker *worker)
{
	struct _starpu_sched_ctx_elt *e = NULL;
	struct _starpu_sched_ctx_list_iterator list_it;
	int found = 0;

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		if (e->task_number > 0)
			return _starpu_get_sched_ctx_struct(e->sched_ctx);
	}

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		if (e->last_poped)
		{
			e->last_poped = 0;
			if (_starpu_sched_ctx_list_iterator_has_next(&list_it))
			{
				e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
				found = 1;
			}
			break;
		}
	}
	if (!found)
		e = worker->sched_ctx_list->head;
	e->last_poped = 1;

	return _starpu_get_sched_ctx_struct(e->sched_ctx);
}

struct starpu_task *_starpu_pop_task(struct _starpu_worker *worker)
{
	struct starpu_task *task;
	int worker_id;
	unsigned node;

	/* We can't tell in advance which task will be picked up, so we measure
	 * a timestamp, and will attribute it afterwards to the task. */
	int profiling = starpu_profiling_status_get();
	struct timespec pop_start_time;
	if (profiling)
		_starpu_clock_gettime(&pop_start_time);

pick:
	/* perhaps there is some local task to be executed first */
	task = _starpu_pop_local_task(worker);
	if (task)
		_STARPU_TASK_BREAK_ON(task, pop);

	/* get tasks from the stacks of the strategy */
	if (!task)
	{
		struct _starpu_sched_ctx *sched_ctx;
#ifndef STARPU_NON_BLOCKING_DRIVERS
		int been_here[STARPU_NMAX_SCHED_CTXS];
		int i;
		for (i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
			been_here[i] = 0;

		while (!task)
#endif
		{
			if (worker->nsched_ctxs == 1)
				sched_ctx = _starpu_get_initial_sched_ctx();
			else
			{
				while (1)
				{
					/** Caution:
					 * if you use multiple contexts, your scheduler *needs*
					 * to update the task_number variable of the ctx list
					 * in order to get the best performance.
					 * This is done with the functions
					 *   starpu_sched_ctx_list_task_counters_increment...(...)
					 *   starpu_sched_ctx_list_task_counters_decrement...(...)
					 **/
					sched_ctx = _get_next_sched_ctx_to_pop_into(worker);

					if (worker->removed_from_ctx[sched_ctx->id] == 1 && worker->shares_tasks_lists[sched_ctx->id] == 1)
					{
						_starpu_worker_gets_out_of_ctx(sched_ctx->id, worker);
						worker->removed_from_ctx[sched_ctx->id] = 0;
						sched_ctx = NULL;
					}
					else
						break;
				}
			}

			if (sched_ctx && sched_ctx->id != STARPU_NMAX_SCHED_CTXS)
			{
				if (sched_ctx->sched_policy && sched_ctx->sched_policy->pop_task)
				{
					/* Note: we do not push the scheduling state here, because
					 * otherwise when a worker is idle, we'd keep
					 * pushing/popping a scheduling state here, while what we
					 * want to see in the trace is a permanent idle state. */
					task = sched_ctx->sched_policy->pop_task(sched_ctx->id);
					if (task)
						_STARPU_TASK_BREAK_ON(task, pop);
					_starpu_pop_task_end(task);
				}
			}

			if (!task)
			{
				/* It doesn't matter whether the worker shares a tasks list
				 * with the scheduler or not: if it does not have any task to
				 * pop, it just gets out of here. */
				/* However, if it shares a task list, it will only be removed
				 * once it finishes this job (in handle_job_termination). */
				if (worker->removed_from_ctx[sched_ctx->id])
				{
					_starpu_worker_gets_out_of_ctx(sched_ctx->id, worker);
					worker->removed_from_ctx[sched_ctx->id] = 0;
				}
#ifdef STARPU_USE_SC_HYPERVISOR
				if (worker->pop_ctx_priority)
				{
					struct starpu_sched_ctx_performance_counters *perf_counters = sched_ctx->perf_counters;
					if (sched_ctx->id != 0 && perf_counters != NULL && perf_counters->notify_idle_cycle && _starpu_sched_ctx_allow_hypervisor(sched_ctx->id))
					{
						//	_STARPU_TRACE_HYPERVISOR_BEGIN();
						perf_counters->notify_idle_cycle(sched_ctx->id, worker->workerid, 1.0);
						//	_STARPU_TRACE_HYPERVISOR_END();
					}
				}
#endif //STARPU_USE_SC_HYPERVISOR
#ifndef STARPU_NON_BLOCKING_DRIVERS
				if (been_here[sched_ctx->id] || worker->nsched_ctxs == 1)
					break;

				been_here[sched_ctx->id] = 1;
#endif
			}
		}
	}

	if (!task)
	{
		if (starpu_idle_file)
			idle_start[worker->workerid] = starpu_timing_now();
		return NULL;
	}

	if (starpu_idle_file && idle_start[worker->workerid] != 0.0)
	{
		double idle_end = starpu_timing_now();
		idle[worker->workerid] += (idle_end - idle_start[worker->workerid]);
		idle_start[worker->workerid] = 0.0;
	}

#ifdef STARPU_USE_SC_HYPERVISOR
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	struct starpu_sched_ctx_performance_counters *perf_counters = sched_ctx->perf_counters;

	if (sched_ctx->id != 0 && perf_counters != NULL && perf_counters->notify_poped_task && _starpu_sched_ctx_allow_hypervisor(sched_ctx->id))
	{
		//	_STARPU_TRACE_HYPERVISOR_BEGIN();
		perf_counters->notify_poped_task(task->sched_ctx, worker->workerid);
		//	_STARPU_TRACE_HYPERVISOR_END();
	}
#endif //STARPU_USE_SC_HYPERVISOR

	/* Make sure we do not bother with all the multiformat-specific code if
	 * it is not necessary. */
	if (!_starpu_task_uses_multiformat_handles(task))
		goto profiling;

	/* This is either a conversion task, or a regular task for which the
	 * conversion tasks have already been created and submitted */
	if (task->mf_skip)
		goto profiling;

	/*
	 * This worker may not be able to execute this task. In this case, we
	 * should return the task anyway. It will be pushed back almost immediately.
	 * This way, we avoid computing and executing the conversion tasks.
	 * Here, we do not care about what implementation is used.
	 */
	worker_id = starpu_worker_get_id_check();
	if (!starpu_worker_can_execute_task_first_impl(worker_id, task, NULL))
		return task;

	node = starpu_worker_get_memory_node(worker_id);

	/*
	 * We do have a task that uses multiformat handles. Let's create the
	 * required conversion tasks.
	 */
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i = 0; i < nbuffers; i++)
	{
		struct starpu_task *conversion_task;
		starpu_data_handle_t handle;

		handle = STARPU_TASK_GET_HANDLE(task, i);
		if (!_starpu_handle_needs_conversion_task(handle, node))
			continue;
		conversion_task = _starpu_create_conversion_task(handle, node);
		conversion_task->mf_skip = 1;
		conversion_task->execute_on_a_specific_worker = 1;
		conversion_task->workerid = worker_id;
		/*
		 * Next tasks will need to know where these handles have gone.
		 */
		handle->mf_node = node;
		_starpu_task_submit_conversion_task(conversion_task, worker_id);
	}

	task->mf_skip = 1;
	starpu_task_list_push_back(&worker->local_tasks, task);
	goto pick;

profiling:
	if (profiling)
	{
		struct starpu_profiling_task_info *profiling_info;
		profiling_info = task->profiling_info;

		/* The task may have been created before profiling was enabled,
		 * so we check if the profiling_info structure is available
		 * even though we already tested if profiling is enabled. */
		if (profiling_info)
		{
			memcpy(&profiling_info->pop_start_time, &pop_start_time, sizeof(struct timespec));
			_starpu_clock_gettime(&profiling_info->pop_end_time);
		}
	}

	if (task->prologue_callback_pop_func)
	{
		_starpu_set_current_task(task);
		task->prologue_callback_pop_func(task->prologue_callback_pop_arg);
		_starpu_set_current_task(NULL);
	}

	return task;
}

struct starpu_task *_starpu_pop_every_task(struct _starpu_sched_ctx *sched_ctx)
{
	struct starpu_task *task = NULL;
	if (sched_ctx->sched_policy)
	{
		STARPU_ASSERT(sched_ctx->sched_policy->pop_every_task);

		/* TODO set profiling info */
		if (sched_ctx->sched_policy->pop_every_task)
		{
			_STARPU_SCHED_BEGIN;
			task = sched_ctx->sched_policy->pop_every_task(sched_ctx->id);
			_STARPU_SCHED_END;
		}
	}
	return task;
}

void _starpu_sched_pre_exec_hook(struct starpu_task *task)
{
	unsigned sched_ctx_id = starpu_sched_ctx_get_ctx_for_task(task);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (sched_ctx->sched_policy && sched_ctx->sched_policy->pre_exec_hook)
	{
		_STARPU_SCHED_BEGIN;
		sched_ctx->sched_policy->pre_exec_hook(task, sched_ctx_id);
		_STARPU_SCHED_END;
	}

	if (!sched_ctx->sched_policy)
	{
		int workerid = starpu_worker_get_id();
		struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
		struct _starpu_sched_ctx_list_iterator list_it;

		_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
		while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
		{
			struct _starpu_sched_ctx *other_sched_ctx;
			struct _starpu_sched_ctx_elt *e = NULL;

			e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
			other_sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
			if (other_sched_ctx != sched_ctx &&
			    other_sched_ctx->sched_policy != NULL &&
			    other_sched_ctx->sched_policy->pre_exec_hook)
			{
				_STARPU_SCHED_BEGIN;
				other_sched_ctx->sched_policy->pre_exec_hook(task, other_sched_ctx->id);
				_STARPU_SCHED_END;
			}
		}
	}
}

void _starpu_sched_post_exec_hook(struct starpu_task *task)
{
	STARPU_ASSERT(task->cl != NULL && task->cl->where != STARPU_NOWHERE);
	unsigned sched_ctx_id = starpu_sched_ctx_get_ctx_for_task(task);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (sched_ctx->sched_policy && sched_ctx->sched_policy->post_exec_hook)
	{
		_STARPU_SCHED_BEGIN;
		sched_ctx->sched_policy->post_exec_hook(task, sched_ctx_id);
		_STARPU_SCHED_END;
	}
	if (!sched_ctx->sched_policy)
	{
		int workerid = starpu_worker_get_id();
		struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
		struct _starpu_sched_ctx_list_iterator list_it;

		_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
		while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
		{
			struct _starpu_sched_ctx *other_sched_ctx;
			struct _starpu_sched_ctx_elt *e = NULL;

			e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
			other_sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
			if (other_sched_ctx != sched_ctx &&
			    other_sched_ctx->sched_policy != NULL &&
			    other_sched_ctx->sched_policy->post_exec_hook)
			{
				_STARPU_SCHED_BEGIN;
				other_sched_ctx->sched_policy->post_exec_hook(task, other_sched_ctx->id);
				_STARPU_SCHED_END;
			}
		}
	}
}

void _starpu_wait_on_sched_event(void)
{
	struct _starpu_worker *worker = _starpu_get_local_worker_key();

	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
	_starpu_handle_all_pending_node_data_requests(worker->memory_node);
	if (_starpu_machine_is_running())
	{
#ifndef STARPU_NON_BLOCKING_DRIVERS
		STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
#endif
	}
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
}

/* The scheduling policy may put tasks directly into a worker's local queue so
 * that it is not always necessary to create its own queue when the local queue
 * is sufficient. If "prio" is non-zero, the task is put on the side of the
 * queue the worker pops from first, so it will be picked up ahead of the
 * already-queued tasks. Passing 0 therefore ensures a FIFO ordering. */
int starpu_push_local_task(int workerid, struct starpu_task *task, int prio)
{
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);

	return _starpu_push_local_task(worker, task, prio);
}
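
/* Illustrative call from a policy's push_task hook (worker 0 is an arbitrary
 * choice for the example):
 *
 *   starpu_push_local_task(0, task, task->priority > 0);
 *
 * This bypasses the policy's own queues and hands the task directly to the
 * worker's local list.
 */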

void _starpu_print_idle_time()
{
	if (!starpu_idle_file)
		return;
	double all_idle = 0.0;
	int i = 0;
	for (i = 0; i < STARPU_NMAXWORKERS; i++)
		all_idle += idle[i];

	FILE *f;
	f = fopen(starpu_idle_file, "a");
	if (!f)
	{
		_STARPU_MSG("couldn't open %s: %s\n", starpu_idle_file, strerror(errno));
	}
	else
	{
		fprintf(f, "%lf \n", all_idle);
		fclose(f);
	}
}

void starpu_sched_task_break(struct starpu_task *task)
{
	_STARPU_TASK_BREAK_ON(task, sched);
}