sched_policy.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2008-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2013 Simon Archipoff
 * Copyright (C) 2013 Thibaut Lambert
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/sched_policy.h>
#include <profiling/profiling.h>
#include <datawizard/memory_nodes.h>
#include <common/barrier.h>
#include <core/debug.h>
#include <core/task.h>

static int use_prefetch = 0;
static double idle[STARPU_NMAXWORKERS];
static double idle_start[STARPU_NMAXWORKERS];

long _starpu_task_break_on_push = -1;
long _starpu_task_break_on_sched = -1;
long _starpu_task_break_on_pop = -1;
long _starpu_task_break_on_exec = -1;
static const char *starpu_idle_file;

void _starpu_sched_init(void)
{
	_starpu_task_break_on_push = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_PUSH", -1);
	_starpu_task_break_on_sched = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_SCHED", -1);
	_starpu_task_break_on_pop = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_POP", -1);
	_starpu_task_break_on_exec = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_EXEC", -1);
	starpu_idle_file = starpu_getenv("STARPU_IDLE_FILE");
}
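
/* Debugging sketch (an assumption about typical use, not code from this
 * file): run the application under a debugger with one of the variables
 * above set to a job id, e.g.
 *
 *   STARPU_TASK_BREAK_ON_SCHED=42 gdb --args ./my_app
 *
 * and the corresponding _STARPU_TASK_BREAK_ON() check below stops execution
 * when the job with id 42 reaches that scheduling point
 * (push/sched/pop/exec). */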
int starpu_get_prefetch_flag(void)
{
	return use_prefetch;
}

static struct starpu_sched_policy *predefined_policies[] =
{
	&_starpu_sched_modular_eager_policy,
	&_starpu_sched_modular_eager_prefetching_policy,
	&_starpu_sched_modular_eager_prio_policy,
	&_starpu_sched_modular_gemm_policy,
	&_starpu_sched_modular_prio_policy,
	&_starpu_sched_modular_prio_prefetching_policy,
	&_starpu_sched_modular_random_policy,
	&_starpu_sched_modular_random_prio_policy,
	&_starpu_sched_modular_random_prefetching_policy,
	&_starpu_sched_modular_random_prio_prefetching_policy,
	&_starpu_sched_modular_parallel_random_policy,
	&_starpu_sched_modular_parallel_random_prio_policy,
	&_starpu_sched_modular_ws_policy,
	&_starpu_sched_modular_heft_policy,
	&_starpu_sched_modular_heft_prio_policy,
	&_starpu_sched_modular_heft2_policy,
	&_starpu_sched_modular_heteroprio_policy,
	&_starpu_sched_modular_heteroprio_heft_policy,
	&_starpu_sched_modular_parallel_heft_policy,
	&_starpu_sched_eager_policy,
	&_starpu_sched_prio_policy,
	&_starpu_sched_random_policy,
	&_starpu_sched_lws_policy,
	&_starpu_sched_ws_policy,
	&_starpu_sched_dm_policy,
	&_starpu_sched_dmda_policy,
	&_starpu_sched_dmda_prio_policy,
	&_starpu_sched_dmda_ready_policy,
	&_starpu_sched_dmda_sorted_policy,
	&_starpu_sched_dmda_sorted_decision_policy,
	&_starpu_sched_parallel_heft_policy,
	&_starpu_sched_peager_policy,
	&_starpu_sched_heteroprio_policy,
	&_starpu_sched_graph_test_policy,
#ifdef STARPU_HAVE_HWLOC
	//&_starpu_sched_tree_heft_hierarchical_policy,
#endif
	NULL
};

struct starpu_sched_policy **starpu_sched_get_predefined_policies()
{
	return predefined_policies;
}
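
/* Example (a sketch, not used by StarPU itself): callers can walk the
 * NULL-terminated array returned above to enumerate the built-in policies,
 * e.g. to print them (assuming <stdio.h>):
 *
 *   struct starpu_sched_policy **p;
 *   for (p = starpu_sched_get_predefined_policies(); *p != NULL; p++)
 *       printf("%s: %s\n", (*p)->policy_name, (*p)->policy_description);
 */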
struct starpu_sched_policy *_starpu_get_sched_policy(struct _starpu_sched_ctx *sched_ctx)
{
	return sched_ctx->sched_policy;
}

/*
 * Methods to initialize the scheduling policy
 */

static void load_sched_policy(struct starpu_sched_policy *sched_policy, struct _starpu_sched_ctx *sched_ctx)
{
	STARPU_ASSERT(sched_policy);

#ifdef STARPU_VERBOSE
	if (sched_policy->policy_name)
	{
		if (sched_policy->policy_description)
			_STARPU_DEBUG("Use %s scheduler (%s)\n", sched_policy->policy_name, sched_policy->policy_description);
		else
			_STARPU_DEBUG("Use %s scheduler\n", sched_policy->policy_name);
	}
#endif

	struct starpu_sched_policy *policy = sched_ctx->sched_policy;
	memcpy(policy, sched_policy, sizeof(*policy));
}

static struct starpu_sched_policy *find_sched_policy_from_name(const char *policy_name)
{
	if (!policy_name)
		return NULL;
	if (strcmp(policy_name, "") == 0)
		return NULL;

	if (strncmp(policy_name, "heft", 4) == 0)
	{
		_STARPU_MSG("Warning: heft is now called \"dmda\".\n");
		return &_starpu_sched_dmda_policy;
	}

	struct starpu_sched_policy **policy;
	for (policy = predefined_policies; *policy != NULL; policy++)
	{
		struct starpu_sched_policy *p = *policy;
		if (p->policy_name)
		{
			if (strcmp(policy_name, p->policy_name) == 0)
			{
				/* we found a policy with the requested name */
				return p;
			}
		}
	}
	if (strcmp(policy_name, "help") != 0)
		_STARPU_MSG("Warning: scheduling policy '%s' was not found, try 'help' to get a list\n", policy_name);

	/* nothing was found */
	return NULL;
}
static void display_sched_help_message(FILE *stream)
{
	const char *sched_env = starpu_getenv("STARPU_SCHED");
	if (sched_env && (strcmp(sched_env, "help") == 0))
	{
		/* display the description of all predefined policies */
		struct starpu_sched_policy **policy;
		fprintf(stream, "\nThe variable STARPU_SCHED can be set to one of the following strings:\n");
		for (policy = predefined_policies; *policy != NULL; policy++)
		{
			struct starpu_sched_policy *p = *policy;
			fprintf(stream, "%-30s\t-> %s\n", p->policy_name, p->policy_description);
		}
		fprintf(stream, "\n");
	}
}
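
/* Select the scheduling policy for a context, in decreasing order of
 * priority: the name passed as required_policy, then the policy structure
 * given in the user's starpu_conf, then the name in the STARPU_SCHED
 * environment variable, then the policy name in starpu_conf, and finally
 * the lws policy as the default. */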
struct starpu_sched_policy *_starpu_select_sched_policy(struct _starpu_machine_config *config, const char *required_policy)
{
	struct starpu_sched_policy *selected_policy = NULL;
	struct starpu_conf *user_conf = &config->conf;

	if (required_policy)
		selected_policy = find_sched_policy_from_name(required_policy);

	/* If there is a policy that matches the required name, return it */
	if (selected_policy)
		return selected_policy;

	/* First, we check whether the application explicitly gave a scheduling policy or not */
	if (user_conf && (user_conf->sched_policy))
		return user_conf->sched_policy;

	/* Otherwise, we check whether the application specified the name of a policy to load */
	const char *sched_pol_name;
	sched_pol_name = starpu_getenv("STARPU_SCHED");
	if (sched_pol_name == NULL && user_conf && user_conf->sched_policy_name)
		sched_pol_name = user_conf->sched_policy_name;
	if (sched_pol_name)
		selected_policy = find_sched_policy_from_name(sched_pol_name);

	/* If there is a policy that matches the name, return it */
	if (selected_policy)
		return selected_policy;

	/* If no policy was specified, we use the lws policy by default */
	return &_starpu_sched_lws_policy;
}
void _starpu_init_sched_policy(struct _starpu_machine_config *config, struct _starpu_sched_ctx *sched_ctx, struct starpu_sched_policy *selected_policy)
{
	/* Perhaps we have to display some help */
	display_sched_help_message(stderr);

	/* Prefetch is activated by default */
	use_prefetch = starpu_get_env_number("STARPU_PREFETCH");
	if (use_prefetch == -1)
		use_prefetch = 1;

	/* Set calibrate flag */
	_starpu_set_calibrate_flag(config->conf.calibrate);

	load_sched_policy(selected_policy, sched_ctx);

	if (starpu_get_env_number_default("STARPU_WORKER_TREE", 0))
	{
#ifdef STARPU_HAVE_HWLOC
		sched_ctx->sched_policy->worker_type = STARPU_WORKER_TREE;
#else
		_STARPU_DISP("STARPU_WORKER_TREE ignored, please rebuild StarPU with hwloc support to enable it.");
#endif
	}
	starpu_sched_ctx_create_worker_collection(sched_ctx->id,
						  sched_ctx->sched_policy->worker_type);

	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->init_sched(sched_ctx->id);
	_STARPU_SCHED_END;
}

void _starpu_deinit_sched_policy(struct _starpu_sched_ctx *sched_ctx)
{
	struct starpu_sched_policy *policy = sched_ctx->sched_policy;
	if (policy->deinit_sched)
	{
		_STARPU_SCHED_BEGIN;
		policy->deinit_sched(sched_ctx->id);
		_STARPU_SCHED_END;
	}
	starpu_sched_ctx_delete_worker_collection(sched_ctx->id);
}
void _starpu_sched_task_submit(struct starpu_task *task)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	if (!sched_ctx->sched_policy)
		return;
	if (!sched_ctx->sched_policy->submit_hook)
		return;
	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->submit_hook(task);
	_STARPU_SCHED_END;
}

void _starpu_sched_do_schedule(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (!sched_ctx->sched_policy)
		return;
	if (!sched_ctx->sched_policy->do_schedule)
		return;
	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->do_schedule(sched_ctx_id);
	_STARPU_SCHED_END;
}
static void _starpu_push_task_on_specific_worker_notify_sched(struct starpu_task *task, struct _starpu_worker *worker, int workerid, int perf_workerid)
{
	/* if we push a task on a specific worker, notify all the sched_ctxs the worker belongs to */
	struct _starpu_sched_ctx_list_iterator list_it;

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		struct _starpu_sched_ctx_elt *e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
		if (sched_ctx->sched_policy != NULL && sched_ctx->sched_policy->push_task_notify)
		{
			_STARPU_SCHED_BEGIN;
			sched_ctx->sched_policy->push_task_notify(task, workerid, perf_workerid, sched_ctx->id);
			_STARPU_SCHED_END;
		}
	}
}
/* Enqueue a task into the list of tasks explicitly attached to a worker. In
 * case workerid identifies a combined worker, a task alias will be enqueued
 * into each worker of the combination. */
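/* User-side sketch (assumed usage, relying on the public task fields that
 * _starpu_push_task_to_workers() below checks before calling this function):
 *
 *   struct starpu_task *t = starpu_task_create();
 *   t->cl = &my_codelet;                 // hypothetical codelet
 *   t->execute_on_a_specific_worker = 1;
 *   t->workerid = 3;                     // basic or combined worker id
 *   starpu_task_submit(t);
 *
 * Such a task bypasses the policy's push_task() and lands here instead. */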
static int _starpu_push_task_on_specific_worker(struct starpu_task *task, int workerid)
{
	int nbasic_workers = (int)starpu_worker_get_count();

	/* Is this a basic worker or a combined worker? */
	int is_basic_worker = (workerid < nbasic_workers);

	struct _starpu_worker *worker = NULL;
	struct _starpu_combined_worker *combined_worker = NULL;

	if (is_basic_worker)
	{
		worker = _starpu_get_worker_struct(workerid);
	}
	else
	{
		combined_worker = _starpu_get_combined_worker_struct(workerid);
	}

	if (use_prefetch)
		starpu_prefetch_task_input_for(task, workerid);

	if (is_basic_worker)
		_starpu_push_task_on_specific_worker_notify_sched(task, worker, workerid, workerid);
	else
	{
		/* Notify all workers of the combined worker */
		int worker_size = combined_worker->worker_size;
		int *combined_workerid = combined_worker->combined_workerid;

		int j;
		for (j = 0; j < worker_size; j++)
		{
			int subworkerid = combined_workerid[j];
			_starpu_push_task_on_specific_worker_notify_sched(task, _starpu_get_worker_struct(subworkerid), subworkerid, workerid);
		}
	}

#ifdef STARPU_USE_SC_HYPERVISOR
	starpu_sched_ctx_call_pushed_task_cb(workerid, task->sched_ctx);
#endif //STARPU_USE_SC_HYPERVISOR

	if (is_basic_worker)
	{
		unsigned node = starpu_worker_get_memory_node(workerid);
		if (_starpu_task_uses_multiformat_handles(task))
		{
			unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
			unsigned i;
			for (i = 0; i < nbuffers; i++)
			{
				struct starpu_task *conversion_task;
				starpu_data_handle_t handle;

				handle = STARPU_TASK_GET_HANDLE(task, i);
				if (!_starpu_handle_needs_conversion_task(handle, node))
					continue;

				conversion_task = _starpu_create_conversion_task(handle, node);
				conversion_task->mf_skip = 1;
				conversion_task->execute_on_a_specific_worker = 1;
				conversion_task->workerid = workerid;
				_starpu_task_submit_conversion_task(conversion_task, workerid);
				//_STARPU_DEBUG("Pushing a conversion task\n");
			}

			for (i = 0; i < nbuffers; i++)
			{
				starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
				handle->mf_node = node;
			}
		}
		//if (task->sched_ctx != _starpu_get_initial_sched_ctx()->id)
		return _starpu_push_local_task(worker, task);
	}
	else
	{
		/* This is a combined worker so we create task aliases */
		int worker_size = combined_worker->worker_size;
		int *combined_workerid = combined_worker->combined_workerid;

		int ret = 0;

		struct _starpu_job *job = _starpu_get_job_associated_to_task(task);
		job->task_size = worker_size;
		job->combined_workerid = workerid;
		job->active_task_alias_count = 0;

		STARPU_PTHREAD_BARRIER_INIT(&job->before_work_barrier, NULL, worker_size);
		STARPU_PTHREAD_BARRIER_INIT(&job->after_work_barrier, NULL, worker_size);
		job->after_work_busy_barrier = worker_size;

		/* Note: we have to call that early, or else the task may have
		 * disappeared already */
		starpu_push_task_end(task);

		int j;
		for (j = 0; j < worker_size; j++)
		{
			struct starpu_task *alias = starpu_task_dup(task);
			alias->destroy = 1;
			_STARPU_TRACE_JOB_PUSH(alias, alias->priority);

			worker = _starpu_get_worker_struct(combined_workerid[j]);
			ret |= _starpu_push_local_task(worker, alias);
		}

		return ret;
	}
}
/* the generic interface that calls the proper underlying implementation */
int _starpu_push_task(struct _starpu_job *j)
{
#ifdef STARPU_SIMGRID
	//if (_starpu_simgrid_task_push_cost())
	starpu_sleep(0.000001);
#endif
	if (j->task->prologue_callback_func)
	{
		_starpu_set_current_task(j->task);
		j->task->prologue_callback_func(j->task->prologue_callback_arg);
		_starpu_set_current_task(NULL);
	}

	return _starpu_repush_task(j);
}
int _starpu_repush_task(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	int ret;

	_STARPU_LOG_IN();

	unsigned can_push = _starpu_increment_nready_tasks_of_sched_ctx(task->sched_ctx, task->flops, task);

	STARPU_ASSERT(task->status == STARPU_TASK_BLOCKED || task->status == STARPU_TASK_BLOCKED_ON_TAG || task->status == STARPU_TASK_BLOCKED_ON_TASK || task->status == STARPU_TASK_BLOCKED_ON_DATA);
	task->status = STARPU_TASK_READY;

	const unsigned continuation =
#ifdef STARPU_OPENMP
		j->continuation
#else
		0
#endif
		;

	if (!_starpu_perf_counter_paused() && !j->internal && !continuation)
	{
		(void) STARPU_ATOMIC_ADD64(&_starpu_task__g_current_submitted__value, -1);
		int64_t value = STARPU_ATOMIC_ADD64(&_starpu_task__g_current_ready__value, 1);
		_starpu_perf_counter_update_max_int64(&_starpu_task__g_peak_ready__value, value);

		if (task->cl && task->cl->perf_counter_values)
		{
			struct starpu_perf_counter_sample_cl_values * const pcv = task->cl->perf_counter_values;

			(void)STARPU_ATOMIC_ADD64(&pcv->task.current_submitted, -1);
			value = STARPU_ATOMIC_ADD64(&pcv->task.current_ready, 1);
			_starpu_perf_counter_update_max_int64(&pcv->task.peak_ready, value);
		}
	}

	STARPU_AYU_ADDTOTASKQUEUE(j->job_id, -1);

	/* if the context does not have any workers, save the task in a temp list */
	if ((task->cl != NULL && task->where != STARPU_NOWHERE) && (!sched_ctx->is_initial_sched))
	{
		/* if no worker of the ctx is able to execute the task, we
		 * consider the ctx empty */
		unsigned able = _starpu_workers_able_to_execute_task(task, sched_ctx);

		if (!able)
		{
			_starpu_sched_ctx_lock_write(sched_ctx->id);
			starpu_task_list_push_front(&sched_ctx->empty_ctx_tasks, task);
			_starpu_sched_ctx_unlock_write(sched_ctx->id);
#ifdef STARPU_USE_SC_HYPERVISOR
			if (sched_ctx->id != 0 && sched_ctx->perf_counters != NULL
			    && sched_ctx->perf_counters->notify_empty_ctx)
			{
				_STARPU_TRACE_HYPERVISOR_BEGIN();
				sched_ctx->perf_counters->notify_empty_ctx(sched_ctx->id, task);
				_STARPU_TRACE_HYPERVISOR_END();
			}
#endif
			return 0;
		}
	}

	if (!can_push)
		return 0;

	/* in case there is no codelet associated to the task (that's a control
	 * task), we directly execute its callback and enforce the
	 * corresponding dependencies */
	if (task->cl == NULL || task->where == STARPU_NOWHERE)
	{
		if (!_starpu_perf_counter_paused() && !j->internal)
		{
			(void)STARPU_ATOMIC_ADD64(&_starpu_task__g_current_ready__value, -1);
			if (task->cl && task->cl->perf_counter_values)
			{
				struct starpu_perf_counter_sample_cl_values * const pcv = task->cl->perf_counter_values;
				(void)STARPU_ATOMIC_ADD64(&pcv->task.current_ready, -1);
			}
		}
		task->status = STARPU_TASK_RUNNING;

		if (task->prologue_callback_pop_func)
		{
			_starpu_set_current_task(task);
			task->prologue_callback_pop_func(task->prologue_callback_pop_arg);
			_starpu_set_current_task(NULL);
		}

		if (task->cl && task->cl->specific_nodes)
		{
			/* Nothing to do, but we are asked to fetch data on some memory nodes */
			_starpu_fetch_nowhere_task_input(j);
		}
		else
		{
			if (task->cl)
				__starpu_push_task_output(j);
			_starpu_handle_job_termination(j);
			_STARPU_LOG_OUT_TAG("handle_job_termination");
		}
		return 0;
	}

	ret = _starpu_push_task_to_workers(task);
	if (ret == -EAGAIN)
		/* pushed to empty context, that's fine */
		ret = 0;
	return ret;
}
int _starpu_push_task_to_workers(struct starpu_task *task)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);

	_STARPU_TRACE_JOB_PUSH(task, task->priority);

	/* if the context still does not have workers, put the task back in
	 * its place in the empty ctx list */
	if (!sched_ctx->is_initial_sched)
	{
		/* if no worker of the ctx is able to execute the task, we
		 * consider the ctx empty */
		unsigned able = _starpu_workers_able_to_execute_task(task, sched_ctx);

		if (!able)
		{
			_starpu_sched_ctx_lock_write(sched_ctx->id);
			starpu_task_list_push_back(&sched_ctx->empty_ctx_tasks, task);
			_starpu_sched_ctx_unlock_write(sched_ctx->id);
#ifdef STARPU_USE_SC_HYPERVISOR
			if (sched_ctx->id != 0 && sched_ctx->perf_counters != NULL
			    && sched_ctx->perf_counters->notify_empty_ctx)
			{
				_STARPU_TRACE_HYPERVISOR_BEGIN();
				sched_ctx->perf_counters->notify_empty_ctx(sched_ctx->id, task);
				_STARPU_TRACE_HYPERVISOR_END();
			}
#endif
			return -EAGAIN;
		}
	}

	_starpu_profiling_set_task_push_start_time(task);

	int ret = 0;
	if (STARPU_UNLIKELY(task->execute_on_a_specific_worker))
	{
		ret = _starpu_push_task_on_specific_worker(task, task->workerid);
	}
	else
	{
		struct _starpu_machine_config *config = _starpu_get_machine_config();

		if (!sched_ctx->sched_policy)
		{
			/* Note: we have to call that early, or else the task may have
			 * disappeared already */
			starpu_push_task_end(task);
			if (!sched_ctx->awake_workers)
				ret = _starpu_push_task_on_specific_worker(task, sched_ctx->main_master);
			else
			{
				struct starpu_worker_collection *workers = sched_ctx->workers;

				struct _starpu_job *job = _starpu_get_job_associated_to_task(task);
				job->task_size = workers->nworkers;
				job->combined_workerid = -1; /* it's a ctx, not a combined worker */
				job->active_task_alias_count = 0;

				STARPU_PTHREAD_BARRIER_INIT(&job->before_work_barrier, NULL, workers->nworkers);
				STARPU_PTHREAD_BARRIER_INIT(&job->after_work_barrier, NULL, workers->nworkers);
				job->after_work_busy_barrier = workers->nworkers;

				struct starpu_sched_ctx_iterator it;
				if (workers->init_iterator)
					workers->init_iterator(workers, &it);

				while (workers->has_next(workers, &it))
				{
					unsigned workerid = workers->get_next(workers, &it);
					struct starpu_task *alias;
					if (job->task_size > 1)
					{
						alias = starpu_task_dup(task);
						_STARPU_TRACE_JOB_PUSH(alias, alias->priority);
						alias->destroy = 1;
					}
					else
						alias = task;

					ret |= _starpu_push_task_on_specific_worker(alias, workerid);
				}
			}
		}
		else
		{
			/* When a task can only be executed on a given arch and we have
			 * only one memory node for that arch, we can systematically
			 * prefetch before the scheduling decision. */
			if (!sched_ctx->sched_policy->prefetches
			    && starpu_get_prefetch_flag()
			    && starpu_memory_nodes_get_count() > 1)
			{
				enum starpu_worker_archtype type;
				for (type = 0; type < STARPU_NARCH; type++)
				{
					if (task->where == (int32_t) STARPU_WORKER_TO_MASK(type))
					{
						if (config->arch_nodeid[type] >= 0)
							starpu_prefetch_task_input_on_node(task, config->arch_nodeid[type]);
						break;
					}
				}
			}

			STARPU_ASSERT(sched_ctx->sched_policy->push_task);

			/* check whether there are any workers in the context */
			unsigned nworkers = starpu_sched_ctx_get_nworkers(sched_ctx->id);
			if (nworkers == 0)
				ret = -1;
			else
			{
				struct _starpu_worker *worker = _starpu_get_local_worker_key();
				if (worker)
				{
					STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
					_starpu_worker_enter_sched_op(worker);
					STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
				}
				_STARPU_TASK_BREAK_ON(task, push);
				_STARPU_SCHED_BEGIN;
				ret = sched_ctx->sched_policy->push_task(task);
				_STARPU_SCHED_END;
				if (worker)
				{
					STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
					_starpu_worker_leave_sched_op(worker);
					STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
				}
			}
		}

		if (ret == -1)
		{
			_STARPU_MSG("repush task\n");
			_STARPU_TRACE_JOB_POP(task, task->priority);
			ret = _starpu_push_task_to_workers(task);
		}
	}
	/* Note: from here, the task might have been destroyed already! */
	_STARPU_LOG_OUT();
	return ret;
}
/* This is called right after the scheduler has pushed a task to a queue
 * but just before releasing mutexes: we need the task to still be alive!
 */
int starpu_push_task_end(struct starpu_task *task)
{
	_starpu_profiling_set_task_push_end_time(task);
	task->scheduled = 1;
	return 0;
}

/* This is called right after the scheduler has popped a task from a queue
 * but just before releasing mutexes: we need the task to still be alive!
 */
int _starpu_pop_task_end(struct starpu_task *task)
{
	if (!task)
		return 0;
	_STARPU_TRACE_JOB_POP(task, task->priority);
	return 0;
}
/*
 * Given a handle that needs to be converted in order to be used on the given
 * node, returns a task that takes care of the conversion.
 */
struct starpu_task *_starpu_create_conversion_task(starpu_data_handle_t handle, unsigned int node)
{
	return _starpu_create_conversion_task_for_arch(handle, starpu_node_get_kind(node));
}
struct starpu_task *_starpu_create_conversion_task_for_arch(starpu_data_handle_t handle, enum starpu_node_kind node_kind)
{
	struct starpu_task *conversion_task;

#if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA) || defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID)
	struct starpu_multiformat_interface *format_interface;
#endif

	conversion_task = starpu_task_create();
	conversion_task->name = "conversion_task";
	conversion_task->synchronous = 0;
	STARPU_TASK_SET_HANDLE(conversion_task, handle, 0);

#if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA) || defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID)
	/* The node does not really matter here */
	format_interface = (struct starpu_multiformat_interface *) starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
#endif

	_starpu_spin_lock(&handle->header_lock);
	handle->refcnt++;
	handle->busy_count++;
	_starpu_spin_unlock(&handle->header_lock);

	switch (node_kind)
	{
	case STARPU_CPU_RAM:
		switch (starpu_node_get_kind(handle->mf_node))
		{
		case STARPU_CPU_RAM:
			STARPU_ABORT();
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
		case STARPU_CUDA_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->cuda_to_cpu_cl;
			break;
		}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
		case STARPU_OPENCL_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->opencl_to_cpu_cl;
			break;
		}
#endif
#ifdef STARPU_USE_MIC
		case STARPU_MIC_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->mic_to_cpu_cl;
			break;
		}
#endif
		default:
			_STARPU_ERROR("Oops: %u\n", handle->mf_node);
		}
		break;
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	case STARPU_CUDA_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_cuda_cl;
		break;
	}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	case STARPU_OPENCL_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_opencl_cl;
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_mic_cl;
		break;
	}
#endif
	default:
		STARPU_ABORT();
	}

	_starpu_codelet_check_deprecated_fields(conversion_task->cl);
	STARPU_TASK_SET_MODE(conversion_task, STARPU_RW, 0);
	return conversion_task;
}
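
/* Pick the next context this worker should pop from: a context with pending
 * tasks (task_number > 0) is preferred; otherwise the worker's contexts are
 * cycled through round-robin, using the last_poped flag as a cursor. */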
static
struct _starpu_sched_ctx* _get_next_sched_ctx_to_pop_into(struct _starpu_worker *worker)
{
	struct _starpu_sched_ctx_elt *e = NULL;
	struct _starpu_sched_ctx_list_iterator list_it;
	int found = 0;

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		if (e->task_number > 0)
			return _starpu_get_sched_ctx_struct(e->sched_ctx);
	}

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		if (e->last_poped)
		{
			e->last_poped = 0;
			if (_starpu_sched_ctx_list_iterator_has_next(&list_it))
			{
				e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
				found = 1;
			}
			break;
		}
	}
	if (!found)
		e = worker->sched_ctx_list->head;
	e->last_poped = 1;

	return _starpu_get_sched_ctx_struct(e->sched_ctx);
}
struct starpu_task *_starpu_pop_task(struct _starpu_worker *worker)
{
	struct starpu_task *task;
	int worker_id;
	unsigned node;

	/* We can't tell in advance which task will be picked up, so we measure
	 * a timestamp, and will attribute it afterwards to the task. */
	int profiling = starpu_profiling_status_get();
	struct timespec pop_start_time;
	if (profiling)
		_starpu_clock_gettime(&pop_start_time);

pick:
	/* perhaps there is some local task to be executed first */
	task = _starpu_pop_local_task(worker);
	if (task)
		_STARPU_TASK_BREAK_ON(task, pop);

	/* get tasks from the stacks of the strategy */
	if (!task)
	{
		struct _starpu_sched_ctx *sched_ctx;
#ifndef STARPU_NON_BLOCKING_DRIVERS
		int been_here[STARPU_NMAX_SCHED_CTXS];
		int i;
		for (i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
			been_here[i] = 0;

		while (!task)
#endif
		{
			if (worker->nsched_ctxs == 1)
				sched_ctx = _starpu_get_initial_sched_ctx();
			else
			{
				while (1)
				{
					/** Caution: if you use multiple contexts, your
					 * scheduler *needs* to update the task_number
					 * variable of the ctx list in order to get the best
					 * performance. This is done using the functions:
					 *   starpu_sched_ctx_list_task_counters_increment...(...)
					 *   starpu_sched_ctx_list_task_counters_decrement...(...)
					 **/
					sched_ctx = _get_next_sched_ctx_to_pop_into(worker);

					if (worker->removed_from_ctx[sched_ctx->id] == 1 && worker->shares_tasks_lists[sched_ctx->id] == 1)
					{
						_starpu_worker_gets_out_of_ctx(sched_ctx->id, worker);
						worker->removed_from_ctx[sched_ctx->id] = 0;
						sched_ctx = NULL;
					}
					else
						break;
				}
			}

			if (sched_ctx && sched_ctx->id != STARPU_NMAX_SCHED_CTXS)
			{
				if (sched_ctx->sched_policy && sched_ctx->sched_policy->pop_task)
				{
					/* Note: we do not push the scheduling state here, because
					 * otherwise when a worker is idle, we'd keep
					 * pushing/popping a scheduling state here, while what we
					 * want to see in the trace is a permanent idle state. */
					task = sched_ctx->sched_policy->pop_task(sched_ctx->id);
					if (task)
						_STARPU_TASK_BREAK_ON(task, pop);
					_starpu_pop_task_end(task);
				}
			}

			if (!task)
			{
				/* whether or not the worker shares a tasks list with the
				 * scheduler does not matter here: if it does not have any
				 * task to pop, just get it out of here */
				/* however, if it does share a task list, it will be removed
				 * as soon as it finishes this job (in handle_job_termination) */
				if (worker->removed_from_ctx[sched_ctx->id])
				{
					_starpu_worker_gets_out_of_ctx(sched_ctx->id, worker);
					worker->removed_from_ctx[sched_ctx->id] = 0;
				}
#ifdef STARPU_USE_SC_HYPERVISOR
				if (worker->pop_ctx_priority)
				{
					struct starpu_sched_ctx_performance_counters *perf_counters = sched_ctx->perf_counters;
					if (sched_ctx->id != 0 && perf_counters != NULL && perf_counters->notify_idle_cycle && _starpu_sched_ctx_allow_hypervisor(sched_ctx->id))
					{
						//_STARPU_TRACE_HYPERVISOR_BEGIN();
						perf_counters->notify_idle_cycle(sched_ctx->id, worker->workerid, 1.0);
						//_STARPU_TRACE_HYPERVISOR_END();
					}
				}
#endif //STARPU_USE_SC_HYPERVISOR
#ifndef STARPU_NON_BLOCKING_DRIVERS
				if (been_here[sched_ctx->id] || worker->nsched_ctxs == 1)
					break;

				been_here[sched_ctx->id] = 1;
#endif
			}
		}
	}

	if (!task)
	{
		if (starpu_idle_file)
			idle_start[worker->workerid] = starpu_timing_now();
		return NULL;
	}

	if (starpu_idle_file && idle_start[worker->workerid] != 0.0)
	{
		double idle_end = starpu_timing_now();
		idle[worker->workerid] += (idle_end - idle_start[worker->workerid]);
		idle_start[worker->workerid] = 0.0;
	}

#ifdef STARPU_USE_SC_HYPERVISOR
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	struct starpu_sched_ctx_performance_counters *perf_counters = sched_ctx->perf_counters;

	if (sched_ctx->id != 0 && perf_counters != NULL && perf_counters->notify_poped_task && _starpu_sched_ctx_allow_hypervisor(sched_ctx->id))
	{
		//_STARPU_TRACE_HYPERVISOR_BEGIN();
		perf_counters->notify_poped_task(task->sched_ctx, worker->workerid);
		//_STARPU_TRACE_HYPERVISOR_END();
	}
#endif //STARPU_USE_SC_HYPERVISOR

	/* Make sure we do not bother with all the multiformat-specific code if
	 * it is not necessary. */
	if (!_starpu_task_uses_multiformat_handles(task))
		goto profiling;

	/* This is either a conversion task, or a regular task for which the
	 * conversion tasks have already been created and submitted */
	if (task->mf_skip)
		goto profiling;

	/*
	 * This worker may not be able to execute this task. In this case, we
	 * should return the task anyway. It will be pushed back almost immediately.
	 * This way, we avoid computing and executing the conversion tasks.
	 * Here, we do not care about what implementation is used.
	 */
	worker_id = starpu_worker_get_id_check();
	if (!starpu_worker_can_execute_task_first_impl(worker_id, task, NULL))
		return task;

	node = starpu_worker_get_memory_node(worker_id);

	/*
	 * We do have a task that uses multiformat handles. Let's create the
	 * required conversion tasks.
	 */
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i = 0; i < nbuffers; i++)
	{
		struct starpu_task *conversion_task;
		starpu_data_handle_t handle;

		handle = STARPU_TASK_GET_HANDLE(task, i);
		if (!_starpu_handle_needs_conversion_task(handle, node))
			continue;
		conversion_task = _starpu_create_conversion_task(handle, node);
		conversion_task->mf_skip = 1;
		conversion_task->execute_on_a_specific_worker = 1;
		conversion_task->workerid = worker_id;
		/*
		 * Next tasks will need to know where these handles have gone.
		 */
		handle->mf_node = node;
		_starpu_task_submit_conversion_task(conversion_task, worker_id);
	}
	task->mf_skip = 1;
	starpu_task_prio_list_push_back(&worker->local_tasks, task);
	goto pick;

profiling:
	if (profiling)
	{
		struct starpu_profiling_task_info *profiling_info;
		profiling_info = task->profiling_info;

		/* The task may have been created before profiling was enabled,
		 * so we check if the profiling_info structure is available
		 * even though we already tested if profiling is enabled. */
		if (profiling_info)
		{
			memcpy(&profiling_info->pop_start_time,
			       &pop_start_time, sizeof(struct timespec));
			_starpu_clock_gettime(&profiling_info->pop_end_time);
		}
	}

	if (task->prologue_callback_pop_func)
	{
		_starpu_set_current_task(task);
		task->prologue_callback_pop_func(task->prologue_callback_pop_arg);
		_starpu_set_current_task(NULL);
	}

	return task;
}
struct starpu_task *_starpu_pop_every_task(struct _starpu_sched_ctx *sched_ctx)
{
	struct starpu_task *task = NULL;
	if (sched_ctx->sched_policy)
	{
		STARPU_ASSERT(sched_ctx->sched_policy->pop_every_task);

		/* TODO set profiling info */
		if (sched_ctx->sched_policy->pop_every_task)
		{
			_STARPU_SCHED_BEGIN;
			task = sched_ctx->sched_policy->pop_every_task(sched_ctx->id);
			_STARPU_SCHED_END;
		}
	}
	return task;
}
void _starpu_sched_pre_exec_hook(struct starpu_task *task)
{
	unsigned sched_ctx_id = starpu_sched_ctx_get_ctx_for_task(task);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (sched_ctx->sched_policy && sched_ctx->sched_policy->pre_exec_hook)
	{
		_STARPU_SCHED_BEGIN;
		sched_ctx->sched_policy->pre_exec_hook(task, sched_ctx_id);
		_STARPU_SCHED_END;
	}

	if (!sched_ctx->sched_policy)
	{
		int workerid = starpu_worker_get_id();
		struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
		struct _starpu_sched_ctx_list_iterator list_it;

		_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
		while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
		{
			struct _starpu_sched_ctx *other_sched_ctx;
			struct _starpu_sched_ctx_elt *e;

			e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
			other_sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
			if (other_sched_ctx != sched_ctx &&
			    other_sched_ctx->sched_policy != NULL &&
			    other_sched_ctx->sched_policy->pre_exec_hook)
			{
				_STARPU_SCHED_BEGIN;
				other_sched_ctx->sched_policy->pre_exec_hook(task, other_sched_ctx->id);
				_STARPU_SCHED_END;
			}
		}
	}
}
void _starpu_sched_post_exec_hook(struct starpu_task *task)
{
	STARPU_ASSERT(task->cl != NULL && task->cl->where != STARPU_NOWHERE);
	unsigned sched_ctx_id = starpu_sched_ctx_get_ctx_for_task(task);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (sched_ctx->sched_policy && sched_ctx->sched_policy->post_exec_hook)
	{
		_STARPU_SCHED_BEGIN;
		sched_ctx->sched_policy->post_exec_hook(task, sched_ctx_id);
		_STARPU_SCHED_END;
	}

	if (!sched_ctx->sched_policy)
	{
		int workerid = starpu_worker_get_id();
		struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
		struct _starpu_sched_ctx_list_iterator list_it;

		_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
		while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
		{
			struct _starpu_sched_ctx *other_sched_ctx;
			struct _starpu_sched_ctx_elt *e;

			e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
			other_sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
			if (other_sched_ctx != sched_ctx &&
			    other_sched_ctx->sched_policy != NULL &&
			    other_sched_ctx->sched_policy->post_exec_hook)
			{
				_STARPU_SCHED_BEGIN;
				other_sched_ctx->sched_policy->post_exec_hook(task, other_sched_ctx->id);
				_STARPU_SCHED_END;
			}
		}
	}
}
int starpu_push_local_task(int workerid, struct starpu_task *task, int back STARPU_ATTRIBUTE_UNUSED)
{
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);

	return _starpu_push_local_task(worker, task);
}
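
/* Append the cumulated idle time of all workers (accumulated in
 * _starpu_pop_task() above) to the file named by the STARPU_IDLE_FILE
 * environment variable. */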
void _starpu_print_idle_time()
{
	if (!starpu_idle_file)
		return;

	double all_idle = 0.0;
	int i = 0;
	for (i = 0; i < STARPU_NMAXWORKERS; i++)
		all_idle += idle[i];

	FILE *f;
	f = fopen(starpu_idle_file, "a");
	if (!f)
	{
		_STARPU_MSG("couldn't open %s: %s\n", starpu_idle_file, strerror(errno));
	}
	else
	{
		fprintf(f, "%lf\n", all_idle);
		fclose(f);
	}
}
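
/* Trigger the STARPU_TASK_BREAK_ON_SCHED breakpoint for this task. The
 * apparent intent (an assumption, based on the break-on variables set up in
 * _starpu_sched_init() above) is that scheduling policies call this at the
 * point where they actually take their scheduling decision, so the "sched"
 * breakpoint fires there rather than at push or pop time. */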
void starpu_sched_task_break(struct starpu_task *task)
{
	_STARPU_TASK_BREAK_ON(task, sched);
}