sched_policy.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2017  Université de Bordeaux
 * Copyright (C) 2010-2017  CNRS
 * Copyright (C) 2011, 2016  INRIA
 * Copyright (C) 2016  Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/sched_policy.h>
#include <profiling/profiling.h>
#include <common/barrier.h>
#include <core/debug.h>
#include <core/task.h>

static int use_prefetch = 0;
static double idle[STARPU_NMAXWORKERS];
static double idle_start[STARPU_NMAXWORKERS];
long _starpu_task_break_on_push = -1;
long _starpu_task_break_on_pop = -1;
long _starpu_task_break_on_sched = -1;
static const char *starpu_idle_file;

void _starpu_sched_init(void)
{
	_starpu_task_break_on_push = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_PUSH", -1);
	_starpu_task_break_on_pop = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_POP", -1);
	_starpu_task_break_on_sched = starpu_get_env_number_default("STARPU_TASK_BREAK_ON_SCHED", -1);
	starpu_idle_file = starpu_getenv("STARPU_IDLE_FILE");
}
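
/* Sketch (editor's illustration, not in the original source): the
 * STARPU_TASK_BREAK_ON_* variables read above hold a job id, so that a
 * debugger can catch the moment a given job is pushed/popped/scheduled,
 * along the lines of
 *   STARPU_TASK_BREAK_ON_PUSH=42 gdb ./my_app
 * where the job id 42 is purely illustrative. */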

int starpu_get_prefetch_flag(void)
{
	return use_prefetch;
}
static struct starpu_sched_policy *predefined_policies[] =
{
	&_starpu_sched_modular_eager_policy,
	&_starpu_sched_modular_eager_prefetching_policy,
	&_starpu_sched_modular_prio_policy,
	&_starpu_sched_modular_prio_prefetching_policy,
	&_starpu_sched_modular_random_policy,
	&_starpu_sched_modular_random_prio_policy,
	&_starpu_sched_modular_random_prefetching_policy,
	&_starpu_sched_modular_random_prio_prefetching_policy,
	&_starpu_sched_modular_ws_policy,
	&_starpu_sched_modular_heft_policy,
	&_starpu_sched_modular_heft_prio_policy,
	&_starpu_sched_modular_heft2_policy,
	&_starpu_sched_eager_policy,
	&_starpu_sched_prio_policy,
	&_starpu_sched_random_policy,
	&_starpu_sched_lws_policy,
	&_starpu_sched_ws_policy,
	&_starpu_sched_dm_policy,
	&_starpu_sched_dmda_policy,
	&_starpu_sched_dmda_ready_policy,
	&_starpu_sched_dmda_sorted_policy,
	&_starpu_sched_dmda_sorted_decision_policy,
	&_starpu_sched_parallel_heft_policy,
	&_starpu_sched_peager_policy,
	&_starpu_sched_heteroprio_policy,
	&_starpu_sched_graph_test_policy,
	NULL
};

struct starpu_sched_policy **starpu_sched_get_predefined_policies()
{
	return predefined_policies;
}
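
/* Sketch (editor's illustration, not in the original source): an application
 * can enumerate the NULL-terminated table returned above through the public
 * API; policy_name and policy_description are public fields of
 * struct starpu_sched_policy. printf is assumed available via the headers
 * included at the top of this file. */
#if 0
static void list_policies(void)
{
	struct starpu_sched_policy **policy;
	for (policy = starpu_sched_get_predefined_policies(); *policy != NULL; policy++)
		printf("%s: %s\n", (*policy)->policy_name, (*policy)->policy_description);
}
#endif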
struct starpu_sched_policy *_starpu_get_sched_policy(struct _starpu_sched_ctx *sched_ctx)
{
	return sched_ctx->sched_policy;
}

/*
 * Methods to initialize the scheduling policy
 */

static void load_sched_policy(struct starpu_sched_policy *sched_policy, struct _starpu_sched_ctx *sched_ctx)
{
	STARPU_ASSERT(sched_policy);

#ifdef STARPU_VERBOSE
	if (sched_policy->policy_name)
	{
		if (sched_policy->policy_description)
			_STARPU_DEBUG("Use %s scheduler (%s)\n", sched_policy->policy_name, sched_policy->policy_description);
		else
			_STARPU_DEBUG("Use %s scheduler \n", sched_policy->policy_name);
	}
#endif

	struct starpu_sched_policy *policy = sched_ctx->sched_policy;
	memcpy(policy, sched_policy, sizeof(*policy));
}
static struct starpu_sched_policy *find_sched_policy_from_name(const char *policy_name)
{
	if (!policy_name)
		return NULL;
	if (strcmp(policy_name, "") == 0)
		return NULL;

	if (strncmp(policy_name, "heft", 4) == 0)
	{
		_STARPU_MSG("Warning: heft is now called \"dmda\".\n");
		return &_starpu_sched_dmda_policy;
	}

	struct starpu_sched_policy **policy;
	for (policy = predefined_policies; *policy != NULL; policy++)
	{
		struct starpu_sched_policy *p = *policy;
		if (p->policy_name)
		{
			if (strcmp(policy_name, p->policy_name) == 0)
			{
				/* we found a policy with the requested name */
				return p;
			}
		}
	}
	if (strcmp(policy_name, "help") != 0)
		_STARPU_MSG("Warning: scheduling policy '%s' was not found, try 'help' to get a list\n", policy_name);

	/* nothing was found */
	return NULL;
}
static void display_sched_help_message(FILE *stream)
{
	const char *sched_env = starpu_getenv("STARPU_SCHED");
	if (sched_env && (strcmp(sched_env, "help") == 0))
	{
		/* display the description of all predefined policies */
		struct starpu_sched_policy **policy;
		fprintf(stream, "\nThe variable STARPU_SCHED can be set to one of the following strings:\n");
		for (policy = predefined_policies; *policy != NULL; policy++)
		{
			struct starpu_sched_policy *p = *policy;
			fprintf(stream, "%-30s\t-> %s\n", p->policy_name, p->policy_description);
		}
		fprintf(stream, "\n");
	}
}
struct starpu_sched_policy *_starpu_select_sched_policy(struct _starpu_machine_config *config, const char *required_policy)
{
	struct starpu_sched_policy *selected_policy = NULL;
	struct starpu_conf *user_conf = &config->conf;

	if (required_policy)
		selected_policy = find_sched_policy_from_name(required_policy);

	/* If there is a policy that matches the required name, return it */
	if (selected_policy)
		return selected_policy;

	/* First, check whether the application explicitly provided a scheduling policy */
	if (user_conf && (user_conf->sched_policy))
		return user_conf->sched_policy;

	/* Otherwise, check whether the application specified the name of a policy to load */
	const char *sched_pol_name;
	sched_pol_name = starpu_getenv("STARPU_SCHED");
	if (sched_pol_name == NULL && user_conf && user_conf->sched_policy_name)
		sched_pol_name = user_conf->sched_policy_name;
	if (sched_pol_name)
		selected_policy = find_sched_policy_from_name(sched_pol_name);

	/* If there is a policy that matches the name, return it */
	if (selected_policy)
		return selected_policy;

	/* If no policy was specified, use the eager policy by default */
	return &_starpu_sched_eager_policy;
}
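
/* Sketch (editor's illustration): given the lookup order above, an
 * application can select a policy either from the environment
 * (STARPU_SCHED=dmda ./my_app) or in code before starpu_init(). A minimal
 * sketch, assuming the predefined "dmda" policy: */
#if 0
int main(void)
{
	struct starpu_conf conf;
	starpu_conf_init(&conf);
	conf.sched_policy_name = "dmda"; /* resolved by find_sched_policy_from_name() */
	if (starpu_init(&conf) != 0)
		return 1;
	/* ... submit tasks ... */
	starpu_shutdown();
	return 0;
}
#endif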
void _starpu_init_sched_policy(struct _starpu_machine_config *config, struct _starpu_sched_ctx *sched_ctx, struct starpu_sched_policy *selected_policy)
{
	/* Perhaps we have to display some help */
	display_sched_help_message(stderr);

	/* Prefetch is activated by default */
	use_prefetch = starpu_get_env_number("STARPU_PREFETCH");
	if (use_prefetch == -1)
		use_prefetch = 1;

	/* Set calibrate flag */
	_starpu_set_calibrate_flag(config->conf.calibrate);

	load_sched_policy(selected_policy, sched_ctx);

	if (starpu_get_env_number_default("STARPU_WORKER_TREE", 0))
	{
#ifdef STARPU_HAVE_HWLOC
		sched_ctx->sched_policy->worker_type = STARPU_WORKER_TREE;
#else
		_STARPU_DISP("STARPU_WORKER_TREE ignored, please rebuild StarPU with hwloc support to enable it.");
#endif
	}
	starpu_sched_ctx_create_worker_collection(sched_ctx->id, sched_ctx->sched_policy->worker_type);

	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->init_sched(sched_ctx->id);
	_STARPU_SCHED_END;
}
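
/* Sketch: the environment variables consulted above can be set from a shell
 * before launching the application (values illustrative), e.g.
 *   STARPU_PREFETCH=0     disable input prefetching
 *   STARPU_WORKER_TREE=1  use the hwloc-based tree worker collection
 *   STARPU_SCHED=help     print the policy list at initialization */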
void _starpu_deinit_sched_policy(struct _starpu_sched_ctx *sched_ctx)
{
	struct starpu_sched_policy *policy = sched_ctx->sched_policy;
	if (policy->deinit_sched)
	{
		_STARPU_SCHED_BEGIN;
		policy->deinit_sched(sched_ctx->id);
		_STARPU_SCHED_END;
	}
	starpu_sched_ctx_delete_worker_collection(sched_ctx->id);
}

void _starpu_sched_task_submit(struct starpu_task *task)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	if (!sched_ctx->sched_policy)
		return;
	if (!sched_ctx->sched_policy->submit_hook)
		return;
	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->submit_hook(task);
	_STARPU_SCHED_END;
}

void _starpu_sched_do_schedule(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (!sched_ctx->sched_policy)
		return;
	if (!sched_ctx->sched_policy->do_schedule)
		return;
	_STARPU_SCHED_BEGIN;
	sched_ctx->sched_policy->do_schedule(sched_ctx_id);
	_STARPU_SCHED_END;
}
static void _starpu_push_task_on_specific_worker_notify_sched(struct starpu_task *task, struct _starpu_worker *worker, int workerid, int perf_workerid)
{
	/* if we push a task on a specific worker, notify all the sched_ctxs the worker belongs to */
	struct _starpu_sched_ctx_list_iterator list_it;

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		struct _starpu_sched_ctx_elt *e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
		if (sched_ctx->sched_policy != NULL && sched_ctx->sched_policy->push_task_notify)
		{
			_STARPU_SCHED_BEGIN;
			sched_ctx->sched_policy->push_task_notify(task, workerid, perf_workerid, sched_ctx->id);
			_STARPU_SCHED_END;
		}
	}
}

/* Enqueue a task into the list of tasks explicitly attached to a worker. In
 * case workerid identifies a combined worker, a task will be enqueued into
 * each worker of the combination. */
static int _starpu_push_task_on_specific_worker(struct starpu_task *task, int workerid)
{
	int nbasic_workers = (int)starpu_worker_get_count();

	/* Is this a basic worker or a combined worker ? */
	int is_basic_worker = (workerid < nbasic_workers);

	unsigned memory_node;
	struct _starpu_worker *worker = NULL;
	struct _starpu_combined_worker *combined_worker = NULL;

	if (is_basic_worker)
	{
		worker = _starpu_get_worker_struct(workerid);
		memory_node = worker->memory_node;
	}
	else
	{
		combined_worker = _starpu_get_combined_worker_struct(workerid);
		memory_node = combined_worker->memory_node;
	}

	if (use_prefetch)
		starpu_prefetch_task_input_on_node(task, memory_node);

	if (is_basic_worker)
		_starpu_push_task_on_specific_worker_notify_sched(task, worker, workerid, workerid);
	else
	{
		/* Notify all workers of the combined worker */
		int worker_size = combined_worker->worker_size;
		int *combined_workerid = combined_worker->combined_workerid;

		int j;
		for (j = 0; j < worker_size; j++)
		{
			int subworkerid = combined_workerid[j];
			_starpu_push_task_on_specific_worker_notify_sched(task, _starpu_get_worker_struct(subworkerid), subworkerid, workerid);
		}
	}

#ifdef STARPU_USE_SC_HYPERVISOR
	starpu_sched_ctx_call_pushed_task_cb(workerid, task->sched_ctx);
#endif //STARPU_USE_SC_HYPERVISOR

	if (is_basic_worker)
	{
		unsigned node = starpu_worker_get_memory_node(workerid);
		if (_starpu_task_uses_multiformat_handles(task))
		{
			unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
			unsigned i;
			for (i = 0; i < nbuffers; i++)
			{
				struct starpu_task *conversion_task;
				starpu_data_handle_t handle;

				handle = STARPU_TASK_GET_HANDLE(task, i);
				if (!_starpu_handle_needs_conversion_task(handle, node))
					continue;

				conversion_task = _starpu_create_conversion_task(handle, node);
				conversion_task->mf_skip = 1;
				conversion_task->execute_on_a_specific_worker = 1;
				conversion_task->workerid = workerid;
				_starpu_task_submit_conversion_task(conversion_task, workerid);
				//_STARPU_DEBUG("Pushing a conversion task\n");
			}

			for (i = 0; i < nbuffers; i++)
			{
				starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
				handle->mf_node = node;
			}
		}
		//	if(task->sched_ctx != _starpu_get_initial_sched_ctx()->id)
		if (task->priority > 0)
			return _starpu_push_local_task(worker, task, 1);
		else
			return _starpu_push_local_task(worker, task, 0);
	}
	else
	{
		/* This is a combined worker so we create task aliases */
		int worker_size = combined_worker->worker_size;
		int *combined_workerid = combined_worker->combined_workerid;

		int ret = 0;

		struct _starpu_job *job = _starpu_get_job_associated_to_task(task);
		job->task_size = worker_size;
		job->combined_workerid = workerid;
		job->active_task_alias_count = 0;

		STARPU_PTHREAD_BARRIER_INIT(&job->before_work_barrier, NULL, worker_size);
		STARPU_PTHREAD_BARRIER_INIT(&job->after_work_barrier, NULL, worker_size);
		job->after_work_busy_barrier = worker_size;

		/* Note: we have to call that early, or else the task may have
		 * disappeared already */
		starpu_push_task_end(task);

		int j;
		for (j = 0; j < worker_size; j++)
		{
			struct starpu_task *alias = starpu_task_dup(task);
			alias->destroy = 1;

			worker = _starpu_get_worker_struct(combined_workerid[j]);
			ret |= _starpu_push_local_task(worker, alias, 0);
		}

		return ret;
	}
}
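
/* Sketch (editor's illustration, not in the original source): from the
 * application side, the path above is reached by pinning a task to a worker
 * through the public starpu_task fields; the codelet passed in is
 * hypothetical. */
#if 0
static int submit_pinned_task(struct starpu_codelet *my_codelet)
{
	struct starpu_task *task = starpu_task_create();
	task->cl = my_codelet;			/* hypothetical codelet */
	task->execute_on_a_specific_worker = 1;	/* bypass the scheduling policy */
	task->workerid = 0;			/* target worker 0's local queue */
	return starpu_task_submit(task);
}
#endif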
/* the generic interface that calls the proper underlying implementation */
int _starpu_push_task(struct _starpu_job *j)
{
	if (j->task->prologue_callback_func)
		j->task->prologue_callback_func(j->task->prologue_callback_arg);

	return _starpu_repush_task(j);
}
int _starpu_repush_task(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	int ret;

	_STARPU_LOG_IN();

	unsigned can_push = _starpu_increment_nready_tasks_of_sched_ctx(task->sched_ctx, task->flops, task);
	task->status = STARPU_TASK_READY;
	STARPU_AYU_ADDTOTASKQUEUE(j->job_id, -1);

	/* if the context does not have any workers, save the task in a temp list */
	if (!sched_ctx->is_initial_sched)
	{
		/* if no worker of the ctx is able to execute the task,
		 * we consider the ctx empty */
		unsigned nworkers = _starpu_nworkers_able_to_execute_task(task, sched_ctx);

		if (nworkers == 0)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->empty_ctx_mutex);
			starpu_task_list_push_front(&sched_ctx->empty_ctx_tasks, task);
			STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
#ifdef STARPU_USE_SC_HYPERVISOR
			if (sched_ctx->id != 0 && sched_ctx->perf_counters != NULL
			    && sched_ctx->perf_counters->notify_empty_ctx)
			{
				_STARPU_TRACE_HYPERVISOR_BEGIN();
				sched_ctx->perf_counters->notify_empty_ctx(sched_ctx->id, task);
				_STARPU_TRACE_HYPERVISOR_END();
			}
#endif
			return 0;
		}
	}

	if (!can_push)
		return 0;

	/* in case there is no codelet associated to the task (that's a control
	 * task), we directly execute its callback and enforce the
	 * corresponding dependencies */
	if (task->cl == NULL || task->cl->where == STARPU_NOWHERE)
	{
		if (task->prologue_callback_pop_func)
			task->prologue_callback_pop_func(task->prologue_callback_pop_arg);

		if (task->cl && task->cl->specific_nodes)
		{
			/* Nothing to do, but we are asked to fetch data on some memory nodes */
			_starpu_fetch_nowhere_task_input(j);
		}
		else
		{
			if (task->cl)
				__starpu_push_task_output(j);
			_starpu_handle_job_termination(j);
			_STARPU_LOG_OUT_TAG("handle_job_termination");
		}
		return 0;
	}

	ret = _starpu_push_task_to_workers(task);
	if (ret == -EAGAIN)
		/* pushed to empty context, that's fine */
		ret = 0;
	return ret;
}
int _starpu_push_task_to_workers(struct starpu_task *task)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	unsigned nworkers = 0;

	_STARPU_TRACE_JOB_PUSH(task, task->priority > 0);

	/* if the context still does not have workers, put the task back in
	 * its place in the empty-ctx list */
	if (!sched_ctx->is_initial_sched)
	{
		/* if no worker of the ctx is able to execute the task,
		 * we consider the ctx empty */
		nworkers = _starpu_nworkers_able_to_execute_task(task, sched_ctx);

		if (nworkers == 0)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->empty_ctx_mutex);
			starpu_task_list_push_back(&sched_ctx->empty_ctx_tasks, task);
			STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
#ifdef STARPU_USE_SC_HYPERVISOR
			if (sched_ctx->id != 0 && sched_ctx->perf_counters != NULL
			    && sched_ctx->perf_counters->notify_empty_ctx)
			{
				_STARPU_TRACE_HYPERVISOR_BEGIN();
				sched_ctx->perf_counters->notify_empty_ctx(sched_ctx->id, task);
				_STARPU_TRACE_HYPERVISOR_END();
			}
#endif
			return -EAGAIN;
		}
	}

	_starpu_profiling_set_task_push_start_time(task);

	int ret = 0;
	if (STARPU_UNLIKELY(task->execute_on_a_specific_worker))
	{
		unsigned node = starpu_worker_get_memory_node(task->workerid);
		if (starpu_get_prefetch_flag())
			starpu_prefetch_task_input_on_node(task, node);

		ret = _starpu_push_task_on_specific_worker(task, task->workerid);
	}
	else
	{
		struct _starpu_machine_config *config = _starpu_get_machine_config();

		/* When a task can only be executed on a given arch and we have
		 * only one memory node for that arch, we can systematically
		 * prefetch before the scheduling decision. */
		if (starpu_get_prefetch_flag())
		{
			if (task->cl->where == STARPU_CPU && config->cpus_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->cpus_nodeid);
			else if (task->cl->where == STARPU_CUDA && config->cuda_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->cuda_nodeid);
			else if (task->cl->where == STARPU_OPENCL && config->opencl_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->opencl_nodeid);
			else if (task->cl->where == STARPU_MIC && config->mic_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->mic_nodeid);
			else if (task->cl->where == STARPU_SCC && config->scc_nodeid >= 0)
				starpu_prefetch_task_input_on_node(task, config->scc_nodeid);
		}

		if (!sched_ctx->sched_policy)
		{
			/* Note: we have to call that early, or else the task may have
			 * disappeared already */
			starpu_push_task_end(task);
			if (!sched_ctx->awake_workers)
				ret = _starpu_push_task_on_specific_worker(task, sched_ctx->main_master);
			else
			{
				struct starpu_worker_collection *workers = sched_ctx->workers;

				struct _starpu_job *job = _starpu_get_job_associated_to_task(task);
				job->task_size = workers->nworkers;
				job->combined_workerid = -1; /* it's a ctx, not a combined worker */
				job->active_task_alias_count = 0;

				STARPU_PTHREAD_BARRIER_INIT(&job->before_work_barrier, NULL, workers->nworkers);
				STARPU_PTHREAD_BARRIER_INIT(&job->after_work_barrier, NULL, workers->nworkers);
				job->after_work_busy_barrier = workers->nworkers;

				struct starpu_sched_ctx_iterator it;
				if (workers->init_iterator)
					workers->init_iterator(workers, &it);

				while (workers->has_next(workers, &it))
				{
					unsigned workerid = workers->get_next(workers, &it);
					struct starpu_task *alias;
					if (job->task_size > 1)
					{
						alias = starpu_task_dup(task);
						alias->destroy = 1;
					}
					else
						alias = task;

					ret |= _starpu_push_task_on_specific_worker(alias, workerid);
				}
			}
		}
		else
		{
			STARPU_ASSERT(sched_ctx->sched_policy->push_task);

			/* check whether there are any workers in the context */
			starpu_pthread_rwlock_t *changing_ctx_mutex = _starpu_sched_ctx_get_changing_ctx_mutex(sched_ctx->id);
			STARPU_PTHREAD_RWLOCK_RDLOCK(changing_ctx_mutex);
			nworkers = starpu_sched_ctx_get_nworkers(sched_ctx->id);
			if (nworkers == 0)
				ret = -1;
			else
			{
				_STARPU_TASK_BREAK_ON(task, push);
				_STARPU_SCHED_BEGIN;
				ret = sched_ctx->sched_policy->push_task(task);
				_STARPU_SCHED_END;
			}
			STARPU_PTHREAD_RWLOCK_UNLOCK(changing_ctx_mutex);
		}

		if (ret == -1)
		{
			_STARPU_MSG("repush task \n");
			_STARPU_TRACE_JOB_POP(task, task->priority > 0);
			ret = _starpu_push_task_to_workers(task);
		}
	}
	/* Note: from here, the task might have been destroyed already! */
	_STARPU_LOG_OUT();
	return ret;
}
/* This is called right after the scheduler has pushed a task to a queue
 * but just before releasing mutexes: we need the task to still be alive!
 */
int starpu_push_task_end(struct starpu_task *task)
{
	_starpu_profiling_set_task_push_end_time(task);
	task->scheduled = 1;
	return 0;
}
/* This is called right after a task has been popped from a queue, but before
 * releasing mutexes: the task must still be alive here as well.
 */
int _starpu_pop_task_end(struct starpu_task *task)
{
	if (!task)
		return 0;
	_STARPU_TRACE_JOB_POP(task, task->priority > 0);
	return 0;
}
/*
 * Given a handle that needs to be converted in order to be used on the given
 * node, returns a task that takes care of the conversion.
 */
struct starpu_task *_starpu_create_conversion_task(starpu_data_handle_t handle, unsigned int node)
{
	return _starpu_create_conversion_task_for_arch(handle, starpu_node_get_kind(node));
}
struct starpu_task *_starpu_create_conversion_task_for_arch(starpu_data_handle_t handle, enum starpu_node_kind node_kind)
{
	struct starpu_task *conversion_task;

#if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA) || defined(STARPU_USE_MIC) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID)
	struct starpu_multiformat_interface *format_interface;
#endif

	conversion_task = starpu_task_create();
	conversion_task->name = "conversion_task";
	conversion_task->synchronous = 0;
	STARPU_TASK_SET_HANDLE(conversion_task, handle, 0);

#if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA) || defined(STARPU_USE_MIC) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID)
	/* The node does not really matter here */
	format_interface = (struct starpu_multiformat_interface *) starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
#endif

	_starpu_spin_lock(&handle->header_lock);
	handle->refcnt++;
	handle->busy_count++;
	_starpu_spin_unlock(&handle->header_lock);

	switch (node_kind)
	{
	case STARPU_CPU_RAM:
	case STARPU_SCC_RAM:
	case STARPU_SCC_SHM:
		switch (starpu_node_get_kind(handle->mf_node))
		{
		case STARPU_CPU_RAM:
		case STARPU_SCC_RAM:
		case STARPU_SCC_SHM:
			STARPU_ABORT();
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
		case STARPU_CUDA_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->cuda_to_cpu_cl;
			break;
		}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
		case STARPU_OPENCL_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->opencl_to_cpu_cl;
			break;
		}
#endif
#ifdef STARPU_USE_MIC
		case STARPU_MIC_RAM:
		{
			struct starpu_multiformat_data_interface_ops *mf_ops;
			mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
			conversion_task->cl = mf_ops->mic_to_cpu_cl;
			break;
		}
#endif
		default:
			_STARPU_ERROR("Oops : %u\n", handle->mf_node);
		}
		break;
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	case STARPU_CUDA_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_cuda_cl;
		break;
	}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	case STARPU_OPENCL_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_opencl_cl;
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
	{
		struct starpu_multiformat_data_interface_ops *mf_ops;
		mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
		conversion_task->cl = mf_ops->cpu_to_mic_cl;
		break;
	}
#endif
	default:
		STARPU_ABORT();
	}

	STARPU_TASK_SET_MODE(conversion_task, STARPU_RW, 0);
	return conversion_task;
}
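
/* Sketch (editor's illustration, not in the original source): the
 * per-direction conversion codelets fetched above come from the ops structure
 * the application supplies when registering a multiformat handle. The element
 * types and codelets below are hypothetical; field names are assumed from the
 * public multiformat interface. */
#if 0
static struct starpu_multiformat_data_interface_ops my_mf_ops =
{
	.cpu_elemsize = sizeof(struct my_cpu_elem),	/* hypothetical type */
	.cuda_elemsize = sizeof(struct my_cuda_elem),	/* hypothetical type */
	.cpu_to_cuda_cl = &cpu_to_cuda_cl,		/* hypothetical codelet */
	.cuda_to_cpu_cl = &cuda_to_cpu_cl,		/* hypothetical codelet */
};

static void register_mf_vector(void *ptr, unsigned n_elements)
{
	starpu_data_handle_t handle;
	starpu_multiformat_data_register(&handle, STARPU_MAIN_RAM, ptr, n_elements, &my_mf_ops);
}
#endif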
static
struct _starpu_sched_ctx *_get_next_sched_ctx_to_pop_into(struct _starpu_worker *worker)
{
	struct _starpu_sched_ctx_elt *e = NULL;
	struct _starpu_sched_ctx_list_iterator list_it;
	int found = 0;

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		if (e->task_number > 0)
			return _starpu_get_sched_ctx_struct(e->sched_ctx);
	}

	_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
	while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
	{
		e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
		if (e->last_poped)
		{
			e->last_poped = 0;
			if (_starpu_sched_ctx_list_iterator_has_next(&list_it))
			{
				e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
				found = 1;
			}
			break;
		}
	}
	if (!found)
		e = worker->sched_ctx_list->head;
	e->last_poped = 1;

	return _starpu_get_sched_ctx_struct(e->sched_ctx);
}
struct starpu_task *_starpu_pop_task(struct _starpu_worker *worker)
{
	struct starpu_task *task;
	int worker_id;
	unsigned node;

	/* We can't tell in advance which task will be picked up, so we measure
	 * a timestamp, and will attribute it afterwards to the task. */
	int profiling = starpu_profiling_status_get();
	struct timespec pop_start_time;
	if (profiling)
		_starpu_clock_gettime(&pop_start_time);

pick:
	/* perhaps there is some local task to be executed first */
	task = _starpu_pop_local_task(worker);
	if (task)
		_STARPU_TASK_BREAK_ON(task, pop);

	/* get tasks from the stacks of the strategy */
	if (!task)
	{
		struct _starpu_sched_ctx *sched_ctx;

#ifndef STARPU_NON_BLOCKING_DRIVERS
		int been_here[STARPU_NMAX_SCHED_CTXS];
		int i;
		for (i = 0; i < STARPU_NMAX_SCHED_CTXS; i++)
			been_here[i] = 0;

		while (!task)
#endif
		{
			if (worker->nsched_ctxs == 1)
				sched_ctx = _starpu_get_initial_sched_ctx();
			else
			{
				while (1)
				{
					/** Caution:
					 * if you use multiple contexts, your scheduler *needs*
					 * to update the task_number variable of the ctx list
					 * in order to get the best performance.
					 * This is done using the functions
					 * starpu_sched_ctx_list_task_counters_increment...(...)
					 * starpu_sched_ctx_list_task_counters_decrement...(...)
					 **/
					sched_ctx = _get_next_sched_ctx_to_pop_into(worker);

					if (worker->removed_from_ctx[sched_ctx->id] == 1 && worker->shares_tasks_lists[sched_ctx->id] == 1)
					{
						_starpu_worker_gets_out_of_ctx(sched_ctx->id, worker);
						worker->removed_from_ctx[sched_ctx->id] = 0;
						sched_ctx = NULL;
					}
					else
						break;
				}
			}

			if (sched_ctx && sched_ctx->id != STARPU_NMAX_SCHED_CTXS)
			{
				if (sched_ctx->sched_policy && sched_ctx->sched_policy->pop_task)
				{
					/* Note: we do not push the scheduling state here, because
					 * otherwise when a worker is idle, we'd keep
					 * pushing/popping a scheduling state here, while what we
					 * want to see in the trace is a permanent idle state. */
					task = sched_ctx->sched_policy->pop_task(sched_ctx->id);
					if (task)
						_STARPU_TASK_BREAK_ON(task, pop);
					_starpu_pop_task_end(task);
				}
			}

			if (!task)
			{
				/* whether or not the worker shares a tasks list in the
				 * scheduler does not matter: if it does not have any task
				 * to pop, just get it out of here */
				/* however, if it does share a task list, it will be removed
				 * as soon as it finishes this job (in handle_job_termination) */
				if (worker->removed_from_ctx[sched_ctx->id])
				{
					_starpu_worker_gets_out_of_ctx(sched_ctx->id, worker);
					worker->removed_from_ctx[sched_ctx->id] = 0;
				}
#ifdef STARPU_USE_SC_HYPERVISOR
				if (worker->pop_ctx_priority)
				{
					struct starpu_sched_ctx_performance_counters *perf_counters = sched_ctx->perf_counters;
					if (sched_ctx->id != 0 && perf_counters != NULL && perf_counters->notify_idle_cycle && _starpu_sched_ctx_allow_hypervisor(sched_ctx->id))
					{
						//	_STARPU_TRACE_HYPERVISOR_BEGIN();
						perf_counters->notify_idle_cycle(sched_ctx->id, worker->workerid, 1.0);
						//	_STARPU_TRACE_HYPERVISOR_END();
					}
				}
#endif //STARPU_USE_SC_HYPERVISOR

#ifndef STARPU_NON_BLOCKING_DRIVERS
				if (been_here[sched_ctx->id] || worker->nsched_ctxs == 1)
					break;

				been_here[sched_ctx->id] = 1;
#endif
			}
		}
	}

	if (!task)
	{
		if (starpu_idle_file)
			idle_start[worker->workerid] = starpu_timing_now();
		return NULL;
	}

	if (starpu_idle_file && idle_start[worker->workerid] != 0.0)
	{
		double idle_end = starpu_timing_now();
		idle[worker->workerid] += (idle_end - idle_start[worker->workerid]);
		idle_start[worker->workerid] = 0.0;
	}

#ifdef STARPU_USE_SC_HYPERVISOR
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	struct starpu_sched_ctx_performance_counters *perf_counters = sched_ctx->perf_counters;

	if (sched_ctx->id != 0 && perf_counters != NULL && perf_counters->notify_poped_task && _starpu_sched_ctx_allow_hypervisor(sched_ctx->id))
	{
		//	_STARPU_TRACE_HYPERVISOR_BEGIN();
		perf_counters->notify_poped_task(task->sched_ctx, worker->workerid);
		//	_STARPU_TRACE_HYPERVISOR_END();
	}
#endif //STARPU_USE_SC_HYPERVISOR

	/* Make sure we do not bother with all the multiformat-specific code if
	 * it is not necessary. */
	if (!_starpu_task_uses_multiformat_handles(task))
		goto profiling;

	/* This is either a conversion task, or a regular task for which the
	 * conversion tasks have already been created and submitted */
	if (task->mf_skip)
		goto profiling;

	/*
	 * This worker may not be able to execute this task. In this case, we
	 * should return the task anyway. It will be pushed back almost immediately.
	 * This way, we avoid computing and executing the conversion tasks.
	 * Here, we do not care about what implementation is used.
	 */
	worker_id = starpu_worker_get_id_check();
	if (!starpu_worker_can_execute_task_first_impl(worker_id, task, NULL))
		return task;

	node = starpu_worker_get_memory_node(worker_id);

	/*
	 * We do have a task that uses multiformat handles. Let's create the
	 * required conversion tasks.
	 */
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i = 0; i < nbuffers; i++)
	{
		struct starpu_task *conversion_task;
		starpu_data_handle_t handle;

		handle = STARPU_TASK_GET_HANDLE(task, i);
		if (!_starpu_handle_needs_conversion_task(handle, node))
			continue;
		conversion_task = _starpu_create_conversion_task(handle, node);
		conversion_task->mf_skip = 1;
		conversion_task->execute_on_a_specific_worker = 1;
		conversion_task->workerid = worker_id;
		/*
		 * Next tasks will need to know where these handles have gone.
		 */
		handle->mf_node = node;
		_starpu_task_submit_conversion_task(conversion_task, worker_id);
	}
	task->mf_skip = 1;
	starpu_task_list_push_back(&worker->local_tasks, task);
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
	goto pick;

profiling:
	if (profiling)
	{
		struct starpu_profiling_task_info *profiling_info;
		profiling_info = task->profiling_info;

		/* The task may have been created before profiling was enabled,
		 * so we check if the profiling_info structure is available
		 * even though we already tested if profiling is enabled. */
		if (profiling_info)
		{
			memcpy(&profiling_info->pop_start_time, &pop_start_time, sizeof(struct timespec));
			_starpu_clock_gettime(&profiling_info->pop_end_time);
		}
	}

	if (task->prologue_callback_pop_func)
	{
		_starpu_set_current_task(task);
		task->prologue_callback_pop_func(task->prologue_callback_pop_arg);
		_starpu_set_current_task(NULL);
	}

	return task;
}
struct starpu_task *_starpu_pop_every_task(struct _starpu_sched_ctx *sched_ctx)
{
	struct starpu_task *task = NULL;
	if (sched_ctx->sched_policy)
	{
		STARPU_ASSERT(sched_ctx->sched_policy->pop_every_task);

		/* TODO set profiling info */
		if (sched_ctx->sched_policy->pop_every_task)
		{
			_STARPU_SCHED_BEGIN;
			task = sched_ctx->sched_policy->pop_every_task(sched_ctx->id);
			_STARPU_SCHED_END;
		}
	}
	return task;
}
void _starpu_sched_pre_exec_hook(struct starpu_task *task)
{
	unsigned sched_ctx_id = starpu_sched_ctx_get_ctx_for_task(task);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (sched_ctx->sched_policy && sched_ctx->sched_policy->pre_exec_hook)
	{
		_STARPU_SCHED_BEGIN;
		sched_ctx->sched_policy->pre_exec_hook(task, sched_ctx_id);
		_STARPU_SCHED_END;
	}

	if (!sched_ctx->sched_policy)
	{
		int workerid = starpu_worker_get_id();
		struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
		struct _starpu_sched_ctx *other_sched_ctx;
		struct _starpu_sched_ctx_elt *e = NULL;
		struct _starpu_sched_ctx_list_iterator list_it;

		_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
		while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
		{
			e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
			other_sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
			if (other_sched_ctx != sched_ctx &&
			    other_sched_ctx->sched_policy != NULL &&
			    other_sched_ctx->sched_policy->pre_exec_hook)
			{
				_STARPU_SCHED_BEGIN;
				other_sched_ctx->sched_policy->pre_exec_hook(task, other_sched_ctx->id);
				_STARPU_SCHED_END;
			}
		}
	}
}

void _starpu_sched_post_exec_hook(struct starpu_task *task)
{
	unsigned sched_ctx_id = starpu_sched_ctx_get_ctx_for_task(task);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	if (sched_ctx->sched_policy && sched_ctx->sched_policy->post_exec_hook)
	{
		_STARPU_SCHED_BEGIN;
		sched_ctx->sched_policy->post_exec_hook(task, sched_ctx_id);
		_STARPU_SCHED_END;
	}

	if (!sched_ctx->sched_policy)
	{
		int workerid = starpu_worker_get_id();
		struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
		struct _starpu_sched_ctx *other_sched_ctx;
		struct _starpu_sched_ctx_elt *e = NULL;
		struct _starpu_sched_ctx_list_iterator list_it;

		_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
		while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
		{
			e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
			other_sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
			if (other_sched_ctx != sched_ctx &&
			    other_sched_ctx->sched_policy != NULL &&
			    other_sched_ctx->sched_policy->post_exec_hook)
			{
				_STARPU_SCHED_BEGIN;
				other_sched_ctx->sched_policy->post_exec_hook(task, other_sched_ctx->id);
				_STARPU_SCHED_END;
			}
		}
	}
}
void _starpu_wait_on_sched_event(void)
{
	struct _starpu_worker *worker = _starpu_get_local_worker_key();

	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);

	_starpu_handle_all_pending_node_data_requests(worker->memory_node);

	if (_starpu_machine_is_running())
	{
#ifndef STARPU_NON_BLOCKING_DRIVERS
		STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
#endif
	}

	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
}
/* The scheduling policy may put tasks directly into a worker's local queue so
 * that it is not always necessary to create its own queue when the local queue
 * is sufficient. If "prio" is not null, the task is put at the back of the
 * queue, where the worker pops tasks first. Setting "prio" to 0 therefore
 * ensures a FIFO ordering. */
int starpu_push_local_task(int workerid, struct starpu_task *task, int prio)
{
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);

	return _starpu_push_local_task(worker, task, prio);
}
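
/* Sketch (editor's illustration, not in the original source): a scheduling
 * policy writer could use the public helper above as the entire push side of
 * a trivial policy; the hook below always targets worker 0 and is
 * hypothetical. */
#if 0
static int everything_on_worker0_push(struct starpu_task *task)
{
	/* prio == 0 => FIFO ordering in worker 0's local queue, as described above */
	return starpu_push_local_task(0, task, 0);
}
#endif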
void _starpu_print_idle_time()
{
	if (!starpu_idle_file)
		return;
	double all_idle = 0.0;
	int i = 0;
	for (i = 0; i < STARPU_NMAXWORKERS; i++)
		all_idle += idle[i];

	FILE *f;
	f = fopen(starpu_idle_file, "a");
	if (!f)
	{
		_STARPU_MSG("couldn't open %s: %s\n", starpu_idle_file, strerror(errno));
	}
	else
	{
		fprintf(f, "%lf \n", all_idle);
		fclose(f);
	}
}
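
/* Sketch: the total idle time accumulated above (measured with
 * starpu_timing_now(), hence in microseconds) is appended to the file named
 * by STARPU_IDLE_FILE, e.g.
 *   STARPU_IDLE_FILE=idle.log ./my_app
 * (the file name is illustrative). */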