/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2020  Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2011       Télécom-SudParis
 * Copyright (C) 2013       Thibaut Lambert
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <math.h>
#include <starpu.h>
#include <starpu_profiling.h>
#include <profiling/profiling.h>
#include <common/utils.h>
#include <core/debug.h>
#include <core/sched_ctx.h>
#include <drivers/driver_common/driver_common.h>
#include <core/sched_policy.h>
#include <core/task.h>
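
/* Mark the beginning of the execution of job <j> on <worker>. <rank> is the
 * worker's rank within a parallel (combined-worker) execution, rank 0 being
 * the master: only the master updates the task status, records the start
 * time and, for contexts without a scheduling policy, recursively starts
 * the job on the other workers of the context. */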
void _starpu_driver_start_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	if (worker->bindid_requested != -1)
	{
		typedef unsigned __attribute__((__may_alias__)) alias_unsigned;
		typedef int __attribute__((__may_alias__)) alias_int;

		/* Atomically fetch the requested binding and reset it to -1; the
		 * may_alias typedefs let us reinterpret the unsigned exchange
		 * result as a signed id without strict-aliasing issues. */
		unsigned raw_bindid_requested = STARPU_VAL_EXCHANGE((alias_unsigned *)&worker->bindid_requested, -1);
		int bindid_requested = *(alias_int *)&raw_bindid_requested;
		if (bindid_requested != -1)
		{
			worker->bindid = bindid_requested;
			_starpu_bind_thread_on_cpu(worker->bindid, worker->workerid, NULL);
		}
	}

	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	/* If the job is executed on a combined worker, there is no need for the
	 * scheduler to process it: it doesn't contain any valuable data
	 * as it's not linked to an actual worker. */
	if (j->task_size == 1 && rank == 0)
		_starpu_sched_pre_exec_hook(task);

	_starpu_set_worker_status(worker, STATUS_EXECUTING);

	if (rank == 0)
	{
		STARPU_ASSERT(task->status == STARPU_TASK_READY);
		if (!_starpu_perf_counter_paused() && !j->internal)
		{
			(void)STARPU_ATOMIC_ADD64(&_starpu_task__g_current_ready__value, -1);
			if (task->cl && task->cl->perf_counter_values)
			{
				struct starpu_perf_counter_sample_cl_values * const pcv = task->cl->perf_counter_values;
				(void)STARPU_ATOMIC_ADD64(&pcv->task.current_ready, -1);
			}
		}
		task->status = STARPU_TASK_RUNNING;
		STARPU_AYU_RUNTASK(j->job_id);
		cl->per_worker_stats[workerid]++;

		_starpu_clock_gettime(&worker->cl_start);

		struct starpu_profiling_task_info *profiling_info = task->profiling_info;
		if ((profiling && profiling_info) || calibrate_model)
		{
			_starpu_worker_register_executing_start_date(workerid, &worker->cl_start);
		}
		_starpu_job_notify_start(j, perf_arch);
	}

	// Find out if the worker is the master of a parallel context
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	_starpu_sched_ctx_lock_read(sched_ctx->id);
	if (!sched_ctx->sched_policy)
	{
		if (!sched_ctx->awake_workers && sched_ctx->main_master == worker->workerid)
		{
			struct starpu_worker_collection *workers = sched_ctx->workers;
			struct starpu_sched_ctx_iterator it;
			int new_rank = 0;

			if (workers->init_iterator)
				workers->init_iterator(workers, &it);
			while (workers->has_next(workers, &it))
			{
				int _workerid = workers->get_next(workers, &it);
				if (_workerid != workerid)
				{
					new_rank++;
					struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
					_starpu_driver_start_job(_worker, j, &_worker->perf_arch, new_rank, profiling);
				}
			}
		}
		_STARPU_TRACE_TASK_COLOR(j);
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, &sched_ctx->perf_arch, workerid);
	}
	else
	{
		_STARPU_TRACE_TASK_COLOR(j);
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
	}
	_starpu_sched_ctx_unlock_read(sched_ctx->id);

	_STARPU_TASK_BREAK_ON(task, exec);
}
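
/* Mark the end of the execution of job <j> on <worker>: record the end time,
 * emit the end-of-codelet trace event and, for a master worker of a context
 * without a scheduling policy, recursively do the same on the other workers
 * of the context. */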
void _starpu_driver_end_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	// Find out if the worker is the master of a parallel context
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	if (!sched_ctx->sched_policy)
	{
		_starpu_perfmodel_create_comb_if_needed(&(sched_ctx->perf_arch));
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, &(sched_ctx->perf_arch), workerid);
	}
	else
	{
		_starpu_perfmodel_create_comb_if_needed(perf_arch);
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
	}

	if (cl && cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	if (rank == 0)
	{
		_starpu_clock_gettime(&worker->cl_end);

		struct starpu_profiling_task_info *profiling_info = task->profiling_info;
		if ((profiling && profiling_info) || calibrate_model)
		{
			_starpu_worker_register_executing_end(workerid);
		}
		STARPU_AYU_POSTRUNTASK(j->job_id);
	}

	_starpu_set_worker_status(worker, STATUS_UNKNOWN);

	if (!sched_ctx->sched_policy && !sched_ctx->awake_workers &&
	    sched_ctx->main_master == worker->workerid)
	{
		struct starpu_worker_collection *workers = sched_ctx->workers;
		struct starpu_sched_ctx_iterator it;
		int new_rank = 0;

		if (workers->init_iterator)
			workers->init_iterator(workers, &it);
		while (workers->has_next(workers, &it))
		{
			int _workerid = workers->get_next(workers, &it);
			if (_workerid != workerid)
			{
				new_rank++;
				struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
				_starpu_driver_end_job(_worker, j, &_worker->perf_arch, new_rank, profiling);
			}
		}
	}
}
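
/* Once a job has completed, update the profiling information, the
 * performance counters and the codelet's performance models (execution time
 * and, if an energy model is calibrating, energy consumption) from the
 * measured interval [cl_start, cl_end]. */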
void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker,
					struct starpu_perfmodel_arch* perf_arch,
					int profiling)
{
	struct starpu_profiling_task_info *profiling_info = j->task->profiling_info;
	struct timespec measured_ts;
	int workerid = worker->workerid;
	struct starpu_codelet *cl = j->task->cl;
	int calibrate_model = 0;
	int updated = 0;

	_starpu_perfmodel_create_comb_if_needed(perf_arch);

#ifndef STARPU_SIMGRID
	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;
#endif

	if ((profiling && profiling_info) || calibrate_model || !_starpu_perf_counter_paused())
	{
		starpu_timespec_sub(&worker->cl_end, &worker->cl_start, &measured_ts);
		double measured = starpu_timing_timespec_to_us(&measured_ts);
		STARPU_ASSERT_MSG(measured >= 0, "measured=%lf\n", measured);

		if (!_starpu_perf_counter_paused())
		{
			worker->__w_total_executed__value++;
			worker->__w_cumul_execution_time__value += measured;
			_starpu_perf_counter_update_per_worker_sample(worker->workerid);
			if (cl->perf_counter_values)
			{
				struct starpu_perf_counter_sample_cl_values * const pcv = cl->perf_counter_values;
				(void)STARPU_ATOMIC_ADD64(&pcv->task.total_executed, 1);
				_starpu_perf_counter_update_acc_double(&pcv->task.cumul_execution_time, measured);
				_starpu_perf_counter_update_per_codelet_sample(cl);
			}
		}

		if (profiling && profiling_info)
		{
			memcpy(&profiling_info->start_time, &worker->cl_start, sizeof(struct timespec));
			memcpy(&profiling_info->end_time, &worker->cl_end, sizeof(struct timespec));
			profiling_info->workerid = workerid;

			_starpu_worker_update_profiling_info_executing(workerid, &measured_ts, 1,
								       profiling_info->used_cycles,
								       profiling_info->stall_cycles,
								       profiling_info->energy_consumed,
								       j->task->flops);
			updated = 1;
		}

		if (calibrate_model)
		{
#ifdef STARPU_OPENMP
			double time_consumed = measured;
			unsigned do_update_time_model;
			if (j->continuation)
			{
				/* The job is only paused, thus we accumulate
				 * its timing, but we don't update its
				 * perfmodel now. */
				starpu_timespec_accumulate(&j->cumulated_ts, &measured_ts);
				do_update_time_model = 0;
			}
			else
			{
				if (j->discontinuous)
				{
					/* The job was paused at least once but is now
					 * really completing. We need to take into
					 * account its past execution time in its
					 * perfmodel. */
					starpu_timespec_accumulate(&measured_ts, &j->cumulated_ts);
					time_consumed = starpu_timing_timespec_to_us(&measured_ts);
				}
				do_update_time_model = 1;
			}
#else
			unsigned do_update_time_model = 1;
			const double time_consumed = measured;
#endif
			if (j->task->failed)
				/* Do not record the perfmodel for failed tasks, as they may have terminated earlier */
				do_update_time_model = 0;
			if (do_update_time_model)
			{
				_starpu_update_perfmodel_history(j, j->task->cl->model, perf_arch, worker->devid, time_consumed, j->nimpl);
			}
		}
	}

	if (!updated)
		_starpu_worker_update_profiling_info_executing(workerid, NULL, 1, 0, 0, 0, 0);

	if (profiling_info && profiling_info->energy_consumed && cl->energy_model && cl->energy_model->benchmarking)
	{
#ifdef STARPU_OPENMP
		double energy_consumed = profiling_info->energy_consumed;
		unsigned do_update_energy_model;
		if (j->continuation)
		{
			j->cumulated_energy_consumed += energy_consumed;
			do_update_energy_model = 0;
		}
		else
		{
			if (j->discontinuous)
			{
				energy_consumed += j->cumulated_energy_consumed;
			}
			do_update_energy_model = 1;
		}
#else
		const double energy_consumed = profiling_info->energy_consumed;
		unsigned do_update_energy_model = 1;
#endif
		if (j->task->failed)
			/* Do not record the perfmodel for failed tasks, as they may have terminated earlier */
			do_update_energy_model = 0;
		if (do_update_energy_model)
		{
			_starpu_update_perfmodel_history(j, j->task->cl->energy_model, perf_arch, worker->devid, energy_consumed, j->nimpl);
		}
	}
}
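
/* The following helpers keep a worker's status and the associated trace
 * events consistent across the scheduling / sleeping / wakeup transitions:
 * trace events are only emitted when the status actually changes. */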
static void _starpu_worker_set_status_scheduling(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING)
	{
		_starpu_worker_set_status(workerid, STATUS_SLEEPING_SCHEDULING);
	}
	else if (_starpu_worker_get_status(workerid) != STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_START;
		_starpu_worker_set_status(workerid, STATUS_SCHEDULING);
	}
}

static void _starpu_worker_set_status_scheduling_done(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING_SCHEDULING)
	{
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	}
	else if (_starpu_worker_get_status(workerid) == STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_END;
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}

static void _starpu_worker_set_status_sleeping(int workerid)
{
	if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING)
	{
		if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING_SCHEDULING)
		{
			_STARPU_TRACE_WORKER_SLEEP_START;
			_starpu_worker_restart_sleeping(workerid);
		}
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	}
}

static void _starpu_worker_set_status_wakeup(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING)
	{
		_STARPU_TRACE_WORKER_SLEEP_END;
		_starpu_worker_stop_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}
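
/* Spin for an exponentially growing number of STARPU_UYIELD() rounds,
 * doubling the worker's backoff (up to driver_spinning_backoff_max) so that
 * an idle worker polls the scheduler less and less aggressively. */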
#if !defined(STARPU_SIMGRID)
static void _starpu_exponential_backoff(struct _starpu_worker *worker)
{
	int delay = worker->spinning_backoff;

	if (worker->spinning_backoff < worker->config->conf.driver_spinning_backoff_max)
		worker->spinning_backoff <<= 1;

	while (delay--)
		STARPU_UYIELD();
}
#endif

/* Workers may block when there is no work to do at all. */
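/* Pop a task for <worker>, or return NULL. If nothing was popped and the
 * worker is executing nothing, it either blocks on its sched_cond (blocking
 * drivers) or backs off exponentially before polling again. */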
struct starpu_task *_starpu_get_worker_task(struct _starpu_worker *worker, int workerid, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	struct starpu_task *task;
#if !defined(STARPU_SIMGRID)
	unsigned keep_awake = 0;
#endif

	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
	_starpu_worker_enter_sched_op(worker);
	_starpu_worker_set_status_scheduling(workerid);
#if !defined(STARPU_SIMGRID)
	if ((worker->pipeline_length == 0 && worker->current_task)
		|| (worker->pipeline_length != 0 && worker->ntasks))
		/* This worker is executing something */
		keep_awake = 1;
#endif

	/* If the worker's pipeline is already full (or stuck), don't pop another task */
	if (worker->pipeline_length && (worker->ntasks == worker->pipeline_length || worker->pipeline_stuck))
		task = NULL;
	/* don't push a task if we are already transferring one */
	else if (worker->task_transferring != NULL)
		task = NULL;
	/* else try to pop a task */
	else
	{
		STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
		task = _starpu_pop_task(worker);
		STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
#if !defined(STARPU_SIMGRID)
		if (worker->state_keep_awake)
		{
			keep_awake = 1;
			worker->state_keep_awake = 0;
		}
#endif
	}

#if !defined(STARPU_SIMGRID)
	if (task == NULL && !keep_awake)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may block just after the scheduler got a new task to be
		 * executed, and thus hang. */
		_starpu_worker_set_status_sleeping(workerid);
		_starpu_worker_leave_sched_op(worker);
		STARPU_PTHREAD_COND_BROADCAST(&worker->sched_cond);

#ifndef STARPU_NON_BLOCKING_DRIVERS
		if (_starpu_worker_can_block(memnode, worker)
			&& !worker->state_block_in_parallel_req
			&& !worker->state_unblock_in_parallel_req
			&& !_starpu_sched_ctx_last_worker_awake(worker))
		{
#ifdef STARPU_WORKER_CALLBACKS
			if (_starpu_config.conf.callback_worker_going_to_sleep != NULL)
			{
				_starpu_config.conf.callback_worker_going_to_sleep(workerid);
			}
#endif
			do
			{
				STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
				if (!worker->state_keep_awake
					&& _starpu_worker_can_block(memnode, worker)
					&& !worker->state_block_in_parallel_req
					&& !worker->state_unblock_in_parallel_req)
				{
					_starpu_worker_set_status_sleeping(workerid);
					if (_starpu_sched_ctx_last_worker_awake(worker))
					{
						break;
					}
				}
				else
				{
					break;
				}
			}
			while (1);
			worker->state_keep_awake = 0;
			_starpu_worker_set_status_scheduling_done(workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
#ifdef STARPU_WORKER_CALLBACKS
			if (_starpu_config.conf.callback_worker_waking_up != NULL)
			{
				/* The wake-up callback should be called once the sched_mutex has been unlocked,
				 * so that an external resource manager can potentially defer the wake-up momentarily if
				 * the corresponding computing unit is still in use by another runtime system */
				_starpu_config.conf.callback_worker_waking_up(workerid);
			}
#endif
		}
		else
#endif
		{
			_starpu_worker_set_status_scheduling_done(workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
			if (_starpu_machine_is_running())
				_starpu_exponential_backoff(worker);
		}
		return NULL;
	}
#endif

	if (task)
	{
		_starpu_worker_set_status_scheduling_done(workerid);
		_starpu_worker_set_status_wakeup(workerid);
	}
	else
	{
		_starpu_worker_set_status_sleeping(workerid);
	}
	worker->spinning_backoff = worker->config->conf.driver_spinning_backoff_min;

	_starpu_worker_leave_sched_op(worker);
	STARPU_PTHREAD_COND_BROADCAST(&worker->sched_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);

	/* Only notify Ayudame when we actually obtained a task */
	if (task)
		STARPU_AYU_PRERUNTASK(_starpu_get_job_associated_to_task(task)->job_id, workerid);

	return task;
}
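
/* Pop tasks for the <nworkers> workers of <workers>, storing them in <tasks>
 * and returning how many were obtained. For parallel tasks, also record the
 * worker's rank and combined-worker information. In blocking drivers mode
 * only a single worker is supported, and it may block when neither it nor
 * anything else got work. */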
int _starpu_get_multi_worker_task(struct _starpu_worker *workers, struct starpu_task **tasks, int nworkers, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	int i, count = 0;
	struct _starpu_job *j;
	int is_parallel_task;
	struct _starpu_combined_worker *combined_worker;
#if !defined(STARPU_NON_BLOCKING_DRIVERS) && !defined(STARPU_SIMGRID)
	int executing = 0;
#endif

#ifndef STARPU_NON_BLOCKING_DRIVERS
	/* This assumes only 1 worker */
	STARPU_ASSERT_MSG(nworkers == 1, "Multiple workers are not yet supported in blocking drivers mode\n");
	_starpu_set_local_worker_key(&workers[0]);
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[0].sched_mutex);
	_starpu_worker_enter_sched_op(&workers[0]);
#endif

	/* For each worker */
	for (i = 0; i < nworkers; i++)
	{
		unsigned keep_awake = 0;
#if !defined(STARPU_NON_BLOCKING_DRIVERS) && !defined(STARPU_SIMGRID)
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
			|| (workers[i].pipeline_length != 0 && workers[i].ntasks))
			/* At least this worker is executing something */
			executing = 1;
#endif

		/* If the worker is already executing at full pipeline capacity, don't pop another task */
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
			|| (workers[i].pipeline_length != 0 &&
				(workers[i].ntasks == workers[i].pipeline_length
				 || workers[i].pipeline_stuck)))
		{
			tasks[i] = NULL;
		}
		/* don't push a task if we are already transferring one */
		else if (workers[i].task_transferring != NULL)
		{
			tasks[i] = NULL;
		}
		/* else try to pop a task */
		else
		{
#ifdef STARPU_NON_BLOCKING_DRIVERS
			_starpu_set_local_worker_key(&workers[i]);
			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[i].sched_mutex);
			_starpu_worker_enter_sched_op(&workers[i]);
#endif
			_starpu_worker_set_status_scheduling(workers[i].workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
			tasks[i] = _starpu_pop_task(&workers[i]);
			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[i].sched_mutex);
			if (workers[i].state_keep_awake)
			{
				keep_awake = workers[i].state_keep_awake;
				workers[i].state_keep_awake = 0;
			}

			if (tasks[i] != NULL || keep_awake)
			{
				_starpu_worker_set_status_scheduling_done(workers[i].workerid);
				_starpu_worker_set_status_wakeup(workers[i].workerid);
				STARPU_PTHREAD_COND_BROADCAST(&workers[i].sched_cond);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				_starpu_worker_leave_sched_op(&workers[i]);
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
#endif

				count++;
				if (tasks[i] == NULL)
					/* No task, but keep_awake */
					continue;

				j = _starpu_get_job_associated_to_task(tasks[i]);
				is_parallel_task = (j->task_size > 1);
				if (workers[i].pipeline_length)
					workers[i].current_tasks[(workers[i].first_task + workers[i].ntasks) % STARPU_MAX_PIPELINE] = tasks[i];
				else
					workers[i].current_task = j->task;
				workers[i].ntasks++;

				/* Get the rank in case it is a parallel task */
				if (is_parallel_task)
				{
					STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
					workers[i].current_rank = j->active_task_alias_count++;
					STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

					if (j->combined_workerid != -1)
					{
						combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);
						workers[i].combined_workerid = j->combined_workerid;
						workers[i].worker_size = combined_worker->worker_size;
					}
				}
				else
				{
					workers[i].combined_workerid = workers[i].workerid;
					workers[i].worker_size = 1;
					workers[i].current_rank = 0;
				}

				STARPU_AYU_PRERUNTASK(_starpu_get_job_associated_to_task(tasks[i])->job_id, workers[i].workerid);
			}
			else
			{
				_starpu_worker_set_status_sleeping(workers[i].workerid);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				_starpu_worker_leave_sched_op(&workers[i]);
#endif
				STARPU_PTHREAD_COND_BROADCAST(&workers[i].sched_cond);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
#endif
			}
		}
	}

#if !defined(STARPU_NON_BLOCKING_DRIVERS)
#if !defined(STARPU_SIMGRID)
	/* Block the assumed-to-be-only worker */
	struct _starpu_worker *worker = &workers[0];
	unsigned workerid = workers[0].workerid;

	if (!count && !executing)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may block just after the scheduler got a new task to be
		 * executed, and thus hang. */
		_starpu_worker_set_status_sleeping(workerid);
		_starpu_worker_leave_sched_op(worker);

		if (_starpu_worker_can_block(memnode, worker)
			&& !worker->state_block_in_parallel_req
			&& !worker->state_unblock_in_parallel_req
			&& !_starpu_sched_ctx_last_worker_awake(worker))
		{
#ifdef STARPU_WORKER_CALLBACKS
			if (_starpu_config.conf.callback_worker_going_to_sleep != NULL)
			{
				_starpu_config.conf.callback_worker_going_to_sleep(workerid);
			}
#endif
			do
			{
				STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
				if (!worker->state_keep_awake
					&& _starpu_worker_can_block(memnode, worker)
					&& !worker->state_block_in_parallel_req
					&& !worker->state_unblock_in_parallel_req)
				{
					_starpu_worker_set_status_sleeping(workerid);
					if (_starpu_sched_ctx_last_worker_awake(worker))
					{
						break;
					}
				}
				else
				{
					break;
				}
			}
			while (1);
			worker->state_keep_awake = 0;
			_starpu_worker_set_status_scheduling_done(workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
#ifdef STARPU_WORKER_CALLBACKS
			if (_starpu_config.conf.callback_worker_waking_up != NULL)
			{
				/* The wake-up callback should be called once the sched_mutex has been unlocked,
				 * so that an external resource manager can potentially defer the wake-up momentarily if
				 * the corresponding computing unit is still in use by another runtime system */
				_starpu_config.conf.callback_worker_waking_up(workerid);
			}
#endif
		}
		else
		{
			_starpu_worker_set_status_scheduling_done(workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
			if (_starpu_machine_is_running())
				_starpu_exponential_backoff(worker);
		}
		return 0;
	}

	_starpu_worker_set_status_wakeup(workerid);
	worker->spinning_backoff = worker->config->conf.driver_spinning_backoff_min;
#endif /* !STARPU_SIMGRID */

	_starpu_worker_leave_sched_op(&workers[0]);
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[0].sched_mutex);
#endif /* !STARPU_NON_BLOCKING_DRIVERS */
	return count;
}