driver_common.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2017  Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017  CNRS
 * Copyright (C) 2011  Télécom-SudParis
 * Copyright (C) 2014, 2016  INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <math.h>
#include <starpu.h>
#include <starpu_profiling.h>
#include <profiling/profiling.h>
#include <common/utils.h>
#include <core/debug.h>
#include <core/sched_ctx.h>
#include <drivers/driver_common/driver_common.h>
#include <starpu_top.h>
#include <core/sched_policy.h>
#include <top/starpu_top_core.h>
#include <core/task.h>

#define BACKOFF_MAX 32 /* TODO: use a parameter to define these */
#define BACKOFF_MIN 1

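/* Called by a driver just before a job's codelet runs on a worker.
 * "rank" is the worker's rank within a parallel (combined-worker) task;
 * only rank 0 records statistics and the profiling start timestamp.
 * When the context has no scheduling policy and its workers are not kept
 * awake, the context's master worker recursively starts the job on the
 * other workers of the context. */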
void _starpu_driver_start_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_start, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	int starpu_top = _starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	/* If the job is executed on a combined worker there is no need for the
	 * scheduler to process it: it doesn't contain any valuable data
	 * as it's not linked to an actual worker */
	if (j->task_size == 1 && rank == 0)
		_starpu_sched_pre_exec_hook(task);

	_starpu_set_worker_status(worker, STATUS_EXECUTING);
	task->status = STARPU_TASK_RUNNING;

	if (rank == 0)
	{
		STARPU_AYU_RUNTASK(j->job_id);
		cl->per_worker_stats[workerid]++;

		struct starpu_profiling_task_info *profiling_info = task->profiling_info;
		if ((profiling && profiling_info) || calibrate_model || starpu_top)
		{
			_starpu_clock_gettime(codelet_start);
			_starpu_worker_register_executing_start_date(workerid, codelet_start);
		}
	}

	if (starpu_top)
		_starpu_top_task_started(task, workerid, codelet_start);

	/* Find out if the worker is the master of a parallel context */
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	_starpu_sched_ctx_lock_read(sched_ctx->id);
	if (!sched_ctx->sched_policy)
	{
		if (!sched_ctx->awake_workers && sched_ctx->main_master == worker->workerid)
		{
			struct starpu_worker_collection *workers = sched_ctx->workers;
			struct starpu_sched_ctx_iterator it;
			int new_rank = 0;

			if (workers->init_iterator)
				workers->init_iterator(workers, &it);
			while (workers->has_next(workers, &it))
			{
				int _workerid = workers->get_next(workers, &it);
				if (_workerid != workerid)
				{
					new_rank++;
					struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
					_starpu_driver_start_job(_worker, j, &_worker->perf_arch, codelet_start, new_rank, profiling);
				}
			}
		}
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, &sched_ctx->perf_arch, workerid);
	}
	else
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
	_starpu_sched_ctx_unlock_read(sched_ctx->id);

	_STARPU_TASK_BREAK_ON(task, exec);
}

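/* Called by a driver just after a job's codelet has run.  Mirrors
 * _starpu_driver_start_job(): rank 0 records the end timestamp and
 * profiling data, and the master of a policy-less context recursively
 * ends the job on the other workers of the context. */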
void _starpu_driver_end_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_end, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	int starpu_top = _starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	/* Find out if the worker is the master of a parallel context */
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	if (!sched_ctx->sched_policy)
	{
		_starpu_perfmodel_create_comb_if_needed(&(sched_ctx->perf_arch));
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, &(sched_ctx->perf_arch), workerid);
	}
	else
	{
		_starpu_perfmodel_create_comb_if_needed(perf_arch);
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
	}

	if (cl && cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	if (rank == 0)
	{
		struct starpu_profiling_task_info *profiling_info = task->profiling_info;
		if ((profiling && profiling_info) || calibrate_model || starpu_top)
		{
			_starpu_clock_gettime(codelet_end);
			_starpu_worker_register_executing_end(workerid);
		}
		STARPU_AYU_POSTRUNTASK(j->job_id);
	}

	if (starpu_top)
		_starpu_top_task_ended(task, workerid, codelet_end);

	_starpu_set_worker_status(worker, STATUS_UNKNOWN);

	if (!sched_ctx->sched_policy && !sched_ctx->awake_workers &&
	    sched_ctx->main_master == worker->workerid)
	{
		struct starpu_worker_collection *workers = sched_ctx->workers;
		struct starpu_sched_ctx_iterator it;
		int new_rank = 0;

		if (workers->init_iterator)
			workers->init_iterator(workers, &it);
		while (workers->has_next(workers, &it))
		{
			int _workerid = workers->get_next(workers, &it);
			if (_workerid != workerid)
			{
				new_rank++;
				struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
				_starpu_driver_end_job(_worker, j, &_worker->perf_arch, codelet_end, new_rank, profiling);
			}
		}
	}
}

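/* Feed the measured execution time (and energy, when available) of a
 * finished job back into the profiling counters and performance models.
 * With OpenMP continuations, timings of paused jobs are accumulated and
 * the model is only updated once the job really completes. */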
void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker,
					struct starpu_perfmodel_arch* perf_arch,
					struct timespec *codelet_start, struct timespec *codelet_end, int profiling)
{
	struct starpu_profiling_task_info *profiling_info = j->task->profiling_info;
	struct timespec measured_ts;
	int workerid = worker->workerid;
	struct starpu_codelet *cl = j->task->cl;
	int calibrate_model = 0;
	int updated = 0;

	_starpu_perfmodel_create_comb_if_needed(perf_arch);

#ifndef STARPU_SIMGRID
	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;
#endif

	if ((profiling && profiling_info) || calibrate_model)
	{
		double measured;
		starpu_timespec_sub(codelet_end, codelet_start, &measured_ts);
		measured = starpu_timing_timespec_to_us(&measured_ts);

		if (profiling && profiling_info)
		{
			memcpy(&profiling_info->start_time, codelet_start, sizeof(struct timespec));
			memcpy(&profiling_info->end_time, codelet_end, sizeof(struct timespec));

			profiling_info->workerid = workerid;

			_starpu_worker_update_profiling_info_executing(workerid, &measured_ts, 1,
								       profiling_info->used_cycles,
								       profiling_info->stall_cycles,
								       profiling_info->energy_consumed,
								       j->task->flops);
			updated = 1;
		}

		if (calibrate_model)
		{
#ifdef STARPU_OPENMP
			double time_consumed = measured;
			unsigned do_update_time_model;
			if (j->continuation)
			{
				/* The job is only paused, thus we accumulate
				 * its timing, but we don't update its
				 * perfmodel now. */
				starpu_timespec_accumulate(&j->cumulated_ts, &measured_ts);
				do_update_time_model = 0;
			}
			else
			{
				if (j->discontinuous)
				{
					/* The job was paused at least once but is now
					 * really completing. We need to take into
					 * account its past execution time in its
					 * perfmodel. */
					starpu_timespec_accumulate(&measured_ts, &j->cumulated_ts);
					time_consumed = starpu_timing_timespec_to_us(&measured_ts);
				}
				do_update_time_model = 1;
			}
#else
			const unsigned do_update_time_model = 1;
			const double time_consumed = measured;
#endif
			if (do_update_time_model)
			{
				_starpu_update_perfmodel_history(j, j->task->cl->model, perf_arch, worker->devid, time_consumed, j->nimpl);
			}
		}
	}

	if (!updated)
		_starpu_worker_update_profiling_info_executing(workerid, NULL, 1, 0, 0, 0, 0);

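	/* The energy model is handled the same way as the time model:
	 * accumulate across OpenMP continuations, update on completion. */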
	if (profiling_info && profiling_info->energy_consumed && cl->energy_model && cl->energy_model->benchmarking)
	{
#ifdef STARPU_OPENMP
		double energy_consumed = profiling_info->energy_consumed;
		unsigned do_update_energy_model;
		if (j->continuation)
		{
			j->cumulated_energy_consumed += energy_consumed;
			do_update_energy_model = 0;
		}
		else
		{
			if (j->discontinuous)
			{
				energy_consumed += j->cumulated_energy_consumed;
			}
			do_update_energy_model = 1;
		}
#else
		const double energy_consumed = profiling_info->energy_consumed;
		const unsigned do_update_energy_model = 1;
#endif
		if (do_update_energy_model)
		{
			_starpu_update_perfmodel_history(j, j->task->cl->energy_model, perf_arch, worker->devid, energy_consumed, j->nimpl);
		}
	}
}

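/* Small helpers to transition a worker between the SCHEDULING, SLEEPING
 * and awake states while emitting the matching trace event exactly once. */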
static void _starpu_worker_set_status_scheduling(int workerid)
{
	if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING
	    && _starpu_worker_get_status(workerid) != STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_START;
		_starpu_worker_set_status(workerid, STATUS_SCHEDULING);
	}
}

static void _starpu_worker_set_status_scheduling_done(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_END;
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}

static void _starpu_worker_set_status_sleeping(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	else if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING)
	{
		_STARPU_TRACE_WORKER_SLEEP_START;
		_starpu_worker_restart_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	}
}

static void _starpu_worker_set_status_wakeup(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING || _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
	{
		_STARPU_TRACE_WORKER_SLEEP_END;
		_starpu_worker_stop_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}

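/* Spin for an exponentially growing number of yields (capped at
 * BACKOFF_MAX) so that an idle worker which cannot block burns less CPU. */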
#if !defined(STARPU_SIMGRID)
static void _starpu_exponential_backoff(struct _starpu_worker *worker)
{
	int delay = worker->spinning_backoff;

	if (worker->spinning_backoff < BACKOFF_MAX)
		worker->spinning_backoff <<= 1;

	while (delay--)
		STARPU_UYIELD();
}
#endif

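/* Try to pop a task for the given worker.  Returns NULL when nothing is
 * available; in that case the worker either blocks on its condition
 * variable (when it is allowed to) or spins with exponential backoff. */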
/* Workers may block when there is no work to do at all. */
struct starpu_task *_starpu_get_worker_task(struct _starpu_worker *worker, int workerid, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	struct starpu_task *task;
	unsigned executing STARPU_ATTRIBUTE_UNUSED = 0;
	unsigned keep_awake = 0;

	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
	_starpu_worker_enter_sched_op(worker);
	_starpu_worker_set_status_scheduling(workerid);
	if ((worker->pipeline_length == 0 && worker->current_task)
	    || (worker->pipeline_length != 0 && worker->ntasks))
		/* This worker is executing something */
		executing = 1;

	/* if the worker's pipeline is already full or stuck, don't pop another task */
	if (worker->pipeline_length && (worker->ntasks == worker->pipeline_length || worker->pipeline_stuck))
		task = NULL;
	/* don't push a task if we are already transferring one */
	else if (worker->task_transferring != NULL)
		task = NULL;
	/* else try to pop a task */
	else
	{
		STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
		task = _starpu_pop_task(worker);
		STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
		if (worker->state_keep_awake)
		{
			keep_awake = worker->state_keep_awake;
			worker->state_keep_awake = 0;
		}
	}

#if !defined(STARPU_SIMGRID)
	if (task == NULL && !executing && !keep_awake)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go to sleep just after the scheduler got a new task to be
		 * executed, and thus hang. */
		_starpu_worker_set_status_sleeping(workerid);
		STARPU_PTHREAD_COND_BROADCAST(&worker->sched_cond);

		if (_starpu_worker_can_block(memnode, worker)
		    && !_starpu_sched_ctx_last_worker_awake(worker))
		{
			do
			{
				STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
			}
			while (worker->status == STATUS_SLEEPING);
			_starpu_worker_leave_sched_op(worker);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
		}
		else
		{
			_starpu_worker_leave_sched_op(worker);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
			if (_starpu_machine_is_running())
				_starpu_exponential_backoff(worker);
		}

		return NULL;
	}
#endif

	if (task)
	{
		_starpu_worker_set_status_scheduling_done(workerid);
		_starpu_worker_set_status_wakeup(workerid);
	}
	else
	{
		_starpu_worker_set_status_sleeping(workerid);
	}

	STARPU_PTHREAD_COND_BROADCAST(&worker->sched_cond);

	worker->spinning_backoff = BACKOFF_MIN;

	_starpu_worker_leave_sched_op(worker);
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);

	STARPU_AYU_PRERUNTASK(_starpu_get_job_associated_to_task(task)->job_id, workerid);

	return task;
}

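/* Variant of _starpu_get_worker_task() for a set of workers driven by the
 * same driver thread (e.g. several streams of one device).  Fills tasks[]
 * and returns the number of workers that obtained a task or asked to be
 * kept awake. */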
int _starpu_get_multi_worker_task(struct _starpu_worker *workers, struct starpu_task **tasks, int nworkers, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	int i, count = 0;
	struct _starpu_job *j;
	int is_parallel_task;
	struct _starpu_combined_worker *combined_worker;
	int executing STARPU_ATTRIBUTE_UNUSED = 0;

	/* for each worker */
#ifndef STARPU_NON_BLOCKING_DRIVERS
	/* This assumes only 1 worker */
	STARPU_ASSERT_MSG(nworkers == 1, "Multiple workers is not yet possible in blocking drivers mode\n");
	_starpu_set_local_worker_key(&workers[0]);
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[0].sched_mutex);
	_starpu_worker_enter_sched_op(&workers[0]);
#endif
	for (i = 0; i < nworkers; i++)
	{
		unsigned keep_awake = 0;
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
		    || (workers[i].pipeline_length != 0 && workers[i].ntasks))
			/* At least this worker is executing something */
			executing = 1;

		/* if the worker is already executing a task (or its pipeline is full), don't pop another one */
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
		    || (workers[i].pipeline_length != 0 &&
			(workers[i].ntasks == workers[i].pipeline_length
			 || workers[i].pipeline_stuck)))
		{
			tasks[i] = NULL;
		}
		/* don't push a task if we are already transferring one */
		else if (workers[i].task_transferring != NULL)
		{
			tasks[i] = NULL;
		}
		/* else try to pop a task */
		else
		{
#ifdef STARPU_NON_BLOCKING_DRIVERS
			_starpu_set_local_worker_key(&workers[i]);
			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[i].sched_mutex);
			_starpu_worker_enter_sched_op(&workers[i]);
#endif
			_starpu_worker_set_status_scheduling(workers[i].workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
			tasks[i] = _starpu_pop_task(&workers[i]);
			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[i].sched_mutex);
			if (workers[i].state_keep_awake)
			{
				keep_awake = workers[i].state_keep_awake;
				workers[i].state_keep_awake = 0;
			}
			if (tasks[i] != NULL || keep_awake)
			{
				_starpu_worker_set_status_scheduling_done(workers[i].workerid);
				_starpu_worker_set_status_wakeup(workers[i].workerid);
				STARPU_PTHREAD_COND_BROADCAST(&workers[i].sched_cond);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				_starpu_worker_leave_sched_op(&workers[i]);
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
#endif
				count++;
				if (tasks[i] == NULL)
					/* no task, but keep_awake */
					continue;
				j = _starpu_get_job_associated_to_task(tasks[i]);
				is_parallel_task = (j->task_size > 1);
				if (workers[i].pipeline_length)
					workers[i].current_tasks[(workers[i].first_task + workers[i].ntasks) % STARPU_MAX_PIPELINE] = tasks[i];
				else
					workers[i].current_task = j->task;
				workers[i].ntasks++;

				/* Get the rank in case it is a parallel task */
				if (is_parallel_task)
				{
					STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
					workers[i].current_rank = j->active_task_alias_count++;
					STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

					if (j->combined_workerid != -1)
					{
						combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);
						workers[i].combined_workerid = j->combined_workerid;
						workers[i].worker_size = combined_worker->worker_size;
					}
				}
				else
				{
					workers[i].combined_workerid = workers[i].workerid;
					workers[i].worker_size = 1;
					workers[i].current_rank = 0;
				}

				STARPU_AYU_PRERUNTASK(_starpu_get_job_associated_to_task(tasks[i])->job_id, workers[i].workerid);
			}
			else
			{
				_starpu_worker_set_status_sleeping(workers[i].workerid);
				STARPU_PTHREAD_COND_BROADCAST(&workers[i].sched_cond);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				_starpu_worker_leave_sched_op(&workers[i]);
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
#endif
			}
		}
	}

#if !defined(STARPU_NON_BLOCKING_DRIVERS)
#if !defined(STARPU_SIMGRID)
	/* Block the assumed-to-be-only worker */
	struct _starpu_worker *worker = &workers[0];
	unsigned workerid = workers[0].workerid;

	if (!count && !executing)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go to sleep just after the scheduler got a new task to be
		 * executed, and thus hang. */
		_starpu_worker_set_status_sleeping(workerid);
		STARPU_PTHREAD_COND_BROADCAST(&worker->sched_cond);

		if (_starpu_worker_can_block(memnode, worker)
		    && !_starpu_sched_ctx_last_worker_awake(worker))
		{
			do
			{
				STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
			}
			while (worker->status == STATUS_SLEEPING);
			_starpu_worker_leave_sched_op(worker);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
		}
		else
		{
			_starpu_worker_leave_sched_op(worker);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
			if (_starpu_machine_is_running())
				_starpu_exponential_backoff(worker);
		}

		return 0;
	}

	_starpu_worker_set_status_wakeup(workerid);
	STARPU_PTHREAD_COND_BROADCAST(&worker->sched_cond);
	worker->spinning_backoff = BACKOFF_MIN;
#endif /* !STARPU_SIMGRID */

	_starpu_worker_leave_sched_op(&workers[0]);
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[0].sched_mutex);
#endif /* !STARPU_NON_BLOCKING_DRIVERS */

	return count;
}