driver_common.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2017  Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017  CNRS
 * Copyright (C) 2011  Télécom-SudParis
 * Copyright (C) 2014, 2016  INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <math.h>
#include <starpu.h>
#include <starpu_profiling.h>
#include <profiling/profiling.h>
#include <common/utils.h>
#include <core/debug.h>
#include <core/sched_ctx.h>
#include <drivers/driver_common/driver_common.h>
#include <starpu_top.h>
#include <core/sched_policy.h>
#include <top/starpu_top_core.h>

#define BACKOFF_MAX 32 /* TODO: make these bounds configurable through a parameter */
#define BACKOFF_MIN 1
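
/* Mark the beginning of the execution of job 'j' on 'worker': record the
 * codelet start time when profiling, calibration or StarPU-Top requires it,
 * run the scheduler pre-execution hook, and, when the worker is the master of
 * a parallel context without a scheduling policy, recursively start the job
 * on the other workers of that context with increasing ranks. */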
void _starpu_driver_start_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_start, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	int starpu_top = _starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	/* If the job is executed on a combined worker there is no need for the
	 * scheduler to process it: it doesn't contain any valuable data
	 * as it's not linked to an actual worker */
	if (j->task_size == 1 && rank == 0)
		_starpu_sched_pre_exec_hook(task);

	_starpu_set_worker_status(worker, STATUS_EXECUTING);
	task->status = STARPU_TASK_RUNNING;

	if (rank == 0)
	{
		STARPU_AYU_RUNTASK(j->job_id);
		cl->per_worker_stats[workerid]++;

		struct starpu_profiling_task_info *profiling_info = task->profiling_info;
		if ((profiling && profiling_info) || calibrate_model || starpu_top)
		{
			_starpu_clock_gettime(codelet_start);
			_starpu_worker_register_executing_start_date(workerid, codelet_start);
		}
	}

	if (starpu_top)
		_starpu_top_task_started(task, workerid, codelet_start);

	/* Find out if the worker is the master of a parallel context */
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	if (!sched_ctx->sched_policy)
	{
		if (!sched_ctx->awake_workers && sched_ctx->main_master == worker->workerid)
		{
			struct starpu_worker_collection *workers = sched_ctx->workers;
			struct starpu_sched_ctx_iterator it;
			int new_rank = 0;

			if (workers->init_iterator)
				workers->init_iterator(workers, &it);
			while (workers->has_next(workers, &it))
			{
				int _workerid = workers->get_next(workers, &it);
				if (_workerid != workerid)
				{
					new_rank++;
					struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
					_starpu_driver_start_job(_worker, j, &_worker->perf_arch, codelet_start, new_rank, profiling);
				}
			}
		}
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, &sched_ctx->perf_arch, workerid);
	}
	else
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
}
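
/* Mark the end of the execution of job 'j' on 'worker': emit the
 * end-of-codelet trace event, record the codelet end time when profiling,
 * calibration or StarPU-Top requires it, and, when the worker is the master
 * of a parallel context without a scheduling policy, recursively end the job
 * on the other workers of that context. */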
void _starpu_driver_end_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_end, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	int starpu_top = _starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	/* Find out if the worker is the master of a parallel context */
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	if (!sched_ctx->sched_policy)
	{
		_starpu_perfmodel_create_comb_if_needed(&(sched_ctx->perf_arch));
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, &(sched_ctx->perf_arch), workerid);
	}
	else
	{
		_starpu_perfmodel_create_comb_if_needed(perf_arch);
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
	}

	if (cl && cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	if (rank == 0)
	{
		struct starpu_profiling_task_info *profiling_info = task->profiling_info;
		if ((profiling && profiling_info) || calibrate_model || starpu_top)
		{
			_starpu_clock_gettime(codelet_end);
			_starpu_worker_register_executing_end(workerid);
		}
		STARPU_AYU_POSTRUNTASK(j->job_id);
	}

	if (starpu_top)
		_starpu_top_task_ended(task, workerid, codelet_end);

	_starpu_set_worker_status(worker, STATUS_UNKNOWN);

	if (!sched_ctx->sched_policy && !sched_ctx->awake_workers
	    && sched_ctx->main_master == worker->workerid)
	{
		struct starpu_worker_collection *workers = sched_ctx->workers;
		struct starpu_sched_ctx_iterator it;
		int new_rank = 0;

		if (workers->init_iterator)
			workers->init_iterator(workers, &it);
		while (workers->has_next(workers, &it))
		{
			int _workerid = workers->get_next(workers, &it);
			if (_workerid != workerid)
			{
				new_rank++;
				struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
				_starpu_driver_end_job(_worker, j, &_worker->perf_arch, codelet_end, new_rank, profiling);
			}
		}
	}
}
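
/* Feed the profiling and performance-model machinery with the measured
 * execution of job 'j': compute the time elapsed between 'codelet_start' and
 * 'codelet_end', fill the task profiling structure, and update the codelet's
 * time and energy performance models when they are being calibrated. With
 * OpenMP support, the timing and energy of paused jobs (continuations) are
 * accumulated, and only committed to the models once the job really
 * completes. */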
void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker,
					struct starpu_perfmodel_arch* perf_arch,
					struct timespec *codelet_start, struct timespec *codelet_end, int profiling)
{
	struct starpu_profiling_task_info *profiling_info = j->task->profiling_info;
	struct timespec measured_ts;
	int workerid = worker->workerid;
	struct starpu_codelet *cl = j->task->cl;
	int calibrate_model = 0;
	int updated = 0;

	_starpu_perfmodel_create_comb_if_needed(perf_arch);

#ifndef STARPU_SIMGRID
	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;
#endif

	if ((profiling && profiling_info) || calibrate_model)
	{
		double measured;
		starpu_timespec_sub(codelet_end, codelet_start, &measured_ts);
		measured = starpu_timing_timespec_to_us(&measured_ts);

		if (profiling && profiling_info)
		{
			memcpy(&profiling_info->start_time, codelet_start, sizeof(struct timespec));
			memcpy(&profiling_info->end_time, codelet_end, sizeof(struct timespec));

			profiling_info->workerid = workerid;

			_starpu_worker_update_profiling_info_executing(workerid, &measured_ts, 1,
								       profiling_info->used_cycles,
								       profiling_info->stall_cycles,
								       profiling_info->energy_consumed,
								       j->task->flops);
			updated = 1;
		}

		if (calibrate_model)
		{
#ifdef STARPU_OPENMP
			double time_consumed = measured;
			unsigned do_update_time_model;
			if (j->continuation)
			{
				/* The job is only paused, thus we accumulate
				 * its timing, but we don't update its
				 * perfmodel now. */
				starpu_timespec_accumulate(&j->cumulated_ts, &measured_ts);
				do_update_time_model = 0;
			}
			else
			{
				if (j->discontinuous)
				{
					/* The job was paused at least once but is now
					 * really completing. We need to take into
					 * account its past execution time in its
					 * perfmodel. */
					starpu_timespec_accumulate(&measured_ts, &j->cumulated_ts);
					time_consumed = starpu_timing_timespec_to_us(&measured_ts);
				}
				do_update_time_model = 1;
			}
#else
			const unsigned do_update_time_model = 1;
			const double time_consumed = measured;
#endif
			if (do_update_time_model)
			{
				_starpu_update_perfmodel_history(j, j->task->cl->model, perf_arch, worker->devid, time_consumed, j->nimpl);
			}
		}
	}

	if (!updated)
		_starpu_worker_update_profiling_info_executing(workerid, NULL, 1, 0, 0, 0, 0);

	if (profiling_info && profiling_info->energy_consumed && cl->energy_model && cl->energy_model->benchmarking)
	{
#ifdef STARPU_OPENMP
		double energy_consumed = profiling_info->energy_consumed;
		unsigned do_update_energy_model;
		if (j->continuation)
		{
			j->cumulated_energy_consumed += energy_consumed;
			do_update_energy_model = 0;
		}
		else
		{
			if (j->discontinuous)
			{
				energy_consumed += j->cumulated_energy_consumed;
			}
			do_update_energy_model = 1;
		}
#else
		const double energy_consumed = profiling_info->energy_consumed;
		const unsigned do_update_energy_model = 1;
#endif
		if (do_update_energy_model)
		{
			_starpu_update_perfmodel_history(j, j->task->cl->energy_model, perf_arch, worker->devid, energy_consumed, j->nimpl);
		}
	}
}
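
/* Helpers to switch the worker status while keeping the trace events and the
 * sleeping-time accounting consistent: scheduling/sleeping trace events are
 * only emitted on an actual state transition. */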
static void _starpu_worker_set_status_scheduling(int workerid)
{
	if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING
	    && _starpu_worker_get_status(workerid) != STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_START;
		_starpu_worker_set_status(workerid, STATUS_SCHEDULING);
	}
}

static void _starpu_worker_set_status_scheduling_done(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_END;
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}

static void _starpu_worker_set_status_sleeping(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	else if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING)
	{
		_STARPU_TRACE_WORKER_SLEEP_START;
		_starpu_worker_restart_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	}
}

static void _starpu_worker_set_status_wakeup(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING || _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
	{
		_STARPU_TRACE_WORKER_SLEEP_END;
		_starpu_worker_stop_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}
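
/* Busy-wait back-off used when a worker keeps polling without finding work:
 * spin for 'spinning_backoff' yield iterations, doubling the delay on each
 * unsuccessful attempt up to BACKOFF_MAX. The counter is reset to BACKOFF_MIN
 * as soon as a task is obtained. */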
#if !defined(STARPU_SIMGRID)
static void _starpu_exponential_backoff(struct _starpu_worker *worker)
{
	int delay = worker->spinning_backoff;

	if (worker->spinning_backoff < BACKOFF_MAX)
		worker->spinning_backoff <<= 1;

	while (delay--)
		STARPU_UYIELD();
}
#endif
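
/* Pop one task for 'worker'. The worker first honours any pending parallel
 * section request from its scheduling contexts (possibly blocking on the
 * context condition variable until the section is over), then tries to pop a
 * task from the scheduler. When nothing is obtained and nothing is currently
 * executing, the worker either blocks on its sched condition variable or
 * spins with exponential backoff. */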
/* Workers may block when there is no work to do at all. */
struct starpu_task *_starpu_get_worker_task(struct _starpu_worker *worker, int workerid, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
	struct starpu_task *task;
	unsigned needed = 1;
	unsigned executing STARPU_ATTRIBUTE_UNUSED = 0;

	_starpu_worker_set_status_scheduling(workerid);
	while (needed)
	{
		struct _starpu_sched_ctx *sched_ctx = NULL;
		struct _starpu_sched_ctx_elt *e = NULL;
		struct _starpu_sched_ctx_list_iterator list_it;

		_starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
		while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
		{
			e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
			sched_ctx = _starpu_get_sched_ctx_struct(e->sched_ctx);
			if (sched_ctx && sched_ctx->id > 0 && sched_ctx->id < STARPU_NMAX_SCHED_CTXS)
			{
				STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->parallel_sect_mutex[workerid]);
				if (!sched_ctx->sched_policy)
					worker->is_slave_somewhere = sched_ctx->main_master != workerid;

				if (sched_ctx->parallel_sect[workerid])
				{
					/* don't let the worker sleep with the sched_mutex taken */
					/* we need it until here because the list of contexts of the
					 * worker may be changed by another thread */
					STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
					needed = 0;
					_starpu_sched_ctx_signal_worker_blocked(sched_ctx->id, workerid);
					sched_ctx->busy[workerid] = 1;
					STARPU_PTHREAD_COND_WAIT(&sched_ctx->parallel_sect_cond[workerid], &sched_ctx->parallel_sect_mutex[workerid]);
					sched_ctx->busy[workerid] = 0;
					STARPU_PTHREAD_COND_SIGNAL(&sched_ctx->parallel_sect_cond_busy[workerid]);
					_starpu_sched_ctx_signal_worker_woke_up(sched_ctx->id, workerid);
					sched_ctx->parallel_sect[workerid] = 0;
					STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
				}
				STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->parallel_sect_mutex[workerid]);
			}
			if (!needed)
				break;
		}

		/* Don't worry if the value is not correct (we don't hold the lock):
		 * it will be checked again on the next iteration */
		if (worker->tmp_sched_ctx != -1)
		{
			sched_ctx = _starpu_get_sched_ctx_struct(worker->tmp_sched_ctx);
			STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->parallel_sect_mutex[workerid]);
			if (sched_ctx->parallel_sect[workerid])
			{
//				needed = 0;
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
				_starpu_sched_ctx_signal_worker_blocked(sched_ctx->id, workerid);
				sched_ctx->busy[workerid] = 1;
				STARPU_PTHREAD_COND_WAIT(&sched_ctx->parallel_sect_cond[workerid], &sched_ctx->parallel_sect_mutex[workerid]);
				sched_ctx->busy[workerid] = 0;
				STARPU_PTHREAD_COND_SIGNAL(&sched_ctx->parallel_sect_cond_busy[workerid]);
				_starpu_sched_ctx_signal_worker_woke_up(sched_ctx->id, workerid);
				sched_ctx->parallel_sect[workerid] = 0;
				STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
			}
			STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->parallel_sect_mutex[workerid]);
		}

		/* loop again only when a parallel section was served above
		 * (needed was reset to 0); otherwise exit the loop */
		needed = !needed;
	}

	if ((worker->pipeline_length == 0 && worker->current_task)
	    || (worker->pipeline_length != 0 && worker->ntasks))
		/* This worker is executing something */
		executing = 1;

	/* if the worker's pipeline is already full or stuck, don't pop another task */
	if (worker->pipeline_length && (worker->ntasks == worker->pipeline_length || worker->pipeline_stuck))
		task = NULL;
	/* don't push a task if we are already transferring one */
	else if (worker->task_transferring != NULL)
		task = NULL;
	/* else try to pop a task */
	else
		task = _starpu_pop_task(worker);

#if !defined(STARPU_SIMGRID)
	if (task == NULL && !executing)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go block just after the scheduler got a new task to be
		 * executed, and thus hanging. */
		_starpu_worker_set_status_sleeping(workerid);

		if (_starpu_worker_can_block(memnode, worker)
		    && !_starpu_sched_ctx_last_worker_awake(worker))
		{
			STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
		}
		else
		{
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
			if (_starpu_machine_is_running())
				_starpu_exponential_backoff(worker);
		}
		return NULL;
	}
#endif

	if (task)
	{
		_starpu_worker_set_status_scheduling_done(workerid);
		_starpu_worker_set_status_wakeup(workerid);
	}
	else
	{
		_starpu_worker_set_status_sleeping(workerid);
	}
	worker->spinning_backoff = BACKOFF_MIN;

	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);

	/* 'task' may still be NULL here (e.g. in simgrid builds); guard the
	 * Ayudame hook against dereferencing a NULL task */
	if (task)
		STARPU_AYU_PRERUNTASK(_starpu_get_job_associated_to_task(task)->job_id, workerid);

	return task;
}
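
/* Variant of _starpu_get_worker_task() for drivers that manage several
 * workers at once (e.g. one driver thread handling several devices): fill
 * 'tasks' with at most one popped task per worker and return how many tasks
 * were obtained. In blocking-driver builds only a single worker is supported,
 * and that worker goes to sleep when nothing was popped and nothing is
 * executing. */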
int _starpu_get_multi_worker_task(struct _starpu_worker *workers, struct starpu_task **tasks, int nworkers, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	int i, count = 0;
	struct _starpu_job *j;
	int is_parallel_task;
	struct _starpu_combined_worker *combined_worker;
	int executing STARPU_ATTRIBUTE_UNUSED = 0;

	/* for each worker */
#ifndef STARPU_NON_BLOCKING_DRIVERS
	/* This assumes only 1 worker */
	STARPU_ASSERT_MSG(nworkers == 1, "Multiple workers is not yet possible in blocking drivers mode\n");
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[0].sched_mutex);
#endif
	for (i = 0; i < nworkers; i++)
	{
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
		    || (workers[i].pipeline_length != 0 && workers[i].ntasks))
			/* At least this worker is executing something */
			executing = 1;

		/* if the worker is already busy with a task, or its pipeline is
		 * full or stuck, don't pop another task */
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
		    || (workers[i].pipeline_length != 0 &&
			(workers[i].ntasks == workers[i].pipeline_length
			 || workers[i].pipeline_stuck)))
		{
			tasks[i] = NULL;
		}
		/* don't push a task if we are already transferring one */
		else if (workers[i].task_transferring != NULL)
		{
			tasks[i] = NULL;
		}
		/* else try to pop a task */
		else
		{
#ifdef STARPU_NON_BLOCKING_DRIVERS
			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[i].sched_mutex);
#endif
			_starpu_worker_set_status_scheduling(workers[i].workerid);
			_starpu_set_local_worker_key(&workers[i]);
			tasks[i] = _starpu_pop_task(&workers[i]);
			if (tasks[i] != NULL)
			{
				_starpu_worker_set_status_scheduling_done(workers[i].workerid);
				_starpu_worker_set_status_wakeup(workers[i].workerid);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
#endif
				count++;
				j = _starpu_get_job_associated_to_task(tasks[i]);
				is_parallel_task = (j->task_size > 1);
				if (workers[i].pipeline_length)
					workers[i].current_tasks[(workers[i].first_task + workers[i].ntasks) % STARPU_MAX_PIPELINE] = tasks[i];
				else
					workers[i].current_task = j->task;
				workers[i].ntasks++;

				/* Get the rank in case it is a parallel task */
				if (is_parallel_task)
				{
					STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
					workers[i].current_rank = j->active_task_alias_count++;
					STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

					if (j->combined_workerid != -1)
					{
						combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);
						workers[i].combined_workerid = j->combined_workerid;
						workers[i].worker_size = combined_worker->worker_size;
					}
				}
				else
				{
					workers[i].combined_workerid = workers[i].workerid;
					workers[i].worker_size = 1;
					workers[i].current_rank = 0;
				}

				STARPU_AYU_PRERUNTASK(_starpu_get_job_associated_to_task(tasks[i])->job_id, workers[i].workerid);
			}
			else
			{
				_starpu_worker_set_status_sleeping(workers[i].workerid);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
#endif
			}
		}
	}

#if !defined(STARPU_NON_BLOCKING_DRIVERS)
#if !defined(STARPU_SIMGRID)
	/* Block the assumed-to-be-only worker */
	struct _starpu_worker *worker = &workers[0];
	unsigned workerid = workers[0].workerid;

	if (!count && !executing)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go block just after the scheduler got a new task to be
		 * executed, and thus hanging. */
		_starpu_worker_set_status_sleeping(workerid);

		if (_starpu_worker_can_block(memnode, worker)
		    && !_starpu_sched_ctx_last_worker_awake(worker))
		{
			STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
		}
		else
		{
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
			if (_starpu_machine_is_running())
				_starpu_exponential_backoff(worker);
		}
		return 0;
	}

	_starpu_worker_set_status_wakeup(workerid);
	worker->spinning_backoff = BACKOFF_MIN;
#endif /* !STARPU_SIMGRID */

	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[0].sched_mutex);
#endif /* !STARPU_NON_BLOCKING_DRIVERS */

	return count;
}