driver_common.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2015 Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015 CNRS
 * Copyright (C) 2011 Télécom-SudParis
 * Copyright (C) 2014 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <math.h>
#include <starpu.h>
#include <starpu_profiling.h>
#include <profiling/profiling.h>
#include <common/utils.h>
#include <core/debug.h>
#include <core/sched_ctx.h>
#include <drivers/driver_common/driver_common.h>
#include <starpu_top.h>
#include <core/sched_policy.h>
#include <top/starpu_top_core.h>

#define BACKOFF_MAX 32 /* TODO: use a parameter to define these */
#define BACKOFF_MIN 1
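
/* Mark the job as started on this worker: run the scheduler pre-execution
 * hook, update the worker status, record the start time when profiling or
 * calibration requires it, and emit the start-of-codelet trace event. For a
 * context without a scheduling policy, the master worker also starts the job
 * on the other workers of the context. */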
void _starpu_driver_start_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_start, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	struct starpu_profiling_task_info *profiling_info;
	int starpu_top = _starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	/* If the job is executed on a combined worker there is no need for the
	 * scheduler to process it: it doesn't contain any valuable data
	 * as it's not linked to an actual worker */
	if (j->task_size == 1)
		_starpu_sched_pre_exec_hook(task);

	_starpu_set_worker_status(worker, STATUS_EXECUTING);
	task->status = STARPU_TASK_RUNNING;

	if (rank == 0)
	{
#ifdef HAVE_AYUDAME_H
		if (AYU_event) AYU_event(AYU_RUNTASK, j->job_id, NULL);
#endif
		cl->per_worker_stats[workerid]++;

		profiling_info = task->profiling_info;

		if ((profiling && profiling_info) || calibrate_model || starpu_top)
		{
			_starpu_clock_gettime(codelet_start);
			_starpu_worker_register_executing_start_date(workerid, codelet_start);
		}
	}

	if (starpu_top)
		_starpu_top_task_started(task, workerid, codelet_start);

	// Find out if the worker is the master of a parallel context
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	if (!sched_ctx->sched_policy)
	{
		if (!sched_ctx->awake_workers && sched_ctx->main_master == worker->workerid)
		{
			struct starpu_worker_collection *workers = sched_ctx->workers;
			struct starpu_sched_ctx_iterator it;

			if (workers->init_iterator)
				workers->init_iterator(workers, &it);
			while (workers->has_next(workers, &it))
			{
				int _workerid = workers->get_next(workers, &it);
				if (_workerid != workerid)
				{
					struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
					_starpu_driver_start_job(_worker, j, &_worker->perf_arch, codelet_start, rank, profiling);
				}
			}
		}
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, &sched_ctx->perf_arch, workerid);
	}
	else
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
}
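
/* Mark the job as completed on this worker: emit the end-of-codelet trace
 * event, record the end time when profiling or calibration requires it, and
 * reset the worker status. For a context without a scheduling policy, the
 * master worker also ends the job on the other workers of the context. */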
void _starpu_driver_end_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_end, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	struct starpu_profiling_task_info *profiling_info = task->profiling_info;
	int starpu_top = _starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	// Find out if the worker is the master of a parallel context
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	if (!sched_ctx->sched_policy)
	{
		_starpu_perfmodel_create_comb_if_needed(&(sched_ctx->perf_arch));
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, &(sched_ctx->perf_arch), workerid);
	}
	else
	{
		_starpu_perfmodel_create_comb_if_needed(perf_arch);
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
	}

	if (cl && cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	if (rank == 0)
	{
		if ((profiling && profiling_info) || calibrate_model || starpu_top)
			_starpu_clock_gettime(codelet_end);
#ifdef HAVE_AYUDAME_H
		if (AYU_event) AYU_event(AYU_POSTRUNTASK, j->job_id, NULL);
#endif
	}

	if (starpu_top)
		_starpu_top_task_ended(task, workerid, codelet_end);

	_starpu_set_worker_status(worker, STATUS_UNKNOWN);

	if (!sched_ctx->sched_policy && !sched_ctx->awake_workers &&
	    sched_ctx->main_master == worker->workerid)
	{
		struct starpu_worker_collection *workers = sched_ctx->workers;
		struct starpu_sched_ctx_iterator it;

		if (workers->init_iterator)
			workers->init_iterator(workers, &it);
		while (workers->has_next(workers, &it))
		{
			int _workerid = workers->get_next(workers, &it);
			if (_workerid != workerid)
			{
				struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
				_starpu_driver_end_job(_worker, j, &_worker->perf_arch, codelet_end, rank, profiling);
			}
		}
	}
}
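
/* Feed the execution measurements of a finished job back into the profiling
 * counters and into the codelet's performance model (and power model, if
 * any), taking care of OpenMP continuations whose timing must be accumulated
 * across several partial executions. */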
void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker,
					struct starpu_perfmodel_arch* perf_arch,
					struct timespec *codelet_start, struct timespec *codelet_end, int profiling)
{
	struct starpu_profiling_task_info *profiling_info = j->task->profiling_info;
	struct timespec measured_ts;
	double measured;
	int workerid = worker->workerid;
	struct starpu_codelet *cl = j->task->cl;
	int calibrate_model = 0;
	int updated = 0;

	_starpu_perfmodel_create_comb_if_needed(perf_arch);

#ifndef STARPU_SIMGRID
	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;
#endif

	if ((profiling && profiling_info) || calibrate_model)
	{
		starpu_timespec_sub(codelet_end, codelet_start, &measured_ts);
		measured = starpu_timing_timespec_to_us(&measured_ts);

		if (profiling && profiling_info)
		{
			memcpy(&profiling_info->start_time, codelet_start, sizeof(struct timespec));
			memcpy(&profiling_info->end_time, codelet_end, sizeof(struct timespec));

			profiling_info->workerid = workerid;

			_starpu_worker_update_profiling_info_executing(workerid, &measured_ts, 1,
								       profiling_info->used_cycles,
								       profiling_info->stall_cycles,
								       profiling_info->power_consumed);
			updated = 1;
		}

		if (calibrate_model)
		{
#ifdef STARPU_OPENMP
			double time_consumed = measured;
			unsigned do_update_time_model;
			if (j->continuation)
			{
				/* The job is only paused, thus we accumulate
				 * its timing, but we don't update its
				 * perfmodel now. */
				starpu_timespec_accumulate(&j->cumulated_ts, &measured_ts);
				do_update_time_model = 0;
			}
			else
			{
				if (j->discontinuous)
				{
					/* The job was paused at least once but is now
					 * really completing. We need to take into
					 * account its past execution time in its
					 * perfmodel. */
					starpu_timespec_accumulate(&measured_ts, &j->cumulated_ts);
					time_consumed = starpu_timing_timespec_to_us(&measured_ts);
				}
				do_update_time_model = 1;
			}
#else
			const unsigned do_update_time_model = 1;
			const double time_consumed = measured;
#endif
			if (do_update_time_model)
			{
				_starpu_update_perfmodel_history(j, j->task->cl->model, perf_arch, worker->devid, time_consumed, j->nimpl);
			}
		}
	}

	if (!updated)
		_starpu_worker_update_profiling_info_executing(workerid, NULL, 1, 0, 0, 0);
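
	/* Update the power model along the same lines as the time model:
	 * OpenMP continuations accumulate their consumption until the job
	 * really completes. */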
	if (profiling_info && profiling_info->power_consumed && cl->power_model && cl->power_model->benchmarking)
	{
#ifdef STARPU_OPENMP
		double power_consumed = profiling_info->power_consumed;
		unsigned do_update_power_model;
		if (j->continuation)
		{
			j->cumulated_power_consumed += power_consumed;
			do_update_power_model = 0;
		}
		else
		{
			if (j->discontinuous)
			{
				power_consumed += j->cumulated_power_consumed;
			}
			do_update_power_model = 1;
		}
#else
		const double power_consumed = profiling_info->power_consumed;
		const unsigned do_update_power_model = 1;
#endif
		if (do_update_power_model)
		{
			_starpu_update_perfmodel_history(j, j->task->cl->power_model, perf_arch, worker->devid, power_consumed, j->nimpl);
		}
	}
}
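
/* Small helpers to switch the worker status while emitting the matching
 * trace events, avoiding redundant transitions (e.g. sleeping -> sleeping). */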
static void _starpu_worker_set_status_scheduling(int workerid)
{
	if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING
	    && _starpu_worker_get_status(workerid) != STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_START;
		_starpu_worker_set_status(workerid, STATUS_SCHEDULING);
	}
}

static void _starpu_worker_set_status_scheduling_done(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_END;
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}

static void _starpu_worker_set_status_sleeping(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	else if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING)
	{
		_STARPU_TRACE_WORKER_SLEEP_START;
		_starpu_worker_restart_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	}
}

static void _starpu_worker_set_status_wakeup(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING || _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
	{
		_STARPU_TRACE_WORKER_SLEEP_END;
		_starpu_worker_stop_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}
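
/* Spin for an exponentially growing number of yields (capped at BACKOFF_MAX)
 * so that an idle worker that cannot block consumes less CPU time. */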
static void _starpu_exponential_backoff(struct _starpu_worker *worker)
{
	int delay = worker->spinning_backoff;

	if (worker->spinning_backoff < BACKOFF_MAX)
		worker->spinning_backoff <<= 1;

	while (delay--)
		STARPU_UYIELD();
}
/* Workers may block when there is no work to do at all. */
struct starpu_task *_starpu_get_worker_task(struct _starpu_worker *worker, int workerid, unsigned memnode)
{
	STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
	struct starpu_task *task;
	unsigned needed = 1;
	unsigned executing = 0;

	_starpu_worker_set_status_scheduling(workerid);
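
	/* Before trying to pop a task, honour any pending parallel-section
	 * request: a scheduling context may ask this worker to block until
	 * the section is over. After waking up, the contexts are scanned once
	 * more in case another request arrived meanwhile. */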
	while (needed)
	{
		struct _starpu_sched_ctx *sched_ctx = NULL;
		struct _starpu_sched_ctx_list *l = NULL;
		for (l = worker->sched_ctx_list; l; l = l->next)
		{
			sched_ctx = _starpu_get_sched_ctx_struct(l->sched_ctx);
			if (sched_ctx && sched_ctx->id > 0 && sched_ctx->id < STARPU_NMAX_SCHED_CTXS)
			{
				STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->parallel_sect_mutex[workerid]);
				if (!sched_ctx->sched_policy)
					worker->is_slave_somewhere = sched_ctx->main_master != workerid;

				if (sched_ctx->parallel_sect[workerid])
				{
					/* don't let the worker sleep with the sched_mutex taken */
					/* we need it until here because the list of contexts of
					   the worker can be changed by another thread */
					STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
					needed = 0;
					_starpu_sched_ctx_signal_worker_blocked(sched_ctx->id, workerid);
					sched_ctx->busy[workerid] = 1;
					STARPU_PTHREAD_COND_WAIT(&sched_ctx->parallel_sect_cond[workerid], &sched_ctx->parallel_sect_mutex[workerid]);
					sched_ctx->busy[workerid] = 0;
					STARPU_PTHREAD_COND_SIGNAL(&sched_ctx->parallel_sect_cond_busy[workerid]);
					_starpu_sched_ctx_signal_worker_woke_up(sched_ctx->id, workerid);
					sched_ctx->parallel_sect[workerid] = 0;
					STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
				}
				STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->parallel_sect_mutex[workerid]);
			}
			if (!needed)
				break;
		}
		/* don't worry if the value is not correct (no lock taken): it
		 * will be picked up next time */
		if (worker->tmp_sched_ctx != -1)
		{
			sched_ctx = _starpu_get_sched_ctx_struct(worker->tmp_sched_ctx);
			STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->parallel_sect_mutex[workerid]);
			if (sched_ctx->parallel_sect[workerid])
			{
//				needed = 0;
				STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
				_starpu_sched_ctx_signal_worker_blocked(sched_ctx->id, workerid);
				sched_ctx->busy[workerid] = 1;
				STARPU_PTHREAD_COND_WAIT(&sched_ctx->parallel_sect_cond[workerid], &sched_ctx->parallel_sect_mutex[workerid]);
				sched_ctx->busy[workerid] = 0;
				STARPU_PTHREAD_COND_SIGNAL(&sched_ctx->parallel_sect_cond_busy[workerid]);
				_starpu_sched_ctx_signal_worker_woke_up(sched_ctx->id, workerid);
				sched_ctx->parallel_sect[workerid] = 0;
				STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
			}
			STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->parallel_sect_mutex[workerid]);
		}

		needed = !needed;
	}
	if ((worker->pipeline_length == 0 && worker->current_task)
	    || (worker->pipeline_length != 0 && worker->ntasks))
		/* This worker is executing something */
		executing = 1;

	if (worker->pipeline_length && (worker->ntasks == worker->pipeline_length || worker->pipeline_stuck))
		task = NULL;
	else
		task = _starpu_pop_task(worker);

	if (task == NULL && !executing)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go to sleep just after the scheduler got a new task to
		 * be executed, and thus hang. */
		_starpu_worker_set_status_sleeping(workerid);

		if (_starpu_worker_can_block(memnode, worker)
#ifndef STARPU_SIMGRID
		    && !_starpu_sched_ctx_last_worker_awake(worker)
#endif
		    )
		{
			STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
		}
		else
		{
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
			if (_starpu_machine_is_running())
			{
				_starpu_exponential_backoff(worker);
#ifdef STARPU_SIMGRID
				static int warned;
				if (!warned)
				{
					warned = 1;
					_STARPU_DISP("Has to make simgrid spin for CPU idle time. You can try to pass --enable-blocking-drivers to ./configure to avoid this\n");
				}
				MSG_process_sleep(0.000010);
#endif
			}
		}

		return NULL;
	}

	_starpu_worker_set_status_scheduling_done(workerid);
	_starpu_worker_set_status_wakeup(workerid);

	worker->spinning_backoff = BACKOFF_MIN;

	STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);

#ifdef HAVE_AYUDAME_H
	if (AYU_event)
	{
		intptr_t id = workerid;
		AYU_event(AYU_PRERUNTASK, _starpu_get_job_associated_to_task(task)->job_id, &id);
	}
#endif

	return task;
}
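
/* Pop one task per worker of the set when possible and record it as that
 * worker's current task, handling parallel tasks (rank and combined-worker
 * bookkeeping) and pipelined drivers. Returns the number of tasks obtained;
 * in blocking-drivers mode the (single) worker goes to sleep when nothing
 * was popped and nothing is executing. */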
int _starpu_get_multi_worker_task(struct _starpu_worker *workers, struct starpu_task **tasks, int nworkers, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	int i, count = 0;
	struct _starpu_job *j;
	int is_parallel_task;
	struct _starpu_combined_worker *combined_worker;
#ifndef STARPU_NON_BLOCKING_DRIVERS
	int executing = 0;

	/* This assumes only 1 worker */
	STARPU_ASSERT_MSG(nworkers == 1, "Multiple workers are not yet possible in blocking drivers mode\n");
	STARPU_PTHREAD_MUTEX_LOCK(&workers[0].sched_mutex);
#endif
	/* for each worker */
	for (i = 0; i < nworkers; i++)
	{
#ifndef STARPU_NON_BLOCKING_DRIVERS
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
		    || (workers[i].pipeline_length != 0 && workers[i].ntasks))
			/* At least this worker is executing something */
			executing = 1;
#endif
		/* if the worker is already executing a task (or its pipeline
		 * is full or stuck), do not pop */
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
		    || (workers[i].pipeline_length != 0 &&
			(workers[i].ntasks == workers[i].pipeline_length
			 || workers[i].pipeline_stuck)))
		{
			tasks[i] = NULL;
		}
		/* else try to pop a task */
		else
		{
#ifdef STARPU_NON_BLOCKING_DRIVERS
			STARPU_PTHREAD_MUTEX_LOCK(&workers[i].sched_mutex);
#endif
			_starpu_worker_set_status_scheduling(workers[i].workerid);
			_starpu_set_local_worker_key(&workers[i]);
			tasks[i] = _starpu_pop_task(&workers[i]);
			if (tasks[i] != NULL)
			{
				_starpu_worker_set_status_scheduling_done(workers[i].workerid);
				_starpu_worker_set_status_wakeup(workers[i].workerid);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				STARPU_PTHREAD_MUTEX_UNLOCK(&workers[i].sched_mutex);
#endif
				count++;
				j = _starpu_get_job_associated_to_task(tasks[i]);
				is_parallel_task = (j->task_size > 1);
				if (workers[i].pipeline_length)
					workers[i].current_tasks[(workers[i].first_task + workers[i].ntasks) % STARPU_MAX_PIPELINE] = tasks[i];
				else
					workers[i].current_task = j->task;
				workers[i].ntasks++;

				/* Get the rank in case it is a parallel task */
				if (is_parallel_task)
				{
					STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
					workers[i].current_rank = j->active_task_alias_count++;
					STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

					if (j->combined_workerid != -1)
					{
						combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);
						workers[i].combined_workerid = j->combined_workerid;
						workers[i].worker_size = combined_worker->worker_size;
					}
				}
				else
				{
					workers[i].combined_workerid = workers[i].workerid;
					workers[i].worker_size = 1;
					workers[i].current_rank = 0;
				}
#ifdef HAVE_AYUDAME_H
				if (AYU_event)
				{
					intptr_t id = workers[i].workerid;
					AYU_event(AYU_PRERUNTASK, _starpu_get_job_associated_to_task(tasks[i])->job_id, &id);
				}
#endif
			}
			else
			{
				_starpu_worker_set_status_sleeping(workers[i].workerid);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				STARPU_PTHREAD_MUTEX_UNLOCK(&workers[i].sched_mutex);
#endif
			}
		}
	}
#ifndef STARPU_NON_BLOCKING_DRIVERS
	/* Block the assumed-to-be-only worker */
	struct _starpu_worker *worker = &workers[0];
	unsigned workerid = workers[0].workerid;

	if (!count && !executing)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go to sleep just after the scheduler got a new task to
		 * be executed, and thus hang. */
		_starpu_worker_set_status_sleeping(workerid);

		if (_starpu_worker_can_block(memnode, worker)
#ifndef STARPU_SIMGRID
		    && !_starpu_sched_ctx_last_worker_awake(worker)
#endif
		    )
		{
			STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
		}
		else
		{
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
			if (_starpu_machine_is_running())
			{
				_starpu_exponential_backoff(worker);
#ifdef STARPU_SIMGRID
				static int warned;
				if (!warned)
				{
					warned = 1;
					_STARPU_DISP("Has to make simgrid spin for CPU idle time. You can try to pass --enable-blocking-drivers to ./configure to avoid this\n");
				}
				MSG_process_sleep(0.000010);
#endif
			}
		}

		return 0;
	}

	_starpu_worker_set_status_wakeup(workerid);
	worker->spinning_backoff = BACKOFF_MIN;

	STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
#endif /* !STARPU_NON_BLOCKING_DRIVERS */

	return count;
}
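
/* Illustrative sketch (not part of this file) of how a single-worker driver
 * loop is expected to combine the helpers above. The `worker`, `workerid`,
 * `memnode` and `profiling` variables are assumed to be provided by the
 * hypothetical driver:
 *
 *	while (_starpu_machine_is_running())
 *	{
 *		struct starpu_task *task = _starpu_get_worker_task(worker, workerid, memnode);
 *		if (!task)
 *			continue;	// the helper already slept or backed off
 *
 *		struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
 *		struct timespec start, end;
 *
 *		_starpu_driver_start_job(worker, j, &worker->perf_arch, &start, 0, profiling);
 *		// ... execute the codelet implementation ...
 *		_starpu_driver_end_job(worker, j, &worker->perf_arch, &end, 0, profiling);
 *		_starpu_driver_update_job_feedback(j, worker, &worker->perf_arch, &start, &end, profiling);
 *	}
 */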