/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2017  Inria
 * Copyright (C) 2010-2018  Université de Bordeaux
 * Copyright (C) 2010-2017  CNRS
 * Copyright (C) 2013       Thibaut Lambert
 * Copyright (C) 2011       Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <math.h>
#include <starpu.h>
#include <starpu_profiling.h>
#include <profiling/profiling.h>
#include <common/utils.h>
#include <core/debug.h>
#include <core/sched_ctx.h>
#include <drivers/driver_common/driver_common.h>
#include <starpu_top.h>
#include <core/sched_policy.h>
#include <top/starpu_top_core.h>
#include <core/task.h>

#define BACKOFF_MAX 32 /* TODO: use a parameter to define these */
#define BACKOFF_MIN 1
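
/* Called when a worker (or each member of a parallel team, with its rank)
 * is about to execute job 'j'. Records the execution start time when
 * profiling, model calibration or starpu_top monitoring requires it, runs
 * the scheduler's pre-execution hook, and emits the start-of-codelet trace
 * events. For a context without a scheduling policy, the master worker
 * recursively starts the job on the other workers of the context. */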
void _starpu_driver_start_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	int starpu_top = _starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	/* If the job is executed on a combined worker there is no need for the
	 * scheduler to process it: it doesn't contain any valuable data
	 * as it's not linked to an actual worker */
	if (j->task_size == 1 && rank == 0)
		_starpu_sched_pre_exec_hook(task);

	_starpu_set_worker_status(worker, STATUS_EXECUTING);
	task->status = STARPU_TASK_RUNNING;

	if (rank == 0)
	{
		STARPU_AYU_RUNTASK(j->job_id);
		cl->per_worker_stats[workerid]++;

		struct starpu_profiling_task_info *profiling_info = task->profiling_info;
		if ((profiling && profiling_info) || calibrate_model || starpu_top)
		{
			_starpu_clock_gettime(&worker->cl_start);
			_starpu_worker_register_executing_start_date(workerid, &worker->cl_start);
		}
	}

	if (starpu_top)
		_starpu_top_task_started(task, workerid, &worker->cl_start);

	// Find out if the worker is the master of a parallel context
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	_starpu_sched_ctx_lock_read(sched_ctx->id);
	if (!sched_ctx->sched_policy)
	{
		if (!sched_ctx->awake_workers && sched_ctx->main_master == worker->workerid)
		{
			struct starpu_worker_collection *workers = sched_ctx->workers;
			struct starpu_sched_ctx_iterator it;
			int new_rank = 0;

			if (workers->init_iterator)
				workers->init_iterator(workers, &it);
			while (workers->has_next(workers, &it))
			{
				int _workerid = workers->get_next(workers, &it);
				if (_workerid != workerid)
				{
					new_rank++;
					struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
					_starpu_driver_start_job(_worker, j, &_worker->perf_arch, new_rank, profiling);
				}
			}
		}
		_STARPU_TRACE_TASK_COLOR(j);
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, &sched_ctx->perf_arch, workerid);
	}
	else
	{
		_STARPU_TRACE_TASK_COLOR(j);
		_STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
	}
	_starpu_sched_ctx_unlock_read(sched_ctx->id);
	_STARPU_TASK_BREAK_ON(task, exec);
}
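
/* Symmetric to _starpu_driver_start_job(): called when a worker (or each
 * member of a parallel team) finishes executing job 'j'. Records the end
 * time when needed, emits the end-of-codelet trace events, resets the
 * worker status, and, for a policy-less context, lets the master worker
 * recursively end the job on the other workers of the context. */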
void _starpu_driver_end_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	int starpu_top = _starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	// Find out if the worker is the master of a parallel context
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	if (!sched_ctx->sched_policy)
	{
		_starpu_perfmodel_create_comb_if_needed(&(sched_ctx->perf_arch));
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, &(sched_ctx->perf_arch), workerid);
	}
	else
	{
		_starpu_perfmodel_create_comb_if_needed(perf_arch);
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
	}

	if (cl && cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	if (rank == 0)
	{
		struct starpu_profiling_task_info *profiling_info = task->profiling_info;
		if ((profiling && profiling_info) || calibrate_model || starpu_top)
		{
			_starpu_clock_gettime(&worker->cl_end);
			_starpu_worker_register_executing_end(workerid);
		}
		STARPU_AYU_POSTRUNTASK(j->job_id);
	}

	if (starpu_top)
		_starpu_top_task_ended(task, workerid, &worker->cl_end);

	_starpu_set_worker_status(worker, STATUS_UNKNOWN);

	if (!sched_ctx->sched_policy && !sched_ctx->awake_workers &&
	    sched_ctx->main_master == worker->workerid)
	{
		struct starpu_worker_collection *workers = sched_ctx->workers;
		struct starpu_sched_ctx_iterator it;
		int new_rank = 0;

		if (workers->init_iterator)
			workers->init_iterator(workers, &it);
		while (workers->has_next(workers, &it))
		{
			int _workerid = workers->get_next(workers, &it);
			if (_workerid != workerid)
			{
				new_rank++;
				struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
				_starpu_driver_end_job(_worker, j, &_worker->perf_arch, new_rank, profiling);
			}
		}
	}
}
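
/* Feed the measured execution time (worker->cl_start .. worker->cl_end)
 * back into the profiling counters and the performance models. With OpenMP
 * support, the time of a merely paused job (a continuation) is accumulated
 * and the perfmodel is only updated once the job really completes; the
 * same scheme is applied to the energy model when energy was measured. */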
void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker,
					struct starpu_perfmodel_arch* perf_arch,
					int profiling)
{
	struct starpu_profiling_task_info *profiling_info = j->task->profiling_info;
	struct timespec measured_ts;
	int workerid = worker->workerid;
	struct starpu_codelet *cl = j->task->cl;
	int calibrate_model = 0;
	int updated = 0;

	_starpu_perfmodel_create_comb_if_needed(perf_arch);

#ifndef STARPU_SIMGRID
	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;
#endif

	if ((profiling && profiling_info) || calibrate_model)
	{
		double measured;
		starpu_timespec_sub(&worker->cl_end, &worker->cl_start, &measured_ts);
		measured = starpu_timing_timespec_to_us(&measured_ts);
		STARPU_ASSERT_MSG(measured >= 0, "measured=%lf\n", measured);

		if (profiling && profiling_info)
		{
			memcpy(&profiling_info->start_time, &worker->cl_start, sizeof(struct timespec));
			memcpy(&profiling_info->end_time, &worker->cl_end, sizeof(struct timespec));
			profiling_info->workerid = workerid;

			_starpu_worker_update_profiling_info_executing(workerid, &measured_ts, 1,
								       profiling_info->used_cycles,
								       profiling_info->stall_cycles,
								       profiling_info->energy_consumed,
								       j->task->flops);
			updated = 1;
		}

		if (calibrate_model)
		{
#ifdef STARPU_OPENMP
			double time_consumed = measured;
			unsigned do_update_time_model;
			if (j->continuation)
			{
				/* The job is only paused, thus we accumulate
				 * its timing, but we don't update its
				 * perfmodel now. */
				starpu_timespec_accumulate(&j->cumulated_ts, &measured_ts);
				do_update_time_model = 0;
			}
			else
			{
				if (j->discontinuous)
				{
					/* The job was paused at least once but is now
					 * really completing. We need to take into
					 * account its past execution time in its
					 * perfmodel. */
					starpu_timespec_accumulate(&measured_ts, &j->cumulated_ts);
					time_consumed = starpu_timing_timespec_to_us(&measured_ts);
				}
				do_update_time_model = 1;
			}
#else
			const unsigned do_update_time_model = 1;
			const double time_consumed = measured;
#endif
			if (do_update_time_model)
			{
				_starpu_update_perfmodel_history(j, j->task->cl->model, perf_arch, worker->devid, time_consumed, j->nimpl);
			}
		}
	}

	if (!updated)
		_starpu_worker_update_profiling_info_executing(workerid, NULL, 1, 0, 0, 0, 0);

	if (profiling_info && profiling_info->energy_consumed && cl->energy_model && cl->energy_model->benchmarking)
	{
#ifdef STARPU_OPENMP
		double energy_consumed = profiling_info->energy_consumed;
		unsigned do_update_energy_model;
		if (j->continuation)
		{
			j->cumulated_energy_consumed += energy_consumed;
			do_update_energy_model = 0;
		}
		else
		{
			if (j->discontinuous)
			{
				energy_consumed += j->cumulated_energy_consumed;
			}
			do_update_energy_model = 1;
		}
#else
		const double energy_consumed = profiling_info->energy_consumed;
		const unsigned do_update_energy_model = 1;
#endif
		if (do_update_energy_model)
		{
			_starpu_update_perfmodel_history(j, j->task->cl->energy_model, perf_arch, worker->devid, energy_consumed, j->nimpl);
		}
	}
}
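
/* Small state-machine helpers around _starpu_worker_set_status(): each
 * transition emits the matching trace event, and sleep start/stop are
 * accounted so idle time can be reported. The status guards make repeated
 * calls no-ops. */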
static void _starpu_worker_set_status_scheduling(int workerid)
{
	if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING
		&& _starpu_worker_get_status(workerid) != STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_START;
		_starpu_worker_set_status(workerid, STATUS_SCHEDULING);
	}
}

static void _starpu_worker_set_status_scheduling_done(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SCHEDULING)
	{
		_STARPU_TRACE_WORKER_SCHEDULING_END;
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}

static void _starpu_worker_set_status_sleeping(int workerid)
{
	if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING)
	{
		_STARPU_TRACE_WORKER_SLEEP_START;
		_starpu_worker_restart_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_SLEEPING);
	}
}

static void _starpu_worker_set_status_wakeup(int workerid)
{
	if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING)
	{
		_STARPU_TRACE_WORKER_SLEEP_END;
		_starpu_worker_stop_sleeping(workerid);
		_starpu_worker_set_status(workerid, STATUS_UNKNOWN);
	}
}
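
/* Busy-wait before polling the scheduler again: yield 'spinning_backoff'
 * times, doubling the backoff (capped at BACKOFF_MAX), so an idle worker
 * spins 1, 2, 4, ... up to 32 yields between attempts. The backoff is
 * reset to BACKOFF_MIN when the worker obtains work or wakes up. */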
#if !defined(STARPU_SIMGRID)
static void _starpu_exponential_backoff(struct _starpu_worker *worker)
{
	int delay = worker->spinning_backoff;

	if (worker->spinning_backoff < BACKOFF_MAX)
		worker->spinning_backoff <<= 1;

	while (delay--)
		STARPU_UYIELD();
}
#endif
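
/* Pop a task for 'worker' and return it, or NULL when the worker cannot
 * take one (pipeline full or stuck, a data transfer in flight, or the
 * scheduler returned nothing). */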
/* Workers may block when there is no work to do at all. */
struct starpu_task *_starpu_get_worker_task(struct _starpu_worker *worker, int workerid, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	struct starpu_task *task;
#if !defined(STARPU_SIMGRID)
	unsigned keep_awake = 0;
#endif

	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
	_starpu_worker_enter_sched_op(worker);
	_starpu_worker_set_status_scheduling(workerid);
#if !defined(STARPU_SIMGRID)
	if ((worker->pipeline_length == 0 && worker->current_task)
		|| (worker->pipeline_length != 0 && worker->ntasks))
		/* This worker is executing something */
		keep_awake = 1;
#endif
	/* if the worker's pipeline is already full or stuck, don't take another task */
	if (worker->pipeline_length && (worker->ntasks == worker->pipeline_length || worker->pipeline_stuck))
		task = NULL;
	/* don't push a task if we are already transferring one */
	else if (worker->task_transferring != NULL)
		task = NULL;
	/* else try to pop a task */
	else
	{
		STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
		task = _starpu_pop_task(worker);
		STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
#if !defined(STARPU_SIMGRID)
		if (worker->state_keep_awake)
		{
			keep_awake = 1;
			worker->state_keep_awake = 0;
		}
#endif
	}

#if !defined(STARPU_SIMGRID)
	if (task == NULL && !keep_awake)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go block just after the scheduler got a new task to be
		 * executed, and thus hanging. */
		_starpu_worker_set_status_sleeping(workerid);
		_starpu_worker_leave_sched_op(worker);
		STARPU_PTHREAD_COND_BROADCAST(&worker->sched_cond);

#ifndef STARPU_NON_BLOCKING_DRIVERS
		if (_starpu_worker_can_block(memnode, worker)
			&& !worker->state_block_in_parallel_req
			&& !worker->state_unblock_in_parallel_req
			&& !_starpu_sched_ctx_last_worker_awake(worker))
		{
#ifdef STARPU_WORKER_CALLBACKS
			if (_starpu_config.conf.callback_worker_going_to_sleep != NULL)
			{
				_starpu_config.conf.callback_worker_going_to_sleep(workerid);
			}
#endif
			do
			{
				STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
				if (!worker->state_keep_awake
					&& _starpu_worker_can_block(memnode, worker)
					&& !worker->state_block_in_parallel_req
					&& !worker->state_unblock_in_parallel_req)
				{
					_starpu_worker_set_status_sleeping(workerid);
					if (_starpu_sched_ctx_last_worker_awake(worker))
					{
						break;
					}
				}
				else
				{
					break;
				}
			}
			while (1);
			worker->state_keep_awake = 0;
			_starpu_worker_set_status_scheduling_done(workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
#ifdef STARPU_WORKER_CALLBACKS
			if (_starpu_config.conf.callback_worker_waking_up != NULL)
			{
				/* the wake up callback should be called once the sched_mutex has been unlocked,
				 * so that an external resource manager can potentially defer the wake-up momentarily if
				 * the corresponding computing unit is still in use by another runtime system */
				_starpu_config.conf.callback_worker_waking_up(workerid);
			}
#endif
		}
		else
#endif
		{
			_starpu_worker_set_status_scheduling_done(workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
			if (_starpu_machine_is_running())
				_starpu_exponential_backoff(worker);
		}
		return NULL;
	}
#endif

	if (task)
	{
		_starpu_worker_set_status_scheduling_done(workerid);
		_starpu_worker_set_status_wakeup(workerid);
	}
	else
	{
		_starpu_worker_set_status_sleeping(workerid);
	}
	worker->spinning_backoff = BACKOFF_MIN;

	_starpu_worker_leave_sched_op(worker);
	STARPU_PTHREAD_COND_BROADCAST(&worker->sched_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);

	STARPU_AYU_PRERUNTASK(_starpu_get_job_associated_to_task(task)->job_id, workerid);

	return task;
}
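
/* Variant of _starpu_get_worker_task() for drivers that run several workers
 * in a single thread: fill tasks[i] for each of the 'nworkers' workers and
 * return how many of them got scheduled. With blocking drivers only a
 * single worker is supported, and the thread goes to sleep when that worker
 * got nothing and is not executing anything. */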
int _starpu_get_multi_worker_task(struct _starpu_worker *workers, struct starpu_task **tasks, int nworkers, unsigned memnode STARPU_ATTRIBUTE_UNUSED)
{
	int i, count = 0;
	struct _starpu_job *j;
	int is_parallel_task;
	struct _starpu_combined_worker *combined_worker;
#if !defined(STARPU_NON_BLOCKING_DRIVERS) && !defined(STARPU_SIMGRID)
	int executing = 0;
#endif

#ifndef STARPU_NON_BLOCKING_DRIVERS
	/* This assumes only 1 worker */
	STARPU_ASSERT_MSG(nworkers == 1, "Multiple workers is not yet possible in blocking drivers mode\n");
	_starpu_set_local_worker_key(&workers[0]);
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[0].sched_mutex);
	_starpu_worker_enter_sched_op(&workers[0]);
#endif
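	/* For each worker: either hand it a popped task through tasks[i], or
	 * mark it sleeping. state_keep_awake lets the scheduler keep a worker
	 * polling even when it returned no task. */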
	for (i = 0; i < nworkers; i++)
	{
		unsigned keep_awake = 0;
#if !defined(STARPU_NON_BLOCKING_DRIVERS) && !defined(STARPU_SIMGRID)
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
			|| (workers[i].pipeline_length != 0 && workers[i].ntasks))
			/* At least this worker is executing something */
			executing = 1;
#endif
		/* if the worker is already executing a task (or its pipeline is full or stuck), don't take another */
		if ((workers[i].pipeline_length == 0 && workers[i].current_task)
			|| (workers[i].pipeline_length != 0 &&
				(workers[i].ntasks == workers[i].pipeline_length
				 || workers[i].pipeline_stuck)))
		{
			tasks[i] = NULL;
		}
		/* don't push a task if we are already transferring one */
		else if (workers[i].task_transferring != NULL)
		{
			tasks[i] = NULL;
		}
		/* else try to pop a task */
		else
		{
#ifdef STARPU_NON_BLOCKING_DRIVERS
			_starpu_set_local_worker_key(&workers[i]);
			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[i].sched_mutex);
			_starpu_worker_enter_sched_op(&workers[i]);
#endif
			_starpu_worker_set_status_scheduling(workers[i].workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
			tasks[i] = _starpu_pop_task(&workers[i]);
			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&workers[i].sched_mutex);
			if (workers[i].state_keep_awake)
			{
				keep_awake = workers[i].state_keep_awake;
				workers[i].state_keep_awake = 0;
			}
			if (tasks[i] != NULL || keep_awake)
			{
				_starpu_worker_set_status_scheduling_done(workers[i].workerid);
				_starpu_worker_set_status_wakeup(workers[i].workerid);
				STARPU_PTHREAD_COND_BROADCAST(&workers[i].sched_cond);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				_starpu_worker_leave_sched_op(&workers[i]);
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
#endif
				count++;
				if (tasks[i] == NULL)
					/* no task, but keep_awake */
					continue;

				j = _starpu_get_job_associated_to_task(tasks[i]);
				is_parallel_task = (j->task_size > 1);
				if (workers[i].pipeline_length)
					workers[i].current_tasks[(workers[i].first_task + workers[i].ntasks) % STARPU_MAX_PIPELINE] = tasks[i];
				else
					workers[i].current_task = j->task;
				workers[i].ntasks++;

				/* Get the rank in case it is a parallel task */
				if (is_parallel_task)
				{
					STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
					workers[i].current_rank = j->active_task_alias_count++;
					STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

					if (j->combined_workerid != -1)
					{
						combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);
						workers[i].combined_workerid = j->combined_workerid;
						workers[i].worker_size = combined_worker->worker_size;
					}
				}
				else
				{
					workers[i].combined_workerid = workers[i].workerid;
					workers[i].worker_size = 1;
					workers[i].current_rank = 0;
				}
				STARPU_AYU_PRERUNTASK(_starpu_get_job_associated_to_task(tasks[i])->job_id, workers[i].workerid);
			}
			else
			{
				_starpu_worker_set_status_sleeping(workers[i].workerid);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				_starpu_worker_leave_sched_op(&workers[i]);
#endif
				STARPU_PTHREAD_COND_BROADCAST(&workers[i].sched_cond);
#ifdef STARPU_NON_BLOCKING_DRIVERS
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[i].sched_mutex);
#endif
			}
		}
	}
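
	/* Blocking-drivers epilogue: mirrors the sleep path of
	 * _starpu_get_worker_task(), but for the single supported worker. */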
#if !defined(STARPU_NON_BLOCKING_DRIVERS)
#if !defined(STARPU_SIMGRID)
	/* Block the assumed-to-be-only worker */
	struct _starpu_worker *worker = &workers[0];
	unsigned workerid = workers[0].workerid;

	if (!count && !executing)
	{
		/* Didn't get a task to run and none are running, go to sleep */

		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go block just after the scheduler got a new task to be
		 * executed, and thus hanging. */
		_starpu_worker_set_status_sleeping(workerid);
		_starpu_worker_leave_sched_op(worker);

		if (_starpu_worker_can_block(memnode, worker)
			&& !worker->state_block_in_parallel_req
			&& !worker->state_unblock_in_parallel_req
			&& !_starpu_sched_ctx_last_worker_awake(worker))
		{
#ifdef STARPU_WORKER_CALLBACKS
			if (_starpu_config.conf.callback_worker_going_to_sleep != NULL)
			{
				_starpu_config.conf.callback_worker_going_to_sleep(workerid);
			}
#endif
			do
			{
				STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
				if (!worker->state_keep_awake
					&& _starpu_worker_can_block(memnode, worker)
					&& !worker->state_block_in_parallel_req
					&& !worker->state_unblock_in_parallel_req)
				{
					_starpu_worker_set_status_sleeping(workerid);
					if (_starpu_sched_ctx_last_worker_awake(worker))
					{
						break;
					}
				}
				else
				{
					break;
				}
			}
			while (1);
			worker->state_keep_awake = 0;
			_starpu_worker_set_status_scheduling_done(workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
#ifdef STARPU_WORKER_CALLBACKS
			if (_starpu_config.conf.callback_worker_waking_up != NULL)
			{
				/* the wake up callback should be called once the sched_mutex has been unlocked,
				 * so that an external resource manager can potentially defer the wake-up momentarily if
				 * the corresponding computing unit is still in use by another runtime system */
				_starpu_config.conf.callback_worker_waking_up(workerid);
			}
#endif
		}
		else
		{
			_starpu_worker_set_status_scheduling_done(workerid);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
			if (_starpu_machine_is_running())
				_starpu_exponential_backoff(worker);
		}
		return 0;
	}
	_starpu_worker_set_status_wakeup(workerid);
	worker->spinning_backoff = BACKOFF_MIN;
#endif /* !STARPU_SIMGRID */

	_starpu_worker_leave_sched_op(&workers[0]);
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&workers[0].sched_mutex);
#endif /* !STARPU_NON_BLOCKING_DRIVERS */
	return count;
}