/* drivers/driver_common/driver_common.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2014 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. * Copyright (C) 2014 Inria
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #include <math.h>
  20. #include <starpu.h>
  21. #include <starpu_profiling.h>
  22. #include <profiling/profiling.h>
  23. #include <common/utils.h>
  24. #include <core/debug.h>
  25. #include <core/sched_ctx.h>
  26. #include <drivers/driver_common/driver_common.h>
  27. #include <starpu_top.h>
  28. #include <core/sched_policy.h>
  29. #include <top/starpu_top_core.h>
  30. #include <core/debug.h>
  31. #define BACKOFF_MAX 32 /* TODO : use parameter to define them */
  32. #define BACKOFF_MIN 1
  33. void _starpu_driver_start_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_start, int rank, int profiling)
  34. {
  35. struct starpu_task *task = j->task;
  36. struct starpu_codelet *cl = task->cl;
  37. struct starpu_profiling_task_info *profiling_info;
  38. int starpu_top=_starpu_top_status_get();
  39. int workerid = worker->workerid;
  40. unsigned calibrate_model = 0;
  41. if (cl->model && cl->model->benchmarking)
  42. calibrate_model = 1;
  43. /* If the job is executed on a combined worker there is no need for the
  44. * scheduler to process it : it doesn't contain any valuable data
  45. * as it's not linked to an actual worker */
  46. if (j->task_size == 1)
  47. _starpu_sched_pre_exec_hook(task);
  48. worker->status = STATUS_EXECUTING;
  49. task->status = STARPU_TASK_RUNNING;
  50. if (rank == 0)
  51. {
  52. #ifdef HAVE_AYUDAME_H
  53. if (AYU_event) AYU_event(AYU_RUNTASK, j->job_id, NULL);
  54. #endif
  55. cl->per_worker_stats[workerid]++;
  56. profiling_info = task->profiling_info;
  57. if ((profiling && profiling_info) || calibrate_model || starpu_top)
  58. {
  59. _starpu_clock_gettime(codelet_start);
  60. _starpu_worker_register_executing_start_date(workerid, codelet_start);
  61. }
  62. }
  63. if (starpu_top)
  64. _starpu_top_task_started(task,workerid,codelet_start);
  65. _STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
  66. }
  67. void _starpu_driver_end_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_end, int rank, int profiling)
  68. {
  69. struct starpu_task *task = j->task;
  70. struct starpu_codelet *cl = task->cl;
  71. struct starpu_profiling_task_info *profiling_info = task->profiling_info;
  72. int starpu_top=_starpu_top_status_get();
  73. int workerid = worker->workerid;
  74. unsigned calibrate_model = 0;
  75. _STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
  76. if (cl && cl->model && cl->model->benchmarking)
  77. calibrate_model = 1;
  78. if (rank == 0)
  79. {
  80. if ((profiling && profiling_info) || calibrate_model || starpu_top)
  81. _starpu_clock_gettime(codelet_end);
  82. #ifdef HAVE_AYUDAME_H
  83. if (AYU_event) AYU_event(AYU_POSTRUNTASK, j->job_id, NULL);
  84. #endif
  85. }
  86. if (starpu_top)
  87. _starpu_top_task_ended(task,workerid,codelet_end);
  88. worker->status = STATUS_UNKNOWN;
  89. }
  90. void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker,
  91. struct starpu_perfmodel_arch* perf_arch,
  92. struct timespec *codelet_start, struct timespec *codelet_end, int profiling)
  93. {
  94. struct starpu_profiling_task_info *profiling_info = j->task->profiling_info;
  95. struct timespec measured_ts;
  96. double measured;
  97. int workerid = worker->workerid;
  98. struct starpu_codelet *cl = j->task->cl;
  99. int calibrate_model = 0;
  100. int updated = 0;
  101. #ifndef STARPU_SIMGRID
  102. if (cl->model && cl->model->benchmarking)
  103. calibrate_model = 1;
  104. #endif
  105. if ((profiling && profiling_info) || calibrate_model)
  106. {
  107. starpu_timespec_sub(codelet_end, codelet_start, &measured_ts);
  108. measured = starpu_timing_timespec_to_us(&measured_ts);
  109. if (profiling && profiling_info)
  110. {
  111. memcpy(&profiling_info->start_time, codelet_start, sizeof(struct timespec));
  112. memcpy(&profiling_info->end_time, codelet_end, sizeof(struct timespec));
  113. profiling_info->workerid = workerid;
  114. _starpu_worker_update_profiling_info_executing(workerid, &measured_ts, 1,
  115. profiling_info->used_cycles,
  116. profiling_info->stall_cycles,
  117. profiling_info->power_consumed);
  118. updated = 1;
  119. }
  120. if (calibrate_model)
  121. {
  122. #ifdef STARPU_OPENMP
  123. double time_consumed = measured;
  124. unsigned do_update_time_model;
  125. if (j->continuation)
  126. {
  127. /* The job is only paused, thus we accumulate
  128. * its timing, but we don't update its
  129. * perfmodel now. */
  130. starpu_timespec_accumulate(&j->cumulated_ts, &measured_ts);
  131. do_update_time_model = 0;
  132. }
  133. else
  134. {
  135. if (j->discontinuous)
  136. {
  137. /* The job was paused at least once but is now
  138. * really completing. We need to take into
  139. * account its past execution time in its
  140. * perfmodel. */
  141. starpu_timespec_accumulate(&measured_ts, &j->cumulated_ts);
  142. time_consumed = starpu_timing_timespec_to_us(&measured_ts);
  143. }
  144. do_update_time_model = 1;
  145. }
  146. #else
  147. const unsigned do_update_time_model = 1;
  148. const double time_consumed = measured;
  149. #endif
  150. if (do_update_time_model)
  151. {
  152. _starpu_update_perfmodel_history(j, j->task->cl->model, perf_arch, worker_args->devid, time_consumed, j->nimpl);
  153. }
  154. }
  155. }
  156. if (!updated)
  157. _starpu_worker_update_profiling_info_executing(workerid, NULL, 1, 0, 0, 0);
  158. if (profiling_info && profiling_info->power_consumed && cl->power_model && cl->power_model->benchmarking)
  159. {
  160. #ifdef STARPU_OPENMP
  161. double power_consumed = profiling_info->power_consumed;
  162. unsigned do_update_power_model;
  163. if (j->continuation)
  164. {
  165. j->cumulated_power_consumed += power_consumed;
  166. do_update_power_model = 0;
  167. }
  168. else
  169. {
  170. if (j->discontinuous)
  171. {
  172. power_consumed += j->cumulated_power_consumed;
  173. }
  174. do_update_power_model = 1;
  175. }
  176. #else
  177. const double power_consumed = profiling_info->power_consumed;
  178. const unsigned do_update_power_model = 1;
  179. #endif
  180. if (do_update_power_model)
  181. {
  182. _starpu_update_perfmodel_history(j, j->task->cl->power_model, perf_arch, worker_args->devid, power_consumed, j->nimpl);
  183. }
  184. }
  185. }
  186. static void _starpu_worker_set_status_scheduling(int workerid)
  187. {
  188. if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING
  189. && _starpu_worker_get_status(workerid) != STATUS_SCHEDULING)
  190. {
  191. _STARPU_TRACE_WORKER_SCHEDULING_START;
  192. _starpu_worker_set_status(workerid, STATUS_SCHEDULING);
  193. }
  194. }
  195. static void _starpu_worker_set_status_scheduling_done(int workerid)
  196. {
  197. if (_starpu_worker_get_status(workerid) == STATUS_SCHEDULING)
  198. {
  199. _STARPU_TRACE_WORKER_SCHEDULING_END;
  200. _starpu_worker_set_status(workerid, STATUS_UNKNOWN);
  201. }
  202. }
  203. static void _starpu_worker_set_status_sleeping(int workerid)
  204. {
  205. if ( _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
  206. _starpu_worker_set_status(workerid, STATUS_SLEEPING);
  207. else if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING)
  208. {
  209. _STARPU_TRACE_WORKER_SLEEP_START;
  210. _starpu_worker_restart_sleeping(workerid);
  211. _starpu_worker_set_status(workerid, STATUS_SLEEPING);
  212. }
  213. }
  214. static void _starpu_worker_set_status_wakeup(int workerid)
  215. {
  216. if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING || _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
  217. {
  218. _STARPU_TRACE_WORKER_SLEEP_END;
  219. _starpu_worker_stop_sleeping(workerid);
  220. _starpu_worker_set_status(workerid, STATUS_UNKNOWN);
  221. }
  222. }
  223. static void _starpu_exponential_backoff(struct _starpu_worker *worker)
  224. {
  225. int delay = worker->spinning_backoff;
  226. if (worker->spinning_backoff < BACKOFF_MAX)
  227. worker->spinning_backoff<<=1;
  228. while(delay--)
  229. STARPU_UYIELD();
  230. }
/* Workers may block when there is no work to do at all. */
/* Pop one task for the given worker, honoring scheduling-context parallel
 * sections first.  Returns the task to execute, or NULL when nothing was
 * available (in which case the worker either blocked on its condition
 * variable or performed an exponential backoff before returning).
 * Takes and releases worker->sched_mutex internally. */
struct starpu_task *_starpu_get_worker_task(struct _starpu_worker *worker, int workerid, unsigned memnode)
{
	STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
	struct starpu_task *task;
	unsigned needed = 1;
	_starpu_worker_set_status_scheduling(workerid);
	while(needed)
	{
		struct _starpu_sched_ctx *sched_ctx = NULL;
		struct _starpu_sched_ctx_list *l = NULL;
		/* Walk every scheduling context this worker belongs to: if one of
		 * them put the worker in a parallel section, block on that
		 * context's condition variable until the section is over. */
		for (l = worker->sched_ctx_list; l; l = l->next)
		{
			sched_ctx = _starpu_get_sched_ctx_struct(l->sched_ctx);
			if(sched_ctx && sched_ctx->id > 0 && sched_ctx->id < STARPU_NMAX_SCHED_CTXS)
			{
				STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->parallel_sect_mutex[workerid]);
				if(sched_ctx->parallel_sect[workerid])
				{
					/* don't let the worker sleep with the sched_mutex taken */
					/* we need it until here bc of the list of ctxs of the workers
					   that can change in another thread */
					STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
					needed = 0;
					_starpu_sched_ctx_signal_worker_blocked(sched_ctx->id, workerid);
					STARPU_PTHREAD_COND_WAIT(&sched_ctx->parallel_sect_cond[workerid], &sched_ctx->parallel_sect_mutex[workerid]);
					_starpu_sched_ctx_signal_worker_woke_up(sched_ctx->id, workerid);
					sched_ctx->parallel_sect[workerid] = 0;
					STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
				}
				STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->parallel_sect_mutex[workerid]);
			}
			if(!needed)
				break;
		}
		/* don't worry if the value is not correct (no lock) it will do it next time */
		if(worker->tmp_sched_ctx != -1)
		{
			sched_ctx = _starpu_get_sched_ctx_struct(worker->tmp_sched_ctx);
			STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->parallel_sect_mutex[workerid]);
			if(sched_ctx->parallel_sect[workerid])
			{
//				needed = 0;
				STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
				_starpu_sched_ctx_signal_worker_blocked(sched_ctx->id, workerid);
				STARPU_PTHREAD_COND_WAIT(&sched_ctx->parallel_sect_cond[workerid], &sched_ctx->parallel_sect_mutex[workerid]);
				_starpu_sched_ctx_signal_worker_woke_up(sched_ctx->id, workerid);
				sched_ctx->parallel_sect[workerid] = 0;
				STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
			}
			STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->parallel_sect_mutex[workerid]);
		}
		/* Toggle: if we blocked above (needed was cleared) loop once more to
		 * re-check the contexts; otherwise leave the loop. */
		needed = !needed;
	}
	task = _starpu_pop_task(worker);
	if (task == NULL)
	{
		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go block just after the scheduler got a new task to be
		 * executed, and thus hanging. */
		_starpu_worker_set_status_sleeping(workerid);
		if (_starpu_worker_can_block(memnode) && !_starpu_sched_ctx_last_worker_awake(worker))
		{
			/* Block until another thread signals new work. */
			STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
		}
		else
		{
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
			if (_starpu_machine_is_running())
			{
				/* Cannot block: spin politely with backoff instead. */
				_starpu_exponential_backoff(worker);
#ifdef STARPU_SIMGRID
				static int warned;
				if (!warned)
				{
					warned = 1;
					_STARPU_DISP("Has to make simgrid spin for CPU idle time. You can try to pass --enable-blocking-drivers to ./configure to avoid this\n");
				}
				MSG_process_sleep(0.000010);
#endif
			}
		}
		return NULL;
	}
	_starpu_worker_set_status_scheduling_done(workerid);
	_starpu_worker_set_status_wakeup(workerid);
	/* Got a task: reset the spin backoff for the next idle period. */
	worker->spinning_backoff = BACKOFF_MIN;
	STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
#ifdef HAVE_AYUDAME_H
	if (AYU_event)
	{
		intptr_t id = workerid;
		AYU_event(AYU_PRERUNTASK, _starpu_get_job_associated_to_task(task)->job_id, &id);
	}
#endif
	return task;
}
/* Try to pop one task for each of the nworkers workers.  tasks[i] receives
 * the task popped for workers[i] (NULL when that worker is already busy or
 * nothing was available).  For parallel tasks, also assigns the worker's
 * rank within the combined worker.  Returns the number of tasks obtained. */
int _starpu_get_multi_worker_task(struct _starpu_worker *workers, struct starpu_task ** tasks, int nworkers)
{
	int i, count = 0;
	struct _starpu_job * j;
	int is_parallel_task;
	struct _starpu_combined_worker *combined_worker;
	/* for each worker */
	for (i = 0; i < nworkers; i++)
	{
		/* if the worker is already executing a task then don't pop another */
		if(workers[i].current_task)
		{
			tasks[i] = NULL;
		}
		/* else try to pop a task */
		else
		{
			STARPU_PTHREAD_MUTEX_LOCK(&workers[i].sched_mutex);
			_starpu_worker_set_status_scheduling(workers[i].workerid);
			_starpu_set_local_worker_key(&workers[i]);
			tasks[i] = _starpu_pop_task(&workers[i]);
			if(tasks[i] != NULL)
			{
				_starpu_worker_set_status_scheduling_done(workers[i].workerid);
				_starpu_worker_set_status_wakeup(workers[i].workerid);
				STARPU_PTHREAD_MUTEX_UNLOCK(&workers[i].sched_mutex);
				count ++;
				j = _starpu_get_job_associated_to_task(tasks[i]);
				is_parallel_task = (j->task_size > 1);
				workers[i].current_task = j->task;
				/* Get the rank in case it is a parallel task */
				if (is_parallel_task)
				{
					/* The alias counter hands each member of the combined
					 * worker a distinct rank; guard it with the job mutex. */
					STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
					workers[i].current_rank = j->active_task_alias_count++;
					STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
					combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);
					workers[i].combined_workerid = j->combined_workerid;
					workers[i].worker_size = combined_worker->worker_size;
				}
				else
				{
					/* Sequential task: the worker stands alone. */
					workers[i].combined_workerid = workers[i].workerid;
					workers[i].worker_size = 1;
					workers[i].current_rank = 0;
				}
			}
			else
			{
				/* Nothing popped: mark the worker as sleeping. */
				_starpu_worker_set_status_sleeping(workers[i].workerid);
				STARPU_PTHREAD_MUTEX_UNLOCK(&workers[i].sched_mutex);
			}
		}
	}
	return count;
}