/* driver_common.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2014 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <math.h>
  19. #include <starpu.h>
  20. #include <starpu_profiling.h>
  21. #include <profiling/profiling.h>
  22. #include <common/utils.h>
  23. #include <core/debug.h>
  24. #include <core/sched_ctx.h>
  25. #include <drivers/driver_common/driver_common.h>
  26. #include <starpu_top.h>
  27. #include <core/sched_policy.h>
  28. #include <top/starpu_top_core.h>
  29. #include <core/debug.h>
  30. #define BACKOFF_MAX 32 /* TODO : use parameter to define them */
  31. #define BACKOFF_MIN 1
  32. void _starpu_driver_start_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_start, int rank, int profiling)
  33. {
  34. struct starpu_task *task = j->task;
  35. struct starpu_codelet *cl = task->cl;
  36. struct starpu_profiling_task_info *profiling_info;
  37. int starpu_top=_starpu_top_status_get();
  38. int workerid = worker->workerid;
  39. unsigned calibrate_model = 0;
  40. if (cl->model && cl->model->benchmarking)
  41. calibrate_model = 1;
  42. /* If the job is executed on a combined worker there is no need for the
  43. * scheduler to process it : it doesn't contain any valuable data
  44. * as it's not linked to an actual worker */
  45. if (j->task_size == 1)
  46. _starpu_sched_pre_exec_hook(task);
  47. worker->status = STATUS_EXECUTING;
  48. task->status = STARPU_TASK_RUNNING;
  49. if (rank == 0)
  50. {
  51. #ifdef HAVE_AYUDAME_H
  52. if (AYU_event) AYU_event(AYU_RUNTASK, j->job_id, NULL);
  53. #endif
  54. cl->per_worker_stats[workerid]++;
  55. profiling_info = task->profiling_info;
  56. if ((profiling && profiling_info) || calibrate_model || starpu_top)
  57. {
  58. _starpu_clock_gettime(codelet_start);
  59. _starpu_worker_register_executing_start_date(workerid, codelet_start);
  60. }
  61. }
  62. if (starpu_top)
  63. _starpu_top_task_started(task,workerid,codelet_start);
  64. // Find out if the worker is the master of a parallel context
  65. struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
  66. STARPU_ASSERT_MSG(sched_ctx != NULL, "there should be a worker %d in the ctx of this job \n", worker->workerid);
  67. if(!sched_ctx->sched_policy)
  68. {
  69. if(!sched_ctx->awake_workers && sched_ctx->main_master == worker->workerid)
  70. {
  71. struct starpu_worker_collection *workers = sched_ctx->workers;
  72. struct starpu_sched_ctx_iterator it;
  73. if (workers->init_iterator)
  74. workers->init_iterator(workers, &it);
  75. while (workers->has_next(workers, &it))
  76. {
  77. int _workerid = workers->get_next(workers, &it);
  78. if (_workerid != workerid)
  79. {
  80. struct _starpu_worker *worker = _starpu_get_worker_struct(_workerid);
  81. _starpu_driver_start_job(worker, j, &worker->perf_arch, codelet_start, rank, profiling);
  82. }
  83. }
  84. }
  85. if(sched_ctx->main_master == worker->workerid)
  86. /* if the worker is the master of a ctx trace the perf_arch of the context */
  87. _STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, &sched_ctx->perf_arch, workerid);
  88. }
  89. else
  90. _STARPU_TRACE_START_CODELET_BODY(j, j->nimpl, perf_arch, workerid);
  91. }
/* Mark the end of the execution of job J on WORKER: emit the end-of-codelet
 * trace event, record the end time for profiling/calibration, notify
 * starpu_top and reset the worker status.  RANK and PROFILING have the same
 * meaning as in _starpu_driver_start_job.  When the worker is the master of
 * a context without a scheduling policy whose workers are not kept awake,
 * the call is propagated recursively to the other workers of that context. */
void _starpu_driver_end_job(struct _starpu_worker *worker, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_end, int rank, int profiling)
{
	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;
	struct starpu_profiling_task_info *profiling_info = task->profiling_info;
	int starpu_top=_starpu_top_status_get();
	int workerid = worker->workerid;
	unsigned calibrate_model = 0;

	// Find out if the worker is the master of a parallel context
	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	STARPU_ASSERT_MSG(sched_ctx != NULL, "there should be a worker %d in the ctx of this job \n", worker->workerid);
	if (!sched_ctx->sched_policy)
	{
		/* Context without a scheduling policy: only the master emits the
		 * trace event, using the context's perf_arch rather than the
		 * worker's own. */
		if(sched_ctx->main_master == worker->workerid)
			_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, &(sched_ctx->perf_arch), workerid);
	}
	else
		_STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, perf_arch, workerid);

	/* Note: cl is checked for NULL here, unlike in the start path. */
	if (cl && cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	if (rank == 0)
	{
		/* Only rank 0 of a parallel team records the end time. */
		if ((profiling && profiling_info) || calibrate_model || starpu_top)
			_starpu_clock_gettime(codelet_end);
#ifdef HAVE_AYUDAME_H
		if (AYU_event) AYU_event(AYU_POSTRUNTASK, j->job_id, NULL);
#endif
	}

	if (starpu_top)
		_starpu_top_task_ended(task,workerid,codelet_end);

	worker->status = STATUS_UNKNOWN;

	/* Master of a policy-less context whose workers are not kept awake:
	 * propagate the end-of-job notification to the other workers, each
	 * with its own perf_arch. */
	if(!sched_ctx->sched_policy && !sched_ctx->awake_workers &&
	   sched_ctx->main_master == worker->workerid)
	{
		struct starpu_worker_collection *workers = sched_ctx->workers;
		struct starpu_sched_ctx_iterator it;

		if (workers->init_iterator)
			workers->init_iterator(workers, &it);
		while (workers->has_next(workers, &it))
		{
			int _workerid = workers->get_next(workers, &it);
			if (_workerid != workerid)
			{
				struct _starpu_worker *_worker = _starpu_get_worker_struct(_workerid);
				_starpu_driver_end_job(_worker, j, &_worker->perf_arch, codelet_end, rank, profiling);
			}
		}
	}
}
  141. void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker,
  142. struct starpu_perfmodel_arch* perf_arch,
  143. struct timespec *codelet_start, struct timespec *codelet_end, int profiling)
  144. {
  145. struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
  146. STARPU_ASSERT_MSG(sched_ctx != NULL, "there should be a worker %d in the ctx of this job \n", worker->workerid);
  147. if (!sched_ctx->sched_policy)
  148. {
  149. if(sched_ctx->main_master == worker->workerid)
  150. *perf_arch = sched_ctx->perf_arch;
  151. else
  152. return;
  153. }
  154. struct starpu_profiling_task_info *profiling_info = j->task->profiling_info;
  155. struct timespec measured_ts;
  156. double measured;
  157. int workerid = worker->workerid;
  158. struct starpu_codelet *cl = j->task->cl;
  159. int calibrate_model = 0;
  160. int updated = 0;
  161. #ifndef STARPU_SIMGRID
  162. if (cl->model && cl->model->benchmarking)
  163. calibrate_model = 1;
  164. #endif
  165. if ((profiling && profiling_info) || calibrate_model)
  166. {
  167. starpu_timespec_sub(codelet_end, codelet_start, &measured_ts);
  168. measured = starpu_timing_timespec_to_us(&measured_ts);
  169. if (profiling && profiling_info)
  170. {
  171. memcpy(&profiling_info->start_time, codelet_start, sizeof(struct timespec));
  172. memcpy(&profiling_info->end_time, codelet_end, sizeof(struct timespec));
  173. profiling_info->workerid = workerid;
  174. _starpu_worker_update_profiling_info_executing(workerid, &measured_ts, 1,
  175. profiling_info->used_cycles,
  176. profiling_info->stall_cycles,
  177. profiling_info->power_consumed);
  178. updated = 1;
  179. }
  180. if (calibrate_model)
  181. _starpu_update_perfmodel_history(j, j->task->cl->model, perf_arch, worker->devid, measured,j->nimpl);
  182. }
  183. if (!updated)
  184. _starpu_worker_update_profiling_info_executing(workerid, NULL, 1, 0, 0, 0);
  185. if (profiling_info && profiling_info->power_consumed && cl->power_model && cl->power_model->benchmarking)
  186. {
  187. _starpu_update_perfmodel_history(j, j->task->cl->power_model, perf_arch, worker->devid, profiling_info->power_consumed,j->nimpl);
  188. }
  189. }
  190. static void _starpu_worker_set_status_scheduling(int workerid)
  191. {
  192. if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING
  193. && _starpu_worker_get_status(workerid) != STATUS_SCHEDULING)
  194. {
  195. _STARPU_TRACE_WORKER_SCHEDULING_START;
  196. _starpu_worker_set_status(workerid, STATUS_SCHEDULING);
  197. }
  198. }
  199. static void _starpu_worker_set_status_scheduling_done(int workerid)
  200. {
  201. if (_starpu_worker_get_status(workerid) == STATUS_SCHEDULING)
  202. {
  203. _STARPU_TRACE_WORKER_SCHEDULING_END;
  204. _starpu_worker_set_status(workerid, STATUS_UNKNOWN);
  205. }
  206. }
  207. static void _starpu_worker_set_status_sleeping(int workerid)
  208. {
  209. if ( _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
  210. _starpu_worker_set_status(workerid, STATUS_SLEEPING);
  211. else if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING)
  212. {
  213. _STARPU_TRACE_WORKER_SLEEP_START;
  214. _starpu_worker_restart_sleeping(workerid);
  215. _starpu_worker_set_status(workerid, STATUS_SLEEPING);
  216. }
  217. }
  218. static void _starpu_worker_set_status_wakeup(int workerid)
  219. {
  220. if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING || _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
  221. {
  222. _STARPU_TRACE_WORKER_SLEEP_END;
  223. _starpu_worker_stop_sleeping(workerid);
  224. _starpu_worker_set_status(workerid, STATUS_UNKNOWN);
  225. }
  226. }
  227. static void _starpu_exponential_backoff(struct _starpu_worker *worker)
  228. {
  229. int delay = worker->spinning_backoff;
  230. if (worker->spinning_backoff < BACKOFF_MAX)
  231. worker->spinning_backoff<<=1;
  232. while(delay--)
  233. STARPU_UYIELD();
  234. }
/* Workers may block when there is no work to do at all. */
/* Pop the next task for WORKER (tied to memory node MEMNODE).  Before
 * popping, honour any pending "parallel section" requests from the worker's
 * scheduling contexts, blocking on the context's condition variable until
 * released.  Returns NULL when no task is available, after either blocking
 * on the worker's own condition (when allowed) or spinning with exponential
 * backoff. */
struct starpu_task *_starpu_get_worker_task(struct _starpu_worker *worker, int workerid, unsigned memnode)
{
	STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
	struct starpu_task *task;
	unsigned needed = 1;
	_starpu_worker_set_status_scheduling(workerid);
	while(needed)
	{
		struct _starpu_sched_ctx *sched_ctx = NULL;
		struct _starpu_sched_ctx_list *l = NULL;
		/* Scan every context this worker belongs to for a pending
		 * parallel-section request. */
		for (l = worker->sched_ctx_list; l; l = l->next)
		{
			sched_ctx = _starpu_get_sched_ctx_struct(l->sched_ctx);
			if(sched_ctx && sched_ctx->id > 0 && sched_ctx->id < STARPU_NMAX_SCHED_CTXS)
			{
				STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->parallel_sect_mutex[workerid]);
				/* Policy-less context with awake workers: every worker
				 * except the main master is a slave. */
				if(!sched_ctx->sched_policy && sched_ctx->awake_workers)
					worker->slave = sched_ctx->main_master != workerid;

				if(sched_ctx->parallel_sect[workerid])
				{
					/* don't let the worker sleep with the sched_mutex taken */
					/* we need it until here bc of the list of ctxs of the workers
					   that can change in another thread */
					STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
					needed = 0;
					_starpu_sched_ctx_signal_worker_blocked(sched_ctx->id, workerid);
					STARPU_PTHREAD_COND_WAIT(&sched_ctx->parallel_sect_cond[workerid], &sched_ctx->parallel_sect_mutex[workerid]);
					_starpu_sched_ctx_signal_worker_woke_up(sched_ctx->id, workerid);
					sched_ctx->parallel_sect[workerid] = 0;
					STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
				}
				STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->parallel_sect_mutex[workerid]);
			}
			/* needed == 0 means we just came back from a parallel
			 * section: restart the scan from scratch. */
			if(!needed)
				break;
		}
		/* don't worry if the value is not correct (no lock) it will do it next time */
		if(worker->tmp_sched_ctx != -1)
		{
			sched_ctx = _starpu_get_sched_ctx_struct(worker->tmp_sched_ctx);
			STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->parallel_sect_mutex[workerid]);
			if(sched_ctx->parallel_sect[workerid])
			{
//				needed = 0;
				STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
				_starpu_sched_ctx_signal_worker_blocked(sched_ctx->id, workerid);
				STARPU_PTHREAD_COND_WAIT(&sched_ctx->parallel_sect_cond[workerid], &sched_ctx->parallel_sect_mutex[workerid]);
				_starpu_sched_ctx_signal_worker_woke_up(sched_ctx->id, workerid);
				sched_ctx->parallel_sect[workerid] = 0;
				STARPU_PTHREAD_MUTEX_LOCK(&worker->sched_mutex);
			}
			STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->parallel_sect_mutex[workerid]);
		}

		/* Loop exactly once more after servicing a parallel section
		 * (needed was cleared), otherwise exit the loop. */
		needed = !needed;
	}

	task = _starpu_pop_task(worker);

	if (task == NULL)
	{
		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go block just after the scheduler got a new task to be
		 * executed, and thus hanging. */
		_starpu_worker_set_status_sleeping(workerid);

		if (_starpu_worker_can_block(memnode) && !_starpu_sched_ctx_last_worker_awake(worker))
		{
			STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
		}
		else
		{
			/* Blocking not allowed: busy-wait with exponential backoff. */
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);
			if (_starpu_machine_is_running())
			{
				_starpu_exponential_backoff(worker);
#ifdef STARPU_SIMGRID
				static int warned;
				if (!warned)
				{
					warned = 1;
					_STARPU_DISP("Has to make simgrid spin for CPU idle time. You can try to pass --enable-blocking-drivers to ./configure to avoid this\n");
				}
				MSG_process_sleep(0.000010);
#endif
			}
		}

		return NULL;
	}

	/* Got a task: leave the scheduling/sleeping states and reset the
	 * spinning backoff to its minimum. */
	_starpu_worker_set_status_scheduling_done(workerid);

	_starpu_worker_set_status_wakeup(workerid);
	worker->spinning_backoff = BACKOFF_MIN;

	STARPU_PTHREAD_MUTEX_UNLOCK(&worker->sched_mutex);

#ifdef HAVE_AYUDAME_H
	if (AYU_event)
	{
		intptr_t id = workerid;
		AYU_event(AYU_PRERUNTASK, _starpu_get_job_associated_to_task(task)->job_id, &id);
	}
#endif

	return task;
}
/* Try to pop one task for each of the NWORKERS workers in WORKERS that is
 * not already executing something; store the result (or NULL) in TASKS[i].
 * For each popped task, set up the worker's current_task, combined-worker
 * information and rank within a parallel team.  Returns the number of tasks
 * obtained. */
int _starpu_get_multi_worker_task(struct _starpu_worker *workers, struct starpu_task ** tasks, int nworkers)
{
	int i, count = 0;
	struct _starpu_job * j;
	int is_parallel_task;
	struct _starpu_combined_worker *combined_worker;
	/*for each worker*/
	for (i = 0; i < nworkers; i++)
	{
		/*if the worker is already executing a task then */
		if(workers[i].current_task)
		{
			tasks[i] = NULL;
		}
		/*else try to pop a task*/
		else
		{
			STARPU_PTHREAD_MUTEX_LOCK(&workers[i].sched_mutex);
			_starpu_worker_set_status_scheduling(workers[i].workerid);
			/* _starpu_pop_task relies on the "local worker" TLS key. */
			_starpu_set_local_worker_key(&workers[i]);
			tasks[i] = _starpu_pop_task(&workers[i]);
			if(tasks[i] != NULL)
			{
				_starpu_worker_set_status_scheduling_done(workers[i].workerid);
				_starpu_worker_set_status_wakeup(workers[i].workerid);
				STARPU_PTHREAD_MUTEX_UNLOCK(&workers[i].sched_mutex);

				count ++;
				j = _starpu_get_job_associated_to_task(tasks[i]);
				is_parallel_task = (j->task_size > 1);

				workers[i].current_task = j->task;
				/* Get the rank in case it is a parallel task */
				if (is_parallel_task)
				{
					/* sync_mutex protects the shared alias counter of the job. */
					STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
					workers[i].current_rank = j->active_task_alias_count++;
					STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

					if(j->combined_workerid != -1)
					{
						combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);
						workers[i].combined_workerid = j->combined_workerid;
						workers[i].worker_size = combined_worker->worker_size;
					}
				}
				else
				{
					/* Sequential task: the worker is a team of one. */
					workers[i].combined_workerid = workers[i].workerid;
					workers[i].worker_size = 1;
					workers[i].current_rank = 0;
				}
			}
			else
			{
				_starpu_worker_set_status_sleeping(workers[i].workerid);
				STARPU_PTHREAD_MUTEX_UNLOCK(&workers[i].sched_mutex);
			}
		}
	}
	return count;
}