/* driver_common.c — StarPU driver-common helpers. */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2014 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <math.h>
  19. #include <starpu.h>
  20. #include <starpu_profiling.h>
  21. #include <profiling/profiling.h>
  22. #include <common/utils.h>
  23. #include <core/debug.h>
  24. #include <core/sched_ctx.h>
  25. #include <drivers/driver_common/driver_common.h>
  26. #include <starpu_top.h>
  27. #include <core/sched_policy.h>
  28. #include <top/starpu_top_core.h>
  29. #include <core/debug.h>
  30. #define BACKOFF_MAX 32 /* TODO : use parameter to define them */
  31. #define BACKOFF_MIN 1
  32. void _starpu_driver_start_job(struct _starpu_worker *args, struct _starpu_job *j, struct timespec *codelet_start, int rank, int profiling)
  33. {
  34. struct starpu_task *task = j->task;
  35. struct starpu_codelet *cl = task->cl;
  36. struct starpu_profiling_task_info *profiling_info;
  37. int starpu_top=_starpu_top_status_get();
  38. int workerid = args->workerid;
  39. unsigned calibrate_model = 0;
  40. if (cl->model && cl->model->benchmarking)
  41. calibrate_model = 1;
  42. /* If the job is executed on a combined worker there is no need for the
  43. * scheduler to process it : it doesn't contain any valuable data
  44. * as it's not linked to an actual worker */
  45. if (j->task_size == 1)
  46. _starpu_sched_pre_exec_hook(task);
  47. args->status = STATUS_EXECUTING;
  48. task->status = STARPU_TASK_RUNNING;
  49. if (rank == 0)
  50. {
  51. #ifdef HAVE_AYUDAME_H
  52. if (AYU_event) AYU_event(AYU_RUNTASK, j->job_id, NULL);
  53. #endif
  54. cl->per_worker_stats[workerid]++;
  55. profiling_info = task->profiling_info;
  56. if ((profiling && profiling_info) || calibrate_model || starpu_top)
  57. {
  58. _starpu_clock_gettime(codelet_start);
  59. _starpu_worker_register_executing_start_date(workerid, codelet_start);
  60. }
  61. }
  62. if (starpu_top)
  63. _starpu_top_task_started(task,workerid,codelet_start);
  64. _STARPU_TRACE_START_CODELET_BODY(j);
  65. }
  66. void _starpu_driver_end_job(struct _starpu_worker *args, struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch STARPU_ATTRIBUTE_UNUSED, struct timespec *codelet_end, int rank, int profiling)
  67. {
  68. struct starpu_task *task = j->task;
  69. struct starpu_codelet *cl = task->cl;
  70. struct starpu_profiling_task_info *profiling_info = task->profiling_info;
  71. int starpu_top=_starpu_top_status_get();
  72. int workerid = args->workerid;
  73. unsigned calibrate_model = 0;
  74. _STARPU_TRACE_END_CODELET_BODY(j, j->nimpl, perf_arch);
  75. if (cl && cl->model && cl->model->benchmarking)
  76. calibrate_model = 1;
  77. if (rank == 0)
  78. {
  79. if ((profiling && profiling_info) || calibrate_model || starpu_top)
  80. _starpu_clock_gettime(codelet_end);
  81. #ifdef HAVE_AYUDAME_H
  82. if (AYU_event) AYU_event(AYU_POSTRUNTASK, j->job_id, NULL);
  83. #endif
  84. }
  85. if (starpu_top)
  86. _starpu_top_task_ended(task,workerid,codelet_end);
  87. args->status = STATUS_UNKNOWN;
  88. }
  89. void _starpu_driver_update_job_feedback(struct _starpu_job *j, struct _starpu_worker *worker_args,
  90. struct starpu_perfmodel_arch* perf_arch,
  91. struct timespec *codelet_start, struct timespec *codelet_end, int profiling)
  92. {
  93. struct starpu_profiling_task_info *profiling_info = j->task->profiling_info;
  94. struct timespec measured_ts;
  95. double measured;
  96. int workerid = worker_args->workerid;
  97. struct starpu_codelet *cl = j->task->cl;
  98. int calibrate_model = 0;
  99. int updated = 0;
  100. #ifndef STARPU_SIMGRID
  101. if (cl->model && cl->model->benchmarking)
  102. calibrate_model = 1;
  103. #endif
  104. if ((profiling && profiling_info) || calibrate_model)
  105. {
  106. starpu_timespec_sub(codelet_end, codelet_start, &measured_ts);
  107. measured = starpu_timing_timespec_to_us(&measured_ts);
  108. if (profiling && profiling_info)
  109. {
  110. memcpy(&profiling_info->start_time, codelet_start, sizeof(struct timespec));
  111. memcpy(&profiling_info->end_time, codelet_end, sizeof(struct timespec));
  112. profiling_info->workerid = workerid;
  113. _starpu_worker_update_profiling_info_executing(workerid, &measured_ts, 1,
  114. profiling_info->used_cycles,
  115. profiling_info->stall_cycles,
  116. profiling_info->power_consumed);
  117. updated = 1;
  118. }
  119. if (calibrate_model)
  120. _starpu_update_perfmodel_history(j, j->task->cl->model, perf_arch, worker_args->devid, measured,j->nimpl);
  121. }
  122. if (!updated)
  123. _starpu_worker_update_profiling_info_executing(workerid, NULL, 1, 0, 0, 0);
  124. if (profiling_info && profiling_info->power_consumed && cl->power_model && cl->power_model->benchmarking)
  125. {
  126. _starpu_update_perfmodel_history(j, j->task->cl->power_model, perf_arch, worker_args->devid, profiling_info->power_consumed,j->nimpl);
  127. }
  128. }
  129. static void _starpu_worker_set_status_sleeping(int workerid)
  130. {
  131. if ( _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
  132. _starpu_worker_set_status(workerid, STATUS_SLEEPING);
  133. else if (_starpu_worker_get_status(workerid) != STATUS_SLEEPING)
  134. {
  135. _STARPU_TRACE_WORKER_SLEEP_START;
  136. _starpu_worker_restart_sleeping(workerid);
  137. _starpu_worker_set_status(workerid, STATUS_SLEEPING);
  138. }
  139. }
  140. static void _starpu_worker_set_status_wakeup(int workerid)
  141. {
  142. if (_starpu_worker_get_status(workerid) == STATUS_SLEEPING || _starpu_worker_get_status(workerid) == STATUS_WAKING_UP)
  143. {
  144. _STARPU_TRACE_WORKER_SLEEP_END;
  145. _starpu_worker_stop_sleeping(workerid);
  146. _starpu_worker_set_status(workerid, STATUS_UNKNOWN);
  147. }
  148. }
  149. static void _starpu_exponential_backoff(struct _starpu_worker *args)
  150. {
  151. int delay = args->spinning_backoff;
  152. if (args->spinning_backoff < BACKOFF_MAX)
  153. args->spinning_backoff<<=1;
  154. while(delay--)
  155. STARPU_UYIELD();
  156. }
/* Workers may block when there is no work to do at all. */
/* Try to obtain a task for worker <args> (id <workerid>, memory node
 * <memnode>).  First honours a pending parallel-section request — blocking
 * on parallel_sect_cond until the section ends, then re-binding the thread —
 * and then pops from the scheduler.  Returns the popped task, or NULL when
 * none is available, in which case the worker either blocks on its sched
 * condition or backs off with an exponential spin. */
struct starpu_task *_starpu_get_worker_task(struct _starpu_worker *args, int workerid, unsigned memnode)
{
	struct starpu_task *task;

	/* Parallel-section handshake: signal this worker as blocked, wait for
	 * the section to finish, re-bind to our CPU and signal wake-up. */
	STARPU_PTHREAD_MUTEX_LOCK(&args->parallel_sect_mutex);
	if(args->parallel_sect)
	{
		_starpu_sched_ctx_signal_worker_blocked(args->workerid);
		STARPU_PTHREAD_COND_WAIT(&args->parallel_sect_cond, &args->parallel_sect_mutex);
		starpu_sched_ctx_bind_current_thread_to_cpuid(args->bindid);
		_starpu_sched_ctx_signal_worker_woke_up(workerid);
		args->parallel_sect = 0;
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&args->parallel_sect_mutex);

	STARPU_PTHREAD_MUTEX_LOCK(&args->sched_mutex);
	task = _starpu_pop_task(args);

	if (task == NULL)
	{
		/* Note: we need to keep the sched condition mutex all along the path
		 * from popping a task from the scheduler to blocking. Otherwise the
		 * driver may go block just after the scheduler got a new task to be
		 * executed, and thus hanging. */
		_starpu_worker_set_status_sleeping(workerid);

		if (_starpu_worker_can_block(memnode) && !_starpu_sched_ctx_last_worker_awake(args))
		{
			/* Blocking drivers: sleep on sched_cond until a task is
			 * pushed or the worker is woken up. */
			STARPU_PTHREAD_COND_WAIT(&args->sched_cond, &args->sched_mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&args->sched_mutex);
		}
		else
		{
			STARPU_PTHREAD_MUTEX_UNLOCK(&args->sched_mutex);
			if (_starpu_machine_is_running())
			{
				/* Cannot block: spin with exponential backoff instead. */
				_starpu_exponential_backoff(args);
#ifdef STARPU_SIMGRID
				static int warned;
				if (!warned)
				{
					warned = 1;
					_STARPU_DISP("Has to make simgrid spin for CPU idle time. You can try to pass --enable-blocking-drivers to ./configure to avoid this\n");
				}
				/* Let simulated time advance instead of busy-spinning. */
				MSG_process_sleep(0.000010);
#endif
			}
		}

		return NULL;
	}

	STARPU_PTHREAD_MUTEX_UNLOCK(&args->sched_mutex);

	_starpu_worker_set_status_wakeup(workerid);
	/* Got real work: reset the spin backoff to its minimum. */
	args->spinning_backoff = BACKOFF_MIN;

#ifdef HAVE_AYUDAME_H
	if (AYU_event)
	{
		intptr_t id = workerid;
		AYU_event(AYU_PRERUNTASK, _starpu_get_job_associated_to_task(task)->job_id, &id);
	}
#endif

	return task;
}
  216. int _starpu_get_multi_worker_task(struct _starpu_worker *workers, struct starpu_task ** tasks, int nworkers)
  217. {
  218. int i, count = 0;
  219. struct _starpu_job * j;
  220. int is_parallel_task;
  221. struct _starpu_combined_worker *combined_worker;
  222. /*for each worker*/
  223. for (i = 0; i < nworkers; i++)
  224. {
  225. /*if the worker is already executinf a task then */
  226. if(workers[i].current_task)
  227. {
  228. tasks[i] = NULL;
  229. }
  230. /*else try to pop a task*/
  231. else
  232. {
  233. STARPU_PTHREAD_MUTEX_LOCK(&workers[i].sched_mutex);
  234. _starpu_set_local_worker_key(&workers[i]);
  235. tasks[i] = _starpu_pop_task(&workers[i]);
  236. STARPU_PTHREAD_MUTEX_UNLOCK(&workers[i].sched_mutex);
  237. if(tasks[i] != NULL)
  238. {
  239. count ++;
  240. j = _starpu_get_job_associated_to_task(tasks[i]);
  241. is_parallel_task = (j->task_size > 1);
  242. workers[i].current_task = j->task;
  243. /* Get the rank in case it is a parallel task */
  244. if (is_parallel_task)
  245. {
  246. STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
  247. workers[i].current_rank = j->active_task_alias_count++;
  248. STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
  249. combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);
  250. workers[i].combined_workerid = j->combined_workerid;
  251. workers[i].worker_size = combined_worker->worker_size;
  252. }
  253. else
  254. {
  255. workers[i].combined_workerid = workers[i].workerid;
  256. workers[i].worker_size = 1;
  257. workers[i].current_rank = 0;
  258. }
  259. _starpu_worker_set_status_wakeup(workers[i].workerid);
  260. }
  261. else
  262. {
  263. _starpu_worker_set_status_sleeping(workers[i].workerid);
  264. }
  265. }
  266. }
  267. return count;
  268. }