work_stealing_policy.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2013  Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012, 2013  Centre National de la Recherche Scientifique
 * Copyright (C) 2011, 2012  INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/* Work stealing policy */

#include <float.h>

#include <core/workers.h>
#include <sched_policies/deque_queues.h>
#include <core/debug.h>

#ifdef HAVE_AYUDAME_H
#include <Ayudame.h>
#endif

struct _starpu_work_stealing_data
{
	struct _starpu_deque_jobq **queue_array;
	unsigned rr_worker; /* not used in this file */
	/* keep track of the work performed from the beginning of the algorithm
	 * to make better decisions about which queue to select when stealing
	 * or deferring work */
	unsigned performed_total;
	/* positions of the last steal and the last push,
	 * used by the round-robin strategies */
	unsigned last_pop_worker;
	unsigned last_push_worker;
};

#ifdef USE_OVERLOAD

/**
 * Minimum number of tasks we wait to see processed before we start making
 * assumptions about which worker a computation would run faster on.
 */
static int calibration_value = 0;

#endif /* USE_OVERLOAD */

/**
 * Return a worker from which a task can be stolen.
 * Selection is done in a round-robin fashion; if the previously
 * selected worker does not own any task, we scan forward and
 * return the first non-empty worker.
 */
static unsigned select_victim_round_robin(unsigned sched_ctx_id)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	unsigned worker = ws->last_pop_worker;
	unsigned nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);
	starpu_pthread_mutex_t *victim_sched_mutex;
	starpu_pthread_cond_t *victim_sched_cond;

	/* If the worker's queue is empty, let's try
	 * the next ones */
	while (1)
	{
		unsigned njobs;

		starpu_worker_get_sched_condition(worker, &victim_sched_mutex, &victim_sched_cond);
		VALGRIND_HG_MUTEX_LOCK_PRE(victim_sched_mutex, 0);
		VALGRIND_HG_MUTEX_LOCK_POST(victim_sched_mutex);
		njobs = ws->queue_array[worker]->njobs;
		VALGRIND_HG_MUTEX_UNLOCK_PRE(victim_sched_mutex);
		VALGRIND_HG_MUTEX_UNLOCK_POST(victim_sched_mutex);
		if (njobs)
			break;

		worker = (worker + 1) % nworkers;
		if (worker == ws->last_pop_worker)
		{
			/* We got back to the first worker,
			 * don't go into an infinite loop */
			break;
		}
	}

	ws->last_pop_worker = (worker + 1) % nworkers;
	return worker;
}
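
/*
 * Worked example of the scan above: with nworkers == 4 and
 * last_pop_worker == 2, workers are visited in the order 2, 3, 0, 1 and the
 * first one whose queue is non-empty wins. If worker 3 holds jobs, it is
 * returned and last_pop_worker becomes 0 for the next call.
 */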

/**
 * Return a worker to which a task can be assigned.
 * Selection is done in a round-robin fashion.
 */
static unsigned select_worker_round_robin(unsigned sched_ctx_id)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	unsigned worker = ws->last_push_worker;
	unsigned nworkers = starpu_sched_ctx_get_nworkers(sched_ctx_id);

	ws->last_push_worker = (ws->last_push_worker + 1) % nworkers;
	return worker;
}

#ifdef USE_OVERLOAD

/**
 * Return a ratio helpful to determine whether a worker is suitable to steal
 * tasks from or to put some tasks in its queue.
 *
 * \return a positive or negative value describing the current state of the worker:
 * a smaller value means a faster worker with a relatively emptier queue, more suitable to put tasks in;
 * a bigger value means a slower worker with a relatively fuller queue, more suitable to steal tasks from.
 */
static float overload_metric(unsigned sched_ctx_id, unsigned id)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	float execution_ratio = 0.0f;
	float current_ratio = 0.0f;

	int nprocessed = _starpu_get_deque_nprocessed(ws->queue_array[id]);
	unsigned njobs = _starpu_get_deque_njobs(ws->queue_array[id]);

	/* Did we get enough information? */
	if (ws->performed_total > 0 && nprocessed > 0)
	{
		/* How fast or slow is the worker compared to the other workers */
		execution_ratio = (float) nprocessed / ws->performed_total;
		/* How full is its queue */
		current_ratio = (float) njobs / nprocessed;
	}
	else
	{
		return 0.0f;
	}

	return (current_ratio - execution_ratio);
}
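
/*
 * Worked example: with performed_total == 100, a worker that has
 * nprocessed == 10 and njobs == 30 gets execution_ratio = 10/100 = 0.1 and
 * current_ratio = 30/10 = 3.0, i.e. a metric of 2.9: a relatively slow
 * worker with a long queue, hence a good victim to steal from.
 */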

/**
 * Return the most suitable worker from which a task can be stolen.
 * The number of previously processed tasks, total and local,
 * and the number of tasks currently awaiting processing on each
 * worker are taken into account to select the most suitable
 * worker to steal a task from.
 */
static unsigned select_victim_overload(unsigned sched_ctx_id)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	unsigned worker;
	float worker_ratio;
	unsigned best_worker = 0;
	/* start below any reachable metric value, which may be negative */
	float best_ratio = -FLT_MAX;

	/* Don't try to play smart until we get
	 * enough information. */
	if (ws->performed_total < (unsigned) calibration_value)
		return select_victim_round_robin(sched_ctx_id);

	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
	struct starpu_sched_ctx_iterator it;
	if (workers->init_iterator)
		workers->init_iterator(workers, &it);

	while (workers->has_next(workers, &it))
	{
		worker = workers->get_next(workers, &it);
		worker_ratio = overload_metric(sched_ctx_id, worker);

		if (worker_ratio > best_ratio)
		{
			best_worker = worker;
			best_ratio = worker_ratio;
		}
	}

	return best_worker;
}

/**
 * Return the most suitable worker to which a task can be assigned.
 * The number of previously processed tasks, total and local,
 * and the number of tasks currently awaiting processing on each
 * worker are taken into account to select the most suitable
 * worker to add a task to.
 */
static unsigned select_worker_overload(unsigned sched_ctx_id)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	unsigned worker;
	float worker_ratio;
	unsigned best_worker = 0;
	float best_ratio = FLT_MAX;

	/* Don't try to play smart until we get
	 * enough information. */
	if (ws->performed_total < (unsigned) calibration_value)
		return select_worker_round_robin(sched_ctx_id);

	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
	struct starpu_sched_ctx_iterator it;
	if (workers->init_iterator)
		workers->init_iterator(workers, &it);

	while (workers->has_next(workers, &it))
	{
		worker = workers->get_next(workers, &it);
		worker_ratio = overload_metric(sched_ctx_id, worker);

		if (worker_ratio < best_ratio)
		{
			best_worker = worker;
			best_ratio = worker_ratio;
		}
	}

	return best_worker;
}

#endif /* USE_OVERLOAD */

/**
 * Return a worker from which a task can be stolen.
 * This is a trivial wrapper that calls the right selection
 * function depending on whether USE_OVERLOAD is defined.
 */
static inline unsigned select_victim(unsigned sched_ctx_id)
{
#ifdef USE_OVERLOAD
	return select_victim_overload(sched_ctx_id);
#else
	return select_victim_round_robin(sched_ctx_id);
#endif /* USE_OVERLOAD */
}

/**
 * Return a worker to which a task can be assigned.
 * This is a trivial wrapper that calls the right selection
 * function depending on whether USE_OVERLOAD is defined.
 */
static inline unsigned select_worker(unsigned sched_ctx_id)
{
#ifdef USE_OVERLOAD
	return select_worker_overload(sched_ctx_id);
#else
	return select_worker_round_robin(sched_ctx_id);
#endif /* USE_OVERLOAD */
}
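
/*
 * Note: USE_OVERLOAD is not defined anywhere in this file, so unless the
 * build system supplies it, both wrappers above fall back to the
 * round-robin strategies.
 */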

#ifdef STARPU_DEVEL
#warning TODO rewrite ... this will not scale at all now
#endif

static struct starpu_task *ws_pop_task(unsigned sched_ctx_id)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	struct starpu_task *task;
	struct _starpu_deque_jobq *q;

	int workerid = starpu_worker_get_id();
	STARPU_ASSERT(workerid != -1);

	q = ws->queue_array[workerid];

	task = _starpu_deque_pop_task(q, workerid);
	if (task)
	{
		/* there was a local task */
		ws->performed_total++;
		q->nprocessed++;
		q->njobs--;
		return task;
	}

	starpu_pthread_mutex_t *worker_sched_mutex;
	starpu_pthread_cond_t *worker_sched_cond;
	starpu_worker_get_sched_condition(workerid, &worker_sched_mutex, &worker_sched_cond);

	/* Note: release this mutex before taking the victim mutex, to avoid deadlock */
	STARPU_PTHREAD_MUTEX_UNLOCK(worker_sched_mutex);

	/* we need to steal someone's job */
	unsigned victim = select_victim(sched_ctx_id);

	starpu_pthread_mutex_t *victim_sched_mutex;
	starpu_pthread_cond_t *victim_sched_cond;
	starpu_worker_get_sched_condition(victim, &victim_sched_mutex, &victim_sched_cond);
	STARPU_PTHREAD_MUTEX_LOCK(victim_sched_mutex);

	struct _starpu_deque_jobq *victimq = ws->queue_array[victim];

	task = _starpu_deque_pop_task(victimq, workerid);
	if (task)
	{
		_STARPU_TRACE_WORK_STEALING(q, workerid);
		ws->performed_total++;

		/* Beware: we have to increase the number of processed tasks of
		 * the stealer, not the victim! */
		q->nprocessed++;
		victimq->njobs--;
	}

	STARPU_PTHREAD_MUTEX_UNLOCK(victim_sched_mutex);
	STARPU_PTHREAD_MUTEX_LOCK(worker_sched_mutex);

	if (!task)
	{
		/* tasks may have arrived in our own queue while it was unlocked */
		task = _starpu_deque_pop_task(q, workerid);
		if (task)
		{
			/* there was a local task */
			ws->performed_total++;
			q->nprocessed++;
			q->njobs--;
		}
	}

	return task;
}
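
/*
 * A note on the locking protocol above, assuming the usual StarPU convention
 * that pop_task is entered with the calling worker's sched mutex held (which
 * is why the function can unlock it without locking it first): the mutex is
 * released before taking the victim's so that two workers stealing from each
 * other cannot deadlock, and re-acquired afterwards, which is why the local
 * queue is re-checked before returning.
 */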

int ws_push_task(struct starpu_task *task)
{
	unsigned sched_ctx_id = task->sched_ctx;
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	struct _starpu_deque_jobq *deque_queue;
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	int workerid = starpu_worker_get_id();

	unsigned worker = 0;
	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
	struct starpu_sched_ctx_iterator it;
	if (workers->init_iterator)
		workers->init_iterator(workers, &it);
	while (workers->has_next(workers, &it))
	{
		worker = workers->get_next(workers, &it);
		starpu_pthread_mutex_t *sched_mutex;
		starpu_pthread_cond_t *sched_cond;
		starpu_worker_get_sched_condition(worker, &sched_mutex, &sched_cond);
		STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
	}

	/* If the current thread is not a worker but
	 * the main thread (-1), we find the best worker to
	 * put the task on its queue */
	if (workerid == -1)
		workerid = select_worker(sched_ctx_id);

	deque_queue = ws->queue_array[workerid];

#ifdef HAVE_AYUDAME_H
	if (AYU_event)
	{
		int id = workerid;
		AYU_event(AYU_ADDTASKTOQUEUE, j->job_id, &id);
	}
#endif
	_starpu_job_list_push_back(deque_queue->jobq, j);
	deque_queue->njobs++;
	starpu_push_task_end(task);

	/* The iterator was exhausted by the locking loop above: reset it,
	 * otherwise the unlock loop below would never run and every sched
	 * mutex would stay locked. */
	if (workers->init_iterator)
		workers->init_iterator(workers, &it);
	while (workers->has_next(workers, &it))
	{
		worker = workers->get_next(workers, &it);
		starpu_pthread_mutex_t *sched_mutex;
		starpu_pthread_cond_t *sched_cond;
		starpu_worker_get_sched_condition(worker, &sched_mutex, &sched_cond);
		STARPU_PTHREAD_COND_SIGNAL(sched_cond);
		STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
	}

	return 0;
}
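
/*
 * Note that the push path above takes every worker's sched mutex and wakes
 * all of them up on each push: this O(nworkers) behaviour is what the
 * STARPU_DEVEL #warning before ws_pop_task() complains about.
 */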

static void ws_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	unsigned i;
	int workerid;

	for (i = 0; i < nworkers; i++)
	{
		workerid = workerids[i];
		starpu_sched_ctx_worker_shares_tasks_lists(workerid, sched_ctx_id);
		ws->queue_array[workerid] = _starpu_create_deque();

		/* The first ws_pop_task() call will increment nprocessed even
		 * though no task has actually been performed yet, so
		 * initialize it to -1 to compensate. */
		ws->queue_array[workerid]->nprocessed = -1;
		ws->queue_array[workerid]->njobs = 0;
	}
}

static void ws_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	unsigned i;
	int workerid;

	for (i = 0; i < nworkers; i++)
	{
		workerid = workerids[i];
		_starpu_destroy_deque(ws->queue_array[workerid]);
	}
}

static void initialize_ws_policy(unsigned sched_ctx_id)
{
	starpu_sched_ctx_create_worker_collection(sched_ctx_id, STARPU_WORKER_LIST);

	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)malloc(sizeof(struct _starpu_work_stealing_data));
	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)ws);

	ws->last_pop_worker = 0;
	ws->last_push_worker = 0;

	/* The first ws_pop_task() call will increment performed_total even
	 * though no task has actually been performed yet, so initialize it
	 * to -1 to compensate. */
	ws->performed_total = -1;

	ws->queue_array = (struct _starpu_deque_jobq**)malloc(STARPU_NMAXWORKERS*sizeof(struct _starpu_deque_jobq*));
}
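
/*
 * queue_array is sized for STARPU_NMAXWORKERS rather than for the current
 * context so that ws_add_workers()/ws_remove_workers() can index it directly
 * by global worker id.
 */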

static void deinit_ws_policy(unsigned sched_ctx_id)
{
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);

	free(ws->queue_array);
	free(ws);
	starpu_sched_ctx_delete_worker_collection(sched_ctx_id);
}

struct starpu_sched_policy _starpu_sched_ws_policy =
{
	.init_sched = initialize_ws_policy,
	.deinit_sched = deinit_ws_policy,
	.add_workers = ws_add_workers,
	.remove_workers = ws_remove_workers,
	.push_task = ws_push_task,
	.pop_task = ws_pop_task,
	.pre_exec_hook = NULL,
	.post_exec_hook = NULL,
	.pop_every_task = NULL,
	.policy_name = "ws",
	.policy_description = "work stealing"
};
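
/*
 * Usage sketch (a minimal example, assuming the standard StarPU public
 * initialization API): the policy registers under the name "ws", so an
 * application can select it with the STARPU_SCHED=ws environment variable,
 * or programmatically:
 *
 *	struct starpu_conf conf;
 *	starpu_conf_init(&conf);
 *	conf.sched_policy_name = "ws";
 *	starpu_init(&conf);
 */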