heft.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2011  Université de Bordeaux 1
 * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
 * Copyright (C) 2011  Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/* Distributed queues using performance modeling to assign tasks */

#include <float.h>

#include <core/workers.h>
#include <core/perfmodel/perfmodel.h>
#include <starpu_parameters.h>
#include <starpu_task_bundle.h>
#include <starpu_top.h>

static unsigned nworkers;

static pthread_cond_t sched_cond[STARPU_NMAXWORKERS];
static pthread_mutex_t sched_mutex[STARPU_NMAXWORKERS];

static double alpha = STARPU_DEFAULT_ALPHA;
static double beta = STARPU_DEFAULT_BETA;
static double _gamma = STARPU_DEFAULT_GAMMA;
static double idle_power = 0.0;
static double exp_start[STARPU_NMAXWORKERS]; /* expected start date of the first queued task */
static double exp_end[STARPU_NMAXWORKERS];   /* expected end date of the last queued task */
static double exp_len[STARPU_NMAXWORKERS];   /* expected duration of the whole set of queued tasks */
static double ntasks[STARPU_NMAXWORKERS];

const float alpha_minimum = 0;
const float alpha_maximum = 10.0;
const float beta_minimum = 0;
const float beta_maximum = 10.0;
const float gamma_minimum = 0;
const float gamma_maximum = 10000.0;
const float idle_power_minimum = 0;
const float idle_power_maximum = 10000.0;
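
/* The fitness metric computed in _heft_push_task() below weighs three terms:
 * alpha scales the expected termination penalty, beta the expected data
 * transfer penalty, and _gamma the expected energy consumption; idle_power
 * additionally accounts for the idle consumption of the other processing
 * units when a placement lengthens the overall makespan. The bounds above
 * are only used to register these parameters with StarPU-Top. */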
void param_modified(struct starputop_param_t *d)
{
	/* Just report the parameter modification */
	fprintf(stderr, "%s has been modified: alpha=%f|beta=%f|gamma=%f|idle_power=%f!\n",
		d->name, alpha, beta, _gamma, idle_power);
}
static void heft_init(struct starpu_machine_topology_s *topology,
		      __attribute__ ((unused)) struct starpu_sched_policy_s *_policy)
{
	nworkers = topology->nworkers;

	const char *strval_alpha = getenv("STARPU_SCHED_ALPHA");
	if (strval_alpha)
		alpha = atof(strval_alpha);

	const char *strval_beta = getenv("STARPU_SCHED_BETA");
	if (strval_beta)
		beta = atof(strval_beta);

	const char *strval_gamma = getenv("STARPU_SCHED_GAMMA");
	if (strval_gamma)
		_gamma = atof(strval_gamma);

	const char *strval_idle_power = getenv("STARPU_IDLE_POWER");
	if (strval_idle_power)
		idle_power = atof(strval_idle_power);

	starputop_register_parameter_float("HEFT_ALPHA", &alpha, alpha_minimum, alpha_maximum, param_modified);
	starputop_register_parameter_float("HEFT_BETA", &beta, beta_minimum, beta_maximum, param_modified);
	starputop_register_parameter_float("HEFT_GAMMA", &_gamma, gamma_minimum, gamma_maximum, param_modified);
	starputop_register_parameter_float("HEFT_IDLE_POWER", &idle_power, idle_power_minimum, idle_power_maximum, param_modified);

	unsigned workerid;
	for (workerid = 0; workerid < nworkers; workerid++)
	{
		exp_start[workerid] = starpu_timing_now();
		exp_len[workerid] = 0.0;
		exp_end[workerid] = exp_start[workerid];
		ntasks[workerid] = 0;

		PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
		PTHREAD_COND_INIT(&sched_cond[workerid], NULL);

		starpu_worker_set_sched_condition(workerid, &sched_cond[workerid], &sched_mutex[workerid]);
	}
}
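
/* The weights can thus be tuned without recompiling, e.g. (shell syntax,
 * values and application name purely illustrative):
 *
 *	STARPU_SCHED_ALPHA=2.0 STARPU_SCHED_BETA=1.5 STARPU_SCHED_GAMMA=100 ./my_app
 */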
static void heft_post_exec_hook(struct starpu_task *task)
{
	int workerid = starpu_worker_get_id();
	double model = task->predicted;
	double transfer_model = task->predicted_transfer;

	/* Once we have executed the task, we can update the predicted amount
	 * of work. */
	PTHREAD_MUTEX_LOCK(&sched_mutex[workerid]);
	exp_len[workerid] -= model + transfer_model;
	exp_start[workerid] = starpu_timing_now();
	exp_end[workerid] = exp_start[workerid] + exp_len[workerid];
	ntasks[workerid]--;
	PTHREAD_MUTEX_UNLOCK(&sched_mutex[workerid]);
}
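
/* For instance, if a worker's queue holds two tasks predicted to take 2ms
 * and 3ms (transfers included), exp_len is 5ms and exp_end is exp_start +
 * 5ms; once the 2ms task completes, the hook above moves exp_start to the
 * current date and shrinks exp_len back to 3ms. (Figures purely
 * illustrative.) */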
static void heft_push_task_notify(struct starpu_task *task, int workerid)
{
	/* Compute the expected penalty */
	enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(workerid);
	unsigned memory_node = starpu_worker_get_memory_node(workerid);

	double predicted = starpu_task_expected_length(task, perf_arch,
			_starpu_get_job_associated_to_task(task)->nimpl);
	double predicted_transfer = starpu_task_expected_data_transfer_time(memory_node, task);

	/* Update the predictions */
	PTHREAD_MUTEX_LOCK(&sched_mutex[workerid]);

	/* Sometimes workers didn't take the tasks as early as we expected */
	exp_start[workerid] = STARPU_MAX(exp_start[workerid], starpu_timing_now());
	exp_end[workerid] = exp_start[workerid] + exp_len[workerid];

	/* If there is no prediction available, we consider that the task has a null length */
	if (predicted != -1.0)
	{
		task->predicted = predicted;
		exp_end[workerid] += predicted;
		exp_len[workerid] += predicted;
	}

	/* If there is no prediction available, we consider that the transfer takes no time */
	if (predicted_transfer != -1.0)
	{
		if (starpu_timing_now() + predicted_transfer < exp_end[workerid]) {
			/* We may hope that the transfer will be finished by
			 * the start of the task. */
			predicted_transfer = 0;
		} else {
			/* The transfer will not be finished by then, take the
			 * remainder into account */
			predicted_transfer = (starpu_timing_now() + predicted_transfer) - exp_end[workerid];
		}
		task->predicted_transfer = predicted_transfer;
		exp_end[workerid] += predicted_transfer;
		exp_len[workerid] += predicted_transfer;
	}

	ntasks[workerid]++;

	PTHREAD_MUTEX_UNLOCK(&sched_mutex[workerid]);
}
static int push_task_on_best_worker(struct starpu_task *task, int best_workerid, double predicted, double predicted_transfer, int prio)
{
	/* make sure someone could execute that task! */
	STARPU_ASSERT(best_workerid != -1);

	PTHREAD_MUTEX_LOCK(&sched_mutex[best_workerid]);

	/* Sometimes workers didn't take the tasks as early as we expected */
	exp_start[best_workerid] = STARPU_MAX(exp_start[best_workerid], starpu_timing_now());
	exp_end[best_workerid] = exp_start[best_workerid] + exp_len[best_workerid];

	exp_end[best_workerid] += predicted;
	exp_len[best_workerid] += predicted;

	if (starpu_timing_now() + predicted_transfer < exp_end[best_workerid]) {
		/* We may hope that the transfer will be finished by
		 * the start of the task. */
		predicted_transfer = 0;
	} else {
		/* The transfer will not be finished by then, take the
		 * remainder into account */
		predicted_transfer = (starpu_timing_now() + predicted_transfer) - exp_end[best_workerid];
	}
	exp_end[best_workerid] += predicted_transfer;
	exp_len[best_workerid] += predicted_transfer;

	ntasks[best_workerid]++;
	PTHREAD_MUTEX_UNLOCK(&sched_mutex[best_workerid]);

	task->predicted = predicted;
	task->predicted_transfer = predicted_transfer;

	if (starpu_top_status_get())
		starputop_task_prevision(task, best_workerid,
			(unsigned long long)(exp_end[best_workerid]-predicted)/1000,
			(unsigned long long)exp_end[best_workerid]/1000);

	if (starpu_get_prefetch_flag())
	{
		unsigned memory_node = starpu_worker_get_memory_node(best_workerid);
		starpu_prefetch_task_input_on_node(task, memory_node);
	}

	return starpu_push_local_task(best_workerid, task, prio);
}
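
/* For each worker (and each implementation), estimate when the worker's
 * queue would complete if the task (or its whole bundle) were appended to
 * it, and record the corresponding data transfer penalty and energy
 * consumption. On return, *forced_best is the worker to force when some
 * performance model is still being calibrated (-1 otherwise),
 * *best_exp_endp the earliest expected end over all candidate workers, and
 * *max_exp_endp the latest expected end of the queues as they currently
 * stand. */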
static void compute_all_performance_predictions(struct starpu_task *task,
					double *local_task_length, double *exp_end,
					double *max_exp_endp, double *best_exp_endp,
					double *local_data_penalty,
					double *local_power, int *forced_best,
					struct starpu_task_bundle *bundle,
					unsigned int *nimpls)
{
	int calibrating = 0;
	double max_exp_end = DBL_MIN;
	double best_exp_end = DBL_MAX;
	int ntasks_best = -1;
	double ntasks_best_end = 0.0;

	/* A priori, we know all estimations */
	int unknown = 0;

	unsigned worker;
	unsigned nimpl;
	for (worker = 0; worker < nworkers; worker++) {
		nimpls[worker] = 0;
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++) {
			/* Sometimes workers didn't take the tasks as early as we expected */
			exp_start[worker] = STARPU_MAX(exp_start[worker], starpu_timing_now());
			exp_end[worker] = exp_start[worker] + exp_len[worker];
			if (exp_end[worker] > max_exp_end)
				max_exp_end = exp_end[worker];

			if (!starpu_worker_may_execute_task(worker, task, nimpl))
			{
				/* no one on that queue may execute this task */
				continue;
			}

			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
			unsigned memory_node = starpu_worker_get_memory_node(worker);

			if (bundle)
			{
				local_task_length[worker] = starpu_task_bundle_expected_length(bundle, perf_arch, nimpl);
				local_data_penalty[worker] = starpu_task_bundle_expected_data_transfer_time(bundle, memory_node);
				local_power[worker] = starpu_task_bundle_expected_power(bundle, perf_arch, nimpl);
				//_STARPU_DEBUG("Scheduler heft bundle: task length (%lf) local power (%lf) worker (%u) kernel (%u)\n", local_task_length[worker], local_power[worker], worker, nimpl);
			}
			else {
				local_task_length[worker] = starpu_task_expected_length(task, perf_arch, nimpl);
				local_data_penalty[worker] = starpu_task_expected_data_transfer_time(memory_node, task);
				local_power[worker] = starpu_task_expected_power(task, perf_arch, nimpl);
				//_STARPU_DEBUG("Scheduler heft: task length (%lf) local power (%lf) worker (%u) kernel (%u)\n", local_task_length[worker], local_power[worker], worker, nimpl);
			}

			double ntasks_end = ntasks[worker] / starpu_worker_get_relative_speedup(perf_arch);

			if (ntasks_best == -1
					|| (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take the less loaded worker */
					|| (!calibrating && local_task_length[worker] == -1.0) /* Not calibrating but this worker is being calibrated */
					|| (calibrating && local_task_length[worker] == -1.0 && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with the other non-calibrated ones */
				) {
				ntasks_best_end = ntasks_end;
				ntasks_best = worker;
			}

			if (local_task_length[worker] == -1.0)
				/* we are calibrating, we want to speed-up calibration time
				 * so we privilege non-calibrated tasks (but still
				 * greedily distribute them to avoid dumb schedules) */
				calibrating = 1;

			if (local_task_length[worker] <= 0.0)
				/* there is no prediction available for that task
				 * with that arch yet, so switch to a greedy strategy */
				unknown = 1;

			if (unknown)
				continue;

			exp_end[worker] = exp_start[worker] + exp_len[worker] + local_task_length[worker];

			if (exp_end[worker] < best_exp_end)
			{
				/* a better solution was found */
				best_exp_end = exp_end[worker];
				nimpls[worker] = nimpl;
			}

			if (local_power[worker] == -1.0)
				local_power[worker] = 0.;
		}
	}

	*forced_best = unknown ? ntasks_best : -1;

	*best_exp_endp = best_exp_end;
	*max_exp_endp = max_exp_end;
}
static int _heft_push_task(struct starpu_task *task, unsigned prio)
{
	unsigned worker;
	int best = -1;

	/* this flag is set if the corresponding worker is selected because
	   there is no performance prediction available yet */
	int forced_best;

	double local_task_length[nworkers];
	double local_data_penalty[nworkers];
	double local_power[nworkers];
	double exp_end[nworkers];
	double max_exp_end = 0.0;
	unsigned int nimpls[nworkers];

	double best_exp_end;

	/*
	 * Compute the expected end of the task on the various workers,
	 * and detect if there is some calibration that needs to be done.
	 */
	struct starpu_task_bundle *bundle = task->bundle;
	compute_all_performance_predictions(task, local_task_length, exp_end,
					&max_exp_end, &best_exp_end,
					local_data_penalty,
					local_power, &forced_best, bundle,
					nimpls);

	/* If there is no prediction available for that task with that arch we
	 * want to speed-up calibration time so we force this measurement */
	if (forced_best != -1)
		return push_task_on_best_worker(task, forced_best, 0.0, 0.0, prio);

	/*
	 * Determine which worker optimizes the fitness metric, which is a
	 * trade-off between load-balancing, data locality, and energy
	 * consumption.
	 */
	double fitness[nworkers];
	double best_fitness = -1;
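	/* fitness[w] = alpha * (expected end on w - earliest expected end)
	 *            + beta  * (expected data transfer penalty on w)
	 *            + gamma * (expected energy consumption on w)
	 * so a lower fitness is better; the idle_power term below additionally
	 * penalizes placements that lengthen the overall expected makespan. */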
	for (worker = 0; worker < nworkers; worker++)
	{
		if (!starpu_worker_may_execute_task(worker, task, 0))
		{
			/* no one on that queue may execute this task */
			continue;
		}

		fitness[worker] = alpha*(exp_end[worker] - best_exp_end)
				+ beta*(local_data_penalty[worker])
				+ _gamma*(local_power[worker]);

		if (exp_end[worker] > max_exp_end) {
			/* This placement will make the computation
			 * longer, take into account the idle
			 * consumption of other cpus */
			fitness[worker] += _gamma * idle_power * (exp_end[worker] - max_exp_end) / 1000000.0;
		}

		if (best == -1 || fitness[worker] < best_fitness)
		{
			/* we found a better solution */
			best_fitness = fitness[worker];
			best = worker;
		}
	}

	/* By now, we must have found a solution */
	STARPU_ASSERT(best != -1);

	/* we should now have the best worker in variable "best" */
	double model_best, transfer_model_best;

	if (bundle)
	{
		/* If we have a task bundle, we have computed the expected
		 * length for the entire bundle, but not for the task alone. */
		enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(best);
		unsigned memory_node = starpu_worker_get_memory_node(best);
		model_best = starpu_task_expected_length(task, perf_arch,
				_starpu_get_job_associated_to_task(task)->nimpl);
		transfer_model_best = starpu_task_expected_data_transfer_time(memory_node, task);

		/* Remove the task from the bundle since we have made a
		 * decision for it, so that other tasks do not consider it
		 * anymore. */
		PTHREAD_MUTEX_LOCK(&bundle->mutex);
		int ret = starpu_task_bundle_remove(bundle, task);

		/* Perhaps the bundle was destroyed when removing the last
		 * entry */
		if (ret != 1)
			PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
	}
	else {
		model_best = local_task_length[best];
		transfer_model_best = local_data_penalty[best];
	}

	_starpu_get_job_associated_to_task(task)->nimpl = nimpls[best];

	return push_task_on_best_worker(task, best, model_best, transfer_model_best, prio);
}
static int heft_push_task(struct starpu_task *task)
{
	if (task->priority > 0)
		return _heft_push_task(task, 1);

	return _heft_push_task(task, 0);
}

static void heft_deinit(__attribute__ ((unused)) struct starpu_machine_topology_s *topology,
			__attribute__ ((unused)) struct starpu_sched_policy_s *_policy)
{
	unsigned workerid;
	for (workerid = 0; workerid < nworkers; workerid++)
	{
		PTHREAD_MUTEX_DESTROY(&sched_mutex[workerid]);
		PTHREAD_COND_DESTROY(&sched_cond[workerid]);
	}
}
struct starpu_sched_policy_s heft_policy = {
	.init_sched = heft_init,
	.deinit_sched = heft_deinit,
	.push_task = heft_push_task,
	.push_task_notify = heft_push_task_notify,
	.pop_task = NULL,
	.pop_every_task = NULL,
	.post_exec_hook = heft_post_exec_hook,
	.policy_name = "heft",
	.policy_description = "Heterogeneous Earliest Finish Time"
};
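
/*
 * Usage sketch (not part of this file; names assume the standard StarPU
 * public API of the same era): an application selects this policy by name,
 * either via the environment,
 *
 *	STARPU_SCHED=heft ./my_app
 *
 * or programmatically before initialization:
 *
 *	struct starpu_conf conf;
 *	starpu_conf_init(&conf);
 *	conf.sched_policy_name = "heft";
 *	starpu_init(&conf);
 */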