heft.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2011  Université de Bordeaux 1
 * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
 * Copyright (C) 2011  Télécom-SudParis
 * Copyright (C) 2011  INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/* Distributed queues using performance modeling to assign tasks */

#include <float.h>
#include <core/workers.h>
#include <core/sched_ctx.h>
#include <core/perfmodel/perfmodel.h>
#include <starpu_parameters.h>
#include <starpu_task_bundle.h>
#include <starpu_top.h>
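
/* Per-context weights of the fitness function used in _heft_push_task()
 * below: alpha scales the expected termination time, beta the data
 * transfer penalty, gamma the energy consumption, and idle_power the
 * power drawn by the processing units left idle. */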
typedef struct {
	double alpha;
	double beta;
	double _gamma;
	double idle_power;
} heft_data;

/* Expected start date of the first task queued on each worker */
static double exp_start[STARPU_NMAXWORKERS];
/* Expected end date of the whole set of queued tasks */
static double exp_end[STARPU_NMAXWORKERS];
/* Expected duration of the set of queued tasks (exp_end - exp_start) */
static double exp_len[STARPU_NMAXWORKERS];
/* Number of tasks currently queued on each worker */
static double ntasks[STARPU_NMAXWORKERS];
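
/* Bounds for the parameters exported as tunables to starpu_top */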
const float alpha_minimum = 0;
const float alpha_maximum = 10.0;
const float beta_minimum = 0;
const float beta_maximum = 10.0;
const float gamma_minimum = 0;
const float gamma_maximum = 10000.0;
const float idle_power_minimum = 0;
const float idle_power_maximum = 10000.0;

void param_modified(struct starputop_param_t *d)
{
	/* just trace the parameter modification */
	fprintf(stderr, "%s has been modified: %f!\n", d->name, d->value);
}

static void heft_init_for_workers(unsigned sched_ctx_id, unsigned nnew_workers)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_structure(sched_ctx_id);
	unsigned nworkers_ctx = sched_ctx->nworkers;

	struct starpu_machine_config_s *config = (struct starpu_machine_config_s *)_starpu_get_machine_config();
	unsigned nworkers = config->topology.nworkers;

	/* nnew_workers == nworkers means the context is given the whole machine */
	unsigned all_workers = nnew_workers == nworkers ? nworkers : nworkers_ctx + nnew_workers;

	unsigned workerid_ctx;
	int workerid;
	for (workerid_ctx = nworkers_ctx; workerid_ctx < all_workers; workerid_ctx++)
	{
		workerid = sched_ctx->workerids[workerid_ctx];
		struct starpu_worker_s *workerarg = _starpu_get_worker_struct(workerid);

		/* init these structures only once per worker */
		if (workerarg->nsched_ctxs == 1)
		{
			exp_start[workerid] = starpu_timing_now();
			exp_len[workerid] = 0.0;
			exp_end[workerid] = exp_start[workerid];
			ntasks[workerid] = 0;
		}

		/* we push the tasks on the local lists of the workers,
		 * therefore the synchronisation mechanisms of the strategy
		 * are the global ones */
		sched_ctx->sched_mutex[workerid_ctx] = workerarg->sched_mutex;
		sched_ctx->sched_cond[workerid_ctx] = workerarg->sched_cond;
	}

	/* take the new number of workers into account at the next push */
	PTHREAD_MUTEX_LOCK(&sched_ctx->changing_ctx_mutex);
	sched_ctx->temp_nworkers = all_workers;
	PTHREAD_MUTEX_UNLOCK(&sched_ctx->changing_ctx_mutex);
}
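
/* Policy entry point for a scheduling context: allocate the heft_data
 * weights, let the STARPU_SCHED_ALPHA/BETA/GAMMA and STARPU_IDLE_POWER
 * environment variables override the defaults, and register each weight
 * as a tunable parameter with starpu_top. */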
static void heft_init(unsigned sched_ctx_id)
{
	heft_data *hd = (heft_data *)malloc(sizeof(heft_data));
	hd->alpha = STARPU_DEFAULT_ALPHA;
	hd->beta = STARPU_DEFAULT_BETA;
	hd->_gamma = STARPU_DEFAULT_GAMMA;
	hd->idle_power = 0.0;

	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_structure(sched_ctx_id);
	unsigned nworkers = sched_ctx->nworkers;
	sched_ctx->policy_data = (void *)hd;

	const char *strval_alpha = getenv("STARPU_SCHED_ALPHA");
	if (strval_alpha)
		hd->alpha = atof(strval_alpha);

	const char *strval_beta = getenv("STARPU_SCHED_BETA");
	if (strval_beta)
		hd->beta = atof(strval_beta);

	const char *strval_gamma = getenv("STARPU_SCHED_GAMMA");
	if (strval_gamma)
		hd->_gamma = atof(strval_gamma);

	const char *strval_idle_power = getenv("STARPU_IDLE_POWER");
	if (strval_idle_power)
		hd->idle_power = atof(strval_idle_power);

	starputop_register_parameter_float("HEFT_ALPHA", &hd->alpha,
					   alpha_minimum, alpha_maximum, param_modified);
	starputop_register_parameter_float("HEFT_BETA", &hd->beta,
					   beta_minimum, beta_maximum, param_modified);
	starputop_register_parameter_float("HEFT_GAMMA", &hd->_gamma,
					   gamma_minimum, gamma_maximum, param_modified);
	starputop_register_parameter_float("HEFT_IDLE_POWER", &hd->idle_power,
					   idle_power_minimum, idle_power_maximum, param_modified);

	unsigned workerid_ctx;
	for (workerid_ctx = 0; workerid_ctx < nworkers; workerid_ctx++)
	{
		int workerid = sched_ctx->workerids[workerid_ctx];
		struct starpu_worker_s *workerarg = _starpu_get_worker_struct(workerid);

		/* init these structures only once per worker */
		if (workerarg->nsched_ctxs == 1)
		{
			exp_start[workerid] = starpu_timing_now();
			exp_len[workerid] = 0.0;
			exp_end[workerid] = exp_start[workerid];
			ntasks[workerid] = 0;
		}

		/* we push the tasks on the local lists of the workers,
		 * therefore the synchronisation mechanisms of the strategy
		 * are the global ones */
		sched_ctx->sched_mutex[workerid_ctx] = workerarg->sched_mutex;
		sched_ctx->sched_cond[workerid_ctx] = workerarg->sched_cond;
	}
}
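
/* Completion hook: subtract the executed task's predicted length from
 * the worker's expected queue length and refresh the expected start and
 * end dates accordingly. */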
static void heft_post_exec_hook(struct starpu_task *task, unsigned sched_ctx_id)
{
	int workerid = starpu_worker_get_id();
	STARPU_ASSERT(workerid >= 0);

	struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);
	double model = task->predicted;

	/* Once we have executed the task, we can update the predicted amount
	 * of work. */
	PTHREAD_MUTEX_LOCK(worker->sched_mutex);
	exp_len[workerid] -= model;
	exp_start[workerid] = starpu_timing_now() + model;
	exp_end[workerid] = exp_start[workerid] + exp_len[workerid];
	ntasks[workerid]--;
	PTHREAD_MUTEX_UNLOCK(worker->sched_mutex);
}
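
/* Notification hook: a task was pushed onto this worker without going
 * through push_task, so update the expected lengths to keep the
 * predictions consistent. */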
static void heft_push_task_notify(struct starpu_task *task, int workerid, unsigned sched_ctx_id)
{
	struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);

	/* Compute the expected penalty */
	enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(workerid);
	double predicted = starpu_task_expected_length(task, perf_arch,
						       _starpu_get_job_associated_to_task(task)->nimpl);

	/* Update the predictions */
	PTHREAD_MUTEX_LOCK(worker->sched_mutex);

	/* Sometimes workers didn't take the tasks as early as we expected */
	exp_start[workerid] = STARPU_MAX(exp_start[workerid], starpu_timing_now());
	exp_end[workerid] = exp_start[workerid] + exp_len[workerid];

	/* If there is no prediction available, we consider the task has a null length */
	if (predicted != -1.0)
	{
		task->predicted = predicted;
		exp_end[workerid] += predicted;
		exp_len[workerid] += predicted;
	}

	ntasks[workerid]++;

	PTHREAD_MUTEX_UNLOCK(worker->sched_mutex);
}
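
/* Actually enqueue the task on the selected worker: account for its
 * predicted length, report the prevision to starpu_top, possibly
 * prefetch its input data, and push it to the worker's local queue. */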
static int push_task_on_best_worker(struct starpu_task *task, int best_workerid, double predicted, int prio)
{
	/* make sure someone could execute that task! */
	STARPU_ASSERT(best_workerid != -1);

	struct starpu_worker_s *best_worker = _starpu_get_worker_struct(best_workerid);

	PTHREAD_MUTEX_LOCK(best_worker->sched_mutex);
	exp_end[best_workerid] += predicted;
	exp_len[best_workerid] += predicted;
	ntasks[best_workerid]++;
	PTHREAD_MUTEX_UNLOCK(best_worker->sched_mutex);

	task->predicted = predicted;

	if (starpu_top_status_get())
		starputop_task_prevision(task, best_workerid,
					 (unsigned long long)(exp_end[best_workerid] - predicted) / 1000,
					 (unsigned long long)exp_end[best_workerid] / 1000);

	if (starpu_get_prefetch_flag())
	{
		unsigned memory_node = starpu_worker_get_memory_node(best_workerid);
		starpu_prefetch_task_input_on_node(task, memory_node);
	}

	return starpu_push_local_task(best_workerid, task, prio);
}
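
/* For each worker of the context and each kernel implementation,
 * estimate when the task (or its whole bundle) would terminate, along
 * with its data transfer penalty and power consumption.  On return,
 * *forced_best is the worker to force while some performance model is
 * still being calibrated, and -1 otherwise. */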
static void compute_all_performance_predictions(struct starpu_task *task,
						double *local_task_length, double *exp_end,
						double *max_exp_endp, double *best_exp_endp,
						double *local_data_penalty,
						double *local_power, int *forced_best,
						struct starpu_task_bundle *bundle,
						struct starpu_sched_ctx *sched_ctx)
{
	int calibrating = 0;
	double max_exp_end = DBL_MIN;
	double best_exp_end = DBL_MAX;
	int ntasks_best = -1;
	double ntasks_best_end = 0.0;

	/* A priori, we know all estimations */
	int unknown = 0;

	unsigned nworkers = sched_ctx->nworkers;
	unsigned nimpl;
	unsigned best_impl = 0;
	unsigned worker, worker_ctx;
	for (worker_ctx = 0; worker_ctx < nworkers; worker_ctx++)
	{
		worker = sched_ctx->workerids[worker_ctx];
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			/* Sometimes workers didn't take the tasks as early as we expected */
			exp_start[worker] = STARPU_MAX(exp_start[worker], starpu_timing_now());
			exp_end[worker_ctx] = exp_start[worker] + exp_len[worker];
			if (exp_end[worker_ctx] > max_exp_end)
				max_exp_end = exp_end[worker_ctx];

			if (!starpu_worker_may_execute_task(worker, task, nimpl))
			{
				/* no one on that queue may execute this task */
				continue;
			}

			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
			unsigned memory_node = starpu_worker_get_memory_node(worker);

			if (bundle)
			{
				local_task_length[worker_ctx] = starpu_task_bundle_expected_length(bundle, perf_arch, nimpl);
				local_data_penalty[worker_ctx] = starpu_task_bundle_expected_data_transfer_time(bundle, memory_node);
				local_power[worker_ctx] = starpu_task_bundle_expected_power(bundle, perf_arch, nimpl);
				//_STARPU_DEBUG("Scheduler heft bundle: task length (%lf) local power (%lf) worker (%u) kernel (%u)\n", local_task_length[worker_ctx], local_power[worker_ctx], worker, nimpl);
			}
			else
			{
				local_task_length[worker_ctx] = starpu_task_expected_length(task, perf_arch, nimpl);
				local_data_penalty[worker_ctx] = starpu_task_expected_data_transfer_time(memory_node, task);
				local_power[worker_ctx] = starpu_task_expected_power(task, perf_arch, nimpl);
				//_STARPU_DEBUG("Scheduler heft: task length (%lf) local power (%lf) worker (%u) kernel (%u)\n", local_task_length[worker_ctx], local_power[worker_ctx], worker, nimpl);
			}

			double ntasks_end = ntasks[worker] / starpu_worker_get_relative_speedup(perf_arch);

			if (ntasks_best == -1
			    || (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take better task */
			    || (!calibrating && local_task_length[worker_ctx] == -1.0) /* Not calibrating but this worker is being calibrated */
			    || (calibrating && local_task_length[worker_ctx] == -1.0 && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
			   )
			{
				ntasks_best_end = ntasks_end;
				ntasks_best = worker;
			}

			if (local_task_length[worker_ctx] == -1.0)
				/* we are calibrating, we want to speed up calibration time
				 * so we privilege non-calibrated tasks (but still
				 * greedily distribute them to avoid dumb schedules) */
				calibrating = 1;

			if (local_task_length[worker_ctx] <= 0.0)
				/* there is no prediction available for that task
				 * with that arch yet, so switch to a greedy strategy */
				unknown = 1;

			if (unknown)
				continue;

			exp_end[worker_ctx] = exp_start[worker] + exp_len[worker] + local_task_length[worker_ctx];

			if (exp_end[worker_ctx] < best_exp_end)
			{
				/* a better solution was found */
				best_exp_end = exp_end[worker_ctx];
				best_impl = nimpl;
			}

			if (local_power[worker_ctx] == -1.0)
				local_power[worker_ctx] = 0.;
		}
	}

	*forced_best = unknown ? ntasks_best : -1;
	*best_exp_endp = best_exp_end;
	*max_exp_endp = max_exp_end;

	/* save the best implementation */
	//_STARPU_DEBUG("Scheduler heft: kernel (%u)\n", best_impl);
	_starpu_get_job_associated_to_task(task)->nimpl = best_impl;
}
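
/* Schedule a task: compute the performance predictions over the whole
 * context, force the scheduling decision when calibration requires it,
 * and otherwise pick the worker minimizing the fitness metric. */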
static int _heft_push_task(struct starpu_task *task, unsigned prio, unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_structure(sched_ctx_id);
	heft_data *hd = (heft_data *)sched_ctx->policy_data;
	unsigned worker, worker_ctx;
	int best = -1, best_id_ctx = -1;

	/* this flag is set if the corresponding worker is selected because
	 * there is no performance prediction available yet */
	int forced_best;

	unsigned nworkers_ctx = sched_ctx->nworkers;
	double local_task_length[nworkers_ctx];
	double local_data_penalty[nworkers_ctx];
	double local_power[nworkers_ctx];
	double exp_end[nworkers_ctx];
	double max_exp_end = 0.0;
	double best_exp_end;

	/*
	 * Compute the expected end of the task on the various workers,
	 * and detect if there is some calibration that needs to be done.
	 */
	struct starpu_task_bundle *bundle = task->bundle;
	compute_all_performance_predictions(task, local_task_length, exp_end,
					    &max_exp_end, &best_exp_end,
					    local_data_penalty,
					    local_power, &forced_best, bundle, sched_ctx);

	/* If there is no prediction available for that task with that arch we
	 * want to speed up calibration time, so we force this measurement */
	if (forced_best != -1)
	{
		_starpu_increment_nsubmitted_tasks_of_worker(forced_best);
		return push_task_on_best_worker(task, forced_best, 0.0, prio);
	}

	/*
	 * Determine which worker optimizes the fitness metric, a trade-off
	 * between load balancing, data locality, and energy consumption.
	 */
	double fitness[nworkers_ctx];
	double best_fitness = -1;

	for (worker_ctx = 0; worker_ctx < nworkers_ctx; worker_ctx++)
	{
		worker = sched_ctx->workerids[worker_ctx];
		if (!starpu_worker_may_execute_task(worker, task, 0))
		{
			/* no one on that queue may execute this task */
			continue;
		}

		fitness[worker_ctx] = hd->alpha * (exp_end[worker_ctx] - best_exp_end)
				    + hd->beta * (local_data_penalty[worker_ctx])
				    + hd->_gamma * (local_power[worker_ctx]);

		if (exp_end[worker_ctx] > max_exp_end)
			/* This placement will make the computation
			 * longer, take into account the idle
			 * consumption of the other cpus */
			fitness[worker_ctx] += hd->_gamma * hd->idle_power * (exp_end[worker_ctx] - max_exp_end) / 1000000.0;

		if (best == -1 || fitness[worker_ctx] < best_fitness)
		{
			/* we found a better solution */
			best_fitness = fitness[worker_ctx];
			best = worker;
			best_id_ctx = worker_ctx;
		}
	}

	/* By now, we must have found a solution */
	STARPU_ASSERT(best != -1);

	/* we should now have the best worker in variable "best" */
	double model_best;

	if (bundle)
	{
		/* If we have a task bundle, we have computed the expected
		 * length for the entire bundle, but not for the task alone. */
		enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(best);
		model_best = starpu_task_expected_length(task, perf_arch,
							 _starpu_get_job_associated_to_task(task)->nimpl);

		/* Remove the task from the bundle since we have made a
		 * decision for it, and other tasks should not consider it
		 * anymore. */
		PTHREAD_MUTEX_LOCK(&bundle->mutex);
		int ret = starpu_task_bundle_remove(bundle, task);

		/* Perhaps the bundle was destroyed when removing the last
		 * entry */
		if (ret != 1)
			PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
	}
	else
	{
		model_best = local_task_length[best_id_ctx];
	}

	_starpu_increment_nsubmitted_tasks_of_worker(best);
	return push_task_on_best_worker(task, best, model_best, prio);
}
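
/* Entry point for task pushes: tasks with a positive priority are
 * forwarded with the prio flag set, which starpu_push_local_task()
 * uses to favour them in the worker's local queue. */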
static int heft_push_task(struct starpu_task *task, unsigned sched_ctx_id)
{
	if (task->priority > 0)
		return _heft_push_task(task, 1, sched_ctx_id);

	return _heft_push_task(task, 0, sched_ctx_id);
}

static void heft_deinit(unsigned sched_ctx_id)
{
	struct starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_structure(sched_ctx_id);
	heft_data *ht = (heft_data *)sched_ctx->policy_data;
	free(ht);
}

struct starpu_sched_policy_s heft_policy = {
	.init_sched = heft_init,
	.deinit_sched = heft_deinit,
	.push_task = heft_push_task,
	.push_task_notify = heft_push_task_notify,
	.pop_task = NULL,
	.pop_every_task = NULL,
	.post_exec_hook = heft_post_exec_hook,
	.policy_name = "heft",
	.policy_description = "Heterogeneous Earliest Finish Task",
	.init_sched_for_workers = heft_init_for_workers
};
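
/* Usage note: a minimal sketch of how this policy is typically selected,
 * assuming the standard starpu_conf / STARPU_SCHED mechanisms of StarPU
 * (not defined in this file):
 *
 *	struct starpu_conf conf;
 *	starpu_conf_init(&conf);
 *	conf.sched_policy_name = "heft";
 *	starpu_init(&conf);
 *
 * Alternatively, set STARPU_SCHED=heft in the environment. */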