/* teft_lp_policy.c */

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2012 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu_config.h>
#include "sc_hypervisor_lp.h"
#include "sc_hypervisor_policy.h"
#include <math.h>
#include <sys/time.h>
#include <errno.h> /* for EBUSY */

static struct sc_hypervisor_policy_task_pool *task_pools = NULL;
static starpu_pthread_mutex_t mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
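
/* Per-invocation data handed to the dichotomy callback: the number of task
 * kinds, the output matrix of tasks assigned to each worker, the contexts and
 * workers under consideration, a snapshot of the task pool, and a flag telling
 * whether the contexts are being sized for the first time. */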
struct teft_lp_data
{
        int nt;
        double **tasks;
        unsigned *in_sched_ctxs;
        int *workers;
        struct sc_hypervisor_policy_task_pool *tmp_task_pools;
        unsigned size_ctxs;
};
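
/* Callback executed by sc_hypervisor_lp_execute_dichotomy() for a candidate
 * tmax: build the per-task execution time table, solve the linear program
 * (when GLPK is available) and, if a solution exists, copy the resulting
 * worker-to-context and task-to-worker distributions into the output buffers.
 * Returns 0.0 when no solution was found. */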
static double _compute_workers_distrib(int ns, int nw, double final_w_in_s[ns][nw],
                                       unsigned is_integer, double tmax, void *specific_data)
{
        struct teft_lp_data *sd = (struct teft_lp_data *)specific_data;

        int nt = sd->nt;
        double **final_tasks = sd->tasks;
        unsigned *in_sched_ctxs = sd->in_sched_ctxs;
        int *workers = sd->workers;
        struct sc_hypervisor_policy_task_pool *tmp_task_pools = sd->tmp_task_pools;
        unsigned size_ctxs = sd->size_ctxs;

        if(tmp_task_pools == NULL)
                return 0.0;

        double w_in_s[ns][nw];
        double tasks[nw][nt];
        double times[nw][nt];

        /* times in ms */
        sc_hypervisor_get_tasks_times(nw, nt, times, workers, size_ctxs, task_pools);

        double res = 0.0;
#ifdef STARPU_HAVE_GLPK_H
        res = sc_hypervisor_lp_simulate_distrib_tasks(ns, nw, nt, w_in_s, tasks, times, is_integer, tmax, in_sched_ctxs, tmp_task_pools);
#endif //STARPU_HAVE_GLPK_H

        if(res != 0.0)
        {
                int s, w, t;
                for(s = 0; s < ns; s++)
                        for(w = 0; w < nw; w++)
                                final_w_in_s[s][w] = w_in_s[s][w];

                for(w = 0; w < nw; w++)
                        for(t = 0; t < nt; t++)
                                final_tasks[w][t] = tasks[w][t];
        }
        return res;
}
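
/* Compute an initial distribution of the workers over the contexts: count the
 * task kinds collected so far, run the dichotomy over tmax and, if a solution
 * is found, place the resources in the contexts accordingly. Called once the
 * sizing request saved by teft_lp_size_ctxs() is ready to be honoured. */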
static void _size_ctxs(unsigned *sched_ctxs, int nsched_ctxs, int *workers, int nworkers)
{
        int ns = sched_ctxs == NULL ? sc_hypervisor_get_nsched_ctxs() : nsched_ctxs;
        int nw = workers == NULL ? (int)starpu_worker_get_count() : nworkers; /* Number of different workers */
        int nt = 0; /* Number of different kinds of tasks */

        starpu_pthread_mutex_lock(&mutex);

        struct sc_hypervisor_policy_task_pool *tp;
        for (tp = task_pools; tp; tp = tp->next)
                nt++;

        double w_in_s[ns][nw];
        double **tasks = (double**)malloc(nw*sizeof(double*));
        int i;
        for(i = 0; i < nw; i++)
                tasks[i] = (double*)malloc(nt*sizeof(double));

        struct teft_lp_data specific_data;
        specific_data.nt = nt;
        specific_data.tasks = tasks;
        specific_data.in_sched_ctxs = sched_ctxs;
        specific_data.workers = workers;
        specific_data.tmp_task_pools = task_pools;
        specific_data.size_ctxs = 1;

        /* Smallest possible tmax: hard to estimate precisely since we track
           the number of flops rather than the number of tasks. The LP computes
           it in seconds, but it is converted to ms just before returning. */
        double possible_tmax = sc_hypervisor_lp_get_tmax(nw, workers);
        double smallest_tmax = possible_tmax / 3;
        double tmax = possible_tmax * ns;
        double tmin = smallest_tmax;

        unsigned found_sol = sc_hypervisor_lp_execute_dichotomy(ns, nw, w_in_s, 1, (void*)&specific_data,
                                                                tmin, tmax, smallest_tmax, _compute_workers_distrib);
        starpu_pthread_mutex_unlock(&mutex);

        /* if we found at least one solution, redistribute the resources */
        if(found_sol)
        {
                struct types_of_workers *tw = sc_hypervisor_get_types_of_workers(workers, nw);
                sc_hypervisor_lp_place_resources_in_ctx(ns, nw, w_in_s, sched_ctxs, workers, 1, tw);
        }

        for(i = 0; i < nw; i++)
                free(tasks[i]);
        free(tasks);
}
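
/* If a sizing request was saved by teft_lp_size_ctxs(), wait until every
 * context has submitted at least ~90% of its total flops before computing the
 * initial distribution of workers. */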
static void size_if_required(void)
{
        int nsched_ctxs, nworkers;
        unsigned *sched_ctxs;
        int *workers;
        unsigned has_req = sc_hypervisor_get_size_req(&sched_ctxs, &nsched_ctxs, &workers, &nworkers);

        if(has_req)
        {
                struct sc_hypervisor_wrapper *sc_w = NULL;
                unsigned ready_to_size = 1;
                int s;
                starpu_pthread_mutex_lock(&act_hypervisor_mutex);
                for(s = 0; s < nsched_ctxs; s++)
                {
                        sc_w = sc_hypervisor_get_wrapper(sched_ctxs[s]);
//                      if(sc_w->submitted_flops < sc_w->total_flops)
                        if((sc_w->submitted_flops + (0.1*sc_w->total_flops)) < sc_w->total_flops)
                                ready_to_size = 0;
                }

                if(ready_to_size)
                {
                        _size_ctxs(sched_ctxs, nsched_ctxs, workers, nworkers);
                        sc_hypervisor_free_size_req();
                }
                starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
        }
}
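
/* Hypervisor hook called for every submitted task: record the task in the
 * per-footprint pool and trigger the initial sizing once enough work has been
 * submitted. */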
static void teft_lp_handle_submitted_job(struct starpu_codelet *cl, unsigned sched_ctx, uint32_t footprint, size_t data_size)
{
        /* count the tasks of the same type */
        starpu_pthread_mutex_lock(&mutex);
        sc_hypervisor_policy_add_task_to_pool(cl, sched_ctx, footprint, &task_pools, data_size);
        starpu_pthread_mutex_unlock(&mutex);

        size_if_required();
}
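
/* Re-run the linear program over a snapshot of the current task pool and, if
 * a solution is found, move workers between the contexts accordingly. */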
static void _try_resizing(unsigned *sched_ctxs, int nsched_ctxs, int *workers, int nworkers)
{
        int ns = sched_ctxs == NULL ? sc_hypervisor_get_nsched_ctxs() : nsched_ctxs;
        int nw = workers == NULL ? (int)starpu_worker_get_count() : nworkers; /* Number of different workers */
        sched_ctxs = sched_ctxs == NULL ? sc_hypervisor_get_sched_ctxs() : sched_ctxs;
        int nt = 0; /* Number of different kinds of tasks */
//      starpu_pthread_mutex_lock(&mutex);
        /* We do not take the mutex: an exact task count is not required. We do,
           however, copy the task pool so that the linear program cannot crash
           if the list of submitted tasks changes during execution. */
        struct sc_hypervisor_policy_task_pool *tp = NULL;
        struct sc_hypervisor_policy_task_pool *tmp_task_pools = sc_hypervisor_policy_clone_task_pool(task_pools);
        for (tp = task_pools; tp; tp = tp->next)
                nt++;

        double w_in_s[ns][nw];
        double **tasks_per_worker = (double**)malloc(nw*sizeof(double*));
        int i;
        for(i = 0; i < nw; i++)
                tasks_per_worker[i] = (double*)malloc(nt*sizeof(double));

        struct teft_lp_data specific_data;
        specific_data.nt = nt;
        specific_data.tasks = tasks_per_worker;
        specific_data.in_sched_ctxs = NULL;
        specific_data.workers = NULL;
        specific_data.tmp_task_pools = tmp_task_pools;
        specific_data.size_ctxs = 0;

        /* Smallest possible tmax: hard to estimate precisely since we track
           the number of flops rather than the number of tasks. The LP computes
           it in seconds, but it is converted to ms just before returning. */
        double possible_tmax = sc_hypervisor_lp_get_tmax(nw, NULL);
        double smallest_tmax = 0.0;
        double tmax = possible_tmax * ns;
        double tmin = smallest_tmax;

        unsigned found_sol = sc_hypervisor_lp_execute_dichotomy(ns, nw, w_in_s, 1, (void*)&specific_data,
                                                                tmin, tmax, smallest_tmax, _compute_workers_distrib);
//      starpu_pthread_mutex_unlock(&mutex);

        /* if we found at least one solution, redistribute the resources */
        if(found_sol)
        {
                struct types_of_workers *tw = sc_hypervisor_get_types_of_workers(workers, nw);
                sc_hypervisor_lp_place_resources_in_ctx(ns, nw, w_in_s, sched_ctxs, workers, 0, tw);
        }

        struct sc_hypervisor_policy_task_pool *next = NULL;
        struct sc_hypervisor_policy_task_pool *tmp_tp = tmp_task_pools;
        while(tmp_task_pools)
        {
                next = tmp_tp->next;
                free(tmp_tp);
                tmp_tp = next;
                tmp_task_pools = next;
        }

        for(i = 0; i < nw; i++)
                free(tasks_per_worker[i]);
        free(tasks_per_worker);
}
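
/* Hypervisor hook called when a task is popped for execution: once enough
 * flops have been submitted, trigger a resize if the SC_SPEED criterion
 * detects a speed gap between contexts, then remove the task from the pool. */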
static void teft_lp_handle_poped_task(unsigned sched_ctx, __attribute__((unused)) int worker, struct starpu_task *task, uint32_t footprint)
{
        struct sc_hypervisor_wrapper *sc_w = sc_hypervisor_get_wrapper(sched_ctx);

        int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
        if(ret != EBUSY)
        {
                if((sc_w->submitted_flops + (0.1*sc_w->total_flops)) < sc_w->total_flops)
                {
                        starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
                        return;
                }

                unsigned criteria = sc_hypervisor_get_resize_criteria();
                if(criteria != SC_NOTHING && criteria == SC_SPEED)
                {
                        if(sc_hypervisor_check_speed_gap_btw_ctxs(NULL, -1, NULL, -1))
                        {
                                _try_resizing(NULL, -1, NULL, -1);
                        }
                }
                starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
        }

        /* Taking this mutex here would be too expensive, and an exact task
           count is not compulsory. */
//      starpu_pthread_mutex_lock(&mutex);
        sc_hypervisor_policy_remove_task_from_pool(task, footprint, &task_pools);
//      starpu_pthread_mutex_unlock(&mutex);
}
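
/* Hypervisor hook called when a worker has been idle: once enough flops have
 * been submitted, trigger a resize under the SC_IDLE criterion. */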
static void teft_lp_handle_idle_cycle(unsigned sched_ctx, int worker)
{
        struct sc_hypervisor_wrapper *sc_w = sc_hypervisor_get_wrapper(sched_ctx);

        int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
        if(ret != EBUSY)
        {
                if((sc_w->submitted_flops + (0.1*sc_w->total_flops)) < sc_w->total_flops)
                {
                        starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
                        return;
                }

                unsigned criteria = sc_hypervisor_get_resize_criteria();
                if(criteria != SC_NOTHING && criteria == SC_IDLE)
                {
                        if(sc_hypervisor_check_idle(sched_ctx, worker))
                        {
                                _try_resizing(NULL, -1, NULL, -1);
                        }
                }
                starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
        }
        return;
}
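
/* Sizing entry point: only save the request; the actual sizing is deferred to
 * size_if_required(), once enough tasks have been submitted. */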
static void teft_lp_size_ctxs(unsigned *sched_ctxs, int nsched_ctxs, int *workers, int nworkers)
{
        sc_hypervisor_save_size_req(sched_ctxs, nsched_ctxs, workers, nworkers);
}
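
/* Explicit resize entry point: resize only when every context has already
 * submitted at least ~90% of its total flops. */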
static void teft_lp_resize_ctxs(unsigned *sched_ctxs, int nsched_ctxs, int *workers, int nworkers)
{
        int ret = starpu_pthread_mutex_trylock(&act_hypervisor_mutex);
        if(ret != EBUSY)
        {
                struct sc_hypervisor_wrapper *sc_w = NULL;
                int s = 0;
                for(s = 0; s < nsched_ctxs; s++)
                {
                        sc_w = sc_hypervisor_get_wrapper(sched_ctxs[s]);

                        if((sc_w->submitted_flops + (0.1*sc_w->total_flops)) < sc_w->total_flops)
                        {
                                starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
                                return;
                        }
                }

                _try_resizing(sched_ctxs, nsched_ctxs, workers, nworkers);
                starpu_pthread_mutex_unlock(&act_hypervisor_mutex);
        }
}
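
/* Policy descriptor exported to the hypervisor under the name "teft_lp". */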
struct sc_hypervisor_policy teft_lp_policy = {
        .size_ctxs = teft_lp_size_ctxs,
        .resize_ctxs = teft_lp_resize_ctxs,
        .handle_poped_task = teft_lp_handle_poped_task,
        .handle_pushed_task = NULL,
        .handle_idle_cycle = teft_lp_handle_idle_cycle,
        .handle_idle_end = NULL,
        .handle_post_exec_hook = NULL,
        .handle_submitted_job = teft_lp_handle_submitted_job,
        .end_ctx = NULL,
        .custom = 0,
        .name = "teft_lp"
};