lp2_policy.c
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2012  INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "lp_tools.h"
#include <math.h>

static struct bound_task_pool *task_pools = NULL;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

static double _glp_resolve(int ns, int nw, int nt, double tasks[nw][nt], double tmax, double w_in_s[ns][nw], int *in_sched_ctxs, int *workers);
static double _find_tmax(double t1, double t2);
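
/* Look for a task distribution (tasks[w][t]) and a worker-to-context assignment
   (w_in_s[s][w]) that fit in the smallest feasible makespan: tmax is not a
   variable of the linear program, it is fixed and refined by bisection between
   the smallest conceivable value and ns times that value. Returns 1 if at
   least one feasible solution was found, 0 otherwise. */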
static unsigned _compute_task_distribution_over_ctxs(int ns, int nw, int nt, double w_in_s[ns][nw], double tasks[nw][nt], int *sched_ctxs, int *workers)
{
	double draft_tasks[nw][nt];
	double draft_w_in_s[ns][nw];
	int w, t, s;

	for(w = 0; w < nw; w++)
		for(t = 0; t < nt; t++)
		{
			/* tasks may be NULL when the caller only needs w_in_s (see _size_ctxs) */
			if(tasks)
				tasks[w][t] = 0.0;
			draft_tasks[w][t] = 0.0;
		}
	for(s = 0; s < ns; s++)
		for(w = 0; w < nw; w++)
		{
			w_in_s[s][w] = 0.0;
			draft_w_in_s[s][w] = 0.0;
		}

	/* smallest possible tmax; hard to compute exactly because we know the
	   number of flops, not the number of tasks */
	double smallest_tmax = _lp_get_tmax(nw, workers);
	double tmax = smallest_tmax * ns;

	double res = 1.0;
	unsigned has_sol = 0;
	double tmin = 0.0;
	double old_tmax = 0.0;
	unsigned found_sol = 0;

	struct timeval start_time;
	struct timeval end_time;
	int nd = 0;
	gettimeofday(&start_time, NULL);

	/* tmax is not treated as an unknown of the linear program:
	   we fix it and refine it by bisection */
	while(tmax > 1.0)
	{
		/* find a solution and save the values in the draft tables;
		   only if the system is feasible do we copy them into the
		   tables handed back to the caller */
		res = _glp_resolve(ns, nw, nt, draft_tasks, tmax, draft_w_in_s, sched_ctxs, workers);
		if(res != 0.0)
		{
			if(tasks)
				for(w = 0; w < nw; w++)
					for(t = 0; t < nt; t++)
						tasks[w][t] = draft_tasks[w][t];
			for(s = 0; s < ns; s++)
				for(w = 0; w < nw; w++)
					w_in_s[s][w] = draft_w_in_s[s][w];
			has_sol = 1;
			found_sol = 1;
		}
		else
			has_sol = 0;

		/* if we have a solution with this tmax, try a smaller value,
		   bigger than the old min */
		if(has_sol)
		{
			if(old_tmax != 0.0 && (old_tmax - tmax) < 0.5)
				break;
			old_tmax = tmax;
		}
		else /* else try a bigger one, but smaller than the old tmax */
		{
			tmin = tmax;
			if(old_tmax != 0.0)
				tmax = old_tmax;
		}
		if(tmin == tmax) break;
		tmax = _find_tmax(tmin, tmax);

		if(tmax < smallest_tmax)
		{
			tmax = old_tmax;
			tmin = smallest_tmax;
			tmax = _find_tmax(tmin, tmax);
		}
		nd++;
	}

	gettimeofday(&end_time, NULL);

	long diff_s = end_time.tv_sec - start_time.tv_sec;
	long diff_us = end_time.tv_usec - start_time.tv_usec;
	float timing = (float)(diff_s*1000000 + diff_us)/1000;
//	fprintf(stdout, "nd = %d total time: %f ms \n", nd, timing);

	return found_sol;
}
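
/* Apply the w_in_s[][] assignment computed by the linear program: for each
   context, add the workers the solution maps to it (a CPU counts as assigned
   when its fraction is at least 0.5, any other architecture when it is at
   least 0.3) and mark the rest for removal; except on the first call, workers
   are only removed if each of them has another context to go to. */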
static void _redistribute_resources_in_ctxs(int ns, int nw, int nt, double w_in_s[ns][nw], unsigned first_time, int *in_sched_ctxs, int *workers)
{
	int *sched_ctxs = in_sched_ctxs == NULL ? sched_ctx_hypervisor_get_sched_ctxs() : in_sched_ctxs;
	int s, s2, w;

	for(s = 0; s < ns; s++)
	{
		int workers_to_add[nw], workers_to_remove[nw];
		int destination_ctx[nw][ns];

		for(w = 0; w < nw; w++)
		{
			workers_to_add[w] = -1;
			workers_to_remove[w] = -1;
			for(s2 = 0; s2 < ns; s2++)
				destination_ctx[w][s2] = -1;
		}

		int nadd = 0, nremove = 0;
		for(w = 0; w < nw; w++)
		{
			enum starpu_perf_archtype arch = workers == NULL ? starpu_worker_get_type(w) :
				starpu_worker_get_type(workers[w]);
			if(arch == STARPU_CPU_WORKER)
			{
				if(w_in_s[s][w] >= 0.5)
				{
					workers_to_add[nadd++] = workers == NULL ? w : workers[w];
				}
				else
				{
					workers_to_remove[nremove++] = workers == NULL ? w : workers[w];
					for(s2 = 0; s2 < ns; s2++)
						if(s2 != s && w_in_s[s2][w] >= 0.5)
							destination_ctx[w][s2] = 1;
						else
							destination_ctx[w][s2] = 0;
				}
			}
			else
			{
				if(w_in_s[s][w] >= 0.3)
				{
					workers_to_add[nadd++] = workers == NULL ? w : workers[w];
				}
				else
				{
					workers_to_remove[nremove++] = workers == NULL ? w : workers[w];
					for(s2 = 0; s2 < ns; s2++)
						if(s2 != s && w_in_s[s2][w] >= 0.3)
							destination_ctx[w][s2] = 1;
						else
							destination_ctx[w][s2] = 0;
				}
			}
		}

		sched_ctx_hypervisor_add_workers_to_sched_ctx(workers_to_add, nadd, sched_ctxs[s]);
		struct policy_config *new_config = sched_ctx_hypervisor_get_config(sched_ctxs[s]);
		int i;
		for(i = 0; i < nadd; i++)
			new_config->max_idle[workers_to_add[i]] = new_config->max_idle[workers_to_add[i]] != MAX_IDLE_TIME ?
				new_config->max_idle[workers_to_add[i]] : new_config->new_workers_max_idle;

		if(!first_time)
		{
			/* do not remove workers if they can't go anywhere else */
			int w2;
			/* -1 = not concerned, 0 = no destination found, 1 = destination found
			   (int rather than unsigned so the -1 sentinel is meaningful) */
			int found_one_dest[nremove];
			unsigned all_have_dest = 1;
			for(w2 = 0; w2 < nremove; w2++)
				found_one_dest[w2] = 0;

			for(w2 = 0; w2 < nremove; w2++)
				for(s2 = 0; s2 < ns; s2++)
				{
					/* if the worker has to be removed we should find it a destination,
					   otherwise we are not interested in it */
					if(destination_ctx[w2][s2] == -1)
						found_one_dest[w2] = -1;
					if(destination_ctx[w2][s2] == 1)// && sched_ctx_hypervisor_can_resize(sched_ctxs[s2]))
					{
						found_one_dest[w2] = 1;
						break;
					}
				}
			for(w2 = 0; w2 < nremove; w2++)
			{
				if(found_one_dest[w2] == 0)
				{
					all_have_dest = 0;
					break;
				}
			}
			if(all_have_dest)
				sched_ctx_hypervisor_remove_workers_from_sched_ctx(workers_to_remove, nremove, sched_ctxs[s], 0);
		}
	}
}
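
/* Initial sizing of the contexts: compute the worker-to-context assignment and
   apply it with first_time = 1, so workers are only added, never removed. */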
static void _size_ctxs(int *sched_ctxs, int nsched_ctxs, int *workers, int nworkers)
{
	int ns = sched_ctxs == NULL ? sched_ctx_hypervisor_get_nsched_ctxs() : nsched_ctxs;
	int nw = workers == NULL ? starpu_worker_get_count() : nworkers; /* Number of different workers */
	int nt = 0; /* Number of different kinds of tasks */
	struct bound_task_pool * tp;
	for (tp = task_pools; tp; tp = tp->next)
		nt++;

	double w_in_s[ns][nw];

	/* the per-worker task counts are not needed here, hence the NULL tasks argument */
	unsigned found_sol = _compute_task_distribution_over_ctxs(ns, nw, nt, w_in_s, NULL, sched_ctxs, workers);
	/* if we did find at least one solution, redistribute the resources */
	if(found_sol)
		_redistribute_resources_in_ctxs(ns, nw, nt, w_in_s, 1, sched_ctxs, workers);
}
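
/* If a sizing request was recorded by lp2_size_ctxs(), honour it once every
   requested context has submitted all of its flops. */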
static void size_if_required(void)
{
	int nsched_ctxs, nworkers;
	int *sched_ctxs, *workers;
	unsigned has_req = sched_ctx_hypervisor_get_size_req(&sched_ctxs, &nsched_ctxs, &workers, &nworkers);

	if(has_req)
	{
		struct sched_ctx_wrapper* sc_w = NULL;
		unsigned ready_to_size = 1;
		int s;
		pthread_mutex_lock(&act_hypervisor_mutex);
		for(s = 0; s < nsched_ctxs; s++)
		{
			sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctxs[s]);
			if(sc_w->submitted_flops < sc_w->total_flops)
				ready_to_size = 0;
		}
		if(ready_to_size)
			_size_ctxs(sched_ctxs, nsched_ctxs, workers, nworkers);
		pthread_mutex_unlock(&act_hypervisor_mutex);
	}
}
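
/* Hypervisor hook called for every submitted task: group tasks by codelet,
   footprint and context so the linear program can reason per task kind, then
   check whether the initial sizing can already be performed. */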
static void lp2_handle_submitted_job(struct starpu_task *task, uint32_t footprint)
{
	/* count the tasks of the same type */
	pthread_mutex_lock(&mutex);
	struct bound_task_pool *tp = NULL;

	for (tp = task_pools; tp; tp = tp->next)
	{
		if (tp->cl == task->cl && tp->footprint == footprint && tp->sched_ctx_id == task->sched_ctx)
			break;
	}

	if (!tp)
	{
		tp = (struct bound_task_pool *) malloc(sizeof(struct bound_task_pool));
		tp->cl = task->cl;
		tp->footprint = footprint;
		tp->sched_ctx_id = task->sched_ctx;
		tp->n = 0;
		tp->next = task_pools;
		task_pools = tp;
	}

	/* One more task of this kind */
	tp->n++;
	pthread_mutex_unlock(&mutex);

	size_if_required();
}
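
/* Fill times[w][t] with the expected length of one task of kind t on worker w
   according to its history-based performance model (divided by 1000), or NAN
   when the model has no prediction for that combination. */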
static void _starpu_get_tasks_times(int nw, int nt, double times[nw][nt], int *workers)
{
	struct bound_task_pool *tp;
	int w, t;
	for (w = 0; w < nw; w++)
	{
		for (t = 0, tp = task_pools; tp; t++, tp = tp->next)
		{
			enum starpu_perf_archtype arch = workers == NULL ? starpu_worker_get_perf_archtype(w) :
				starpu_worker_get_perf_archtype(workers[w]);
			double length = starpu_history_based_job_expected_perf(tp->cl->model, arch, tp->footprint);
			if (isnan(length))
				times[w][t] = NAN;
			else
				times[w][t] = length / 1000.;
		}
	}
}

/*
 * GNU Linear Programming Kit backend
 */
#ifdef HAVE_GLPK_H
#include <glpk.h>
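
/* Build and solve one instance of the linear program for a fixed tmax.
 *
 * Variables: n[w][t], the number of tasks of kind t executed by worker w
 * (columns 1 .. nw*nt), and x[s][w] in [0,1], the fraction of worker w owned
 * by context s (columns nw*nt+1 .. nw*nt+ns*nw). The objective maximises the
 * sum of the x[s][w].
 *
 * Constraints:
 *   - per context and worker: sum_t times[w][t] * n[w][t] <= x[s][w] * tmax
 *   - per task kind: sum_w n[w][t] = number of submitted tasks of that kind
 *   - per worker: sum_s x[s][w] = 1
 *
 * Returns the objective value, or 0.0 if the problem is infeasible or if some
 * task kind has no performance model at all. */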
static double _glp_resolve(int ns, int nw, int nt, double tasks[nw][nt], double tmax, double w_in_s[ns][nw], int *in_sched_ctxs, int *workers)
{
	struct bound_task_pool * tp;
	int t, w, s;
	glp_prob *lp;

	lp = glp_create_prob();
	glp_set_prob_name(lp, "StarPU theoretical bound");
	glp_set_obj_dir(lp, GLP_MAX);
	glp_set_obj_name(lp, "total execution time");

	{
		double times[nw][nt];
		int ne = nt * nw /* worker execution time entries */
			+ nw * ns /* tmax entries in the execution-time rows */
			+ nw * (nt + ns) /* task-completion and worker-ownership rows */
			+ 1; /* GLPK arrays are 1-based, so one extra slot */
		int n = 1;
		int ia[ne], ja[ne];
		double ar[ne];

		_starpu_get_tasks_times(nw, nt, times, workers);

		/* Variables: n[w][t], the number of tasks of kind t assigned to worker w,
		   and x[s][w], the fraction of worker w owned by context s */
		glp_add_cols(lp, nw*nt+ns*nw);
#define colnum(w, t) ((t)*nw+(w)+1)
		for(s = 0; s < ns; s++)
			for(w = 0; w < nw; w++)
				glp_set_obj_coef(lp, nw*nt+s*nw+w+1, 1.);

		for (w = 0; w < nw; w++)
			for (t = 0, tp = task_pools; tp; t++, tp = tp->next)
			{
				char name[32];
				snprintf(name, sizeof(name), "w%dt%dn", w, t);
				glp_set_col_name(lp, colnum(w, t), name);
				glp_set_col_bnds(lp, colnum(w, t), GLP_LO, 0., 0.);
			}
		for(s = 0; s < ns; s++)
			for(w = 0; w < nw; w++)
			{
				char name[32];
				snprintf(name, sizeof(name), "w%ds%dn", w, s);
				glp_set_col_name(lp, nw*nt+s*nw+w+1, name);
				glp_set_col_bnds(lp, nw*nt+s*nw+w+1, GLP_DB, 0.0, 1.0);
			}

		int *sched_ctxs = in_sched_ctxs == NULL ? sched_ctx_hypervisor_get_sched_ctxs() : in_sched_ctxs;

		int curr_row_idx = 0;
		/* Total worker execution time */
		glp_add_rows(lp, nw*ns);
		for (t = 0, tp = task_pools; tp; t++, tp = tp->next)
		{
			int someone = 0;
			for (w = 0; w < nw; w++)
				if (!isnan(times[w][t]))
					someone = 1;
			if (!someone)
			{
				/* This task does not have any performance model at all, abort */
				glp_delete_prob(lp);
				return 0.0;
			}
		}

		/* sum_t(times[w][t] * n[w][t]) <= x[s][w] * tmax */
		for(s = 0; s < ns; s++)
		{
			for (w = 0; w < nw; w++)
			{
				char name[32], title[64];
				starpu_worker_get_name(w, name, sizeof(name));
				snprintf(title, sizeof(title), "worker %s", name);
				glp_set_row_name(lp, curr_row_idx+s*nw+w+1, title);
				for (t = 0, tp = task_pools; tp; t++, tp = tp->next)
				{
					if(tp->sched_ctx_id == sched_ctxs[s])
					{
						ia[n] = curr_row_idx+s*nw+w+1;
						ja[n] = colnum(w, t);
						if (isnan(times[w][t]))
							ar[n] = 1000000000.;
						else
							ar[n] = times[w][t];
						n++;
					}
				}
				/* x[s][w] = 1 | 0 */
				ia[n] = curr_row_idx+s*nw+w+1;
				ja[n] = nw*nt+s*nw+w+1;
				ar[n] = (-1) * tmax;
				n++;
				glp_set_row_bnds(lp, curr_row_idx+s*nw+w+1, GLP_UP, 0.0, 0.0);
			}
		}
		curr_row_idx += nw*ns;

		/* Total task completion */
		glp_add_rows(lp, nt);
		for (t = 0, tp = task_pools; tp; t++, tp = tp->next)
		{
			char title[64];
			snprintf(title, sizeof(title), "task %s key %x", tp->cl->name, (unsigned) tp->footprint);
			glp_set_row_name(lp, curr_row_idx+t+1, title);
			for (w = 0; w < nw; w++)
			{
				ia[n] = curr_row_idx+t+1;
				ja[n] = colnum(w, t);
				ar[n] = 1;
				n++;
			}
			glp_set_row_bnds(lp, curr_row_idx+t+1, GLP_FX, tp->n, tp->n);
		}
		curr_row_idx += nt;

		/* sum_s(x[s][w]) = 1 */
		glp_add_rows(lp, nw);
		for (w = 0; w < nw; w++)
		{
			char name[32], title[64];
			starpu_worker_get_name(w, name, sizeof(name));
			snprintf(title, sizeof(title), "w%x", w);
			glp_set_row_name(lp, curr_row_idx+w+1, title);
			for(s = 0; s < ns; s++)
			{
				ia[n] = curr_row_idx+w+1;
				ja[n] = nw*nt+s*nw+w+1;
				ar[n] = 1;
				n++;
			}
			glp_set_row_bnds(lp, curr_row_idx+w+1, GLP_FX, 1.0, 1.0);
		}

		if(n != ne)
			printf("ns = %d nw = %d nt = %d n = %d ne = %d\n", ns, nw, nt, n, ne);
		STARPU_ASSERT(n == ne);
		glp_load_matrix(lp, ne-1, ia, ja, ar);
	}

	glp_smcp parm;
	glp_init_smcp(&parm);
	parm.msg_lev = GLP_MSG_OFF;
	int ret = glp_simplex(lp, &parm);
	if (ret)
	{
		glp_delete_prob(lp);
		lp = NULL;
		return 0.0;
	}

	int stat = glp_get_prim_stat(lp);
	/* if we don't have a solution return */
	if(stat == GLP_NOFEAS)
	{
		glp_delete_prob(lp);
		lp = NULL;
		return 0.0;
	}

	double res = glp_get_obj_val(lp);

	for (w = 0; w < nw; w++)
		for (t = 0, tp = task_pools; tp; t++, tp = tp->next)
			tasks[w][t] = glp_get_col_prim(lp, colnum(w, t));

	for(s = 0; s < ns; s++)
		for(w = 0; w < nw; w++)
			w_in_s[s][w] = glp_get_col_prim(lp, nw*nt+s*nw+w+1);

	glp_delete_prob(lp);
	return res;
}
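
/* midpoint of the current bisection interval */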
static double _find_tmax(double t1, double t2)
{
	return t1 + ((t2 - t1)/2);
}
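
/* Hypervisor hook called when a task is popped by a worker: once the calling
   context has submitted all of its flops, and if _velocity_gap_btw_ctxs()
   reports a significant speed gap between contexts, recompute the task
   distribution and hand the rounded per-architecture worker counts to
   _lp_redistribute_resources_in_ctxs(). */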
static void lp2_handle_poped_task(unsigned sched_ctx, int worker)
{
	struct sched_ctx_wrapper* sc_w = sched_ctx_hypervisor_get_wrapper(sched_ctx);

	int ret = pthread_mutex_trylock(&act_hypervisor_mutex);
	if(ret != EBUSY)
	{
		if(sc_w->submitted_flops < sc_w->total_flops)
		{
			pthread_mutex_unlock(&act_hypervisor_mutex);
			return;
		}

		if(_velocity_gap_btw_ctxs())
		{
			int ns = sched_ctx_hypervisor_get_nsched_ctxs();
			int nw = starpu_worker_get_count(); /* Number of different workers */
			int nt = 0; /* Number of different kinds of tasks */
			struct bound_task_pool * tp;
			for (tp = task_pools; tp; tp = tp->next)
				nt++;

			double w_in_s[ns][nw];
			double tasks_per_worker[nw][nt];

			unsigned found_sol = _compute_task_distribution_over_ctxs(ns, nw, nt, w_in_s, tasks_per_worker, NULL, NULL);
			/* if we did find at least one solution, redistribute the resources */
			if(found_sol)
			{
				int w, s;
				double nworkers[ns][2];
				int nworkers_rounded[ns][2];
				for(s = 0; s < ns; s++)
				{
					nworkers[s][0] = 0.0;
					nworkers[s][1] = 0.0;
					nworkers_rounded[s][0] = 0;
					nworkers_rounded[s][1] = 0;
				}

				for(s = 0; s < ns; s++)
				{
					for(w = 0; w < nw; w++)
					{
						enum starpu_perf_archtype arch = starpu_worker_get_type(w);

						if(arch == STARPU_CUDA_WORKER)
						{
							nworkers[s][0] += w_in_s[s][w];
							if(w_in_s[s][w] >= 0.3)
								nworkers_rounded[s][0]++;
						}
						else
						{
							nworkers[s][1] += w_in_s[s][w];
							if(w_in_s[s][w] > 0.3)
								nworkers_rounded[s][1]++;
						}
					}
				}
				/* for(s = 0; s < ns; s++) */
				/*	printf("%d: cpus = %lf gpus = %lf cpus_round = %d gpus_round = %d\n", s, nworkers[s][1], nworkers[s][0], */
				/*		nworkers_rounded[s][1], nworkers_rounded[s][0]); */

				_lp_redistribute_resources_in_ctxs(ns, 2, nworkers_rounded, nworkers);
			}
		}
		pthread_mutex_unlock(&act_hypervisor_mutex);
	}
}
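
/* Hypervisor entry point for sizing: the request is only recorded here and is
   acted upon later by size_if_required(), once enough work has been submitted. */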
static void lp2_size_ctxs(int *sched_ctxs, int nsched_ctxs, int *workers, int nworkers)
{
	sched_ctx_hypervisor_save_size_req(sched_ctxs, nsched_ctxs, workers, nworkers);
}

struct hypervisor_policy lp2_policy = {
	.size_ctxs = lp2_size_ctxs,
	.handle_poped_task = lp2_handle_poped_task,
	.handle_pushed_task = NULL,
	.handle_idle_cycle = NULL,
	.handle_idle_end = NULL,
	.handle_post_exec_hook = NULL,
	.handle_submitted_job = lp2_handle_submitted_job,
	.custom = 0,
	.name = "lp2"
};

#endif /* HAVE_GLPK_H */