/* sched_ctx_utils.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2011, 2012 INRIA
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #include "sched_ctx_utils.h"
  17. #include <starpu.h>
  18. #include "sc_hypervisor.h"
#define NSAMPLES 3 /* number of timed runs averaged per benchmark */

/* Benchmark geometry and worker-count knobs, filled from the command
 * line by parse_args_ctx(). */
unsigned size1;    /* matrix order for the 1st benchmark */
unsigned size2;    /* matrix order for the 2nd benchmark */
unsigned nblocks1; /* block count for the 1st benchmark */
unsigned nblocks2; /* block count for the 2nd benchmark */
unsigned cpu1;     /* CPU workers requested for context 1 */
unsigned cpu2;     /* CPU workers requested for context 2 */
unsigned gpu;      /* GPU workers shared by both contexts -- TODO confirm against callers */
unsigned gpu1;     /* extra GPU workers for context 1 */
unsigned gpu2;     /* extra GPU workers for context 2 */

/* Description of one benchmark instance. */
typedef struct
{
	unsigned id;       /* 0 or 1: index into rv[] */
	unsigned ctx;      /* scheduling context this benchmark runs in (0 = global) */
	int the_other_ctx; /* context id of the peer benchmark */
	int *workers;      /* worker ids assigned to this context (malloc'd) */
	int nworkers;      /* number of entries in workers */
	void (*bench)(float*, unsigned, unsigned); /* kernel: (matrix, size, nblocks) */
	unsigned size;     /* matrix order */
	unsigned nblocks;  /* block count */
	float *mat[NSAMPLES]; /* one input matrix per sample */
} params;

/* Accumulated results of one benchmark (summed, then averaged). */
typedef struct
{
	double flops;
	double avg_timing;
} retvals;

int first = 1;              /* guard used by the disabled ctx-deletion code in start_bench() */
starpu_pthread_mutex_t mut; /* initialized/destroyed in start_2benchs() only */
retvals rv[2];              /* one result slot per benchmark id */
params p1, p2;              /* the two benchmark descriptions */
int it = 0;                 /* event counters for set_hypervisor_conf() */
int it2 = 0;
starpu_pthread_key_t key;   /* thread-local pointer to the running benchmark's id */
  53. void init()
  54. {
  55. size1 = 4*1024;
  56. size2 = 4*1024;
  57. nblocks1 = 16;
  58. nblocks2 = 16;
  59. cpu1 = 0;
  60. cpu2 = 0;
  61. gpu = 0;
  62. gpu1 = 0;
  63. gpu2 = 0;
  64. rv[0].flops = 0.0;
  65. rv[1].flops = 0.0;
  66. rv[1].avg_timing = 0.0;
  67. rv[1].avg_timing = 0.0;
  68. p1.ctx = 0;
  69. p2.ctx = 0;
  70. p1.id = 0;
  71. p2.id = 1;
  72. starpu_pthread_key_create(&key, NULL);
  73. }
  74. void update_sched_ctx_timing_results(double flops, double avg_timing)
  75. {
  76. unsigned *id = starpu_pthread_getspecific(key);
  77. rv[*id].flops += flops;
  78. rv[*id].avg_timing += avg_timing;
  79. }
/* Thread entry point: run one benchmark NSAMPLES times inside its
 * scheduling context, then average the accumulated results.
 * val is a params* describing the benchmark; always returns NULL. */
void* start_bench(void *val)
{
	params *p = (params*)val;
	int i;
	/* Publish this benchmark's id so update_sched_ctx_timing_results()
	 * knows which rv[] slot to fill from this thread. */
	starpu_pthread_setspecific(key, &p->id);
	/* ctx 0 means "no dedicated context": only switch for a real one. */
	if(p->ctx != 0)
		starpu_sched_ctx_set_context(&p->ctx);
	for(i = 0; i < NSAMPLES; i++)
		p->bench(p->mat[i], p->size, p->nblocks);
	/* Disabled: used to unregister and delete this context after the
	 * first finishing thread, merging workers into the peer context. */
	/* if(p->ctx != 0) */
	/* { */
	/* starpu_pthread_mutex_lock(&mut); */
	/* if(first){ */
	/* sc_hypervisor_unregiser_ctx(p->ctx); */
	/* starpu_sched_ctx_delete(p->ctx, p->the_other_ctx); */
	/* } */
	/* first = 0; */
	/* starpu_pthread_mutex_unlock(&mut); */
	/* } */
	/* Stop the hypervisor from resizing the peer context once we finish. */
	sc_hypervisor_stop_resize(p->the_other_ctx);
	/* Convert accumulated sums into per-sample averages. */
	rv[p->id].flops /= NSAMPLES;
	rv[p->id].avg_timing /= NSAMPLES;
	return NULL;
}
  104. float* construct_matrix(unsigned size)
  105. {
  106. float *mat;
  107. starpu_malloc((void **)&mat, (size_t)size*size*sizeof(float));
  108. unsigned i,j;
  109. for (i = 0; i < size; i++)
  110. {
  111. for (j = 0; j < size; j++)
  112. {
  113. mat[j +i*size] = (1.0f/(1.0f+i+j)) + ((i == j)?1.0f*size:0.0f);
  114. /* mat[j +i*size] = ((i == j)?1.0f*size:0.0f); */
  115. }
  116. }
  117. return mat;
  118. }
/* Run both benchmarks concurrently, one thread each, and print their
 * averaged flop rates and timings plus the total wall-clock time. */
void start_2benchs(void (*bench)(float*, unsigned, unsigned))
{
	p1.bench = bench;
	p1.size = size1;
	p1.nblocks = nblocks1;
	p2.bench = bench;
	p2.size = size2;
	p2.nblocks = nblocks2;
	int i;
	/* One fresh input matrix per sample for each benchmark. */
	for(i = 0; i < NSAMPLES; i++)
	{
		p1.mat[i] = construct_matrix(p1.size);
		p2.mat[i] = construct_matrix(p2.size);
	}
	starpu_pthread_t tid[2];
	/* `mut` is used by the (currently disabled) ctx-deletion code in
	 * start_bench(); this is the only place it is initialized. */
	starpu_pthread_mutex_init(&mut, NULL);
	struct timeval start;
	struct timeval end;
	gettimeofday(&start, NULL);
	starpu_pthread_create(&tid[0], NULL, (void*)start_bench, (void*)&p1);
	starpu_pthread_create(&tid[1], NULL, (void*)start_bench, (void*)&p2);
	starpu_pthread_join(tid[0], NULL);
	starpu_pthread_join(tid[1], NULL);
	gettimeofday(&end, NULL);
	starpu_pthread_mutex_destroy(&mut);
	/* Elapsed wall-clock time, converted from microseconds to seconds. */
	double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	timing /= 1000000;
	printf("%2.2f %2.2f ", rv[0].flops, rv[1].flops);
	printf("%2.2f %2.2f %2.2f\n", rv[0].avg_timing, rv[1].avg_timing, timing);
}
  149. void start_1stbench(void (*bench)(float*, unsigned, unsigned))
  150. {
  151. p1.bench = bench;
  152. p1.size = size1;
  153. p1.nblocks = nblocks1;
  154. int i;
  155. for(i = 0; i < NSAMPLES; i++)
  156. {
  157. p1.mat[i] = construct_matrix(p1.size);
  158. }
  159. struct timeval start;
  160. struct timeval end;
  161. gettimeofday(&start, NULL);
  162. start_bench((void*)&p1);
  163. gettimeofday(&end, NULL);
  164. starpu_pthread_mutex_destroy(&mut);
  165. double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
  166. timing /= 1000000;
  167. printf("%2.2f ", rv[0].flops);
  168. printf("%2.2f %2.2f\n", rv[0].avg_timing, timing);
  169. }
  170. void start_2ndbench(void (*bench)(float*, unsigned, unsigned))
  171. {
  172. p2.bench = bench;
  173. p2.size = size2;
  174. p2.nblocks = nblocks2;
  175. int i;
  176. for(i = 0; i < NSAMPLES; i++)
  177. {
  178. p2.mat[i] = construct_matrix(p2.size);
  179. }
  180. struct timeval start;
  181. struct timeval end;
  182. gettimeofday(&start, NULL);
  183. start_bench((void*)&p2);
  184. gettimeofday(&end, NULL);
  185. starpu_pthread_mutex_destroy(&mut);
  186. double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
  187. timing /= 1000000;
  188. printf("%2.2f ", rv[1].flops);
  189. printf("%2.2f %2.2f\n", rv[1].avg_timing, timing);
  190. }
/* Create the two scheduling contexts, attach the hypervisor's "idle"
 * policy, and register resizing constraints for each context.
 * The bench parameter is unused here. */
void construct_contexts(void (*bench)(float*, unsigned, unsigned))
{
	struct sc_hypervisor_policy policy;
	policy.custom = 0;
	policy.name = "idle";
	void *perf_counters = sc_hypervisor_init(&policy);
	int nworkers1 = cpu1 + gpu + gpu1;
	int nworkers2 = cpu2 + gpu + gpu2;
	/* unsigned n_all_gpus = gpu + gpu1 + gpu2; */
	int i;
	/* int k = 0; */
	/* NOTE(review): the command-line derived count is overridden by a
	 * hard-coded 12 workers -- presumably tuned for a 12-core test
	 * machine; confirm before reusing elsewhere. */
	nworkers1 = 12;
	p1.workers = (int*)malloc(nworkers1*sizeof(int));
	/* Disabled: original per-device worker selection. */
	/* for(i = 0; i < gpu; i++) */
	/* p1.workers[k++] = i; */
	/* for(i = gpu; i < gpu + gpu1; i++) */
	/* p1.workers[k++] = i; */
	/* for(i = n_all_gpus; i < n_all_gpus + cpu1; i++) */
	/* p1.workers[k++] = i; */
	/* Context 1 simply takes workers 0..11. */
	for(i = 0; i < 12; i++)
		p1.workers[i] = i;
	p1.ctx = starpu_sched_ctx_create(p1.workers, nworkers1, "sched_ctx1", STARPU_SCHED_CTX_POLICY_NAME, "heft", 0);
	starpu_sched_ctx_set_perf_counters(p1.ctx, perf_counters);
	p2.the_other_ctx = (int)p1.ctx;
	p1.nworkers = nworkers1;
	sc_hypervisor_register_ctx(p1.ctx, 0.0);
	/* Disabled: earlier, idle-time-driven configuration for ctx 1. */
	/* sc_hypervisor_ctl(p1.ctx, */
	/* SC_HYPERVISOR_MAX_IDLE, p1.workers, p1.nworkers, 5000.0, */
	/* SC_HYPERVISOR_MAX_IDLE, p1.workers, gpu+gpu1, 100000.0, */
	/* SC_HYPERVISOR_EMPTY_CTX_MAX_IDLE, p1.workers, p1.nworkers, 500000.0, */
	/* SC_HYPERVISOR_GRANULARITY, 2, */
	/* SC_HYPERVISOR_MIN_TASKS, 1000, */
	/* SC_HYPERVISOR_NEW_WORKERS_MAX_IDLE, 100000.0, */
	/* SC_HYPERVISOR_MIN_WORKERS, 6, */
	/* SC_HYPERVISOR_MAX_WORKERS, 12, */
	/* NULL); */
	sc_hypervisor_ctl(p1.ctx,
			SC_HYPERVISOR_GRANULARITY, 2,
			SC_HYPERVISOR_MIN_TASKS, 1000,
			SC_HYPERVISOR_MIN_WORKERS, 6,
			SC_HYPERVISOR_MAX_WORKERS, 12,
			NULL);
	/* k = 0; */
	p2.workers = (int*)malloc(nworkers2*sizeof(int));
	/* Disabled: original per-device worker selection for ctx 2. */
	/* for(i = 0; i < gpu; i++) */
	/* p2.workers[k++] = i; */
	/* for(i = gpu + gpu1; i < gpu + gpu1 + gpu2; i++) */
	/* p2.workers[k++] = i; */
	/* for(i = n_all_gpus + cpu1; i < n_all_gpus + cpu1 + cpu2; i++) */
	/* p2.workers[k++] = i; */
	/* Context 2 starts empty (0 workers); the hypervisor is expected to
	 * move workers into it at runtime -- TODO confirm. */
	p2.ctx = starpu_sched_ctx_create(p2.workers, 0, "sched_ctx2", STARPU_SCHED_CTX_POLICY_NAME, "heft", 0);
	starpu_sched_ctx_set_perf_counters(p2.ctx, perf_counters);
	p1.the_other_ctx = (int)p2.ctx;
	p2.nworkers = 0;
	sc_hypervisor_register_ctx(p2.ctx, 0.0);
	/* Disabled: earlier, idle-time-driven configuration for ctx 2. */
	/* sc_hypervisor_ctl(p2.ctx, */
	/* SC_HYPERVISOR_MAX_IDLE, p2.workers, p2.nworkers, 2000.0, */
	/* SC_HYPERVISOR_MAX_IDLE, p2.workers, gpu+gpu2, 5000.0, */
	/* SC_HYPERVISOR_EMPTY_CTX_MAX_IDLE, p1.workers, p1.nworkers, 500000.0, */
	/* SC_HYPERVISOR_GRANULARITY, 2, */
	/* SC_HYPERVISOR_MIN_TASKS, 500, */
	/* SC_HYPERVISOR_NEW_WORKERS_MAX_IDLE, 1000.0, */
	/* SC_HYPERVISOR_MIN_WORKERS, 4, */
	/* SC_HYPERVISOR_MAX_WORKERS, 8, */
	/* NULL); */
	sc_hypervisor_ctl(p2.ctx,
			SC_HYPERVISOR_GRANULARITY, 2,
			SC_HYPERVISOR_MIN_TASKS, 500,
			SC_HYPERVISOR_MIN_WORKERS, 0,
			SC_HYPERVISOR_MAX_WORKERS, 6,
			NULL);
}
  263. void set_hypervisor_conf(int event, int task_tag)
  264. {
  265. /* unsigned *id = starpu_pthread_getspecific(key); */
  266. /* if(*id == 0) */
  267. /* { */
  268. /* if(event == END_BENCH) */
  269. /* { */
  270. /* if(it < 2) */
  271. /* { */
  272. /* sc_hypervisor_ctl(p2.ctx, */
  273. /* SC_HYPERVISOR_MIN_WORKERS, 2, */
  274. /* SC_HYPERVISOR_MAX_WORKERS, 4, */
  275. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  276. /* NULL); */
  277. /* printf("%d: set max %d for tag %d\n", p2.ctx, 4, task_tag); */
  278. /* sc_hypervisor_ctl(p1.ctx, */
  279. /* SC_HYPERVISOR_MIN_WORKERS, 6, */
  280. /* SC_HYPERVISOR_MAX_WORKERS, 8, */
  281. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  282. /* NULL); */
  283. /* printf("%d: set max %d for tag %d\n", p1.ctx, 8, task_tag); */
  284. /* sc_hypervisor_resize(p1.ctx, task_tag); */
  285. /* } */
  286. /* if(it == 2) */
  287. /* { */
  288. /* sc_hypervisor_ctl(p2.ctx, */
  289. /* SC_HYPERVISOR_MIN_WORKERS, 12, */
  290. /* SC_HYPERVISOR_MAX_WORKERS, 12, */
  291. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  292. /* NULL); */
  293. /* printf("%d: set max %d for tag %d\n", p2.ctx, 12, task_tag); */
  294. /* sc_hypervisor_ctl(p1.ctx, */
  295. /* SC_HYPERVISOR_MIN_WORKERS, 0, */
  296. /* SC_HYPERVISOR_MAX_WORKERS, 0, */
  297. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  298. /* NULL); */
  299. /* printf("%d: set max %d for tag %d\n", p1.ctx, 0, task_tag); */
  300. /* sc_hypervisor_resize(p1.ctx, task_tag); */
  301. /* } */
  302. /* it++; */
  303. /* } */
  304. /* } */
  305. /* else */
  306. /* { */
  307. /* if(event == END_BENCH) */
  308. /* { */
  309. /* if(it2 < 3) */
  310. /* { */
  311. /* sc_hypervisor_ctl(p1.ctx, */
  312. /* SC_HYPERVISOR_MIN_WORKERS, 6, */
  313. /* SC_HYPERVISOR_MAX_WORKERS, 12, */
  314. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  315. /* NULL); */
  316. /* printf("%d: set max %d for tag %d\n", p1.ctx, 12, task_tag); */
  317. /* sc_hypervisor_ctl(p2.ctx, */
  318. /* SC_HYPERVISOR_MIN_WORKERS, 0, */
  319. /* SC_HYPERVISOR_MAX_WORKERS, 0, */
  320. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  321. /* NULL); */
  322. /* printf("%d: set max %d for tag %d\n", p2.ctx, 0, task_tag); */
  323. /* sc_hypervisor_resize(p2.ctx, task_tag); */
  324. /* } */
  325. /* it2++; */
  326. /* } */
  327. /* } */
  328. /* if(*id == 1) */
  329. /* { */
  330. /* if(event == START_BENCH) */
  331. /* { */
  332. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  333. /* sc_hypervisor_ctl(p1.ctx, */
  334. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 800000.0, */
  335. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  336. /* NULL); */
  337. /* } */
  338. /* else */
  339. /* { */
  340. /* if(it2 < 2) */
  341. /* { */
  342. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  343. /* sc_hypervisor_ctl(p2.ctx, */
  344. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 500.0, */
  345. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 200.0, */
  346. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  347. /* NULL); */
  348. /* } */
  349. /* if(it2 == 2) */
  350. /* { */
  351. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  352. /* sc_hypervisor_ctl(p2.ctx, */
  353. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 1000.0, */
  354. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 500.0, */
  355. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  356. /* SC_HYPERVISOR_MAX_WORKERS, 12, */
  357. /* NULL); */
  358. /* } */
  359. /* it2++; */
  360. /* } */
  361. /* } else { */
  362. /* if(event == START_BENCH) */
  363. /* { */
  364. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  365. /* sc_hypervisor_ctl(p1.ctx, */
  366. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 1500.0, */
  367. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 4000.0, */
  368. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  369. /* NULL); */
  370. /* } */
  371. /* if(event == END_BENCH) */
  372. /* { */
  373. /* if(it < 2) */
  374. /* { */
  375. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  376. /* sc_hypervisor_ctl(p1.ctx, */
  377. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 100.0, */
  378. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 5000.0, */
  379. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  380. /* NULL); */
  381. /* } */
  382. /* if(it == 2) */
  383. /* { */
  384. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  385. /* sc_hypervisor_ctl(p1.ctx, */
  386. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 5000.0, */
  387. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 10000.0, */
  388. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  389. /* NULL); */
  390. /* } */
  391. /* it++; */
  392. /* } */
  393. /* } */
  394. }
  395. void end_contexts()
  396. {
  397. free(p1.workers);
  398. free(p2.workers);
  399. sc_hypervisor_shutdown();
  400. }
  401. void parse_args_ctx(int argc, char **argv)
  402. {
  403. init();
  404. int i;
  405. for (i = 1; i < argc; i++) {
  406. if (strcmp(argv[i], "-size1") == 0) {
  407. char *argptr;
  408. size1 = strtol(argv[++i], &argptr, 10);
  409. }
  410. if (strcmp(argv[i], "-nblocks1") == 0) {
  411. char *argptr;
  412. nblocks1 = strtol(argv[++i], &argptr, 10);
  413. }
  414. if (strcmp(argv[i], "-size2") == 0) {
  415. char *argptr;
  416. size2 = strtol(argv[++i], &argptr, 10);
  417. }
  418. if (strcmp(argv[i], "-nblocks2") == 0) {
  419. char *argptr;
  420. nblocks2 = strtol(argv[++i], &argptr, 10);
  421. }
  422. if (strcmp(argv[i], "-cpu1") == 0) {
  423. char *argptr;
  424. cpu1 = strtol(argv[++i], &argptr, 10);
  425. }
  426. if (strcmp(argv[i], "-cpu2") == 0) {
  427. char *argptr;
  428. cpu2 = strtol(argv[++i], &argptr, 10);
  429. }
  430. if (strcmp(argv[i], "-gpu") == 0) {
  431. char *argptr;
  432. gpu = strtol(argv[++i], &argptr, 10);
  433. }
  434. if (strcmp(argv[i], "-gpu1") == 0) {
  435. char *argptr;
  436. gpu1 = strtol(argv[++i], &argptr, 10);
  437. }
  438. if (strcmp(argv[i], "-gpu2") == 0) {
  439. char *argptr;
  440. gpu2 = strtol(argv[++i], &argptr, 10);
  441. }
  442. }
  443. }