/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2012 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "sched_ctx_utils.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#include <starpu.h>
#include "sc_hypervisor.h"

#define NSAMPLES 3
  20. unsigned size1;
  21. unsigned size2;
  22. unsigned nblocks1;
  23. unsigned nblocks2;
  24. unsigned cpu1;
  25. unsigned cpu2;
  26. unsigned gpu;
  27. unsigned gpu1;
  28. unsigned gpu2;
  29. typedef struct
  30. {
  31. unsigned id;
  32. unsigned ctx;
  33. int the_other_ctx;
  34. int *workers;
  35. int nworkers;
  36. void (*bench)(float*, unsigned, unsigned);
  37. unsigned size;
  38. unsigned nblocks;
  39. float *mat[NSAMPLES];
  40. } params;
  41. typedef struct
  42. {
  43. double flops;
  44. double avg_timing;
  45. } retvals;
  46. int first = 1;
  47. starpu_pthread_mutex_t mut;
  48. retvals rv[2];
  49. params p1, p2;
  50. int it = 0;
  51. int it2 = 0;
  52. starpu_pthread_key_t key;
  53. void init()
  54. {
  55. size1 = 4*1024;
  56. size2 = 4*1024;
  57. nblocks1 = 16;
  58. nblocks2 = 16;
  59. cpu1 = 0;
  60. cpu2 = 0;
  61. gpu = 0;
  62. gpu1 = 0;
  63. gpu2 = 0;
  64. rv[0].flops = 0.0;
  65. rv[1].flops = 0.0;
  66. rv[1].avg_timing = 0.0;
  67. rv[1].avg_timing = 0.0;
  68. p1.ctx = 0;
  69. p2.ctx = 0;
  70. p1.id = 0;
  71. p2.id = 1;
  72. starpu_pthread_key_create(&key, NULL);
  73. }
  74. void update_sched_ctx_timing_results(double flops, double avg_timing)
  75. {
  76. unsigned *id = starpu_pthread_getspecific(key);
  77. rv[*id].flops += flops;
  78. rv[*id].avg_timing += avg_timing;
  79. }
  80. void* start_bench(void *val)
  81. {
  82. params *p = (params*)val;
  83. int i;
  84. starpu_pthread_setspecific(key, &p->id);
  85. if(p->ctx != 0)
  86. starpu_sched_ctx_set_context(&p->ctx);
  87. for(i = 0; i < NSAMPLES; i++)
  88. p->bench(p->mat[i], p->size, p->nblocks);
  89. /* if(p->ctx != 0) */
  90. /* { */
  91. /* starpu_pthread_mutex_lock(&mut); */
  92. /* if(first){ */
  93. /* sc_hypervisor_unregiser_ctx(p->ctx); */
  94. /* starpu_sched_ctx_delete(p->ctx, p->the_other_ctx); */
  95. /* } */
  96. /* first = 0; */
  97. /* starpu_pthread_mutex_unlock(&mut); */
  98. /* } */
  99. sc_hypervisor_stop_resize(p->the_other_ctx);
  100. rv[p->id].flops /= NSAMPLES;
  101. rv[p->id].avg_timing /= NSAMPLES;
  102. }
  103. float* construct_matrix(unsigned size)
  104. {
  105. float *mat;
  106. starpu_malloc((void **)&mat, (size_t)size*size*sizeof(float));
  107. unsigned i,j;
  108. for (i = 0; i < size; i++)
  109. {
  110. for (j = 0; j < size; j++)
  111. {
  112. mat[j +i*size] = (1.0f/(1.0f+i+j)) + ((i == j)?1.0f*size:0.0f);
  113. /* mat[j +i*size] = ((i == j)?1.0f*size:0.0f); */
  114. }
  115. }
  116. return mat;
  117. }
  118. void start_2benchs(void (*bench)(float*, unsigned, unsigned))
  119. {
  120. p1.bench = bench;
  121. p1.size = size1;
  122. p1.nblocks = nblocks1;
  123. p2.bench = bench;
  124. p2.size = size2;
  125. p2.nblocks = nblocks2;
  126. int i;
  127. for(i = 0; i < NSAMPLES; i++)
  128. {
  129. p1.mat[i] = construct_matrix(p1.size);
  130. p2.mat[i] = construct_matrix(p2.size);
  131. }
  132. starpu_pthread_t tid[2];
  133. starpu_pthread_mutex_init(&mut, NULL);
  134. struct timeval start;
  135. struct timeval end;
  136. gettimeofday(&start, NULL);
  137. starpu_pthread_create(&tid[0], NULL, (void*)start_bench, (void*)&p1);
  138. starpu_pthread_create(&tid[1], NULL, (void*)start_bench, (void*)&p2);
  139. starpu_pthread_join(tid[0], NULL);
  140. starpu_pthread_join(tid[1], NULL);
  141. gettimeofday(&end, NULL);
  142. starpu_pthread_mutex_destroy(&mut);
  143. double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
  144. timing /= 1000000;
  145. printf("%2.2f %2.2f ", rv[0].flops, rv[1].flops);
  146. printf("%2.2f %2.2f %2.2f\n", rv[0].avg_timing, rv[1].avg_timing, timing);
  147. }
  148. void start_1stbench(void (*bench)(float*, unsigned, unsigned))
  149. {
  150. p1.bench = bench;
  151. p1.size = size1;
  152. p1.nblocks = nblocks1;
  153. int i;
  154. for(i = 0; i < NSAMPLES; i++)
  155. {
  156. p1.mat[i] = construct_matrix(p1.size);
  157. }
  158. struct timeval start;
  159. struct timeval end;
  160. gettimeofday(&start, NULL);
  161. start_bench((void*)&p1);
  162. gettimeofday(&end, NULL);
  163. starpu_pthread_mutex_destroy(&mut);
  164. double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
  165. timing /= 1000000;
  166. printf("%2.2f ", rv[0].flops);
  167. printf("%2.2f %2.2f\n", rv[0].avg_timing, timing);
  168. }
  169. void start_2ndbench(void (*bench)(float*, unsigned, unsigned))
  170. {
  171. p2.bench = bench;
  172. p2.size = size2;
  173. p2.nblocks = nblocks2;
  174. int i;
  175. for(i = 0; i < NSAMPLES; i++)
  176. {
  177. p2.mat[i] = construct_matrix(p2.size);
  178. }
  179. struct timeval start;
  180. struct timeval end;
  181. gettimeofday(&start, NULL);
  182. start_bench((void*)&p2);
  183. gettimeofday(&end, NULL);
  184. starpu_pthread_mutex_destroy(&mut);
  185. double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
  186. timing /= 1000000;
  187. printf("%2.2f ", rv[1].flops);
  188. printf("%2.2f %2.2f\n", rv[1].avg_timing, timing);
  189. }
  190. void construct_contexts(void (*bench)(float*, unsigned, unsigned))
  191. {
  192. struct sc_hypervisor_policy policy;
  193. policy.custom = 0;
  194. policy.name = "idle";
  195. void *perf_counters = sc_hypervisor_init(&policy);
  196. int nworkers1 = cpu1 + gpu + gpu1;
  197. int nworkers2 = cpu2 + gpu + gpu2;
  198. unsigned n_all_gpus = gpu + gpu1 + gpu2;
  199. int i;
  200. int k = 0;
  201. nworkers1 = 12;
  202. p1.workers = (int*)malloc(nworkers1*sizeof(int));
  203. /* for(i = 0; i < gpu; i++) */
  204. /* p1.workers[k++] = i; */
  205. /* for(i = gpu; i < gpu + gpu1; i++) */
  206. /* p1.workers[k++] = i; */
  207. /* for(i = n_all_gpus; i < n_all_gpus + cpu1; i++) */
  208. /* p1.workers[k++] = i; */
  209. for(i = 0; i < 12; i++)
  210. p1.workers[i] = i;
  211. p1.ctx = starpu_sched_ctx_create(p1.workers, nworkers1, "sched_ctx1", STARPU_SCHED_CTX_POLICY_NAME, "heft", 0);
  212. starpu_sched_ctx_set_perf_counters(p1.ctx, perf_counters);
  213. p2.the_other_ctx = (int)p1.ctx;
  214. p1.nworkers = nworkers1;
  215. sc_hypervisor_register_ctx(p1.ctx, 0.0);
  216. /* sc_hypervisor_ctl(p1.ctx, */
  217. /* SC_HYPERVISOR_MAX_IDLE, p1.workers, p1.nworkers, 5000.0, */
  218. /* SC_HYPERVISOR_MAX_IDLE, p1.workers, gpu+gpu1, 100000.0, */
  219. /* SC_HYPERVISOR_EMPTY_CTX_MAX_IDLE, p1.workers, p1.nworkers, 500000.0, */
  220. /* SC_HYPERVISOR_GRANULARITY, 2, */
  221. /* SC_HYPERVISOR_MIN_TASKS, 1000, */
  222. /* SC_HYPERVISOR_NEW_WORKERS_MAX_IDLE, 100000.0, */
  223. /* SC_HYPERVISOR_MIN_WORKERS, 6, */
  224. /* SC_HYPERVISOR_MAX_WORKERS, 12, */
  225. /* NULL); */
  226. sc_hypervisor_ctl(p1.ctx,
  227. SC_HYPERVISOR_GRANULARITY, 2,
  228. SC_HYPERVISOR_MIN_TASKS, 1000,
  229. SC_HYPERVISOR_MIN_WORKERS, 6,
  230. SC_HYPERVISOR_MAX_WORKERS, 12,
  231. NULL);
  232. k = 0;
  233. p2.workers = (int*)malloc(nworkers2*sizeof(int));
  234. /* for(i = 0; i < gpu; i++) */
  235. /* p2.workers[k++] = i; */
  236. /* for(i = gpu + gpu1; i < gpu + gpu1 + gpu2; i++) */
  237. /* p2.workers[k++] = i; */
  238. /* for(i = n_all_gpus + cpu1; i < n_all_gpus + cpu1 + cpu2; i++) */
  239. /* p2.workers[k++] = i; */
  240. p2.ctx = starpu_sched_ctx_create(p2.workers, 0, "sched_ctx2", STARPU_SCHED_CTX_POLICY_NAME, "heft", 0);
  241. starpu_sched_ctx_set_perf_counters(p2.ctx, perf_counters);
  242. p1.the_other_ctx = (int)p2.ctx;
  243. p2.nworkers = 0;
  244. sc_hypervisor_register_ctx(p2.ctx, 0.0);
  245. /* sc_hypervisor_ctl(p2.ctx, */
  246. /* SC_HYPERVISOR_MAX_IDLE, p2.workers, p2.nworkers, 2000.0, */
  247. /* SC_HYPERVISOR_MAX_IDLE, p2.workers, gpu+gpu2, 5000.0, */
  248. /* SC_HYPERVISOR_EMPTY_CTX_MAX_IDLE, p1.workers, p1.nworkers, 500000.0, */
  249. /* SC_HYPERVISOR_GRANULARITY, 2, */
  250. /* SC_HYPERVISOR_MIN_TASKS, 500, */
  251. /* SC_HYPERVISOR_NEW_WORKERS_MAX_IDLE, 1000.0, */
  252. /* SC_HYPERVISOR_MIN_WORKERS, 4, */
  253. /* SC_HYPERVISOR_MAX_WORKERS, 8, */
  254. /* NULL); */
  255. sc_hypervisor_ctl(p2.ctx,
  256. SC_HYPERVISOR_GRANULARITY, 2,
  257. SC_HYPERVISOR_MIN_TASKS, 500,
  258. SC_HYPERVISOR_MIN_WORKERS, 0,
  259. SC_HYPERVISOR_MAX_WORKERS, 6,
  260. NULL);
  261. }
  262. void set_hypervisor_conf(int event, int task_tag)
  263. {
  264. /* unsigned *id = starpu_pthread_getspecific(key); */
  265. /* if(*id == 0) */
  266. /* { */
  267. /* if(event == END_BENCH) */
  268. /* { */
  269. /* if(it < 2) */
  270. /* { */
  271. /* sc_hypervisor_ctl(p2.ctx, */
  272. /* SC_HYPERVISOR_MIN_WORKERS, 2, */
  273. /* SC_HYPERVISOR_MAX_WORKERS, 4, */
  274. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  275. /* NULL); */
  276. /* printf("%d: set max %d for tag %d\n", p2.ctx, 4, task_tag); */
  277. /* sc_hypervisor_ctl(p1.ctx, */
  278. /* SC_HYPERVISOR_MIN_WORKERS, 6, */
  279. /* SC_HYPERVISOR_MAX_WORKERS, 8, */
  280. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  281. /* NULL); */
  282. /* printf("%d: set max %d for tag %d\n", p1.ctx, 8, task_tag); */
  283. /* sc_hypervisor_resize(p1.ctx, task_tag); */
  284. /* } */
  285. /* if(it == 2) */
  286. /* { */
  287. /* sc_hypervisor_ctl(p2.ctx, */
  288. /* SC_HYPERVISOR_MIN_WORKERS, 12, */
  289. /* SC_HYPERVISOR_MAX_WORKERS, 12, */
  290. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  291. /* NULL); */
  292. /* printf("%d: set max %d for tag %d\n", p2.ctx, 12, task_tag); */
  293. /* sc_hypervisor_ctl(p1.ctx, */
  294. /* SC_HYPERVISOR_MIN_WORKERS, 0, */
  295. /* SC_HYPERVISOR_MAX_WORKERS, 0, */
  296. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  297. /* NULL); */
  298. /* printf("%d: set max %d for tag %d\n", p1.ctx, 0, task_tag); */
  299. /* sc_hypervisor_resize(p1.ctx, task_tag); */
  300. /* } */
  301. /* it++; */
  302. /* } */
  303. /* } */
  304. /* else */
  305. /* { */
  306. /* if(event == END_BENCH) */
  307. /* { */
  308. /* if(it2 < 3) */
  309. /* { */
  310. /* sc_hypervisor_ctl(p1.ctx, */
  311. /* SC_HYPERVISOR_MIN_WORKERS, 6, */
  312. /* SC_HYPERVISOR_MAX_WORKERS, 12, */
  313. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  314. /* NULL); */
  315. /* printf("%d: set max %d for tag %d\n", p1.ctx, 12, task_tag); */
  316. /* sc_hypervisor_ctl(p2.ctx, */
  317. /* SC_HYPERVISOR_MIN_WORKERS, 0, */
  318. /* SC_HYPERVISOR_MAX_WORKERS, 0, */
  319. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  320. /* NULL); */
  321. /* printf("%d: set max %d for tag %d\n", p2.ctx, 0, task_tag); */
  322. /* sc_hypervisor_resize(p2.ctx, task_tag); */
  323. /* } */
  324. /* it2++; */
  325. /* } */
  326. /* } */
  327. /* if(*id == 1) */
  328. /* { */
  329. /* if(event == START_BENCH) */
  330. /* { */
  331. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  332. /* sc_hypervisor_ctl(p1.ctx, */
  333. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 800000.0, */
  334. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  335. /* NULL); */
  336. /* } */
  337. /* else */
  338. /* { */
  339. /* if(it2 < 2) */
  340. /* { */
  341. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  342. /* sc_hypervisor_ctl(p2.ctx, */
  343. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 500.0, */
  344. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 200.0, */
  345. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  346. /* NULL); */
  347. /* } */
  348. /* if(it2 == 2) */
  349. /* { */
  350. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  351. /* sc_hypervisor_ctl(p2.ctx, */
  352. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 1000.0, */
  353. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 500.0, */
  354. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  355. /* SC_HYPERVISOR_MAX_WORKERS, 12, */
  356. /* NULL); */
  357. /* } */
  358. /* it2++; */
  359. /* } */
  360. /* } else { */
  361. /* if(event == START_BENCH) */
  362. /* { */
  363. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  364. /* sc_hypervisor_ctl(p1.ctx, */
  365. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 1500.0, */
  366. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 4000.0, */
  367. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  368. /* NULL); */
  369. /* } */
  370. /* if(event == END_BENCH) */
  371. /* { */
  372. /* if(it < 2) */
  373. /* { */
  374. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  375. /* sc_hypervisor_ctl(p1.ctx, */
  376. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 100.0, */
  377. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 5000.0, */
  378. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  379. /* NULL); */
  380. /* } */
  381. /* if(it == 2) */
  382. /* { */
  383. /* int workers[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; */
  384. /* sc_hypervisor_ctl(p1.ctx, */
  385. /* SC_HYPERVISOR_MAX_IDLE, workers, 12, 5000.0, */
  386. /* SC_HYPERVISOR_MAX_IDLE, workers, 3, 10000.0, */
  387. /* SC_HYPERVISOR_TIME_TO_APPLY, task_tag, */
  388. /* NULL); */
  389. /* } */
  390. /* it++; */
  391. /* } */
  392. /* } */
  393. }
  394. void end_contexts()
  395. {
  396. free(p1.workers);
  397. free(p2.workers);
  398. sc_hypervisor_shutdown();
  399. }
  400. void parse_args_ctx(int argc, char **argv)
  401. {
  402. init();
  403. int i;
  404. for (i = 1; i < argc; i++) {
  405. if (strcmp(argv[i], "-size1") == 0) {
  406. char *argptr;
  407. size1 = strtol(argv[++i], &argptr, 10);
  408. }
  409. if (strcmp(argv[i], "-nblocks1") == 0) {
  410. char *argptr;
  411. nblocks1 = strtol(argv[++i], &argptr, 10);
  412. }
  413. if (strcmp(argv[i], "-size2") == 0) {
  414. char *argptr;
  415. size2 = strtol(argv[++i], &argptr, 10);
  416. }
  417. if (strcmp(argv[i], "-nblocks2") == 0) {
  418. char *argptr;
  419. nblocks2 = strtol(argv[++i], &argptr, 10);
  420. }
  421. if (strcmp(argv[i], "-cpu1") == 0) {
  422. char *argptr;
  423. cpu1 = strtol(argv[++i], &argptr, 10);
  424. }
  425. if (strcmp(argv[i], "-cpu2") == 0) {
  426. char *argptr;
  427. cpu2 = strtol(argv[++i], &argptr, 10);
  428. }
  429. if (strcmp(argv[i], "-gpu") == 0) {
  430. char *argptr;
  431. gpu = strtol(argv[++i], &argptr, 10);
  432. }
  433. if (strcmp(argv[i], "-gpu1") == 0) {
  434. char *argptr;
  435. gpu1 = strtol(argv[++i], &argptr, 10);
  436. }
  437. if (strcmp(argv[i], "-gpu2") == 0) {
  438. char *argptr;
  439. gpu2 = strtol(argv[++i], &argptr, 10);
  440. }
  441. }
  442. }