/* perfmodel_history.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <dirent.h>
  19. #include <unistd.h>
  20. #include <sys/stat.h>
  21. #include <errno.h>
  22. #include <common/config.h>
  23. #include <common/utils.h>
  24. #include <core/perfmodel/perfmodel.h>
  25. #include <core/jobs.h>
  26. #include <core/workers.h>
  27. #include <pthread.h>
  28. #include <datawizard/datawizard.h>
  29. #include <core/perfmodel/regression.h>
  30. #include <common/config.h>
  31. #include <starpu_parameters.h>
  32. #ifdef STARPU_HAVE_WINDOWS
  33. #include <windows.h>
  34. #endif
/* We want more than 10% variance on X to trust regression */
/* A regression is usable only if the sampled sizes span a wide enough range
 * (minx < 90% of maxx) AND enough samples were collected. */
#define VALID_REGRESSION(reg_model) \
	((reg_model)->minx < (9*(reg_model)->maxx)/10 && (reg_model)->nsample >= _STARPU_CALIBRATION_MINIMUM)

/* Global registry of loaded performance models, protected by the rwlock. */
static pthread_rwlock_t registered_models_rwlock;
static struct starpu_model_list *registered_models = NULL;

/*
 * History based model
 */
  43. static void insert_history_entry(struct starpu_history_entry *entry, struct starpu_history_list **list, struct starpu_htbl32_node **history_ptr)
  44. {
  45. struct starpu_history_list *link;
  46. struct starpu_history_entry *old;
  47. link = (struct starpu_history_list *) malloc(sizeof(struct starpu_history_list));
  48. link->next = *list;
  49. link->entry = entry;
  50. *list = link;
  51. old = (struct starpu_history_entry *) _starpu_htbl_insert_32(history_ptr, entry->footprint, entry);
  52. /* that may fail in case there is some concurrency issue */
  53. STARPU_ASSERT(old == NULL);
  54. }
  55. static void dump_reg_model(FILE *f, struct starpu_perfmodel *model, unsigned arch, unsigned nimpl)
  56. {
  57. struct starpu_per_arch_perfmodel *per_arch_model;
  58. per_arch_model = &model->per_arch[arch][nimpl];
  59. struct starpu_regression_model *reg_model;
  60. reg_model = &per_arch_model->regression;
  61. /*
  62. * Linear Regression model
  63. */
  64. /* Unless we have enough measurements, we put NaN in the file to indicate the model is invalid */
  65. double alpha = nan(""), beta = nan("");
  66. if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  67. {
  68. if (reg_model->nsample > 1)
  69. {
  70. alpha = reg_model->alpha;
  71. beta = reg_model->beta;
  72. }
  73. }
  74. fprintf(f, "# sumlnx\tsumlnx2\t\tsumlny\t\tsumlnxlny\talpha\t\tbeta\t\tn\tminx\t\tmaxx\n");
  75. fprintf(f, "%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\t%-15lu\t%-15lu\n", reg_model->sumlnx, reg_model->sumlnx2, reg_model->sumlny, reg_model->sumlnxlny, alpha, beta, reg_model->nsample, reg_model->minx, reg_model->maxx);
  76. /*
  77. * Non-Linear Regression model
  78. */
  79. double a = nan(""), b = nan(""), c = nan("");
  80. if (model->type == STARPU_NL_REGRESSION_BASED)
  81. _starpu_regression_non_linear_power(per_arch_model->list, &a, &b, &c);
  82. fprintf(f, "# a\t\tb\t\tc\n");
  83. fprintf(f, "%-15le\t%-15le\t%-15le\n", a, b, c);
  84. }
/* Read back the regression coefficients written by dump_reg_model() and
 * recompute the validity flags for both regression variants. */
static void scan_reg_model(FILE *f, struct starpu_regression_model *reg_model)
{
	int res;

	/*
	 * Linear Regression model
	 */

	_starpu_drop_comments(f);

	/* Field order and types must match the fprintf in dump_reg_model() exactly. */
	res = fscanf(f, "%le\t%le\t%le\t%le\t%le\t%le\t%u\t%lu\t%lu\n",
		&reg_model->sumlnx, &reg_model->sumlnx2, &reg_model->sumlny,
		&reg_model->sumlnxlny, &reg_model->alpha, &reg_model->beta,
		&reg_model->nsample,
		&reg_model->minx, &reg_model->maxx);
	STARPU_ASSERT(res == 9);

	/* If any of the parameters describing the linear regression model is NaN, the model is invalid */
	unsigned invalid = (isnan(reg_model->alpha)||isnan(reg_model->beta));
	reg_model->valid = !invalid && VALID_REGRESSION(reg_model);

	/*
	 * Non-Linear Regression model
	 */

	_starpu_drop_comments(f);

	res = fscanf(f, "%le\t%le\t%le\n", &reg_model->a, &reg_model->b, &reg_model->c);
	STARPU_ASSERT(res == 3);

	/* If any of the parameters describing the non-linear regression model is NaN, the model is invalid */
	unsigned nl_invalid = (isnan(reg_model->a)||isnan(reg_model->b)||isnan(reg_model->c));
	reg_model->nl_valid = !nl_invalid && VALID_REGRESSION(reg_model);
}
/* Write one history entry as a single line; scan_history_entry() parses the
 * same layout back. */
static void dump_history_entry(FILE *f, struct starpu_history_entry *entry)
{
	/* size is cast to unsigned long so the output format does not depend on
	 * the platform's size_t width */
	fprintf(f, "%08x\t%-15lu\t%-15le\t%-15le\t%-15le\t%-15le\t%u\n", entry->footprint, (unsigned long) entry->size, entry->mean, entry->deviation, entry->sum, entry->sum2, entry->nsample);
}
/* Parse one history line written by dump_history_entry().  When `entry` is
 * NULL the values are still read (to advance the stream) but discarded. */
static void scan_history_entry(FILE *f, struct starpu_history_entry *entry)
{
	int res;

	_starpu_drop_comments(f);

	/* In case entry is NULL, we just drop these values */
	unsigned nsample;
	uint32_t footprint;
#ifdef STARPU_HAVE_WINDOWS
	unsigned size; /* in bytes */
#else
	size_t size; /* in bytes */
#endif
	double mean;
	double deviation;
	double sum;
	double sum2;

	/* Read the values from the file */
	/* The size field is scanned with "%zu" where size_t is used, and plain
	 * "%u" on Windows (no "z" modifier there) — hence the spliced literal. */
	res = fscanf(f, "%x\t%"
#ifndef STARPU_HAVE_WINDOWS
	"z"
#endif
	"u\t%le\t%le\t%le\t%le\t%u\n", &footprint, &size, &mean, &deviation, &sum, &sum2, &nsample);
	STARPU_ASSERT(res == 7);

	if (entry)
	{
		entry->footprint = footprint;
		entry->size = size;
		entry->mean = mean;
		entry->deviation = deviation;
		entry->sum = sum;
		entry->sum2 = sum2;
		entry->nsample = nsample;
	}
}
  149. static void parse_per_arch_model_file(FILE *f, struct starpu_per_arch_perfmodel *per_arch_model, unsigned scan_history)
  150. {
  151. unsigned nentries;
  152. _starpu_drop_comments(f);
  153. int res = fscanf(f, "%u\n", &nentries);
  154. STARPU_ASSERT(res == 1);
  155. scan_reg_model(f, &per_arch_model->regression);
  156. /* parse cpu entries */
  157. unsigned i;
  158. for (i = 0; i < nentries; i++)
  159. {
  160. struct starpu_history_entry *entry = NULL;
  161. if (scan_history)
  162. {
  163. entry = (struct starpu_history_entry *) malloc(sizeof(struct starpu_history_entry));
  164. STARPU_ASSERT(entry);
  165. }
  166. scan_history_entry(f, entry);
  167. /* insert the entry in the hashtable and the list structures */
  168. if (scan_history)
  169. insert_history_entry(entry, &per_arch_model->list, &per_arch_model->history);
  170. }
  171. }
  172. static void parse_arch(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, unsigned archmin, unsigned archmax, unsigned skiparch)
  173. {
  174. struct starpu_per_arch_perfmodel dummy;
  175. int nimpls, implmax, skipimpl, impl;
  176. unsigned ret, arch;
  177. for (arch = archmin; arch < archmax; arch++)
  178. {
  179. _STARPU_DEBUG("Parsing arch %u\n", arch);
  180. _starpu_drop_comments(f);
  181. ret = fscanf(f, "%d\n", &nimpls);
  182. _STARPU_DEBUG("%u implementations\n", nimpls);
  183. STARPU_ASSERT(ret == 1);
  184. implmax = STARPU_MIN(nimpls, STARPU_MAXIMPLEMENTATIONS);
  185. skipimpl = nimpls - STARPU_MAXIMPLEMENTATIONS;
  186. for (impl = 0; impl < implmax; impl++)
  187. {
  188. parse_per_arch_model_file(f, &model->per_arch[arch][impl], scan_history);
  189. }
  190. if (skipimpl > 0)
  191. {
  192. for (impl = 0; impl < skipimpl; impl++)
  193. {
  194. parse_per_arch_model_file(f, &dummy, 0);
  195. }
  196. }
  197. }
  198. if (skiparch > 0)
  199. {
  200. _starpu_drop_comments(f);
  201. for (arch = 0; arch < skiparch; arch ++)
  202. {
  203. _STARPU_DEBUG("skipping arch %u\n", arch);
  204. ret = fscanf(f, "%d\n", &nimpls);
  205. _STARPU_DEBUG("%u implementations\n", nimpls);
  206. STARPU_ASSERT(ret == 1);
  207. implmax = STARPU_MIN(nimpls, STARPU_MAXIMPLEMENTATIONS);
  208. skipimpl = nimpls - STARPU_MAXIMPLEMENTATIONS;
  209. for (impl = 0; impl < implmax; impl++)
  210. {
  211. parse_per_arch_model_file(f, &dummy, 0);
  212. }
  213. if (skipimpl > 0)
  214. {
  215. for (impl = 0; impl < skipimpl; impl++)
  216. {
  217. parse_per_arch_model_file(f, &dummy, 0);
  218. }
  219. }
  220. }
  221. }
  222. }
/* Read a whole model file: four device-class sections in fixed order
 * (CPU, CUDA, OpenCL, Gordon).  `archmin` accumulates each class's
 * compiled-in maximum so that the global arch indices line up with the
 * per_arch array layout. */
static void parse_model_file(FILE *f, struct starpu_perfmodel *model, unsigned scan_history)
{
	unsigned ret;
	unsigned archmin = 0;
	unsigned max_gordondevs = 1; /* XXX : we need a STARPU_MAXGORDONDEVS cst */
	unsigned narchs;

	/* We could probably write a clean loop here, but the code would not
	 * really be easier to read. */

	/* Parsing CPUs */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &narchs);
	STARPU_ASSERT(ret == 1);

	_STARPU_DEBUG("Parsing %u CPUs\n", narchs);
	if (narchs > 0)
	{
		/* archs beyond the compiled-in limit are present in the file but
		 * cannot be stored: parse_arch() consumes and discards them */
		parse_arch(f, model, scan_history,
			archmin,
			STARPU_MIN(narchs, STARPU_MAXCPUS),
			narchs > STARPU_MAXCPUS ? narchs - STARPU_MAXCPUS : 0);
	}

	/* Parsing CUDA devs */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &narchs);
	STARPU_ASSERT(ret == 1);
	archmin += STARPU_MAXCPUS;
	_STARPU_DEBUG("Parsing %u CUDA devices\n", narchs);
	if (narchs > 0)
	{
		parse_arch(f, model, scan_history,
			archmin,
			archmin + STARPU_MIN(narchs, STARPU_MAXCUDADEVS),
			narchs > STARPU_MAXCUDADEVS ? narchs - STARPU_MAXCUDADEVS : 0);
	}

	/* Parsing OpenCL devs */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &narchs);
	STARPU_ASSERT(ret == 1);
	archmin += STARPU_MAXCUDADEVS;
	_STARPU_DEBUG("Parsing %u OpenCL devices\n", narchs);
	if (narchs > 0)
	{
		parse_arch(f, model, scan_history,
			archmin,
			archmin + STARPU_MIN(narchs, STARPU_MAXOPENCLDEVS),
			narchs > STARPU_MAXOPENCLDEVS ? narchs - STARPU_MAXOPENCLDEVS : 0);
	}

	/* Parsing Gordon implementations */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &narchs);
	STARPU_ASSERT(ret == 1);
	archmin += STARPU_MAXOPENCLDEVS;
	_STARPU_DEBUG("Parsing %u Gordon devices\n", narchs);
	if (narchs > 0)
	{
		parse_arch(f, model, scan_history,
			archmin,
			archmin + max_gordondevs,
			narchs > max_gordondevs ? narchs - max_gordondevs : 0);
	}
}
  283. static void dump_per_arch_model_file(FILE *f, struct starpu_perfmodel *model, unsigned arch, unsigned nimpl)
  284. {
  285. struct starpu_per_arch_perfmodel *per_arch_model;
  286. per_arch_model = &model->per_arch[arch][nimpl];
  287. /* count the number of elements in the lists */
  288. struct starpu_history_list *ptr = NULL;
  289. unsigned nentries = 0;
  290. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  291. {
  292. /* Dump the list of all entries in the history */
  293. ptr = per_arch_model->list;
  294. while(ptr)
  295. {
  296. nentries++;
  297. ptr = ptr->next;
  298. }
  299. }
  300. /* header */
  301. char archname[32];
  302. starpu_perfmodel_get_arch_name((enum starpu_perf_archtype) arch, archname, 32, nimpl);
  303. fprintf(f, "# Model for %s\n", archname);
  304. fprintf(f, "# number of entries\n%u\n", nentries);
  305. dump_reg_model(f, model, arch, nimpl);
  306. /* Dump the history into the model file in case it is necessary */
  307. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  308. {
  309. fprintf(f, "# hash\t\tsize\t\tmean\t\tdev\t\tsum\t\tsum2\t\tn\n");
  310. ptr = per_arch_model->list;
  311. while (ptr)
  312. {
  313. dump_history_entry(f, ptr->entry);
  314. ptr = ptr->next;
  315. }
  316. }
  317. fprintf(f, "\n##################\n");
  318. }
  319. static unsigned get_n_entries(struct starpu_perfmodel *model, unsigned arch, unsigned impl)
  320. {
  321. struct starpu_per_arch_perfmodel *per_arch_model;
  322. per_arch_model = &model->per_arch[arch][impl];
  323. /* count the number of elements in the lists */
  324. struct starpu_history_list *ptr = NULL;
  325. unsigned nentries = 0;
  326. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  327. {
  328. /* Dump the list of all entries in the history */
  329. ptr = per_arch_model->list;
  330. while(ptr)
  331. {
  332. nentries++;
  333. ptr = ptr->next;
  334. }
  335. }
  336. return nentries;
  337. }
/* Write a complete model file: for each device class (CPU, CUDA, OpenCL,
 * Gordon) a class header with the number of archs to dump, followed by one
 * section per (arch, implementation) that has data. */
static void dump_model_file(FILE *f, struct starpu_perfmodel *model)
{
	/* number of archs to dump per device class: [CPU, CUDA, OpenCL, Gordon] */
	unsigned narch[4] = { 0, 0, 0, 0};
	unsigned arch, arch_base = 0, my_narch = 0;
	unsigned nimpl;
	unsigned idx = 0;

	/* Finding the number of archs to write for each kind of device */
	for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
	{
		switch (arch)
		{
			case STARPU_CUDA_DEFAULT:
			case STARPU_OPENCL_DEFAULT:
			case STARPU_GORDON_DEFAULT:
				/* a new device class starts at this arch index */
				arch_base = arch;
				idx++;
				break;
			default:
				break;
		}

		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			/* the arch counts as soon as any implementation has history entries;
			 * narch[idx] ends up as the highest such arch offset + 1 */
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
				if (get_n_entries(model, arch, nimpl))
				{
					narch[idx]=arch-arch_base+1;
					break;
				}
		}
		else if (model->type == STARPU_REGRESSION_BASED)
		{
			/* pure regression models count archs with at least one sample */
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
				if (model->per_arch[arch][nimpl].regression.nsample)
				{
					narch[idx]=arch-arch_base+1;
					break;
				}
		}
		else
			STARPU_ASSERT_MSG(0, "Unknown history-based performance model");
	}

	/* Writing stuff */
	char *name = "unknown";
	/* offset subtracted from the global arch index to print per-class device ids */
	unsigned substract_to_arch = 0;
	for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
	{
		switch (arch)
		{
			case STARPU_CPU_DEFAULT:
				arch_base = arch;
				name = "CPU";
				fprintf(f, "##################\n");
				fprintf(f, "# %ss\n", name);
				fprintf(f, "# maximum number of %ss\n", name);
				fprintf(f, "%u\n", my_narch = narch[0]);
				break;
			case STARPU_CUDA_DEFAULT:
				arch_base = arch;
				name = "CUDA";
				substract_to_arch = STARPU_MAXCPUS;
				fprintf(f, "##################\n");
				fprintf(f, "# %ss\n", name);
				fprintf(f, "# number of %s architectures\n", name);
				fprintf(f, "%u\n", my_narch = narch[1]);
				break;
			case STARPU_OPENCL_DEFAULT:
				arch_base = arch;
				name = "OPENCL";
				substract_to_arch += STARPU_MAXCUDADEVS;
				fprintf(f, "##################\n");
				fprintf(f, "# %ss\n", name);
				fprintf(f, "# number of %s architectures\n", name);
				fprintf(f, "%u\n", my_narch = narch[2]);
				break;
			case STARPU_GORDON_DEFAULT:
				arch_base = arch;
				name = "GORDON";
				substract_to_arch += STARPU_MAXOPENCLDEVS;
				fprintf(f, "##################\n");
				fprintf(f, "# %ss\n", name);
				fprintf(f, "# number of %s architectures\n", name);
				fprintf(f, "%u\n", my_narch = narch[3]);
				break;
			default:
				break;
		}

		/* number of implementations that actually have data for this arch */
		unsigned max_impl = 0;
		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
				if (get_n_entries(model, arch, nimpl))
					max_impl = nimpl + 1;
		}
		else if (model->type == STARPU_REGRESSION_BASED)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
				if (model->per_arch[arch][nimpl].regression.nsample)
					max_impl = nimpl + 1;
		}
		else
			STARPU_ASSERT_MSG(0, "Unknown history-based performance model");

		/* skip archs beyond the number announced in the class header */
		if (arch >= my_narch + arch_base)
			continue;

		fprintf(f, "###########\n");
		if (substract_to_arch)
			fprintf(f, "# %s_%u\n", name, arch - substract_to_arch);
		else
			/* CPU */
			fprintf(f, "# %u CPU(s) in parallel\n", arch + 1);
		fprintf(f, "# number of implementations\n");
		fprintf(f, "%u\n", max_impl);
		for (nimpl = 0; nimpl < max_impl; nimpl++)
		{
			dump_per_arch_model_file(f, model, arch, nimpl);
		}
	}
}
  455. static void initialize_per_arch_model(struct starpu_per_arch_perfmodel *per_arch_model)
  456. {
  457. per_arch_model->history = NULL;
  458. per_arch_model->list = NULL;
  459. }
  460. static void initialize_model(struct starpu_perfmodel *model)
  461. {
  462. unsigned arch;
  463. unsigned nimpl;
  464. for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
  465. {
  466. for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
  467. {
  468. initialize_per_arch_model(&model->per_arch[arch][nimpl]);
  469. }
  470. }
  471. }
  472. static void get_model_debug_path(struct starpu_perfmodel *model, const char *arch, char *path, size_t maxlen)
  473. {
  474. STARPU_ASSERT(path);
  475. _starpu_get_perf_model_dir_debug(path, maxlen);
  476. strncat(path, model->symbol, maxlen);
  477. char hostname[32];
  478. char *forced_hostname = getenv("STARPU_HOSTNAME");
  479. if (forced_hostname && forced_hostname[0])
  480. snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
  481. else
  482. gethostname(hostname, sizeof(hostname));
  483. strncat(path, ".", maxlen);
  484. strncat(path, hostname, maxlen);
  485. strncat(path, ".", maxlen);
  486. strncat(path, arch, maxlen);
  487. strncat(path, ".debug", maxlen);
  488. }
  489. /*
  490. * Returns 0 is the model was already loaded, 1 otherwise.
  491. */
  492. int _starpu_register_model(struct starpu_perfmodel *model)
  493. {
  494. /* If the model has already been loaded, there is nothing to do */
  495. _STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
  496. if (model->is_loaded)
  497. {
  498. _STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  499. return 0;
  500. }
  501. _STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  502. /* We have to make sure the model has not been loaded since the
  503. * last time we took the lock */
  504. _STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  505. if (model->is_loaded)
  506. {
  507. _STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  508. return 0;
  509. }
  510. /* add the model to a linked list */
  511. struct starpu_model_list *node = (struct starpu_model_list *) malloc(sizeof(struct starpu_model_list));
  512. node->model = model;
  513. //model->debug_modelid = debug_modelid++;
  514. /* put this model at the beginning of the list */
  515. node->next = registered_models;
  516. registered_models = node;
  517. #ifdef STARPU_MODEL_DEBUG
  518. _starpu_create_sampling_directory_if_needed();
  519. unsigned arch;
  520. unsigned nimpl;
  521. for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
  522. {
  523. for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
  524. {
  525. starpu_perfmodel_debugfilepath(model, arch, model->per_arch[arch][nimpl].debug_path, 256, nimpl);
  526. }
  527. }
  528. #endif
  529. _STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  530. return 1;
  531. }
  532. static void get_model_path(struct starpu_perfmodel *model, char *path, size_t maxlen)
  533. {
  534. _starpu_get_perf_model_dir_codelets(path, maxlen);
  535. strncat(path, model->symbol, maxlen);
  536. char hostname[32];
  537. char *forced_hostname = getenv("STARPU_HOSTNAME");
  538. if (forced_hostname && forced_hostname[0])
  539. snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
  540. else
  541. gethostname(hostname, sizeof(hostname));
  542. strncat(path, ".", maxlen);
  543. strncat(path, hostname, maxlen);
  544. }
  545. static void save_history_based_model(struct starpu_perfmodel *model)
  546. {
  547. STARPU_ASSERT(model);
  548. STARPU_ASSERT(model->symbol);
  549. /* TODO checks */
  550. /* filename = $STARPU_PERF_MODEL_DIR/codelets/symbol.hostname */
  551. char path[256];
  552. get_model_path(model, path, 256);
  553. _STARPU_DEBUG("Opening performance model file %s for model %s\n", path, model->symbol);
  554. /* overwrite existing file, or create it */
  555. FILE *f;
  556. f = fopen(path, "w+");
  557. STARPU_ASSERT(f);
  558. dump_model_file(f, model);
  559. fclose(f);
  560. }
  561. static void _starpu_dump_registered_models(void)
  562. {
  563. _STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  564. struct starpu_model_list *node;
  565. node = registered_models;
  566. _STARPU_DEBUG("DUMP MODELS !\n");
  567. while (node)
  568. {
  569. save_history_based_model(node->model);
  570. node = node->next;
  571. }
  572. _STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  573. }
/* Set up the (empty) global model registry and its lock; called once at
 * StarPU initialization, before any model can be registered. */
void _starpu_initialize_registered_performance_models(void)
{
	registered_models = NULL;

	_STARPU_PTHREAD_RWLOCK_INIT(&registered_models_rwlock, NULL);
}
/* Tear down the model registry: optionally dump calibrated models to disk,
 * then free every history entry, list node and registry node. */
void _starpu_deinitialize_registered_performance_models(void)
{
	/* flush models to disk first if this run was calibrating them */
	if (_starpu_get_calibrate_flag())
		_starpu_dump_registered_models();

	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	struct starpu_model_list *node, *pnode;
	node = registered_models;

	_STARPU_DEBUG("FREE MODELS !\n");

	while (node)
	{
		struct starpu_perfmodel *model = node->model;
		unsigned arch;
		unsigned nimpl;

		_STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
		for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
			{
				struct starpu_per_arch_perfmodel *archmodel = &model->per_arch[arch][nimpl];
				struct starpu_history_list *list, *plist;
				/* the hash table and the list share the same entries
				 * (see insert_history_entry): destroy the table without
				 * freeing them here... */
				_starpu_htbl_destroy_32(archmodel->history, NULL);
				archmodel->history = NULL;

				/* ...then free each entry exactly once while walking the list */
				list = archmodel->list;
				while (list) {
					free(list->entry);
					plist = list;
					list = list->next;
					free(plist);
				}
				archmodel->list = NULL;
			}
		}

		model->is_loaded = 0;
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

		pnode = node;
		node = node->next;
		free(pnode);
	}
	registered_models = NULL;

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	_STARPU_PTHREAD_RWLOCK_DESTROY(&registered_models_rwlock);
}
/* We first try to grab the global lock in read mode to check whether the model
 * was loaded or not (this is very likely to have been already loaded). If the
 * model was not loaded yet, we take the lock in write mode, and if the model
 * is still not loaded once we have the lock, we do load it. */
void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned scan_history)
{
	STARPU_ASSERT(model);
	STARPU_ASSERT(model->symbol);

	/* cheap read-locked check first: loading already done in most calls */
	int already_loaded;

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_loaded = model->is_loaded;
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	if (already_loaded)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test ? */
	if (model->is_loaded)
	{
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	/* the per-model lock is created here and held for the whole load */
	_STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);

	_STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

	/* make sure the performance model directory exists (or create it) */
	_starpu_create_sampling_directory_if_needed();

	char path[256];
	get_model_path(model, path, 256);

	_STARPU_DEBUG("Opening performance model file %s for model %s ...\n", path, model->symbol);

	unsigned calibrate_flag = _starpu_get_calibrate_flag();
	model->benchmarking = calibrate_flag;

	/* try to open an existing file and load it */
	int res;
	res = access(path, F_OK);
	if (res == 0)
	{
		/* calibrate_flag == 2 means "recalibrate from scratch" */
		if (calibrate_flag == 2)
		{
			/* The user specified that the performance model should
			 * be overwritten, so we don't load the existing file !
			 * */
			_STARPU_DEBUG("Overwrite existing file\n");
			initialize_model(model);
		}
		else
		{
			/* We load the available file */
			_STARPU_DEBUG("File exists\n");
			FILE *f;
			f = fopen(path, "r");
			STARPU_ASSERT(f);

			parse_model_file(f, model, scan_history);

			fclose(f);
		}
	}
	else
	{
		_STARPU_DEBUG("File does not exists\n");
		/* without a file and without calibration the model would be
		 * useless: force calibration for this run */
		if (!calibrate_flag)
		{
			_STARPU_DISP("Warning: model %s is not calibrated, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol);
			_starpu_set_calibrate_flag(1);
			model->benchmarking = 1;
		}
		initialize_model(model);
	}

	_STARPU_DEBUG("Performance model file %s for model %s is loaded\n", path, model->symbol);

	model->is_loaded = 1;

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
  693. /* This function is intended to be used by external tools that should read
  694. * the performance model files */
  695. int starpu_list_models(FILE *output)
  696. {
  697. char path[256];
  698. DIR *dp;
  699. struct dirent *ep;
  700. char perf_model_dir_codelets[256];
  701. _starpu_get_perf_model_dir_codelets(perf_model_dir_codelets, 256);
  702. strncpy(path, perf_model_dir_codelets, 256);
  703. dp = opendir(path);
  704. if (dp != NULL)
  705. {
  706. while ((ep = readdir(dp)))
  707. {
  708. if (strcmp(ep->d_name, ".") && strcmp(ep->d_name, ".."))
  709. fprintf(output, "file: <%s>\n", ep->d_name);
  710. }
  711. closedir (dp);
  712. return 0;
  713. }
  714. else
  715. {
  716. perror("Couldn't open the directory");
  717. return 1;
  718. }
  719. }
  720. /* This function is intended to be used by external tools that should read the
  721. * performance model files */
  722. int starpu_load_history_debug(const char *symbol, struct starpu_perfmodel *model)
  723. {
  724. model->symbol = strdup(symbol);
  725. /* where is the file if it exists ? */
  726. char path[256];
  727. get_model_path(model, path, 256);
  728. // _STARPU_DEBUG("get_model_path -> %s\n", path);
  729. /* does it exist ? */
  730. int res;
  731. res = access(path, F_OK);
  732. if (res)
  733. {
  734. const char *dot = strrchr(symbol, '.');
  735. if (dot)
  736. {
  737. char *symbol2 = strdup(symbol);
  738. symbol2[dot-symbol] = '\0';
  739. int ret;
  740. fprintf(stderr,"note: loading history from %s instead of %s\n", symbol2, symbol);
  741. ret = starpu_load_history_debug(symbol2,model);
  742. free(symbol2);
  743. return ret;
  744. }
  745. _STARPU_DISP("There is no performance model for symbol %s\n", symbol);
  746. return 1;
  747. }
  748. FILE *f = fopen(path, "r");
  749. STARPU_ASSERT(f);
  750. parse_model_file(f, model, 1);
  751. STARPU_ASSERT(fclose(f) == 0);
  752. return 0;
  753. }
  754. void starpu_perfmodel_get_arch_name(enum starpu_perf_archtype arch, char *archname, size_t maxlen,unsigned nimpl)
  755. {
  756. if (arch < STARPU_CUDA_DEFAULT)
  757. {
  758. if (arch == STARPU_CPU_DEFAULT)
  759. {
  760. /* NB: We could just use cpu_1 as well ... */
  761. snprintf(archname, maxlen, "cpu_impl_%u",nimpl);
  762. }
  763. else
  764. {
  765. /* For combined CPU workers */
  766. int cpu_count = arch - STARPU_CPU_DEFAULT + 1;
  767. snprintf(archname, maxlen, "cpu_%d_impl_%u", cpu_count,nimpl);
  768. }
  769. }
  770. else if ((STARPU_CUDA_DEFAULT <= arch)
  771. && (arch < STARPU_CUDA_DEFAULT + STARPU_MAXCUDADEVS))
  772. {
  773. int devid = arch - STARPU_CUDA_DEFAULT;
  774. snprintf(archname, maxlen, "cuda_%d_impl_%u", devid,nimpl);
  775. }
  776. else if ((STARPU_OPENCL_DEFAULT <= arch)
  777. && (arch < STARPU_OPENCL_DEFAULT + STARPU_MAXOPENCLDEVS))
  778. {
  779. int devid = arch - STARPU_OPENCL_DEFAULT;
  780. snprintf(archname, maxlen, "opencl_%d_impl_%u", devid,nimpl);
  781. }
  782. else if (arch == STARPU_GORDON_DEFAULT)
  783. {
  784. snprintf(archname, maxlen, "gordon_impl_%u",nimpl);
  785. }
  786. else
  787. {
  788. STARPU_ABORT();
  789. }
  790. }
  791. void starpu_perfmodel_debugfilepath(struct starpu_perfmodel *model,
  792. enum starpu_perf_archtype arch, char *path, size_t maxlen, unsigned nimpl)
  793. {
  794. char archname[32];
  795. starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);
  796. STARPU_ASSERT(path);
  797. get_model_debug_path(model, archname, path, maxlen);
  798. }
  799. double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j, unsigned nimpl)
  800. {
  801. double exp = NAN;
  802. size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
  803. struct starpu_regression_model *regmodel;
  804. regmodel = &model->per_arch[arch][nimpl].regression;
  805. if (regmodel->valid)
  806. exp = regmodel->alpha*pow((double)size, regmodel->beta);
  807. return exp;
  808. }
/* Predict the execution time of job J on ARCH/NIMPL with the non-linear
 * regression model a * size^b + c.  When the fit is not valid, or the job
 * size lies outside the calibrated range, fall back to the per-footprint
 * history entry; when even that has too few samples, force calibration mode.
 * Returns NAN when no prediction can be made. */
double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j,unsigned nimpl)
{
	double exp = NAN;
	size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
	struct starpu_regression_model *regmodel;

	regmodel = &model->per_arch[arch][nimpl].regression;

	/* Only trust the fit inside the range of sizes it was calibrated on,
	 * with a 10% margin on both ends. */
	if (regmodel->nl_valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
		exp = regmodel->a*pow((double)size, regmodel->b) + regmodel->c;
	else
	{
		/* Fall back to the exact history entry for this footprint. */
		uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
		struct starpu_per_arch_perfmodel *per_arch_model = &model->per_arch[arch][nimpl];
		struct starpu_htbl32_node *history;
		struct starpu_history_entry *entry;

		/* The history table may be updated concurrently: look it up
		 * under the model's read lock. */
		_STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
		history = per_arch_model->history;
		entry = (struct starpu_history_entry *) _starpu_htbl_search_32(history, key);
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

		if (entry && entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
			exp = entry->mean;
		else if (!model->benchmarking)
		{
			/* No usable prediction: switch this run into
			 * calibration mode so samples get collected. */
			_STARPU_DISP("Warning: model %s is not calibrated enough, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol);
			_starpu_set_calibrate_flag(1);
			model->benchmarking = 1;
		}
	}

	return exp;
}
  838. double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j,unsigned nimpl)
  839. {
  840. double exp;
  841. struct starpu_per_arch_perfmodel *per_arch_model;
  842. struct starpu_history_entry *entry;
  843. struct starpu_htbl32_node *history;
  844. uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
  845. per_arch_model = &model->per_arch[arch][nimpl];
  846. _STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
  847. history = per_arch_model->history;
  848. if (!history) {
  849. _STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
  850. return NAN;
  851. }
  852. entry = (struct starpu_history_entry *) _starpu_htbl_search_32(history, key);
  853. _STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
  854. exp = entry?entry->mean:NAN;
  855. if (entry && entry->nsample < _STARPU_CALIBRATION_MINIMUM)
  856. /* TODO: report differently if we've scheduled really enough
  857. * of that task and the scheduler should perhaps put it aside */
  858. /* Not calibrated enough */
  859. {
  860. printf("nan pt ca stupid algo nsamples %d\n", entry->nsample);
  861. exp = NAN;
  862. }
  863. if (isnan(exp) && !model->benchmarking)
  864. {
  865. _STARPU_DISP("Warning: model %s is not calibrated enough, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol);
  866. _starpu_set_calibrate_flag(1);
  867. model->benchmarking = 1;
  868. }
  869. return exp;
  870. }
/* Public wrapper: predict the execution time on ARCH for a job identified
 * only by its precomputed FOOTPRINT.  A stub job is built so the internal
 * history lookup can be reused. */
double starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, uint32_t footprint)
{
	/* Designated initializers zero-initialize the remaining members, so
	 * j.nimpl is 0: the lookup targets implementation 0's history. */
	struct _starpu_job j =
	{
		.footprint = footprint,
		.footprint_is_computed = 1,
	};
	return _starpu_history_based_job_expected_perf(model, arch, &j, j.nimpl);
}
/* Feed a MEASURED execution time (for job J on ARCH/NIMPL, run by worker
 * CPUID) back into MODEL.  Depending on the model type this updates the
 * per-footprint history table, the regression accumulators, or both; with
 * STARPU_MODEL_DEBUG it also appends a line to the per-arch debug file.
 * No-op when MODEL is NULL.  The whole update runs under the model's write
 * lock. */
void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfmodel *model, enum starpu_perf_archtype arch, unsigned cpuid STARPU_ATTRIBUTE_UNUSED, double measured, unsigned nimpl)
{
	if (model)
	{
		_STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

		struct starpu_per_arch_perfmodel *per_arch_model = &model->per_arch[arch][nimpl];

		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_history_entry *entry;
			struct starpu_htbl32_node *history;
			struct starpu_htbl32_node **history_ptr;
			struct starpu_history_list **list;
			uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);

			history = per_arch_model->history;
			history_ptr = &per_arch_model->history;
			list = &per_arch_model->list;

			entry = (struct starpu_history_entry *) _starpu_htbl_search_32(history, key);

			if (!entry)
			{
				/* this is the first entry with such a footprint */
				entry = (struct starpu_history_entry *) malloc(sizeof(struct starpu_history_entry));
				STARPU_ASSERT(entry);
				entry->mean = measured;
				entry->sum = measured;

				entry->deviation = 0.0;
				entry->sum2 = measured*measured;

				entry->size = _starpu_job_get_data_size(model, arch, nimpl, j);

				entry->footprint = key;
				entry->nsample = 1;

				insert_history_entry(entry, list, history_ptr);
			}
			else
			{
				/* there is already some entry with the same footprint:
				 * fold the new sample into the running mean/deviation. */
				entry->sum += measured;
				entry->sum2 += measured*measured;
				entry->nsample++;

				unsigned n = entry->nsample;
				entry->mean = entry->sum / n;
				/* Population standard deviation from the running sums. */
				entry->deviation = sqrt((entry->sum2 - (entry->sum*entry->sum)/n)/n);
			}
			STARPU_ASSERT(entry);
		}

		if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_regression_model *reg_model;
			reg_model = &per_arch_model->regression;

			/* update the regression model: accumulate sums in log-log
			 * space for the least-squares fit of time = alpha*size^beta. */
			size_t job_size = _starpu_job_get_data_size(model, arch, nimpl, j);
			double logy, logx;
			logx = log((double)job_size);
			logy = log(measured);

			reg_model->sumlnx += logx;
			reg_model->sumlnx2 += logx*logx;
			reg_model->sumlny += logy;
			reg_model->sumlnxlny += logx*logy;

			/* Track the calibrated size range (used as validity bounds). */
			if (reg_model->minx == 0 || job_size < reg_model->minx)
				reg_model->minx = job_size;
			if (reg_model->maxx == 0 || job_size > reg_model->maxx)
				reg_model->maxx = job_size;

			reg_model->nsample++;

			/* Closed-form least-squares slope/intercept in log space. */
			unsigned n = reg_model->nsample;

			double num = (n*reg_model->sumlnxlny - reg_model->sumlnx*reg_model->sumlny);
			double denom = (n*reg_model->sumlnx2 - reg_model->sumlnx*reg_model->sumlnx);

			reg_model->beta = num/denom;
			reg_model->alpha = exp((reg_model->sumlny - reg_model->beta*reg_model->sumlnx)/n);

			/* VALID_REGRESSION presumably checks sample count / fit
			 * quality before the model is trusted — see its definition. */
			if (VALID_REGRESSION(reg_model))
				reg_model->valid = 1;
		}

#ifdef STARPU_MODEL_DEBUG
		/* Append this sample (footprint, size, measured vs. predicted
		 * times, worker, and the data layouts) to the debug file. */
		struct starpu_task *task = j->task;
		FILE *f = fopen(per_arch_model->debug_path, "a+");
		if (f == NULL)
		{
			_STARPU_DISP("Error <%s> when opening file <%s>\n", strerror(errno), per_arch_model->debug_path);
			STARPU_ASSERT(0);
		}

		if (!j->footprint_is_computed)
			(void) _starpu_compute_buffers_footprint(model, arch, nimpl, j);

		STARPU_ASSERT(j->footprint_is_computed);

		fprintf(f, "0x%x\t%lu\t%f\t%f\t%f\t%d\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(model, arch, nimpl, j), measured, task->predicted, task->predicted_transfer, cpuid);
		unsigned i;

		for (i = 0; i < task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = task->handles[i];

			STARPU_ASSERT(handle->ops);
			STARPU_ASSERT(handle->ops->display);
			handle->ops->display(handle, f);
		}

		fprintf(f, "\n");

		fclose(f);
#endif
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
	}
}