perfmodel_history.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012  Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012  Centre National de la Recherche Scientifique
 * Copyright (C) 2011  Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <dirent.h>
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/perfmodel/perfmodel.h>
#include <core/jobs.h>
#include <core/workers.h>
#include <pthread.h>
#include <datawizard/datawizard.h>
#include <core/perfmodel/regression.h>
#include <starpu_parameters.h>
#include <common/uthash.h>
#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#define HASH_ADD_UINT32_T(head,field,add) HASH_ADD(hh,head,field,sizeof(uint32_t),add)
#define HASH_FIND_UINT32_T(head,find,out) HASH_FIND(hh,head,find,sizeof(uint32_t),out)
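
/* Entry of the per-arch hash table: maps a 32-bit buffer footprint to the
 * corresponding history entry, using uthash (hence the hh handle). */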
struct starpu_perfmodel_history_table
{
	UT_hash_handle hh;
	uint32_t footprint;
	struct starpu_perfmodel_history_entry *history_entry;
};
/* We only trust the regression if the measured sizes (x) span a wide enough
 * range (minx at least 10% below maxx) and we have enough samples. */
#define VALID_REGRESSION(reg_model) \
	((reg_model)->minx < (9*(reg_model)->maxx)/10 && (reg_model)->nsample >= _STARPU_CALIBRATION_MINIMUM)
static _starpu_pthread_rwlock_t registered_models_rwlock;
static struct _starpu_perfmodel_list *registered_models = NULL;

size_t _starpu_job_get_data_size(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, unsigned nimpl, struct _starpu_job *j)
{
	struct starpu_task *task = j->task;

	if (model && model->per_arch[arch][nimpl].size_base)
	{
		return model->per_arch[arch][nimpl].size_base(task, arch, nimpl);
	}
	else if (model && model->size_base)
	{
		return model->size_base(task, nimpl);
	}
	else
	{
		unsigned nbuffers = task->cl->nbuffers;
		size_t size = 0;
		unsigned buffer;
		for (buffer = 0; buffer < nbuffers; buffer++)
		{
			starpu_data_handle_t handle = task->handles[buffer];
			size += _starpu_data_get_size(handle);
		}
		return size;
	}
}
/*
 * History based model
 */
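
/* Each per-arch model keeps the same history entries in two structures: a
 * linked list, used when dumping or rebuilding the whole history, and a
 * uthash table keyed by footprint, used for constant-time lookup on the
 * scheduling hot path. insert_history_entry() keeps both in sync. */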
static void insert_history_entry(struct starpu_perfmodel_history_entry *entry, struct starpu_perfmodel_history_list **list, struct starpu_perfmodel_history_table **history_ptr)
{
	struct starpu_perfmodel_history_list *link;
	struct starpu_perfmodel_history_table *table;

	link = (struct starpu_perfmodel_history_list *) malloc(sizeof(struct starpu_perfmodel_history_list));
	link->next = *list;
	link->entry = entry;
	*list = link;

	/* detect concurrency issue */
	//HASH_FIND_UINT32_T(*history_ptr, &entry->footprint, table);
	//STARPU_ASSERT(table == NULL);

	table = (struct starpu_perfmodel_history_table*) malloc(sizeof(*table));
	STARPU_ASSERT(table != NULL);
	table->footprint = entry->footprint;
	table->history_entry = entry;
	HASH_ADD_UINT32_T(*history_ptr, footprint, table);
}
static void dump_reg_model(FILE *f, struct starpu_perfmodel *model, unsigned arch, unsigned nimpl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;
	per_arch_model = &model->per_arch[arch][nimpl];
	struct starpu_perfmodel_regression_model *reg_model;
	reg_model = &per_arch_model->regression;

	/*
	 * Linear Regression model
	 */

	/* Unless we have enough measurements, we put NaN in the file to indicate the model is invalid */
	double alpha = nan(""), beta = nan("");
	if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		if (reg_model->nsample > 1)
		{
			alpha = reg_model->alpha;
			beta = reg_model->beta;
		}
	}

	fprintf(f, "# sumlnx\tsumlnx2\t\tsumlny\t\tsumlnxlny\talpha\t\tbeta\t\tn\tminx\t\tmaxx\n");
	fprintf(f, "%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\t%-15lu\t%-15lu\n", reg_model->sumlnx, reg_model->sumlnx2, reg_model->sumlny, reg_model->sumlnxlny, alpha, beta, reg_model->nsample, reg_model->minx, reg_model->maxx);

	/*
	 * Non-Linear Regression model
	 */
	double a = nan(""), b = nan(""), c = nan("");
	if (model->type == STARPU_NL_REGRESSION_BASED)
		_starpu_regression_non_linear_power(per_arch_model->list, &a, &b, &c);

	fprintf(f, "# a\t\tb\t\tc\n");
	fprintf(f, "%-15le\t%-15le\t%-15le\n", a, b, c);
}
static void scan_reg_model(FILE *f, struct starpu_perfmodel_regression_model *reg_model)
{
	int res;

	/*
	 * Linear Regression model
	 */
	_starpu_drop_comments(f);
	res = fscanf(f, "%le\t%le\t%le\t%le\t%le\t%le\t%u\t%lu\t%lu\n",
		&reg_model->sumlnx, &reg_model->sumlnx2, &reg_model->sumlny,
		&reg_model->sumlnxlny, &reg_model->alpha, &reg_model->beta,
		&reg_model->nsample,
		&reg_model->minx, &reg_model->maxx);
	STARPU_ASSERT_MSG(res == 9, "Incorrect performance model file");

	/* If any of the parameters describing the linear regression model is NaN, the model is invalid */
	unsigned invalid = (isnan(reg_model->alpha)||isnan(reg_model->beta));
	reg_model->valid = !invalid && VALID_REGRESSION(reg_model);

	/*
	 * Non-Linear Regression model
	 */
	_starpu_drop_comments(f);
	res = fscanf(f, "%le\t%le\t%le\n", &reg_model->a, &reg_model->b, &reg_model->c);
	STARPU_ASSERT_MSG(res == 3, "Incorrect performance model file");

	/* If any of the parameters describing the non-linear regression model is NaN, the model is invalid */
	unsigned nl_invalid = (isnan(reg_model->a)||isnan(reg_model->b)||isnan(reg_model->c));
	reg_model->nl_valid = !nl_invalid && VALID_REGRESSION(reg_model);
}
static void dump_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
{
	fprintf(f, "%08x\t%-15lu\t%-15le\t%-15le\t%-15le\t%-15le\t%u\n", entry->footprint, (unsigned long) entry->size, entry->mean, entry->deviation, entry->sum, entry->sum2, entry->nsample);
}

static void scan_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
{
	int res;

	_starpu_drop_comments(f);

	/* In case entry is NULL, we just drop these values */
	unsigned nsample;
	uint32_t footprint;
#ifdef STARPU_HAVE_WINDOWS
	unsigned size; /* in bytes */
#else
	size_t size; /* in bytes */
#endif
	double mean;
	double deviation;
	double sum;
	double sum2;

	/* Read the values from the file */
	res = fscanf(f, "%x\t%"
#ifndef STARPU_HAVE_WINDOWS
	"z"
#endif
	"u\t%le\t%le\t%le\t%le\t%u\n", &footprint, &size, &mean, &deviation, &sum, &sum2, &nsample);
	STARPU_ASSERT_MSG(res == 7, "Incorrect performance model file");

	if (entry)
	{
		entry->footprint = footprint;
		entry->size = size;
		entry->mean = mean;
		entry->deviation = deviation;
		entry->sum = sum;
		entry->sum2 = sum2;
		entry->nsample = nsample;
	}
}
static void parse_per_arch_model_file(FILE *f, struct starpu_perfmodel_per_arch *per_arch_model, unsigned scan_history)
{
	unsigned nentries;

	_starpu_drop_comments(f);

	int res = fscanf(f, "%u\n", &nentries);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");

	scan_reg_model(f, &per_arch_model->regression);

	/* parse the history entries for this architecture */
	unsigned i;
	for (i = 0; i < nentries; i++)
	{
		struct starpu_perfmodel_history_entry *entry = NULL;
		if (scan_history)
		{
			entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
			STARPU_ASSERT(entry);
		}

		scan_history_entry(f, entry);

		/* insert the entry in the hashtable and the list structures */
		if (scan_history)
			insert_history_entry(entry, &per_arch_model->list, &per_arch_model->history);
	}
}
static void parse_arch(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, unsigned archmin, unsigned archmax, unsigned skiparch)
{
	struct starpu_perfmodel_per_arch dummy;
	int nimpls, implmax, skipimpl, impl;
	unsigned ret, arch;

	for (arch = archmin; arch < archmax; arch++)
	{
		_STARPU_DEBUG("Parsing arch %u\n", arch);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\n", &nimpls);
		_STARPU_DEBUG("%d implementations\n", nimpls);
		STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
		implmax = STARPU_MIN(nimpls, STARPU_MAXIMPLEMENTATIONS);
		skipimpl = nimpls - STARPU_MAXIMPLEMENTATIONS;
		for (impl = 0; impl < implmax; impl++)
		{
			parse_per_arch_model_file(f, &model->per_arch[arch][impl], scan_history);
		}
		if (skipimpl > 0)
		{
			for (impl = 0; impl < skipimpl; impl++)
			{
				parse_per_arch_model_file(f, &dummy, 0);
			}
		}
	}

	if (skiparch > 0)
	{
		_starpu_drop_comments(f);
		for (arch = 0; arch < skiparch; arch++)
		{
			_STARPU_DEBUG("skipping arch %u\n", arch);
			ret = fscanf(f, "%d\n", &nimpls);
			_STARPU_DEBUG("%d implementations\n", nimpls);
			STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
			implmax = STARPU_MIN(nimpls, STARPU_MAXIMPLEMENTATIONS);
			skipimpl = nimpls - STARPU_MAXIMPLEMENTATIONS;
			for (impl = 0; impl < implmax; impl++)
			{
				parse_per_arch_model_file(f, &dummy, 0);
			}
			if (skipimpl > 0)
			{
				for (impl = 0; impl < skipimpl; impl++)
				{
					parse_per_arch_model_file(f, &dummy, 0);
				}
			}
		}
	}
}
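
/* The on-disk model file lists the device kinds in a fixed order: CPUs
 * first, then CUDA, OpenCL and Gordon devices. For each kind, a count of
 * architectures is followed by one per-arch section per implementation.
 * parse_model_file() below walks them in that same order, skipping any
 * architectures beyond the compiled-in maxima. */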
static void parse_model_file(FILE *f, struct starpu_perfmodel *model, unsigned scan_history)
{
	unsigned ret;
	unsigned archmin = 0;
	unsigned narchs;

	/* We could probably write a clean loop here, but the code would not
	 * really be easier to read. */

	/* Parsing CPUs */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &narchs);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	_STARPU_DEBUG("Parsing %u CPUs\n", narchs);
	if (narchs > 0)
	{
		parse_arch(f, model, scan_history,
			archmin,
			STARPU_MIN(narchs, STARPU_MAXCPUS),
			narchs > STARPU_MAXCPUS ? narchs - STARPU_MAXCPUS : 0);
	}

	/* Parsing CUDA devices */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &narchs);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	archmin += STARPU_MAXCPUS;
	_STARPU_DEBUG("Parsing %u CUDA devices\n", narchs);
	if (narchs > 0)
	{
		parse_arch(f, model, scan_history,
			archmin,
			archmin + STARPU_MIN(narchs, STARPU_MAXCUDADEVS),
			narchs > STARPU_MAXCUDADEVS ? narchs - STARPU_MAXCUDADEVS : 0);
	}

	/* Parsing OpenCL devices */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &narchs);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	archmin += STARPU_MAXCUDADEVS;
	_STARPU_DEBUG("Parsing %u OpenCL devices\n", narchs);
	if (narchs > 0)
	{
		parse_arch(f, model, scan_history,
			archmin,
			archmin + STARPU_MIN(narchs, STARPU_MAXOPENCLDEVS),
			narchs > STARPU_MAXOPENCLDEVS ? narchs - STARPU_MAXOPENCLDEVS : 0);
	}

	/* Parsing Gordon devices */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &narchs);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	archmin += STARPU_MAXOPENCLDEVS;
	_STARPU_DEBUG("Parsing %u Gordon devices\n", narchs);
	if (narchs > 0)
	{
		parse_arch(f, model, scan_history,
			archmin,
			archmin + STARPU_MIN(narchs, STARPU_MAXGORDONDEVS),
			narchs > STARPU_MAXGORDONDEVS ? narchs - STARPU_MAXGORDONDEVS : 0);
	}
}
static void dump_per_arch_model_file(FILE *f, struct starpu_perfmodel *model, unsigned arch, unsigned nimpl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;
	per_arch_model = &model->per_arch[arch][nimpl];

	/* count the number of elements in the history list */
	struct starpu_perfmodel_history_list *ptr = NULL;
	unsigned nentries = 0;

	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		/* Count the entries in the history */
		ptr = per_arch_model->list;
		while (ptr)
		{
			nentries++;
			ptr = ptr->next;
		}
	}

	/* header */
	char archname[32];
	starpu_perfmodel_get_arch_name((enum starpu_perf_archtype) arch, archname, 32, nimpl);
	fprintf(f, "# Model for %s\n", archname);
	fprintf(f, "# number of entries\n%u\n", nentries);

	dump_reg_model(f, model, arch, nimpl);

	/* Dump the history into the model file if necessary */
	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		fprintf(f, "# hash\t\tsize\t\tmean\t\tdev\t\tsum\t\tsum2\t\tn\n");
		ptr = per_arch_model->list;
		while (ptr)
		{
			dump_history_entry(f, ptr->entry);
			ptr = ptr->next;
		}
	}

	fprintf(f, "\n##################\n");
}
static unsigned get_n_entries(struct starpu_perfmodel *model, unsigned arch, unsigned impl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;
	per_arch_model = &model->per_arch[arch][impl];

	/* count the number of elements in the history list */
	struct starpu_perfmodel_history_list *ptr = NULL;
	unsigned nentries = 0;

	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		ptr = per_arch_model->list;
		while (ptr)
		{
			nentries++;
			ptr = ptr->next;
		}
	}
	return nentries;
}
static void dump_model_file(FILE *f, struct starpu_perfmodel *model)
{
	unsigned narch[4] = { 0, 0, 0, 0 };
	unsigned arch, arch_base = 0, my_narch = 0;
	unsigned nimpl;
	unsigned idx = 0;

	/* Find the number of archs to write for each kind of device */
	for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
	{
		switch (arch)
		{
			case STARPU_CUDA_DEFAULT:
			case STARPU_OPENCL_DEFAULT:
			case STARPU_GORDON_DEFAULT:
				arch_base = arch;
				idx++;
				break;
			default:
				break;
		}

		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
				if (get_n_entries(model, arch, nimpl))
				{
					narch[idx] = arch - arch_base + 1;
					break;
				}
		}
		else if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_PER_ARCH || model->type == STARPU_COMMON)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
				if (model->per_arch[arch][nimpl].regression.nsample)
				{
					narch[idx] = arch - arch_base + 1;
					break;
				}
		}
		else
		{
			STARPU_ASSERT_MSG(0, "Unknown performance model type %d", model->type);
		}
	}

	/* Write the per-arch sections */
	const char *name = "unknown";
	unsigned subtract_to_arch = 0;
	for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
	{
		switch (arch)
		{
			case STARPU_CPU_DEFAULT:
				arch_base = arch;
				name = "CPU";
				fprintf(f, "##################\n");
				fprintf(f, "# %ss\n", name);
				fprintf(f, "# maximum number of %ss\n", name);
				fprintf(f, "%u\n", my_narch = narch[0]);
				break;
			case STARPU_CUDA_DEFAULT:
				arch_base = arch;
				name = "CUDA";
				subtract_to_arch = STARPU_MAXCPUS;
				fprintf(f, "##################\n");
				fprintf(f, "# %ss\n", name);
				fprintf(f, "# number of %s architectures\n", name);
				fprintf(f, "%u\n", my_narch = narch[1]);
				break;
			case STARPU_OPENCL_DEFAULT:
				arch_base = arch;
				name = "OPENCL";
				subtract_to_arch += STARPU_MAXCUDADEVS;
				fprintf(f, "##################\n");
				fprintf(f, "# %ss\n", name);
				fprintf(f, "# number of %s architectures\n", name);
				fprintf(f, "%u\n", my_narch = narch[2]);
				break;
			case STARPU_GORDON_DEFAULT:
				arch_base = arch;
				name = "GORDON";
				subtract_to_arch += STARPU_MAXOPENCLDEVS;
				fprintf(f, "##################\n");
				fprintf(f, "# %ss\n", name);
				fprintf(f, "# number of %s architectures\n", name);
				fprintf(f, "%u\n", my_narch = narch[3]);
				break;
			default:
				break;
		}

		unsigned max_impl = 0;
		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
				if (get_n_entries(model, arch, nimpl))
					max_impl = nimpl + 1;
		}
		else if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_PER_ARCH || model->type == STARPU_COMMON)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
				if (model->per_arch[arch][nimpl].regression.nsample)
					max_impl = nimpl + 1;
		}
		else
			STARPU_ASSERT_MSG(0, "Unknown performance model type %d", model->type);

		if (arch >= my_narch + arch_base)
			continue;

		fprintf(f, "###########\n");
		if (subtract_to_arch)
			fprintf(f, "# %s_%u\n", name, arch - subtract_to_arch);
		else
			/* CPU */
			fprintf(f, "# %u CPU(s) in parallel\n", arch + 1);
		fprintf(f, "# number of implementations\n");
		fprintf(f, "%u\n", max_impl);
		for (nimpl = 0; nimpl < max_impl; nimpl++)
		{
			dump_per_arch_model_file(f, model, arch, nimpl);
		}
	}
}
static void initialize_per_arch_model(struct starpu_perfmodel_per_arch *per_arch_model)
{
	per_arch_model->history = NULL;
	per_arch_model->list = NULL;
	per_arch_model->regression.nsample = 0;
	per_arch_model->regression.valid = 0;
	per_arch_model->regression.nl_valid = 0;
}

static void initialize_model(struct starpu_perfmodel *model)
{
	unsigned arch;
	unsigned nimpl;
	for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
	{
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			initialize_per_arch_model(&model->per_arch[arch][nimpl]);
		}
	}
}
static void get_model_debug_path(struct starpu_perfmodel *model, const char *arch, char *path, size_t maxlen)
{
	STARPU_ASSERT(path);

	_starpu_get_perf_model_dir_debug(path, maxlen);
	strncat(path, model->symbol, maxlen);

	char hostname[32];
	char *forced_hostname = getenv("STARPU_HOSTNAME");
	if (forced_hostname && forced_hostname[0])
		snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
	else
		gethostname(hostname, sizeof(hostname));
	strncat(path, ".", maxlen);
	strncat(path, hostname, maxlen);
	strncat(path, ".", maxlen);
	strncat(path, arch, maxlen);
	strncat(path, ".debug", maxlen);
}
/*
 * Returns 0 if the model was already loaded, 1 otherwise. Like the
 * _starpu_load_*_model functions below, this first checks is_loaded under
 * the read lock, then re-checks under the write lock before registering.
 */
int _starpu_register_model(struct starpu_perfmodel *model)
{
	/* If the model has already been loaded, there is nothing to do */
	_STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	if (model->is_loaded)
	{
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return 0;
	}
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	/* We have to make sure the model has not been loaded since the
	 * last time we took the lock */
	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
	if (model->is_loaded)
	{
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return 0;
	}

	/* add the model to a linked list */
	struct _starpu_perfmodel_list *node = (struct _starpu_perfmodel_list *) malloc(sizeof(struct _starpu_perfmodel_list));

	node->model = model;
	//model->debug_modelid = debug_modelid++;

	/* put this model at the beginning of the list */
	node->next = registered_models;
	registered_models = node;

#ifdef STARPU_MODEL_DEBUG
	_starpu_create_sampling_directory_if_needed();

	unsigned arch;
	unsigned nimpl;
	for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
	{
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			starpu_perfmodel_debugfilepath(model, arch, model->per_arch[arch][nimpl].debug_path, 256, nimpl);
		}
	}
#endif

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	return 1;
}
static void get_model_path(struct starpu_perfmodel *model, char *path, size_t maxlen)
{
	_starpu_get_perf_model_dir_codelets(path, maxlen);
	strncat(path, model->symbol, maxlen);

	char hostname[32];
	char *forced_hostname = getenv("STARPU_HOSTNAME");
	if (forced_hostname && forced_hostname[0])
		snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
	else
		gethostname(hostname, sizeof(hostname));
	strncat(path, ".", maxlen);
	strncat(path, hostname, maxlen);
}
static void save_history_based_model(struct starpu_perfmodel *model)
{
	STARPU_ASSERT(model);
	STARPU_ASSERT(model->symbol);

	/* TODO checks */

	/* filename = $STARPU_PERF_MODEL_DIR/codelets/symbol.hostname */
	char path[256];
	get_model_path(model, path, 256);

	_STARPU_DEBUG("Opening performance model file %s for model %s\n", path, model->symbol);

	/* overwrite existing file, or create it */
	FILE *f;
	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	dump_model_file(f, model);

	fclose(f);
}
static void _starpu_dump_registered_models(void)
{
	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	struct _starpu_perfmodel_list *node;
	node = registered_models;

	_STARPU_DEBUG("DUMP MODELS!\n");

	while (node)
	{
		save_history_based_model(node->model);
		node = node->next;
	}

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
void _starpu_initialize_registered_performance_models(void)
{
	registered_models = NULL;
	_STARPU_PTHREAD_RWLOCK_INIT(&registered_models_rwlock, NULL);
}

void _starpu_deinitialize_registered_performance_models(void)
{
	if (_starpu_get_calibrate_flag())
		_starpu_dump_registered_models();

	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	struct _starpu_perfmodel_list *node, *pnode;
	node = registered_models;

	_STARPU_DEBUG("FREE MODELS!\n");

	while (node)
	{
		struct starpu_perfmodel *model = node->model;
		unsigned arch;
		unsigned nimpl;

		_STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
		for (arch = 0; arch < STARPU_NARCH_VARIATIONS; arch++)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
			{
				struct starpu_perfmodel_per_arch *archmodel = &model->per_arch[arch][nimpl];
				struct starpu_perfmodel_history_list *list, *plist;
				struct starpu_perfmodel_history_table *entry, *tmp;

				HASH_ITER(hh, archmodel->history, entry, tmp)
				{
					HASH_DEL(archmodel->history, entry);
					free(entry);
				}
				archmodel->history = NULL;

				list = archmodel->list;
				while (list)
				{
					free(list->entry);
					plist = list;
					list = list->next;
					free(plist);
				}
				archmodel->list = NULL;
			}
		}

		model->is_loaded = 0;
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

		pnode = node;
		node = node->next;
		free(pnode);
	}
	registered_models = NULL;

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	_STARPU_PTHREAD_RWLOCK_DESTROY(&registered_models_rwlock);
}
/*
 * XXX: We should probably factorize the beginning of the _starpu_load_*_model
 * functions. This is a bit tricky though, because we must be sure to unlock
 * registered_models_rwlock at the right place.
 */
void _starpu_load_per_arch_based_model(struct starpu_perfmodel *model)
{
	STARPU_ASSERT(model && model->symbol);

	int already_loaded;

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_loaded = model->is_loaded;
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	if (already_loaded)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test? */
	if (model->is_loaded)
	{
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	_STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}

void _starpu_load_common_based_model(struct starpu_perfmodel *model)
{
	STARPU_ASSERT(model && model->symbol);

	int already_loaded;

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_loaded = model->is_loaded;
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	if (already_loaded)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test? */
	if (model->is_loaded)
	{
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	_STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
/* We first try to grab the global lock in read mode to check whether the model
 * was loaded or not (it is very likely to have been loaded already). If the
 * model was not loaded yet, we take the lock in write mode, and if the model
 * is still not loaded once we have the lock, we do load it. */
void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned scan_history)
{
	STARPU_ASSERT(model);
	STARPU_ASSERT(model->symbol);

	int already_loaded;

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_loaded = model->is_loaded;
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	if (already_loaded)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	_STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test? */
	if (model->is_loaded)
	{
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	_STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);

	_STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

	/* make sure the performance model directory exists (or create it) */
	_starpu_create_sampling_directory_if_needed();

	char path[256];
	get_model_path(model, path, 256);

	_STARPU_DEBUG("Opening performance model file %s for model %s ...\n", path, model->symbol);

	unsigned calibrate_flag = _starpu_get_calibrate_flag();
	model->benchmarking = calibrate_flag;

	/* try to open an existing file and load it */
	int res;
	res = access(path, F_OK);
	if (res == 0)
	{
		if (calibrate_flag == 2)
		{
			/* The user specified that the performance model should
			 * be overwritten, so we don't load the existing file! */
			_STARPU_DEBUG("Overwrite existing file\n");
			initialize_model(model);
		}
		else
		{
			/* We load the available file */
			_STARPU_DEBUG("File exists\n");
			FILE *f;
			f = fopen(path, "r");
			STARPU_ASSERT(f);

			parse_model_file(f, model, scan_history);

			fclose(f);
		}
	}
	else
	{
		_STARPU_DEBUG("File does not exist\n");
		if (!calibrate_flag)
		{
			_STARPU_DISP("Warning: model %s is not calibrated, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol);
			_starpu_set_calibrate_flag(1);
			model->benchmarking = 1;
		}
		initialize_model(model);
	}

	_STARPU_DEBUG("Performance model file %s for model %s is loaded\n", path, model->symbol);

	model->is_loaded = 1;

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
/* This function is intended to be used by external tools that need to read
 * the performance model files */
int starpu_perfmodel_list(FILE *output)
{
	char path[256];
	DIR *dp;
	struct dirent *ep;

	char perf_model_dir_codelets[256];
	_starpu_get_perf_model_dir_codelets(perf_model_dir_codelets, 256);

	strncpy(path, perf_model_dir_codelets, 256);
	dp = opendir(path);
	if (dp != NULL)
	{
		while ((ep = readdir(dp)))
		{
			if (strcmp(ep->d_name, ".") && strcmp(ep->d_name, ".."))
				fprintf(output, "file: <%s>\n", ep->d_name);
		}
		closedir(dp);
	}
	else
	{
		_STARPU_DISP("Could not open the perfmodel directory <%s>\n", path);
	}
	return 0;
}
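
/* Typical use from a tool is simply starpu_perfmodel_list(stdout), which
 * prints the name of every model file found in the sampling directory. */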
/* This function is intended to be used by external tools that need to read the
 * performance model files */
int starpu_perfmodel_load_symbol(const char *symbol, struct starpu_perfmodel *model)
{
	model->symbol = strdup(symbol);
	initialize_model(model);

	/* where is the file if it exists? */
	char path[256];
	get_model_path(model, path, 256);

	// _STARPU_DEBUG("get_model_path -> %s\n", path);

	/* does it exist? */
	int res;
	res = access(path, F_OK);
	if (res)
	{
		const char *dot = strrchr(symbol, '.');
		if (dot)
		{
			/* retry with the part of the symbol before its last dot */
			char *symbol2 = strdup(symbol);
			symbol2[dot-symbol] = '\0';
			int ret;
			fprintf(stderr, "note: loading history from %s instead of %s\n", symbol2, symbol);
			ret = starpu_perfmodel_load_symbol(symbol2, model);
			free(symbol2);
			return ret;
		}
		_STARPU_DISP("There is no performance model for symbol %s\n", symbol);
		return 1;
	}

	FILE *f = fopen(path, "r");
	STARPU_ASSERT(f);

	parse_model_file(f, model, 1);

	STARPU_ASSERT(fclose(f) == 0);
	return 0;
}
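
/* Minimal usage sketch for an external tool ("my_codelet" is a hypothetical
 * symbol name; error handling omitted):
 *
 *	struct starpu_perfmodel model;
 *	memset(&model, 0, sizeof(model));
 *	if (starpu_perfmodel_load_symbol("my_codelet", &model) == 0)
 *	{
 *		// model.per_arch[arch][impl] now holds the history list and
 *		// the (non-)linear regression coefficients read from disk.
 *	}
 */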
void starpu_perfmodel_get_arch_name(enum starpu_perf_archtype arch, char *archname, size_t maxlen, unsigned nimpl)
{
	if (arch < STARPU_CUDA_DEFAULT)
	{
		if (arch == STARPU_CPU_DEFAULT)
		{
			/* NB: We could just use cpu_1 as well ... */
			snprintf(archname, maxlen, "cpu_impl_%u", nimpl);
		}
		else
		{
			/* For combined CPU workers */
			int cpu_count = arch - STARPU_CPU_DEFAULT + 1;
			snprintf(archname, maxlen, "cpu_%d_impl_%u", cpu_count, nimpl);
		}
	}
	else if ((STARPU_CUDA_DEFAULT <= arch)
		&& (arch < STARPU_CUDA_DEFAULT + STARPU_MAXCUDADEVS))
	{
		int devid = arch - STARPU_CUDA_DEFAULT;
		snprintf(archname, maxlen, "cuda_%d_impl_%u", devid, nimpl);
	}
	else if ((STARPU_OPENCL_DEFAULT <= arch)
		&& (arch < STARPU_OPENCL_DEFAULT + STARPU_MAXOPENCLDEVS))
	{
		int devid = arch - STARPU_OPENCL_DEFAULT;
		snprintf(archname, maxlen, "opencl_%d_impl_%u", devid, nimpl);
	}
	else if (arch == STARPU_GORDON_DEFAULT)
	{
		snprintf(archname, maxlen, "gordon_impl_%u", nimpl);
	}
	else
	{
		STARPU_ABORT();
	}
}

void starpu_perfmodel_debugfilepath(struct starpu_perfmodel *model,
		enum starpu_perf_archtype arch, char *path, size_t maxlen, unsigned nimpl)
{
	char archname[32];
	starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);

	STARPU_ASSERT(path);

	get_model_debug_path(model, archname, path, maxlen);
}
double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j, unsigned nimpl)
{
	double exp = NAN;
	size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
	struct starpu_perfmodel_regression_model *regmodel;

	regmodel = &model->per_arch[arch][nimpl].regression;

	if (regmodel->valid)
		exp = regmodel->alpha*pow((double)size, regmodel->beta);

	return exp;
}

double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j, unsigned nimpl)
{
	double exp = NAN;
	size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
	struct starpu_perfmodel_regression_model *regmodel;

	regmodel = &model->per_arch[arch][nimpl].regression;

	if (regmodel->nl_valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
		exp = regmodel->a*pow((double)size, regmodel->b) + regmodel->c;
	else
	{
		/* Fall back to the history for sizes outside the calibrated range */
		uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
		struct starpu_perfmodel_per_arch *per_arch_model = &model->per_arch[arch][nimpl];
		struct starpu_perfmodel_history_table *history;
		struct starpu_perfmodel_history_table *entry;

		_STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
		history = per_arch_model->history;
		HASH_FIND_UINT32_T(history, &key, entry);
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

		if (entry && entry->history_entry && entry->history_entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
			exp = entry->history_entry->mean;
		else if (!model->benchmarking)
		{
			char archname[32];

			starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
			_STARPU_DISP("Warning: model %s is not calibrated enough for %s, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname);
			_starpu_set_calibrate_flag(1);
			model->benchmarking = 1;
		}
	}

	return exp;
}
double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, struct _starpu_job *j, unsigned nimpl)
{
	double exp;
	struct starpu_perfmodel_per_arch *per_arch_model;
	struct starpu_perfmodel_history_entry *entry;
	struct starpu_perfmodel_history_table *history, *elt;

	uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);

	per_arch_model = &model->per_arch[arch][nimpl];

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
	history = per_arch_model->history;
	HASH_FIND_UINT32_T(history, &key, elt);
	entry = (elt == NULL) ? NULL : elt->history_entry;
	_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

	exp = entry ? entry->mean : NAN;

	if (entry && entry->nsample < _STARPU_CALIBRATION_MINIMUM)
		/* TODO: report differently if we've scheduled really enough
		 * of that task and the scheduler should perhaps put it aside */
		/* Not calibrated enough */
		exp = NAN;

	if (isnan(exp) && !model->benchmarking)
	{
		char archname[32];

		starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
		_STARPU_DISP("Warning: model %s is not calibrated enough for %s, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname);
		_starpu_set_calibrate_flag(1);
		model->benchmarking = 1;
	}

	return exp;
}
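
/* The public entry point below builds a minimal job whose footprint is
 * already known; the remaining fields (including nimpl) are zero-initialized
 * by the designated initializer, so the default implementation is queried. */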
double starpu_history_based_expected_perf(struct starpu_perfmodel *model, enum starpu_perf_archtype arch, uint32_t footprint)
{
	struct _starpu_job j =
	{
		.footprint = footprint,
		.footprint_is_computed = 1,
	};
	return _starpu_history_based_job_expected_perf(model, arch, &j, j.nimpl);
}
void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfmodel *model, enum starpu_perf_archtype arch, unsigned cpuid STARPU_ATTRIBUTE_UNUSED, double measured, unsigned nimpl)
{
	if (model)
	{
		_STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

		struct starpu_perfmodel_per_arch *per_arch_model = &model->per_arch[arch][nimpl];

		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_perfmodel_history_entry *entry;
			struct starpu_perfmodel_history_table *elt;
			struct starpu_perfmodel_history_list **list;
			uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);

			list = &per_arch_model->list;

			HASH_FIND_UINT32_T(per_arch_model->history, &key, elt);
			entry = (elt == NULL) ? NULL : elt->history_entry;

			if (!entry)
			{
				/* this is the first entry with such a footprint */
				entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
				STARPU_ASSERT(entry);
				entry->mean = measured;
				entry->sum = measured;

				entry->deviation = 0.0;
				entry->sum2 = measured*measured;

				entry->size = _starpu_job_get_data_size(model, arch, nimpl, j);

				entry->footprint = key;
				entry->nsample = 1;

				insert_history_entry(entry, list, &per_arch_model->history);
			}
			else
			{
				/* there is already some entry with the same footprint */
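				/* Running update of the sample statistics: mean = sum/n,
				 * and deviation is the population standard deviation
				 * sqrt(sum2/n - mean^2), computed from the running sums. */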
				entry->sum += measured;
				entry->sum2 += measured*measured;
				entry->nsample++;

				unsigned n = entry->nsample;
				entry->mean = entry->sum / n;
				entry->deviation = sqrt((entry->sum2 - (entry->sum*entry->sum)/n)/n);
			}
			STARPU_ASSERT(entry);
		}

		if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_perfmodel_regression_model *reg_model;
			reg_model = &per_arch_model->regression;

			/* update the regression model */
			size_t job_size = _starpu_job_get_data_size(model, arch, nimpl, j);
			double logy, logx;
			logx = log((double)job_size);
			logy = log(measured);
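
			/* The regression fits the power law y = alpha * x^beta, i.e.
			 * ln y = ln alpha + beta * ln x, by ordinary least squares on
			 * the accumulated sums of ln x and ln y below. */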
			reg_model->sumlnx += logx;
			reg_model->sumlnx2 += logx*logx;
			reg_model->sumlny += logy;
			reg_model->sumlnxlny += logx*logy;

			if (reg_model->minx == 0 || job_size < reg_model->minx)
				reg_model->minx = job_size;
			if (reg_model->maxx == 0 || job_size > reg_model->maxx)
				reg_model->maxx = job_size;

			reg_model->nsample++;

			unsigned n = reg_model->nsample;

			double num = (n*reg_model->sumlnxlny - reg_model->sumlnx*reg_model->sumlny);
			double denom = (n*reg_model->sumlnx2 - reg_model->sumlnx*reg_model->sumlnx);

			reg_model->beta = num/denom;
			reg_model->alpha = exp((reg_model->sumlny - reg_model->beta*reg_model->sumlnx)/n);

			if (VALID_REGRESSION(reg_model))
				reg_model->valid = 1;
		}

#ifdef STARPU_MODEL_DEBUG
		struct starpu_task *task = j->task;
		FILE *f = fopen(per_arch_model->debug_path, "a+");
		if (f == NULL)
		{
			_STARPU_DISP("Error <%s> when opening file <%s>\n", strerror(errno), per_arch_model->debug_path);
			STARPU_ABORT();
		}

		if (!j->footprint_is_computed)
			(void) _starpu_compute_buffers_footprint(model, arch, nimpl, j);

		STARPU_ASSERT(j->footprint_is_computed);

		fprintf(f, "0x%x\t%lu\t%f\t%f\t%f\t%d\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(model, arch, nimpl, j), measured, task->predicted, task->predicted_transfer, cpuid);
		unsigned i;

		for (i = 0; i < task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = task->handles[i];

			STARPU_ASSERT(handle->ops);
			STARPU_ASSERT(handle->ops->display);
			handle->ops->display(handle, f);
		}
		fprintf(f, "\n");

		fclose(f);
#endif
		_STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
	}
}
void starpu_perfmodel_update_history(struct starpu_perfmodel *model, struct starpu_task *task, enum starpu_perf_archtype arch, unsigned cpuid, unsigned nimpl, double measured)
{
	struct _starpu_job *job = _starpu_get_job_associated_to_task(task);

	_starpu_load_perfmodel(model);

	/* Record measurement */
	_starpu_update_perfmodel_history(job, model, arch, cpuid, measured, nimpl);
	/* and save perfmodel on termination */
	_starpu_set_calibrate_flag(1);
}
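
/* Sketch of feeding an external measurement back into a model (my_model,
 * my_task and measured_time are assumed to exist in the caller; the time
 * unit must match the one StarPU's own measurements use):
 *
 *	starpu_perfmodel_update_history(&my_model, my_task,
 *					STARPU_CPU_DEFAULT, 0, 0, measured_time);
 */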