perfmodel_history.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2013 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <dirent.h>
  19. #include <unistd.h>
  20. #include <sys/stat.h>
  21. #include <errno.h>
  22. #include <common/config.h>
  23. #include <common/utils.h>
  24. #include <core/perfmodel/perfmodel.h>
  25. #include <core/jobs.h>
  26. #include <core/workers.h>
  27. #include <datawizard/datawizard.h>
  28. #include <core/perfmodel/regression.h>
  29. #include <common/config.h>
  30. #include <starpu_parameters.h>
  31. #include <common/uthash.h>
  32. #ifdef STARPU_HAVE_WINDOWS
  33. #include <windows.h>
  34. #endif
  35. #define HASH_ADD_UINT32_T(head,field,add) HASH_ADD(hh,head,field,sizeof(uint32_t),add)
  36. #define HASH_FIND_UINT32_T(head,find,out) HASH_FIND(hh,head,find,sizeof(uint32_t),out)
/* uthash node mapping a task footprint to its measured history entry,
 * giving O(1) lookup while the per-arch linked list keeps entries enumerable. */
struct starpu_perfmodel_history_table
{
	UT_hash_handle hh;	/* makes this struct usable as a uthash element */
	uint32_t footprint;	/* hash key: footprint of the task's data */
	struct starpu_perfmodel_history_entry *history_entry;	/* measurements for this footprint */
};
  43. /* We want more than 10% variance on X to trust regression */
  44. #define VALID_REGRESSION(reg_model) \
  45. ((reg_model)->minx < (9*(reg_model)->maxx)/10 && (reg_model)->nsample >= _STARPU_CALIBRATION_MINIMUM)
  46. static starpu_pthread_rwlock_t registered_models_rwlock;
  47. static struct _starpu_perfmodel_list *registered_models = NULL;
  48. size_t _starpu_job_get_data_size(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned nimpl, struct _starpu_job *j)
  49. {
  50. struct starpu_task *task = j->task;
  51. if (model && model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].size_base)
  52. {
  53. return model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].size_base(task, arch, nimpl);
  54. }
  55. else if (model && model->size_base)
  56. {
  57. return model->size_base(task, nimpl);
  58. }
  59. else
  60. {
  61. unsigned nbuffers = task->cl->nbuffers;
  62. size_t size = 0;
  63. unsigned buffer;
  64. for (buffer = 0; buffer < nbuffers; buffer++)
  65. {
  66. starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, buffer);
  67. size += _starpu_data_get_size(handle);
  68. }
  69. return size;
  70. }
  71. }
  72. /*
  73. * History based model
  74. */
  75. static void insert_history_entry(struct starpu_perfmodel_history_entry *entry, struct starpu_perfmodel_history_list **list, struct starpu_perfmodel_history_table **history_ptr)
  76. {
  77. struct starpu_perfmodel_history_list *link;
  78. struct starpu_perfmodel_history_table *table;
  79. link = (struct starpu_perfmodel_history_list *) malloc(sizeof(struct starpu_perfmodel_history_list));
  80. link->next = *list;
  81. link->entry = entry;
  82. *list = link;
  83. /* detect concurrency issue */
  84. //HASH_FIND_UINT32_T(*history_ptr, &entry->footprint, table);
  85. //STARPU_ASSERT(table == NULL);
  86. table = (struct starpu_perfmodel_history_table*) malloc(sizeof(*table));
  87. STARPU_ASSERT(table != NULL);
  88. table->footprint = entry->footprint;
  89. table->history_entry = entry;
  90. HASH_ADD_UINT32_T(*history_ptr, footprint, table);
  91. }
/* Write the regression parameters of one (arch, impl) sub-model to f in the
 * on-disk model format.  Must stay in sync with scan_reg_model(). */
static void dump_reg_model(FILE *f, struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned nimpl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;
	per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
	struct starpu_perfmodel_regression_model *reg_model;
	reg_model = &per_arch_model->regression;

	/*
	 * Linear Regression model
	 */

	/* Unless we have enough measurements, we put NaN in the file to indicate the model is invalid */
	double alpha = nan(""), beta = nan("");
	if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		if (reg_model->nsample > 1)
		{
			alpha = reg_model->alpha;
			beta = reg_model->beta;
		}
	}

	fprintf(f, "# sumlnx\tsumlnx2\t\tsumlny\t\tsumlnxlny\talpha\t\tbeta\t\tn\tminx\t\tmaxx\n");
	fprintf(f, "%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\t%-15lu\t%-15lu\n", reg_model->sumlnx, reg_model->sumlnx2, reg_model->sumlny, reg_model->sumlnxlny, alpha, beta, reg_model->nsample, reg_model->minx, reg_model->maxx);

	/*
	 * Non-Linear Regression model
	 */

	/* a, b, c are recomputed from the whole history list here; they stay
	 * NaN for model types that do not use non-linear regression. */
	double a = nan(""), b = nan(""), c = nan("");
	if (model->type == STARPU_NL_REGRESSION_BASED)
		_starpu_regression_non_linear_power(per_arch_model->list, &a, &b, &c);

	fprintf(f, "# a\t\tb\t\tc\n");
	fprintf(f, "%-15le\t%-15le\t%-15le\n", a, b, c);
}
/* Read back the regression parameters written by dump_reg_model().
 * Aborts (STARPU_ASSERT_MSG) on any malformed field.  Sets reg_model->valid
 * and reg_model->nl_valid according to whether the stored parameters are
 * usable (non-NaN, and enough samples/spread per VALID_REGRESSION). */
static void scan_reg_model(FILE *f, struct starpu_perfmodel_regression_model *reg_model)
{
	int res;

	/*
	 * Linear Regression model
	 */

	_starpu_drop_comments(f);

	res = fscanf(f, "%le\t%le\t%le\t%le", &reg_model->sumlnx, &reg_model->sumlnx2, &reg_model->sumlny, &reg_model->sumlnxlny);
	STARPU_ASSERT_MSG(res == 4, "Incorrect performance model file");
	/* alpha/beta may have been written as NaN (model not calibrated yet),
	 * hence the dedicated reader which accepts NaN values. */
	res = _starpu_read_double(f, "\t%le", &reg_model->alpha);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "\t%le", &reg_model->beta);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = fscanf(f, "\t%u\t%lu\t%lu\n", &reg_model->nsample, &reg_model->minx, &reg_model->maxx);
	STARPU_ASSERT_MSG(res == 3, "Incorrect performance model file");

	/* If any of the parameters describing the linear regression model is NaN, the model is invalid */
	unsigned invalid = (isnan(reg_model->alpha)||isnan(reg_model->beta));
	reg_model->valid = !invalid && VALID_REGRESSION(reg_model);

	/*
	 * Non-Linear Regression model
	 */

	_starpu_drop_comments(f);

	res = _starpu_read_double(f, "%le\t", &reg_model->a);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "%le\t", &reg_model->b);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "%le\n", &reg_model->c);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");

	/* If any of the parameters describing the non-linear regression model is NaN, the model is invalid */
	unsigned nl_invalid = (isnan(reg_model->a)||isnan(reg_model->b)||isnan(reg_model->c));
	reg_model->nl_valid = !nl_invalid && VALID_REGRESSION(reg_model);
}
/* Write one history entry as a single line: footprint (hex), data size,
 * flops, mean/deviation (us), raw sums and sample count — the format
 * parsed back by scan_history_entry(). */
static void dump_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
{
	fprintf(f, "%08x\t%-15lu\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\n", entry->footprint, (unsigned long) entry->size, entry->flops, entry->mean, entry->deviation, entry->sum, entry->sum2, entry->nsample);
}
/* Read one history-entry line from f into *entry, or just consume the line
 * when entry is NULL.  Two line formats are accepted: the current 8-field
 * one (with flops) and the older 7-field one without flops, in which case
 * flops defaults to 0. */
static void scan_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
{
	int res;

	_starpu_drop_comments(f);

	/* In case entry is NULL, we just drop these values */
	unsigned nsample;
	uint32_t footprint;
	unsigned long size; /* in bytes */
	double flops;
	double mean;
	double deviation;
	double sum;
	double sum2;

	char line[256];
	char *ret;

	ret = fgets(line, sizeof(line), f);
	STARPU_ASSERT(ret);
	/* The whole record must fit in the buffer: a missing '\n' means the
	 * line was truncated and parsing would silently desynchronize. */
	STARPU_ASSERT(strchr(line, '\n'));

	/* Read the values from the file */
	res = sscanf(line, "%x\t%lu\t%le\t%le\t%le\t%le\t%le\t%u", &footprint, &size, &flops, &mean, &deviation, &sum, &sum2, &nsample);

	if (res != 8)
	{
		/* Fall back to the pre-flops on-disk format. */
		flops = 0.;
		/* Read the values from the file */
		res = sscanf(line, "%x\t%lu\t%le\t%le\t%le\t%le\t%u", &footprint, &size, &mean, &deviation, &sum, &sum2, &nsample);
		STARPU_ASSERT_MSG(res == 7, "Incorrect performance model file");
	}

	if (entry)
	{
		entry->footprint = footprint;
		entry->size = size;
		entry->flops = flops;
		entry->mean = mean;
		entry->deviation = deviation;
		entry->sum = sum;
		entry->sum2 = sum2;
		entry->nsample = nsample;
	}
}
/* Parse one (arch, impl) sub-model section: entry count, regression
 * parameters, then the history entries themselves.  When scan_history is 0
 * the entries are read but not stored, which lets callers consume sections
 * they do not want to keep. */
static void parse_per_arch_model_file(FILE *f, struct starpu_perfmodel_per_arch *per_arch_model, unsigned scan_history)
{
	unsigned nentries;

	_starpu_drop_comments(f);

	int res = fscanf(f, "%u\n", &nentries);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");

	scan_reg_model(f, &per_arch_model->regression);

	/* parse entries */
	unsigned i;
	for (i = 0; i < nentries; i++)
	{
		struct starpu_perfmodel_history_entry *entry = NULL;
		if (scan_history)
		{
			entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
			STARPU_ASSERT(entry);

			/* Tell helgrind that we do not care about
			 * racing access to the sampling, we only want a
			 * good-enough estimation */
			STARPU_HG_DISABLE_CHECKING(entry->nsample);
			STARPU_HG_DISABLE_CHECKING(entry->mean);
			entry->nerror = 0;
		}

		scan_history_entry(f, entry);

		/* insert the entry in the hashtable and the list structures */
		/* TODO: Insert it at the end of the list, to avoid reversing
		 * the order... But efficiently! We may have a lot of entries */
		if (scan_history)
			insert_history_entry(entry, &per_arch_model->list, &per_arch_model->history);
	}
}
/* Parse one arch section: the number of implementations, then one per-impl
 * sub-model each.  When model is NULL, or for implementations beyond
 * STARPU_MAXIMPLEMENTATIONS, the data is parsed into a dummy and discarded
 * so the file position still advances past the whole section. */
static void parse_arch(FILE *f, struct starpu_perfmodel *model, unsigned scan_history,struct starpu_perfmodel_arch* arch)
{
	struct starpu_perfmodel_per_arch dummy;
	unsigned nimpls, implmax, impl, i, ret;

	//_STARPU_DEBUG("Parsing %s_%u_ncore_%u\n",
	//		starpu_perfmodel_get_archtype_name(arch->type),
	//		arch->devid,
	//		arch->ncore);

	/* Parsing number of implementation */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &nimpls);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	if( model != NULL)
	{
		/* Parsing each implementation */
		implmax = STARPU_MIN(nimpls, STARPU_MAXIMPLEMENTATIONS);
		for (impl = 0; impl < implmax; impl++)
			parse_per_arch_model_file(f, &model->per_arch[arch->type][arch->devid][arch->ncore][impl], scan_history);
	}
	else
	{
		/* Nothing to store: every implementation is skipped below. */
		impl = 0;
	}

	/* if the number of implementations is greater than
	 * STARPU_MAXIMPLEMENTATIONS, we skip (parse and discard) the extra ones */
	for (i = impl; i < nimpls; i++)
		parse_per_arch_model_file(f, &dummy, 0);
}
/* Parse one device section: the max number of workers (core counts) recorded
 * for this device, then one arch section per core count.  Core counts present
 * in the file but absent from the in-memory tables (or all of them, when
 * model is NULL) are parsed with a NULL model so they are consumed and
 * discarded. */
static void parse_device(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, enum starpu_worker_archtype archtype, unsigned devid)
{
	unsigned maxncore, ncore, ret, i;

	struct starpu_perfmodel_arch arch;
	arch.type = archtype;
	arch.devid = devid;

	//_STARPU_DEBUG("Parsing device %s_%u arch\n",
	//		starpu_perfmodel_get_archtype_name(archtype),
	//		devid);

	/* Parsing maximum number of workers for this device */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &maxncore);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	/* Parsing each arch */
	if(model !=NULL)
	{
		/* Stop at the NULL sentinel terminating the per-core table. */
		for(ncore=0; ncore < maxncore && model->per_arch[archtype][devid][ncore] != NULL; ncore++)
		{
			arch.ncore = ncore;
			parse_arch(f,model,scan_history,&arch);
		}
	}
	else
	{
		ncore=0;
	}

	/* Consume the remaining core sections without storing them. */
	for(i=ncore; i < maxncore; i++)
	{
		arch.ncore = i;
		parse_arch(f,NULL,scan_history,&arch);
	}
}
/* Parse one archtype section: the number of devices, then one device section
 * each.  Devices present in the file but absent from the in-memory tables
 * (or all of them, when model is NULL) are parsed and discarded. */
static void parse_archtype(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, enum starpu_worker_archtype archtype)
{
	unsigned ndevice, devid, ret, i;

	//_STARPU_DEBUG("Parsing %s arch\n", starpu_perfmodel_get_archtype_name(archtype));

	/* Parsing number of devices for this archtype */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &ndevice);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	/* Parsing each device for this archtype */
	if(model != NULL)
	{
		/* Stop at the NULL sentinel terminating the per-device table. */
		for(devid=0; devid < ndevice && model->per_arch[archtype][devid] != NULL; devid++)
		{
			parse_device(f,model,scan_history,archtype,devid);
		}
	}
	else
	{
		devid=0;
	}

	/* Consume the remaining device sections without storing them. */
	for(i=devid; i < ndevice; i++)
	{
		parse_device(f,NULL,scan_history,archtype,i);
	}
}
  313. static void parse_model_file(FILE *f, struct starpu_perfmodel *model, unsigned scan_history)
  314. {
  315. unsigned archtype;
  316. int ret, version;
  317. //_STARPU_DEBUG("Start parsing\n");
  318. /* Parsing performance model version */
  319. _starpu_drop_comments(f);
  320. ret = fscanf(f, "%d\n", &version);
  321. STARPU_ASSERT_MSG(version == _STARPU_PERFMODEL_VERSION, "Incorrect performance model file with a model version %d not being the current model version (%d)\n",
  322. version, _STARPU_PERFMODEL_VERSION);
  323. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  324. /* Parsing each kind of archtype */
  325. for(archtype=0; archtype<STARPU_NARCH; archtype++)
  326. {
  327. parse_archtype(f, model, scan_history, archtype);
  328. }
  329. }
/* Write one (arch, impl) sub-model section: a header with the arch name, the
 * number of history entries, the regression parameters, then — for
 * history-based model types — every recorded history entry. */
static void dump_per_arch_model_file(FILE *f, struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, unsigned nimpl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;
	per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];

	/* count the number of elements in the lists */
	struct starpu_perfmodel_history_list *ptr = NULL;
	unsigned nentries = 0;

	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		/* Dump the list of all entries in the history */
		ptr = per_arch_model->list;
		while(ptr)
		{
			nentries++;
			ptr = ptr->next;
		}
	}

	/* header */
	char archname[32];
	starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);
	fprintf(f, "#####\n");
	fprintf(f, "# Model for %s\n", archname);
	fprintf(f, "# number of entries\n%u\n", nentries);

	dump_reg_model(f, model, arch, nimpl);

	/* Dump the history into the model file in case it is necessary */
	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		fprintf(f, "# hash\t\tsize\t\tflops\t\tmean (us)\tdev (us)\t\tsum\t\tsum2\t\tn\n");
		ptr = per_arch_model->list;
		while (ptr)
		{
			dump_history_entry(f, ptr->entry);
			ptr = ptr->next;
		}
	}

	fprintf(f, "\n");
}
  367. static unsigned get_n_entries(struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, unsigned impl)
  368. {
  369. struct starpu_perfmodel_per_arch *per_arch_model;
  370. per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][impl];
  371. /* count the number of elements in the lists */
  372. struct starpu_perfmodel_history_list *ptr = NULL;
  373. unsigned nentries = 0;
  374. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  375. {
  376. /* Dump the list of all entries in the history */
  377. ptr = per_arch_model->list;
  378. while(ptr)
  379. {
  380. nentries++;
  381. ptr = ptr->next;
  382. }
  383. }
  384. return nentries;
  385. }
/* Write a whole performance model to f in the on-disk format: version header,
 * then for each archtype the device count, per-device worker count, and one
 * sub-model section per (core count, implementation).
 * The layout must stay in sync with parse_model_file() and
 * initialize_model_with_file(). */
static void dump_model_file(FILE *f, struct starpu_perfmodel *model)
{
	struct _starpu_machine_config *conf = _starpu_get_machine_config();
	char *name = "unknown";
	unsigned archtype, ndevice, *ncore, devid, nc, nimpl;
	struct starpu_perfmodel_arch arch;

	fprintf(f, "##################\n");
	fprintf(f, "# Performance Model Version\n");
	fprintf(f, "%d\n\n", _STARPU_PERFMODEL_VERSION);

	for(archtype=0; archtype<STARPU_NARCH; archtype++)
	{
		arch.type = archtype;
		/* Device/core counts come from the detected machine topology;
		 * ncore stays NULL for archtypes with one worker per device. */
		switch (archtype)
		{
		case STARPU_CPU_WORKER:
			ndevice = 1;
			ncore = &conf->topology.nhwcpus;
			name = "CPU";
			break;
		case STARPU_CUDA_WORKER:
			ndevice = conf->topology.nhwcudagpus;
			ncore = NULL;
			name = "CUDA";
			break;
		case STARPU_OPENCL_WORKER:
			ndevice = conf->topology.nhwopenclgpus;
			ncore = NULL;
			name = "OPENCL";
			break;
		case STARPU_MIC_WORKER:
			ndevice = conf->topology.nhwmicdevices;
			ncore = conf->topology.nhwmiccores;
			name = "MIC";
			break;
		case STARPU_SCC_WORKER:
			ndevice = conf->topology.nhwscc;
			ncore = NULL;
			name = "SCC";
			break;
		default:
			/* Unknown arch */
			STARPU_ABORT();
			break;
		}

		fprintf(f, "####################\n");
		fprintf(f, "# %ss\n", name);
		fprintf(f, "# number of %s devices\n", name);
		fprintf(f, "%u\n", ndevice);

		for(devid=0; devid<ndevice; devid++)
		{
			arch.devid = devid;
			fprintf(f, "###############\n");
			fprintf(f, "# %s_%u\n", name, devid);
			fprintf(f, "# number of workers on device %s_%d\n", name, devid);
			if(ncore != NULL)
				fprintf(f, "%u\n", ncore[devid]);
			else
				fprintf(f, "1\n");
			/* Walk the NULL-terminated per-core table of this device. */
			for(nc=0; model->per_arch[archtype][devid][nc] != NULL; nc++)
			{
				arch.ncore = nc;
				/* Only dump implementations that actually hold data:
				 * history entries for history-based types, regression
				 * samples otherwise. */
				unsigned max_impl = 0;
				if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
				{
					for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
						if (get_n_entries(model, &arch, nimpl))
							max_impl = nimpl + 1;
				}
				else if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_PER_ARCH || model->type == STARPU_COMMON)
				{
					for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
						if (model->per_arch[archtype][devid][nc][nimpl].regression.nsample)
							max_impl = nimpl + 1;
				}
				else
					STARPU_ASSERT_MSG(0, "Unknown history-based performance model %u", archtype);

				fprintf(f, "##########\n");
				fprintf(f, "# %u worker(s) in parallel\n", nc+1);
				fprintf(f, "# number of implementations\n");
				fprintf(f, "%u\n", max_impl);
				for (nimpl = 0; nimpl < max_impl; nimpl++)
				{
					dump_per_arch_model_file(f, model, &arch, nimpl);
				}
			}
		}
	}
}
  474. static void initialize_per_arch_model(struct starpu_perfmodel_per_arch *per_arch_model)
  475. {
  476. memset(per_arch_model, 0, sizeof(struct starpu_perfmodel_per_arch));
  477. }
  478. static struct starpu_perfmodel_per_arch*** initialize_arch_model(int maxdevid, unsigned* maxncore_table)
  479. {
  480. int devid, ncore, nimpl;
  481. struct starpu_perfmodel_per_arch *** arch_model = malloc(sizeof(*arch_model)*(maxdevid+1));
  482. arch_model[maxdevid] = NULL;
  483. for(devid=0; devid<maxdevid; devid++)
  484. {
  485. int maxncore;
  486. if(maxncore_table != NULL)
  487. maxncore = maxncore_table[devid];
  488. else
  489. maxncore = 1;
  490. arch_model[devid] = malloc(sizeof(*arch_model[devid])*(maxncore+1));
  491. arch_model[devid][maxncore] = NULL;
  492. for(ncore=0; ncore<maxncore; ncore++)
  493. {
  494. arch_model[devid][ncore] = malloc(sizeof(*arch_model[devid][ncore])*STARPU_MAXIMPLEMENTATIONS);
  495. for(nimpl=0; nimpl<STARPU_MAXIMPLEMENTATIONS; nimpl++)
  496. {
  497. initialize_per_arch_model(&arch_model[devid][ncore][nimpl]);
  498. }
  499. }
  500. }
  501. return arch_model;
  502. }
  503. static void initialize_model(struct starpu_perfmodel *model)
  504. {
  505. struct _starpu_machine_config *conf = _starpu_get_machine_config();
  506. model->per_arch = malloc(sizeof(*model->per_arch)*(STARPU_NARCH));
  507. model->per_arch[STARPU_CPU_WORKER] = initialize_arch_model(1,&conf->topology.nhwcpus);
  508. model->per_arch[STARPU_CUDA_WORKER] = initialize_arch_model(conf->topology.nhwcudagpus,NULL);
  509. model->per_arch[STARPU_OPENCL_WORKER] = initialize_arch_model(conf->topology.nhwopenclgpus,NULL);
  510. model->per_arch[STARPU_MIC_WORKER] = initialize_arch_model(conf->topology.nhwmicdevices,conf->topology.nhwmiccores);
  511. model->per_arch[STARPU_SCC_WORKER] = initialize_arch_model(conf->topology.nhwscc,NULL);
  512. }
  513. static void initialize_model_with_file(FILE*f, struct starpu_perfmodel *model)
  514. {
  515. unsigned ret, archtype, devid, i, ndevice, * maxncore;
  516. struct starpu_perfmodel_arch arch;
  517. int version;
  518. /* Parsing performance model version */
  519. _starpu_drop_comments(f);
  520. ret = fscanf(f, "%d\n", &version);
  521. STARPU_ASSERT_MSG(version == _STARPU_PERFMODEL_VERSION, "Incorrect performance model file with a model version %d not being the current model version (%d)\n",
  522. version, _STARPU_PERFMODEL_VERSION);
  523. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  524. model->per_arch = malloc(sizeof(*model->per_arch)*(STARPU_NARCH));
  525. for(archtype=0; archtype<STARPU_NARCH; archtype++)
  526. {
  527. arch.type = archtype;
  528. _starpu_drop_comments(f);
  529. ret = fscanf(f, "%u\n", &ndevice);
  530. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  531. if(ndevice != 0)
  532. maxncore = malloc(sizeof(*maxncore)*ndevice);
  533. else
  534. maxncore = NULL;
  535. for(devid=0; devid < ndevice; devid++)
  536. {
  537. arch.devid = devid;
  538. _starpu_drop_comments(f);
  539. ret = fscanf(f, "%u\n", &maxncore[devid]);
  540. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  541. for(i=0; i<maxncore[devid]; i++)
  542. {
  543. arch.ncore = i;
  544. parse_arch(f,NULL,0,&arch);
  545. }
  546. }
  547. model->per_arch[archtype] = initialize_arch_model(ndevice,maxncore);
  548. if(maxncore != NULL)
  549. free(maxncore);
  550. }
  551. }
/* Lazily initialize a performance model (allocate its per-arch tables and
 * its rwlock), exactly once even under concurrent callers: a cheap test
 * under the read lock first, then a re-test under the write lock before
 * actually initializing (double-checked pattern). */
void starpu_perfmodel_init(struct starpu_perfmodel *model)
{
	STARPU_ASSERT(model && model->symbol);

	int already_init;

	STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_init = model->is_init;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	if (already_init)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test ? */
	if (model->is_init)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
	/* STARPU_COMMON models carry no per-arch data to allocate. */
	if(model->type != STARPU_COMMON)
		initialize_model(model);
	model->is_init = 1;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
/* Same one-shot initialization as starpu_perfmodel_init(), but the per-arch
 * tables are sized from the device/core counts stored in the model file f
 * (see initialize_model_with_file) rather than from the local topology. */
void starpu_perfmodel_init_with_file(FILE*f, struct starpu_perfmodel *model)
{
	STARPU_ASSERT(model && model->symbol);

	int already_init;

	STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_init = model->is_init;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	if (already_init)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test ? */
	if (model->is_init)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
	/* STARPU_COMMON models carry no per-arch data to allocate. */
	if(model->type != STARPU_COMMON)
		initialize_model_with_file(f,model);
	model->is_init = 1;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
  600. static void get_model_debug_path(struct starpu_perfmodel *model, const char *arch, char *path, size_t maxlen)
  601. {
  602. STARPU_ASSERT(path);
  603. _starpu_get_perf_model_dir_debug(path, maxlen);
  604. strncat(path, model->symbol, maxlen);
  605. char hostname[65];
  606. _starpu_gethostname(hostname, sizeof(hostname));
  607. strncat(path, ".", maxlen);
  608. strncat(path, hostname, maxlen);
  609. strncat(path, ".", maxlen);
  610. strncat(path, arch, maxlen);
  611. strncat(path, ".debug", maxlen);
  612. }
/*
 * Add the (already initialized) model to the global list of registered
 * models, so it gets dumped to disk on shutdown.
 * Returns 0 if the model was already loaded, 1 otherwise.
 */
int _starpu_register_model(struct starpu_perfmodel *model)
{
	starpu_perfmodel_init(model);

	/* If the model has already been loaded, there is nothing to do */
	STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	if (model->is_loaded)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return 0;
	}
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	/* We have to make sure the model has not been loaded since the
	 * last time we took the lock */
	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
	if (model->is_loaded)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return 0;
	}

	/* add the model to a linked list */
	/* NOTE(review): malloc result is used unchecked here — an assertion
	 * would match the style used elsewhere in this file. */
	struct _starpu_perfmodel_list *node = (struct _starpu_perfmodel_list *) malloc(sizeof(struct _starpu_perfmodel_list));

	node->model = model;
	//model->debug_modelid = debug_modelid++;

	/* put this model at the beginning of the list */
	node->next = registered_models;
	registered_models = node;

#ifdef STARPU_MODEL_DEBUG
	_starpu_create_sampling_directory_if_needed();

	unsigned archtype, devid, ncore, nimpl;
	struct starpu_perfmodel_arch arch;

	_STARPU_DEBUG("\n\n ###\nHere\n ###\n\n");

	if(model->is_init)
	{
		_STARPU_DEBUG("Init\n");
		/* Precompute the debug file path of every (arch, impl) pair;
		 * the per-arch tables are NULL-terminated at each level. */
		for (archtype = 0; archtype < STARPU_NARCH; archtype++)
		{
			_STARPU_DEBUG("Archtype\n");
			arch.type = archtype;
			if(model->per_arch[archtype] != NULL)
			{
				for(devid=0; model->per_arch[archtype][devid] != NULL; devid++)
				{
					_STARPU_DEBUG("Devid\n");
					arch.devid = devid;
					for(ncore=0; model->per_arch[archtype][devid][ncore] != NULL; ncore++)
					{
						_STARPU_DEBUG("Ncore\n");
						arch.ncore = ncore;
						for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
						{
							starpu_perfmodel_debugfilepath(model, &arch, model->per_arch[archtype][devid][ncore][nimpl].debug_path, 256, nimpl);
						}
					}
				}
			}
		}
	}
#endif

	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	return 1;
}
  677. static void get_model_path(struct starpu_perfmodel *model, char *path, size_t maxlen)
  678. {
  679. _starpu_get_perf_model_dir_codelets(path, maxlen);
  680. strncat(path, model->symbol, maxlen);
  681. char hostname[65];
  682. _starpu_gethostname(hostname, sizeof(hostname));
  683. strncat(path, ".", maxlen);
  684. strncat(path, hostname, maxlen);
  685. }
  686. static void save_history_based_model(struct starpu_perfmodel *model)
  687. {
  688. STARPU_ASSERT(model);
  689. STARPU_ASSERT(model->symbol);
  690. /* TODO checks */
  691. /* filename = $STARPU_PERF_MODEL_DIR/codelets/symbol.hostname */
  692. char path[256];
  693. get_model_path(model, path, 256);
  694. _STARPU_DEBUG("Opening performance model file %s for model %s\n", path, model->symbol);
  695. /* overwrite existing file, or create it */
  696. FILE *f;
  697. f = fopen(path, "w+");
  698. STARPU_ASSERT_MSG(f, "Could not save performance model %s\n", path);
  699. dump_model_file(f, model);
  700. fclose(f);
  701. }
  702. static void _starpu_dump_registered_models(void)
  703. {
  704. #ifndef STARPU_SIMGRID
  705. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  706. struct _starpu_perfmodel_list *node;
  707. node = registered_models;
  708. _STARPU_DEBUG("DUMP MODELS !\n");
  709. while (node)
  710. {
  711. save_history_based_model(node->model);
  712. node = node->next;
  713. }
  714. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  715. #endif
  716. }
  717. void _starpu_initialize_registered_performance_models(void)
  718. {
  719. registered_models = NULL;
  720. STARPU_PTHREAD_RWLOCK_INIT(&registered_models_rwlock, NULL);
  721. }
/* Release every per-architecture data structure attached to <model> (the
 * footprint hash table, the history list and its entries, and the nested
 * per_arch arrays themselves), then mark the model as neither initialized
 * nor loaded.
 * NOTE(review): some callers wrap this in model->model_rwlock, others
 * (starpu_perfmodel_unload_model) do not — confirm the locking contract. */
void _starpu_deinitialize_performance_model(struct starpu_perfmodel *model)
{
	unsigned arch, devid, ncore, nimpl;

	if(model->is_init && model->per_arch != NULL)
	{
		for (arch = 0; arch < STARPU_NARCH; arch++)
		{
			if( model->per_arch[arch] != NULL)
			{
				/* per_arch[arch] and the nested levels are NULL-terminated arrays */
				for(devid=0; model->per_arch[arch][devid] != NULL; devid++)
				{
					for(ncore=0; model->per_arch[arch][devid][ncore] != NULL; ncore++)
					{
						for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
						{
							struct starpu_perfmodel_per_arch *archmodel = &model->per_arch[arch][devid][ncore][nimpl];
							struct starpu_perfmodel_history_list *list, *plist;
							struct starpu_perfmodel_history_table *entry, *tmp;

							/* free the hash-table nodes; the history entries
							 * they point to are freed via the list below */
							HASH_ITER(hh, archmodel->history, entry, tmp)
							{
								HASH_DEL(archmodel->history, entry);
								free(entry);
							}
							archmodel->history = NULL;

							/* free the list cells and the history entries themselves */
							list = archmodel->list;
							while (list)
							{
								free(list->entry);
								plist = list;
								list = list->next;
								free(plist);
							}
							archmodel->list = NULL;
						}
						free(model->per_arch[arch][devid][ncore]);
						model->per_arch[arch][devid][ncore] = NULL;
					}
					free(model->per_arch[arch][devid]);
					model->per_arch[arch][devid] = NULL;
				}
				free(model->per_arch[arch]);
				model->per_arch[arch] = NULL;
			}
		}
		free(model->per_arch);
		model->per_arch = NULL;
	}

	model->is_init = 0;
	model->is_loaded = 0;
}
  772. void _starpu_deinitialize_registered_performance_models(void)
  773. {
  774. if (_starpu_get_calibrate_flag())
  775. _starpu_dump_registered_models();
  776. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  777. struct _starpu_perfmodel_list *node, *pnode;
  778. node = registered_models;
  779. _STARPU_DEBUG("FREE MODELS !\n");
  780. while (node)
  781. {
  782. struct starpu_perfmodel *model = node->model;
  783. STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
  784. _starpu_deinitialize_performance_model(model);
  785. STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
  786. pnode = node;
  787. node = node->next;
  788. free(pnode);
  789. }
  790. registered_models = NULL;
  791. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  792. STARPU_PTHREAD_RWLOCK_DESTROY(&registered_models_rwlock);
  793. }
  794. /*
  795. * XXX: We should probably factorize the beginning of the _starpu_load_*_model
  796. * functions. This is a bit tricky though, because we must be sure to unlock
  797. * registered_models_rwlock at the right place.
  798. */
/* Per-arch models keep no on-disk history: loading them only requires
 * initializing the model structure. */
void _starpu_load_per_arch_based_model(struct starpu_perfmodel *model)
{
	starpu_perfmodel_init(model);
}
/* Common models keep no on-disk history either: loading them only requires
 * initializing the model structure. */
void _starpu_load_common_based_model(struct starpu_perfmodel *model)
{
	starpu_perfmodel_init(model);
}
  807. /* We first try to grab the global lock in read mode to check whether the model
  808. * was loaded or not (this is very likely to have been already loaded). If the
  809. * model was not loaded yet, we take the lock in write mode, and if the model
  810. * is still not loaded once we have the lock, we do load it. */
  811. void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned scan_history)
  812. {
  813. starpu_perfmodel_init(model);
  814. STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
  815. if(!model->is_loaded)
  816. {
  817. /* make sure the performance model directory exists (or create it) */
  818. _starpu_create_sampling_directory_if_needed();
  819. char path[256];
  820. get_model_path(model, path, 256);
  821. _STARPU_DEBUG("Opening performance model file %s for model %s ...\n", path, model->symbol);
  822. unsigned calibrate_flag = _starpu_get_calibrate_flag();
  823. model->benchmarking = calibrate_flag;
  824. /* try to open an existing file and load it */
  825. int res;
  826. res = access(path, F_OK);
  827. if (res == 0)
  828. {
  829. if (calibrate_flag == 2)
  830. {
  831. /* The user specified that the performance model should
  832. * be overwritten, so we don't load the existing file !
  833. * */
  834. _STARPU_DEBUG("Overwrite existing file\n");
  835. }
  836. else
  837. {
  838. /* We load the available file */
  839. _STARPU_DEBUG("File exists\n");
  840. FILE *f;
  841. f = fopen(path, "r");
  842. STARPU_ASSERT(f);
  843. parse_model_file(f, model, scan_history);
  844. fclose(f);
  845. }
  846. }
  847. else
  848. {
  849. _STARPU_DEBUG("File does not exists\n");
  850. }
  851. _STARPU_DEBUG("Performance model file %s for model %s is loaded\n", path, model->symbol);
  852. model->is_loaded = 1;
  853. }
  854. STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
  855. }
  856. void starpu_perfmodel_directory(FILE *output)
  857. {
  858. char perf_model_dir[256];
  859. _starpu_get_perf_model_dir_codelets(perf_model_dir, 256);
  860. fprintf(output, "directory: <%s>\n", perf_model_dir);
  861. }
  862. /* This function is intended to be used by external tools that should read
  863. * the performance model files */
  864. int starpu_perfmodel_list(FILE *output)
  865. {
  866. char path[256];
  867. DIR *dp;
  868. struct dirent *ep;
  869. char perf_model_dir_codelets[256];
  870. _starpu_get_perf_model_dir_codelets(perf_model_dir_codelets, 256);
  871. strncpy(path, perf_model_dir_codelets, 256);
  872. dp = opendir(path);
  873. if (dp != NULL)
  874. {
  875. while ((ep = readdir(dp)))
  876. {
  877. if (strcmp(ep->d_name, ".") && strcmp(ep->d_name, ".."))
  878. fprintf(output, "file: <%s>\n", ep->d_name);
  879. }
  880. closedir (dp);
  881. }
  882. else
  883. {
  884. _STARPU_DISP("Could not open the perfmodel directory <%s>: %s\n", path, strerror(errno));
  885. }
  886. return 0;
  887. }
/* This function is intended to be used by external tools that should read the
 * performance model files */
/* TODO: write a clear function, to free symbol and history */
  891. int starpu_perfmodel_load_symbol(const char *symbol, struct starpu_perfmodel *model)
  892. {
  893. model->symbol = strdup(symbol);
  894. /* where is the file if it exists ? */
  895. char path[256];
  896. get_model_path(model, path, 256);
  897. // _STARPU_DEBUG("get_model_path -> %s\n", path);
  898. /* does it exist ? */
  899. int res;
  900. res = access(path, F_OK);
  901. if (res)
  902. {
  903. const char *dot = strrchr(symbol, '.');
  904. if (dot)
  905. {
  906. char *symbol2 = strdup(symbol);
  907. symbol2[dot-symbol] = '\0';
  908. int ret;
  909. _STARPU_DISP("note: loading history from %s instead of %s\n", symbol2, symbol);
  910. ret = starpu_perfmodel_load_symbol(symbol2,model);
  911. free(symbol2);
  912. return ret;
  913. }
  914. _STARPU_DISP("There is no performance model for symbol %s\n", symbol);
  915. return 1;
  916. }
  917. FILE *f = fopen(path, "r");
  918. STARPU_ASSERT(f);
  919. starpu_perfmodel_init_with_file(f, model);
  920. rewind(f);
  921. parse_model_file(f, model, 1);
  922. STARPU_ASSERT(fclose(f) == 0);
  923. return 0;
  924. }
  925. int starpu_perfmodel_unload_model(struct starpu_perfmodel *model)
  926. {
  927. free((char *)model->symbol);
  928. _starpu_deinitialize_performance_model(model);
  929. return 0;
  930. }
  931. char* starpu_perfmodel_get_archtype_name(enum starpu_worker_archtype archtype)
  932. {
  933. switch(archtype)
  934. {
  935. case(STARPU_CPU_WORKER):
  936. return "cpu";
  937. break;
  938. case(STARPU_CUDA_WORKER):
  939. return "cuda";
  940. break;
  941. case(STARPU_OPENCL_WORKER):
  942. return "opencl";
  943. break;
  944. case(STARPU_MIC_WORKER):
  945. return "mic";
  946. break;
  947. case(STARPU_SCC_WORKER):
  948. return "scc";
  949. break;
  950. default:
  951. STARPU_ABORT();
  952. break;
  953. }
  954. }
  955. void starpu_perfmodel_get_arch_name(struct starpu_perfmodel_arch* arch, char *archname, size_t maxlen,unsigned nimpl)
  956. {
  957. snprintf(archname, maxlen, "%s%d_ncore%d_impl%u",
  958. starpu_perfmodel_get_archtype_name(arch->type),
  959. arch->devid,
  960. arch->ncore,
  961. nimpl);
  962. }
  963. void starpu_perfmodel_debugfilepath(struct starpu_perfmodel *model,
  964. struct starpu_perfmodel_arch* arch, char *path, size_t maxlen, unsigned nimpl)
  965. {
  966. char archname[32];
  967. starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);
  968. STARPU_ASSERT(path);
  969. get_model_debug_path(model, archname, path, maxlen);
  970. }
  971. double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j, unsigned nimpl)
  972. {
  973. double exp = NAN;
  974. size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
  975. struct starpu_perfmodel_regression_model *regmodel;
  976. regmodel = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].regression;
  977. if (regmodel->valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
  978. exp = regmodel->alpha*pow((double)size, regmodel->beta);
  979. return exp;
  980. }
/* Estimate the expected execution time of job <j> on <arch>/<nimpl> with the
 * non-linear law a*size^b + c when it is valid for this size, otherwise fall
 * back to the per-footprint history mean.  Returns NAN when no estimation is
 * available; in that case calibration may be forced for the rest of the run. */
double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j,unsigned nimpl)
{
	double exp = NAN;
	size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
	struct starpu_perfmodel_regression_model *regmodel;

	regmodel = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].regression;

	/* only trust the fitted law within (roughly) the calibrated size range */
	if (regmodel->nl_valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
		exp = regmodel->a*pow((double)size, regmodel->b) + regmodel->c;
	else
	{
		/* fall back to a history lookup keyed by the buffers footprint */
		uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
		struct starpu_perfmodel_per_arch *per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
		struct starpu_perfmodel_history_table *history;
		struct starpu_perfmodel_history_table *entry;

		STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
		history = per_arch_model->history;
		HASH_FIND_UINT32_T(history, &key, entry);
		STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

		/* Here helgrind would shout that this is unprotected access.
		 * We do not care about racing access to the mean, we only want
		 * a good-enough estimation */
		if (entry && entry->history_entry && entry->history_entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
			exp = entry->history_entry->mean;

		STARPU_HG_DISABLE_CHECKING(model->benchmarking);
		if (isnan(exp) && !model->benchmarking)
		{
			char archname[32];

			starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
			_STARPU_DISP("Warning: model %s is not calibrated enough for %s, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname);
			_starpu_set_calibrate_flag(1);
			model->benchmarking = 1;
		}
	}

	return exp;
}
/* Estimate the expected execution time of job <j> on <arch>/<nimpl> from the
 * recorded history: look up the mean of previous runs with the same buffers
 * footprint.  Returns NAN when the entry is absent or not calibrated enough;
 * in that case calibration may be forced for the rest of the run. */
double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j,unsigned nimpl)
{
	double exp = NAN;
	struct starpu_perfmodel_per_arch *per_arch_model;
	struct starpu_perfmodel_history_entry *entry;
	struct starpu_perfmodel_history_table *history, *elt;

	/* the footprint identifies the data size/layout combination */
	uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);

	per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];

	STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
	history = per_arch_model->history;
	HASH_FIND_UINT32_T(history, &key, elt);
	entry = (elt == NULL) ? NULL : elt->history_entry;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

	/* Here helgrind would shout that this is unprotected access.
	 * We do not care about racing access to the mean, we only want
	 * a good-enough estimation */
	if (entry && entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
		/* TODO: report differently if we've scheduled really enough
		 * of that task and the scheduler should perhaps put it aside */
		/* Calibrated enough */
		exp = entry->mean;

	STARPU_HG_DISABLE_CHECKING(model->benchmarking);
	if (isnan(exp) && !model->benchmarking)
	{
		char archname[32];

		starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
		_STARPU_DISP("Warning: model %s is not calibrated enough for %s, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname);
		_starpu_set_calibrate_flag(1);
		model->benchmarking = 1;
	}

	return exp;
}
/* Public entry point: history-based expected execution time for a task with
 * a precomputed <footprint> on <arch>.
 * NOTE(review): the designated initializer zero-fills the rest of the dummy
 * job, so j.nimpl is 0 — implementation 0 is always queried here; confirm
 * this is intended. */
double starpu_permodel_history_based_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, uint32_t footprint)
{
	struct _starpu_job j =
	{
		.footprint = footprint,
		.footprint_is_computed = 1,
	};
	return _starpu_history_based_job_expected_perf(model, arch, &j, j.nimpl);
}
/* Feed one measured execution time <measured> of job <j> on <arch>/<nimpl>
 * into <model>.  Depending on the model type this updates the per-footprint
 * history entry (mean/deviation, with outlier rejection and flushing) and/or
 * the regression sums used for the (non-)linear power-law fit.  The whole
 * update runs under model->model_rwlock taken in write mode. */
void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned cpuid STARPU_ATTRIBUTE_UNUSED, double measured, unsigned nimpl)
{
	if (model)
	{
		STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

		struct starpu_perfmodel_per_arch *per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];

		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_perfmodel_history_entry *entry;
			struct starpu_perfmodel_history_table *elt;
			struct starpu_perfmodel_history_list **list;

			/* measurements are keyed by the buffers footprint */
			uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);

			list = &per_arch_model->list;
			HASH_FIND_UINT32_T(per_arch_model->history, &key, elt);
			entry = (elt == NULL) ? NULL : elt->history_entry;

			if (!entry)
			{
				/* this is the first entry with such a footprint */
				entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
				STARPU_ASSERT(entry);

				/* Tell helgrind that we do not care about
				 * racing access to the sampling, we only want a
				 * good-enough estimation */
				STARPU_HG_DISABLE_CHECKING(entry->nsample);
				STARPU_HG_DISABLE_CHECKING(entry->mean);

				/* single sample: mean is the measurement, no deviation yet */
				entry->mean = measured;
				entry->sum = measured;

				entry->deviation = 0.0;
				entry->sum2 = measured*measured;

				entry->size = _starpu_job_get_data_size(model, arch, nimpl, j);
				entry->flops = j->task->flops;

				entry->footprint = key;
				entry->nsample = 1;
				entry->nerror = 0;

				insert_history_entry(entry, list, &per_arch_model->history);
			}
			else
			{
				/* There is already an entry with the same footprint */
				double local_deviation = measured/entry->mean;
				int historymaxerror = starpu_get_env_number_default("STARPU_HISTORY_MAX_ERROR", STARPU_HISTORYMAXERROR);

				/* reject the sample when it deviates from the current
				 * mean by more than historymaxerror percent, in either
				 * direction */
				if (entry->nsample &&
					(100 * local_deviation > (100 + historymaxerror)
					 || (100 / local_deviation > (100 + historymaxerror))))
				{
					/* TODO: add aging, otherwise with
					 * millions of tasks we're sure to
					 * flush at least once... */
					entry->nerror++;

					/* Too many errors: we flush out all the entries */
					if (entry->nerror >= entry->nsample)
					{
						char archname[32];

						starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
						_STARPU_DISP("Too big deviation for model %s on %s: %f vs average %f over %u samples (%+f%%), flushing the performance model. Use the STARPU_HISTORY_MAX_ERROR environement variable to control the threshold (currently %d%%)\n", model->symbol, archname, measured, entry->mean, entry->nsample, measured * 100. / entry->mean - 100, historymaxerror);
						entry->sum = 0.0;
						entry->sum2 = 0.0;
						entry->nsample = 0;
						entry->nerror = 0;
						entry->mean = 0.0;
						entry->deviation = 0.0;
					}
				}
				else
				{
					/* accept the sample: update running mean and deviation */
					entry->sum += measured;
					entry->sum2 += measured*measured;
					entry->nsample++;
					unsigned n = entry->nsample;
					entry->mean = entry->sum / n;
					entry->deviation = sqrt((entry->sum2 - (entry->sum*entry->sum)/n)/n);
				}

				if (j->task->flops != 0.)
				{
					if (entry->flops == 0.)
						entry->flops = j->task->flops;
					else if (entry->flops != j->task->flops)
						/* Incoherent flops! forget about trying to record flops */
						entry->flops = NAN;
				}
			}
			STARPU_ASSERT(entry);
		}

		if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_perfmodel_regression_model *reg_model;
			reg_model = &per_arch_model->regression;

			/* update the regression model */
			size_t job_size = _starpu_job_get_data_size(model, arch, nimpl, j);
			double logy, logx;
			logx = log((double)job_size);
			logy = log(measured);

			/* accumulate the sums needed by the least-squares fit in log space */
			reg_model->sumlnx += logx;
			reg_model->sumlnx2 += logx*logx;
			reg_model->sumlny += logy;
			reg_model->sumlnxlny += logx*logy;

			/* track the calibrated size range */
			if (reg_model->minx == 0 || job_size < reg_model->minx)
				reg_model->minx = job_size;
			if (reg_model->maxx == 0 || job_size > reg_model->maxx)
				reg_model->maxx = job_size;

			reg_model->nsample++;

			if (VALID_REGRESSION(reg_model))
			{
				/* linear least squares on (ln x, ln y) yields
				 * y = alpha * x^beta */
				unsigned n = reg_model->nsample;

				double num = (n*reg_model->sumlnxlny - reg_model->sumlnx*reg_model->sumlny);
				double denom = (n*reg_model->sumlnx2 - reg_model->sumlnx*reg_model->sumlnx);

				reg_model->beta = num/denom;
				reg_model->alpha = exp((reg_model->sumlny - reg_model->beta*reg_model->sumlnx)/n);

				reg_model->valid = 1;
			}
		}

#ifdef STARPU_MODEL_DEBUG
		/* append the raw measurement and the buffer descriptions to the
		 * per-arch debug file */
		struct starpu_task *task = j->task;
		FILE *f = fopen(per_arch_model->debug_path, "a+");
		if (f == NULL)
		{
			_STARPU_DISP("Error <%s> when opening file <%s>\n", strerror(errno), per_arch_model->debug_path);
			STARPU_ABORT();
		}

		if (!j->footprint_is_computed)
			(void) _starpu_compute_buffers_footprint(model, arch, nimpl, j);

		STARPU_ASSERT(j->footprint_is_computed);

		/* NOTE(review): cpuid is unsigned but printed with %d — confirm */
		fprintf(f, "0x%x\t%lu\t%f\t%f\t%f\t%d\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(model, arch, nimpl, j), measured, task->predicted, task->predicted_transfer, cpuid);
		unsigned i;

		for (i = 0; i < task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);

			STARPU_ASSERT(handle->ops);
			STARPU_ASSERT(handle->ops->display);
			handle->ops->display(handle, f);
		}
		fprintf(f, "\n");
		fclose(f);
#endif
		STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
	}
}
  1194. void starpu_perfmodel_update_history(struct starpu_perfmodel *model, struct starpu_task *task, struct starpu_perfmodel_arch * arch, unsigned cpuid, unsigned nimpl, double measured)
  1195. {
  1196. struct _starpu_job *job = _starpu_get_job_associated_to_task(task);
  1197. _starpu_load_perfmodel(model);
  1198. /* Record measurement */
  1199. _starpu_update_perfmodel_history(job, model, arch, cpuid, measured, nimpl);
  1200. /* and save perfmodel on termination */
  1201. _starpu_set_calibrate_flag(1);
  1202. }