perfmodel_history.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2014 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <dirent.h>
  19. #include <unistd.h>
  20. #include <sys/stat.h>
  21. #include <errno.h>
  22. #include <common/config.h>
  23. #include <common/utils.h>
  24. #include <core/perfmodel/perfmodel.h>
  25. #include <core/jobs.h>
  26. #include <core/workers.h>
  27. #include <datawizard/datawizard.h>
  28. #include <core/perfmodel/regression.h>
  29. #include <common/config.h>
  30. #include <starpu_parameters.h>
  31. #include <common/uthash.h>
  32. #ifdef STARPU_HAVE_WINDOWS
  33. #include <windows.h>
  34. #endif
  35. #define HASH_ADD_UINT32_T(head,field,add) HASH_ADD(hh,head,field,sizeof(uint32_t),add)
  36. #define HASH_FIND_UINT32_T(head,find,out) HASH_FIND(hh,head,find,sizeof(uint32_t),out)
/* Hash-table node (uthash) mapping a task footprint to its history entry,
 * giving O(1) lookup of previously recorded measurements. */
struct starpu_perfmodel_history_table
{
	UT_hash_handle hh;	/* makes this struct usable with the uthash macros */
	uint32_t footprint;	/* hash key: footprint of the task's data */
	struct starpu_perfmodel_history_entry *history_entry;	/* payload: the recorded measurements */
};
/* We want more than 10% variance on X to trust regression */
#define VALID_REGRESSION(reg_model) \
	((reg_model)->minx < (9*(reg_model)->maxx)/10 && (reg_model)->nsample >= _STARPU_CALIBRATION_MINIMUM)

/* Protects both the registered_models list and model (de)initialization. */
static starpu_pthread_rwlock_t registered_models_rwlock;
/* Singly-linked list of every performance model registered so far. */
static struct _starpu_perfmodel_list *registered_models = NULL;
  48. size_t _starpu_job_get_data_size(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned nimpl, struct _starpu_job *j)
  49. {
  50. struct starpu_task *task = j->task;
  51. if (model && model->per_arch && model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].size_base)
  52. {
  53. return model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].size_base(task, arch, nimpl);
  54. }
  55. else if (model && model->size_base)
  56. {
  57. return model->size_base(task, nimpl);
  58. }
  59. else
  60. {
  61. unsigned nbuffers = task->cl->nbuffers;
  62. size_t size = 0;
  63. unsigned buffer;
  64. for (buffer = 0; buffer < nbuffers; buffer++)
  65. {
  66. starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, buffer);
  67. size += _starpu_data_get_size(handle);
  68. }
  69. return size;
  70. }
  71. }
  72. /*
  73. * History based model
  74. */
  75. static void insert_history_entry(struct starpu_perfmodel_history_entry *entry, struct starpu_perfmodel_history_list **list, struct starpu_perfmodel_history_table **history_ptr)
  76. {
  77. struct starpu_perfmodel_history_list *link;
  78. struct starpu_perfmodel_history_table *table;
  79. link = (struct starpu_perfmodel_history_list *) malloc(sizeof(struct starpu_perfmodel_history_list));
  80. link->next = *list;
  81. link->entry = entry;
  82. *list = link;
  83. /* detect concurrency issue */
  84. //HASH_FIND_UINT32_T(*history_ptr, &entry->footprint, table);
  85. //STARPU_ASSERT(table == NULL);
  86. table = (struct starpu_perfmodel_history_table*) malloc(sizeof(*table));
  87. STARPU_ASSERT(table != NULL);
  88. table->footprint = entry->footprint;
  89. table->history_entry = entry;
  90. HASH_ADD_UINT32_T(*history_ptr, footprint, table);
  91. }
/* Write the regression parameters of MODEL for (arch, nimpl) to F:
 * first the linear (alpha, beta) model with its raw accumulated sums,
 * then the non-linear (a, b, c) model recomputed from the history. */
static void dump_reg_model(FILE *f, struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned nimpl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;
	per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
	struct starpu_perfmodel_regression_model *reg_model;
	reg_model = &per_arch_model->regression;

	/*
	 * Linear Regression model
	 */
	/* Unless we have enough measurements, we put NaN in the file to indicate the model is invalid */
	double alpha = nan(""), beta = nan("");
	if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		if (reg_model->nsample > 1)
		{
			alpha = reg_model->alpha;
			beta = reg_model->beta;
		}
	}
	fprintf(f, "# sumlnx\tsumlnx2\t\tsumlny\t\tsumlnxlny\talpha\t\tbeta\t\tn\tminx\t\tmaxx\n");
	fprintf(f, "%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\t%-15lu\t%-15lu\n", reg_model->sumlnx, reg_model->sumlnx2, reg_model->sumlny, reg_model->sumlnxlny, alpha, beta, reg_model->nsample, reg_model->minx, reg_model->maxx);

	/*
	 * Non-Linear Regression model
	 */
	/* a/b/c stay NaN for model types that do not use the non-linear fit. */
	double a = nan(""), b = nan(""), c = nan("");
	if (model->type == STARPU_NL_REGRESSION_BASED)
		_starpu_regression_non_linear_power(per_arch_model->list, &a, &b, &c);
	fprintf(f, "# a\t\tb\t\tc\n");
	fprintf(f, "%-15le\t%-15le\t%-15le\n", a, b, c);
}
/* Read back the regression parameters written by dump_reg_model() and set the
 * valid/nl_valid flags telling whether each fit is trustworthy. */
static void scan_reg_model(FILE *f, struct starpu_perfmodel_regression_model *reg_model)
{
	int res;

	/*
	 * Linear Regression model
	 */
	_starpu_drop_comments(f);
	res = fscanf(f, "%le\t%le\t%le\t%le", &reg_model->sumlnx, &reg_model->sumlnx2, &reg_model->sumlny, &reg_model->sumlnxlny);
	STARPU_ASSERT_MSG(res == 4, "Incorrect performance model file");
	/* alpha/beta may have been written as NaN; read via the helper that accepts it */
	res = _starpu_read_double(f, "\t%le", &reg_model->alpha);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "\t%le", &reg_model->beta);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = fscanf(f, "\t%u\t%lu\t%lu\n", &reg_model->nsample, &reg_model->minx, &reg_model->maxx);
	STARPU_ASSERT_MSG(res == 3, "Incorrect performance model file");

	/* If any of the parameters describing the linear regression model is NaN, the model is invalid */
	unsigned invalid = (isnan(reg_model->alpha)||isnan(reg_model->beta));
	reg_model->valid = !invalid && VALID_REGRESSION(reg_model);

	/*
	 * Non-Linear Regression model
	 */
	_starpu_drop_comments(f);
	res = _starpu_read_double(f, "%le\t", &reg_model->a);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "%le\t", &reg_model->b);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "%le\n", &reg_model->c);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");

	/* If any of the parameters describing the non-linear regression model is NaN, the model is invalid */
	unsigned nl_invalid = (isnan(reg_model->a)||isnan(reg_model->b)||isnan(reg_model->c));
	reg_model->nl_valid = !nl_invalid && VALID_REGRESSION(reg_model);
}
/* Write one history entry as a single line: footprint, data size, flops,
 * then the timing statistics (mean/deviation in us, raw sums and count). */
static void dump_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
{
	fprintf(f, "%08x\t%-15lu\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\n", entry->footprint, (unsigned long) entry->size, entry->flops, entry->mean, entry->deviation, entry->sum, entry->sum2, entry->nsample);
}
/* Parse one history-entry line from F into ENTRY. ENTRY may be NULL, in
 * which case the values are read and discarded (used to skip sections). */
static void scan_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
{
	int res;

	_starpu_drop_comments(f);

	/* In case entry is NULL, we just drop these values */
	unsigned nsample;
	uint32_t footprint;
	unsigned long size; /* in bytes */
	double flops;
	double mean;
	double deviation;
	double sum;
	double sum2;

	char line[256];
	char *ret;
	ret = fgets(line, sizeof(line), f);
	STARPU_ASSERT(ret);
	/* The whole entry must fit on one line; no '\n' means it was truncated */
	STARPU_ASSERT(strchr(line, '\n'));

	/* Read the values from the file */
	res = sscanf(line, "%x\t%lu\t%le\t%le\t%le\t%le\t%le\t%u", &footprint, &size, &flops, &mean, &deviation, &sum, &sum2, &nsample);
	if (res != 8)
	{
		/* Fall back to the older 7-field format which lacked the flops column */
		flops = 0.;
		/* Read the values from the file */
		res = sscanf(line, "%x\t%lu\t%le\t%le\t%le\t%le\t%u", &footprint, &size, &mean, &deviation, &sum, &sum2, &nsample);
		STARPU_ASSERT_MSG(res == 7, "Incorrect performance model file");
	}

	if (entry)
	{
		entry->footprint = footprint;
		entry->size = size;
		entry->flops = flops;
		entry->mean = mean;
		entry->deviation = deviation;
		entry->sum = sum;
		entry->sum2 = sum2;
		entry->nsample = nsample;
	}
}
/* Parse the section for one (arch, impl): number of entries, regression
 * parameters, then each history entry. When SCAN_HISTORY is 0 the entries
 * are parsed but thrown away. */
static void parse_per_arch_model_file(FILE *f, struct starpu_perfmodel_per_arch *per_arch_model, unsigned scan_history)
{
	unsigned nentries;

	_starpu_drop_comments(f);

	int res = fscanf(f, "%u\n", &nentries);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");

	scan_reg_model(f, &per_arch_model->regression);

	/* parse entries */
	unsigned i;
	for (i = 0; i < nentries; i++)
	{
		struct starpu_perfmodel_history_entry *entry = NULL;
		if (scan_history)
		{
			entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
			STARPU_ASSERT(entry);

			/* Tell helgrind that we do not care about
			 * racing access to the sampling, we only want a
			 * good-enough estimation */
			STARPU_HG_DISABLE_CHECKING(entry->nsample);
			STARPU_HG_DISABLE_CHECKING(entry->mean);
			entry->nerror = 0;
		}

		scan_history_entry(f, entry);

		/* insert the entry in the hashtable and the list structures */
		/* TODO: Insert it at the end of the list, to avoid reversing
		 * the order... But efficiently! We may have a lot of entries */
		if (scan_history)
			insert_history_entry(entry, &per_arch_model->list, &per_arch_model->history);
	}
}
/* Parse one "arch" section (a given worker count on a given device): the
 * number of implementations, then one per-impl section each. When MODEL is
 * NULL everything is parsed into a dummy and dropped. */
static void parse_arch(FILE *f, struct starpu_perfmodel *model, unsigned scan_history,struct starpu_perfmodel_arch* arch)
{
	struct starpu_perfmodel_per_arch dummy;
	unsigned nimpls, implmax, impl, i, ret;

	//_STARPU_DEBUG("Parsing %s_%u_parallel_%u\n",
	//		starpu_perfmodel_get_archtype_name(arch->type),
	//		arch->devid,
	//		arch->ncore + 1);

	/* Parsing number of implementation */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &nimpls);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	if( model != NULL)
	{
		/* Parsing each implementation */
		implmax = STARPU_MIN(nimpls, STARPU_MAXIMPLEMENTATIONS);
		for (impl = 0; impl < implmax; impl++)
			parse_per_arch_model_file(f, &model->per_arch[arch->type][arch->devid][arch->ncore][impl], scan_history);
	}
	else
	{
		/* Nothing to store: every implementation is drained below */
		impl = 0;
	}

	/* if the number of implementation is greater than STARPU_MAXIMPLEMENTATIONS
	 * we skip the last implementation */
	for (i = impl; i < nimpls; i++)
		parse_per_arch_model_file(f, &dummy, 0);
}
/* Parse one "device" section: the maximum worker count for the device, then
 * one arch section per worker count. Sections beyond what MODEL was
 * initialized for (or all of them when MODEL is NULL) are drained without
 * being stored. */
static void parse_device(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, enum starpu_worker_archtype archtype, unsigned devid)
{
	unsigned maxncore, ncore, ret, i;

	struct starpu_perfmodel_arch arch;
	arch.type = archtype;
	arch.devid = devid;

	//_STARPU_DEBUG("Parsing device %s_%u arch\n",
	//		starpu_perfmodel_get_archtype_name(archtype),
	//		devid);

	/* Parsing maximun number of worker for this device */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &maxncore);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	/* Parsing each arch */
	if(model !=NULL)
	{
		/* Stop at the first worker count the model was not initialized for */
		for(ncore=0; ncore < maxncore && model->per_arch[archtype][devid][ncore] != NULL; ncore++)
		{
			arch.ncore = ncore;
			parse_arch(f,model,scan_history,&arch);
		}
	}
	else
	{
		ncore=0;
	}

	/* Drain the remaining arch sections without storing them */
	for(i=ncore; i < maxncore; i++)
	{
		arch.ncore = i;
		parse_arch(f,NULL,scan_history,&arch);
	}
}
/* Parse one "archtype" section: the number of devices of this type, then one
 * device section each. Devices beyond what MODEL was initialized for (or all
 * of them when MODEL is NULL) are drained without being stored. */
static void parse_archtype(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, enum starpu_worker_archtype archtype)
{
	unsigned ndevice, devid, ret, i;

	//_STARPU_DEBUG("Parsing %s arch\n", starpu_perfmodel_get_archtype_name(archtype));

	/* Parsing number of device for this archtype */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &ndevice);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	/* Parsing each device for this archtype*/
	if(model != NULL)
	{
		/* Stop at the first device the model was not initialized for */
		for(devid=0; devid < ndevice && model->per_arch[archtype][devid] != NULL; devid++)
		{
			parse_device(f,model,scan_history,archtype,devid);
		}
	}
	else
	{
		devid=0;
	}

	/* Drain the remaining device sections without storing them */
	for(i=devid; i < ndevice; i++)
	{
		parse_device(f,NULL,scan_history,archtype,i);
	}
}
  313. static void parse_model_file(FILE *f, struct starpu_perfmodel *model, unsigned scan_history)
  314. {
  315. unsigned archtype;
  316. int ret, version;
  317. //_STARPU_DEBUG("Start parsing\n");
  318. /* Parsing performance model version */
  319. _starpu_drop_comments(f);
  320. ret = fscanf(f, "%d\n", &version);
  321. STARPU_ASSERT_MSG(version == _STARPU_PERFMODEL_VERSION, "Incorrect performance model file with a model version %d not being the current model version (%d)\n",
  322. version, _STARPU_PERFMODEL_VERSION);
  323. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  324. /* Parsing each kind of archtype */
  325. for(archtype=0; archtype<STARPU_NARCH; archtype++)
  326. {
  327. parse_archtype(f, model, scan_history, archtype);
  328. }
  329. }
/* Write the full section for one (arch, impl) of MODEL to F: a header with
 * the arch name, the number of history entries, the regression parameters,
 * and (for history-using model types) every recorded entry. */
static void dump_per_arch_model_file(FILE *f, struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, unsigned nimpl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;
	per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];

	/* count the number of elements in the lists */
	struct starpu_perfmodel_history_list *ptr = NULL;
	unsigned nentries = 0;
	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		/* Dump the list of all entries in the history */
		ptr = per_arch_model->list;
		while(ptr)
		{
			nentries++;
			ptr = ptr->next;
		}
	}

	/* header */
	char archname[32];
	starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);
	fprintf(f, "#####\n");
	fprintf(f, "# Model for %s\n", archname);
	fprintf(f, "# number of entries\n%u\n", nentries);

	dump_reg_model(f, model, arch, nimpl);

	/* Dump the history into the model file in case it is necessary */
	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		fprintf(f, "# hash\t\tsize\t\tflops\t\tmean (us)\tdev (us)\tsum\t\tsum2\t\tn\n");
		ptr = per_arch_model->list;
		while (ptr)
		{
			dump_history_entry(f, ptr->entry);
			ptr = ptr->next;
		}
	}

	fprintf(f, "\n");
}
  367. static unsigned get_n_entries(struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, unsigned impl)
  368. {
  369. struct starpu_perfmodel_per_arch *per_arch_model;
  370. per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][impl];
  371. /* count the number of elements in the lists */
  372. struct starpu_perfmodel_history_list *ptr = NULL;
  373. unsigned nentries = 0;
  374. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  375. {
  376. /* Dump the list of all entries in the history */
  377. ptr = per_arch_model->list;
  378. while(ptr)
  379. {
  380. nentries++;
  381. ptr = ptr->next;
  382. }
  383. }
  384. return nentries;
  385. }
/* Dump the whole performance model MODEL to F in the on-disk format, walking
 * every archtype/device/worker-count combination of the current machine. */
static void dump_model_file(FILE *f, struct starpu_perfmodel *model)
{
	struct _starpu_machine_config *conf = _starpu_get_machine_config();
	char *name = "unknown";
	unsigned archtype, ndevice, *ncore, devid, nc, nimpl;
	struct starpu_perfmodel_arch arch;

	fprintf(f, "##################\n");
	fprintf(f, "# Performance Model Version\n");
	fprintf(f, "%d\n\n", _STARPU_PERFMODEL_VERSION);

	for(archtype=0; archtype<STARPU_NARCH; archtype++)
	{
		arch.type = archtype;
		/* Device count and per-device worker counts depend on the archtype;
		 * ncore == NULL means "one worker per device". */
		switch (archtype)
		{
		case STARPU_CPU_WORKER:
			ndevice = 1;
			ncore = &conf->topology.nhwcpus;
			name = "CPU";
			break;
		case STARPU_CUDA_WORKER:
			ndevice = conf->topology.nhwcudagpus;
			ncore = NULL;
			name = "CUDA";
			break;
		case STARPU_OPENCL_WORKER:
			ndevice = conf->topology.nhwopenclgpus;
			ncore = NULL;
			name = "OPENCL";
			break;
		case STARPU_MIC_WORKER:
			ndevice = conf->topology.nhwmicdevices;
			ncore = conf->topology.nhwmiccores;
			name = "MIC";
			break;
		case STARPU_SCC_WORKER:
			ndevice = conf->topology.nhwscc;
			ncore = NULL;
			name = "SCC";
			break;
		default:
			/* Unknown arch */
			STARPU_ABORT();
			break;
		}

		fprintf(f, "####################\n");
		fprintf(f, "# %ss\n", name);
		fprintf(f, "# number of %s devices\n", name);
		fprintf(f, "%u\n", ndevice);

		for(devid=0; devid<ndevice; devid++)
		{
			arch.devid = devid;
			fprintf(f, "###############\n");
			fprintf(f, "# %s_%u\n", name, devid);
			fprintf(f, "# number of workers on device %s_%d\n", name, devid);
			if(ncore != NULL)
				fprintf(f, "%u\n", ncore[devid]);
			else
				fprintf(f, "1\n");
			for(nc=0; model->per_arch[archtype][devid][nc] != NULL; nc++)
			{
				arch.ncore = nc;
				/* Only dump implementations that actually hold data:
				 * find the highest implementation with samples. */
				unsigned max_impl = 0;
				if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
				{
					for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
						if (get_n_entries(model, &arch, nimpl))
							max_impl = nimpl + 1;
				}
				else if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_PER_ARCH || model->type == STARPU_COMMON)
				{
					for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
						if (model->per_arch[archtype][devid][nc][nimpl].regression.nsample)
							max_impl = nimpl + 1;
				}
				else
					STARPU_ASSERT_MSG(0, "Unknown history-based performance model %u", model->type);

				fprintf(f, "##########\n");
				fprintf(f, "# %u worker(s) in parallel\n", nc+1);
				fprintf(f, "# number of implementations\n");
				fprintf(f, "%u\n", max_impl);
				for (nimpl = 0; nimpl < max_impl; nimpl++)
				{
					dump_per_arch_model_file(f, model, &arch, nimpl);
				}
			}
		}
	}
}
  474. static void initialize_per_arch_model(struct starpu_perfmodel_per_arch *per_arch_model)
  475. {
  476. memset(per_arch_model, 0, sizeof(struct starpu_perfmodel_per_arch));
  477. }
  478. static struct starpu_perfmodel_per_arch*** initialize_arch_model(int maxdevid, unsigned* maxncore_table)
  479. {
  480. int devid, ncore, nimpl;
  481. struct starpu_perfmodel_per_arch *** arch_model = malloc(sizeof(*arch_model)*(maxdevid+1));
  482. arch_model[maxdevid] = NULL;
  483. for(devid=0; devid<maxdevid; devid++)
  484. {
  485. int maxncore;
  486. if(maxncore_table != NULL)
  487. maxncore = maxncore_table[devid];
  488. else
  489. maxncore = 1;
  490. arch_model[devid] = malloc(sizeof(*arch_model[devid])*(maxncore+1));
  491. arch_model[devid][maxncore] = NULL;
  492. for(ncore=0; ncore<maxncore; ncore++)
  493. {
  494. arch_model[devid][ncore] = malloc(sizeof(*arch_model[devid][ncore])*STARPU_MAXIMPLEMENTATIONS);
  495. for(nimpl=0; nimpl<STARPU_MAXIMPLEMENTATIONS; nimpl++)
  496. {
  497. initialize_per_arch_model(&arch_model[devid][ncore][nimpl]);
  498. }
  499. }
  500. }
  501. return arch_model;
  502. }
  503. static void initialize_model(struct starpu_perfmodel *model)
  504. {
  505. struct _starpu_machine_config *conf = _starpu_get_machine_config();
  506. model->per_arch = malloc(sizeof(*model->per_arch)*(STARPU_NARCH));
  507. model->per_arch[STARPU_CPU_WORKER] = initialize_arch_model(1,&conf->topology.nhwcpus);
  508. model->per_arch[STARPU_CUDA_WORKER] = initialize_arch_model(conf->topology.nhwcudagpus,NULL);
  509. model->per_arch[STARPU_OPENCL_WORKER] = initialize_arch_model(conf->topology.nhwopenclgpus,NULL);
  510. model->per_arch[STARPU_MIC_WORKER] = initialize_arch_model(conf->topology.nhwmicdevices,conf->topology.nhwmiccores);
  511. model->per_arch[STARPU_SCC_WORKER] = initialize_arch_model(conf->topology.nhwscc,NULL);
  512. }
  513. static void initialize_model_with_file(FILE*f, struct starpu_perfmodel *model)
  514. {
  515. unsigned ret, archtype, devid, i, ndevice, * maxncore;
  516. struct starpu_perfmodel_arch arch;
  517. int version;
  518. /* Parsing performance model version */
  519. _starpu_drop_comments(f);
  520. ret = fscanf(f, "%d\n", &version);
  521. STARPU_ASSERT_MSG(version == _STARPU_PERFMODEL_VERSION, "Incorrect performance model file with a model version %d not being the current model version (%d)\n",
  522. version, _STARPU_PERFMODEL_VERSION);
  523. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  524. model->per_arch = malloc(sizeof(*model->per_arch)*(STARPU_NARCH));
  525. for(archtype=0; archtype<STARPU_NARCH; archtype++)
  526. {
  527. arch.type = archtype;
  528. _starpu_drop_comments(f);
  529. ret = fscanf(f, "%u\n", &ndevice);
  530. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  531. if(ndevice != 0)
  532. maxncore = malloc(sizeof(*maxncore)*ndevice);
  533. else
  534. maxncore = NULL;
  535. for(devid=0; devid < ndevice; devid++)
  536. {
  537. arch.devid = devid;
  538. _starpu_drop_comments(f);
  539. ret = fscanf(f, "%u\n", &maxncore[devid]);
  540. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  541. for(i=0; i<maxncore[devid]; i++)
  542. {
  543. arch.ncore = i;
  544. parse_arch(f,NULL,0,&arch);
  545. }
  546. }
  547. model->per_arch[archtype] = initialize_arch_model(ndevice,maxncore);
  548. if(maxncore != NULL)
  549. free(maxncore);
  550. }
  551. }
/* Initialize MODEL's runtime state (its rwlock and per_arch tables) exactly
 * once. Uses a double-checked pattern: a cheap read-locked test first, then
 * the definitive check under the write lock. */
void starpu_perfmodel_init(struct starpu_perfmodel *model)
{
	STARPU_ASSERT(model && model->symbol);

	int already_init;

	STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_init = model->is_init;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	if (already_init)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test ? */
	if (model->is_init)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
	/* STARPU_COMMON models have no per-arch tables to allocate */
	if(model->type != STARPU_COMMON)
		initialize_model(model);
	model->is_init = 1;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
/* Same as starpu_perfmodel_init(), but size the per_arch tables from an
 * existing model file F instead of from the machine topology. */
void starpu_perfmodel_init_with_file(FILE*f, struct starpu_perfmodel *model)
{
	STARPU_ASSERT(model && model->symbol);

	int already_init;

	STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_init = model->is_init;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	if (already_init)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test ? */
	if (model->is_init)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
	/* STARPU_COMMON models have no per-arch tables to allocate */
	if(model->type != STARPU_COMMON)
		initialize_model_with_file(f,model);
	model->is_init = 1;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
  600. static void get_model_debug_path(struct starpu_perfmodel *model, const char *arch, char *path, size_t maxlen)
  601. {
  602. STARPU_ASSERT(path);
  603. _starpu_get_perf_model_dir_debug(path, maxlen);
  604. strncat(path, model->symbol, maxlen);
  605. char hostname[65];
  606. _starpu_gethostname(hostname, sizeof(hostname));
  607. strncat(path, ".", maxlen);
  608. strncat(path, hostname, maxlen);
  609. strncat(path, ".", maxlen);
  610. strncat(path, arch, maxlen);
  611. strncat(path, ".debug", maxlen);
  612. }
  613. /*
  614. * Returns 0 is the model was already loaded, 1 otherwise.
  615. */
/* Register MODEL in the global registered_models list (so it gets saved at
 * shutdown by _starpu_dump_registered_models). Uses the same double-checked
 * locking pattern as starpu_perfmodel_init, keyed on model->is_loaded. */
int _starpu_register_model(struct starpu_perfmodel *model)
{
	starpu_perfmodel_init(model);

	/* If the model has already been loaded, there is nothing to do */
	STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	if (model->is_loaded)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return 0;
	}
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	/* We have to make sure the model has not been loaded since the
	 * last time we took the lock */
	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
	if (model->is_loaded)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return 0;
	}

	/* add the model to a linked list */
	struct _starpu_perfmodel_list *node = (struct _starpu_perfmodel_list *) malloc(sizeof(struct _starpu_perfmodel_list));

	node->model = model;
	//model->debug_modelid = debug_modelid++;

	/* put this model at the beginning of the list */
	node->next = registered_models;
	registered_models = node;

#ifdef STARPU_MODEL_DEBUG
	_starpu_create_sampling_directory_if_needed();

	unsigned archtype, devid, ncore, nimpl;
	struct starpu_perfmodel_arch arch;

	_STARPU_DEBUG("\n\n ###\nHere\n ###\n\n");

	if(model->is_init)
	{
		_STARPU_DEBUG("Init\n");
		/* Precompute the debug file path of every (arch, impl) slot;
		 * the devid and ncore dimensions are NULL-terminated. */
		for (archtype = 0; archtype < STARPU_NARCH; archtype++)
		{
			_STARPU_DEBUG("Archtype\n");
			arch.type = archtype;
			if(model->per_arch[archtype] != NULL)
			{
				for(devid=0; model->per_arch[archtype][devid] != NULL; devid++)
				{
					_STARPU_DEBUG("Devid\n");
					arch.devid = devid;
					for(ncore=0; model->per_arch[archtype][devid][ncore] != NULL; ncore++)
					{
						_STARPU_DEBUG("Ncore\n");
						arch.ncore = ncore;
						for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
						{
							starpu_perfmodel_debugfilepath(model, &arch, model->per_arch[archtype][devid][ncore][nimpl].debug_path, 256, nimpl);
						}
					}
				}
			}
		}
	}
#endif

	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	return 1;
}
  677. static void get_model_path(struct starpu_perfmodel *model, char *path, size_t maxlen)
  678. {
  679. _starpu_get_perf_model_dir_codelets(path, maxlen);
  680. strncat(path, model->symbol, maxlen);
  681. char hostname[65];
  682. _starpu_gethostname(hostname, sizeof(hostname));
  683. strncat(path, ".", maxlen);
  684. strncat(path, hostname, maxlen);
  685. }
  686. static void save_history_based_model(struct starpu_perfmodel *model)
  687. {
  688. STARPU_ASSERT(model);
  689. STARPU_ASSERT(model->symbol);
  690. /* TODO checks */
  691. /* filename = $STARPU_PERF_MODEL_DIR/codelets/symbol.hostname */
  692. char path[256];
  693. get_model_path(model, path, 256);
  694. _STARPU_DEBUG("Opening performance model file %s for model %s\n", path, model->symbol);
  695. /* overwrite existing file, or create it */
  696. FILE *f;
  697. f = fopen(path, "w+");
  698. STARPU_ASSERT_MSG(f, "Could not save performance model %s\n", path);
  699. dump_model_file(f, model);
  700. fclose(f);
  701. }
  702. static void _starpu_dump_registered_models(void)
  703. {
  704. #ifndef STARPU_SIMGRID
  705. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  706. struct _starpu_perfmodel_list *node;
  707. node = registered_models;
  708. _STARPU_DEBUG("DUMP MODELS !\n");
  709. while (node)
  710. {
  711. save_history_based_model(node->model);
  712. node = node->next;
  713. }
  714. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  715. #endif
  716. }
  717. void _starpu_initialize_registered_performance_models(void)
  718. {
  719. registered_models = NULL;
  720. STARPU_PTHREAD_RWLOCK_INIT(&registered_models_rwlock, NULL);
  721. }
  722. void _starpu_deinitialize_performance_model(struct starpu_perfmodel *model)
  723. {
  724. unsigned arch, devid, ncore, nimpl;
  725. if(model->is_init && model->per_arch != NULL)
  726. {
  727. for (arch = 0; arch < STARPU_NARCH; arch++)
  728. {
  729. if( model->per_arch[arch] != NULL)
  730. {
  731. for(devid=0; model->per_arch[arch][devid] != NULL; devid++)
  732. {
  733. for(ncore=0; model->per_arch[arch][devid][ncore] != NULL; ncore++)
  734. {
  735. for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
  736. {
  737. struct starpu_perfmodel_per_arch *archmodel = &model->per_arch[arch][devid][ncore][nimpl];
  738. struct starpu_perfmodel_history_list *list, *plist;
  739. struct starpu_perfmodel_history_table *entry, *tmp;
  740. HASH_ITER(hh, archmodel->history, entry, tmp)
  741. {
  742. HASH_DEL(archmodel->history, entry);
  743. free(entry);
  744. }
  745. archmodel->history = NULL;
  746. list = archmodel->list;
  747. while (list)
  748. {
  749. free(list->entry);
  750. plist = list;
  751. list = list->next;
  752. free(plist);
  753. }
  754. archmodel->list = NULL;
  755. }
  756. free(model->per_arch[arch][devid][ncore]);
  757. model->per_arch[arch][devid][ncore] = NULL;
  758. }
  759. free(model->per_arch[arch][devid]);
  760. model->per_arch[arch][devid] = NULL;
  761. }
  762. free(model->per_arch[arch]);
  763. model->per_arch[arch] = NULL;
  764. }
  765. }
  766. free(model->per_arch);
  767. model->per_arch = NULL;
  768. }
  769. model->is_init = 0;
  770. model->is_loaded = 0;
  771. }
  772. void _starpu_deinitialize_registered_performance_models(void)
  773. {
  774. if (_starpu_get_calibrate_flag())
  775. _starpu_dump_registered_models();
  776. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  777. struct _starpu_perfmodel_list *node, *pnode;
  778. node = registered_models;
  779. _STARPU_DEBUG("FREE MODELS !\n");
  780. while (node)
  781. {
  782. struct starpu_perfmodel *model = node->model;
  783. STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
  784. _starpu_deinitialize_performance_model(model);
  785. STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
  786. pnode = node;
  787. node = node->next;
  788. free(pnode);
  789. }
  790. registered_models = NULL;
  791. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  792. STARPU_PTHREAD_RWLOCK_DESTROY(&registered_models_rwlock);
  793. }
  794. /*
  795. * XXX: We should probably factorize the beginning of the _starpu_load_*_model
  796. * functions. This is a bit tricky though, because we must be sure to unlock
  797. * registered_models_rwlock at the right place.
  798. */
/* Per-arch models have nothing to read back from disk: "loading" one only
 * amounts to making sure the model structure is initialized. */
void _starpu_load_per_arch_based_model(struct starpu_perfmodel *model)
{
	starpu_perfmodel_init(model);
}
/* Common models have nothing to read back from disk either: "loading" one
 * only amounts to making sure the model structure is initialized. */
void _starpu_load_common_based_model(struct starpu_perfmodel *model)
{
	starpu_perfmodel_init(model);
}
/* Take the model's lock in write mode; if the model has not been loaded yet
 * once we hold the lock, load it from its performance model file (or just
 * mark it as loaded when the calibration mode requests overwriting). */
  811. void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned scan_history)
  812. {
  813. starpu_perfmodel_init(model);
  814. STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
  815. if(!model->is_loaded)
  816. {
  817. /* make sure the performance model directory exists (or create it) */
  818. _starpu_create_sampling_directory_if_needed();
  819. char path[256];
  820. get_model_path(model, path, 256);
  821. _STARPU_DEBUG("Opening performance model file %s for model %s ...\n", path, model->symbol);
  822. unsigned calibrate_flag = _starpu_get_calibrate_flag();
  823. model->benchmarking = calibrate_flag;
  824. /* try to open an existing file and load it */
  825. int res;
  826. res = access(path, F_OK);
  827. if (res == 0)
  828. {
  829. if (calibrate_flag == 2)
  830. {
  831. /* The user specified that the performance model should
  832. * be overwritten, so we don't load the existing file !
  833. * */
  834. _STARPU_DEBUG("Overwrite existing file\n");
  835. }
  836. else
  837. {
  838. /* We load the available file */
  839. _STARPU_DEBUG("File exists\n");
  840. FILE *f;
  841. f = fopen(path, "r");
  842. STARPU_ASSERT(f);
  843. parse_model_file(f, model, scan_history);
  844. fclose(f);
  845. }
  846. }
  847. else
  848. {
  849. _STARPU_DEBUG("File does not exists\n");
  850. }
  851. _STARPU_DEBUG("Performance model file %s for model %s is loaded\n", path, model->symbol);
  852. model->is_loaded = 1;
  853. }
  854. STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
  855. }
  856. void starpu_perfmodel_directory(FILE *output)
  857. {
  858. char perf_model_dir[256];
  859. _starpu_get_perf_model_dir_codelets(perf_model_dir, 256);
  860. fprintf(output, "directory: <%s>\n", perf_model_dir);
  861. }
  862. /* This function is intended to be used by external tools that should read
  863. * the performance model files */
  864. int starpu_perfmodel_list(FILE *output)
  865. {
  866. char path[256];
  867. DIR *dp;
  868. struct dirent *ep;
  869. char perf_model_dir_codelets[256];
  870. _starpu_get_perf_model_dir_codelets(perf_model_dir_codelets, 256);
  871. strncpy(path, perf_model_dir_codelets, 256);
  872. dp = opendir(path);
  873. if (dp != NULL)
  874. {
  875. while ((ep = readdir(dp)))
  876. {
  877. if (strcmp(ep->d_name, ".") && strcmp(ep->d_name, ".."))
  878. fprintf(output, "file: <%s>\n", ep->d_name);
  879. }
  880. closedir (dp);
  881. }
  882. else
  883. {
  884. _STARPU_DISP("Could not open the perfmodel directory <%s>: %s\n", path, strerror(errno));
  885. }
  886. return 0;
  887. }
/* This function is intended to be used by external tools that should read the
 * performance model files */
/* TODO: write a clear function, to free symbol and history */
  891. int starpu_perfmodel_load_symbol(const char *symbol, struct starpu_perfmodel *model)
  892. {
  893. model->symbol = strdup(symbol);
  894. /* where is the file if it exists ? */
  895. char path[256];
  896. get_model_path(model, path, 256);
  897. // _STARPU_DEBUG("get_model_path -> %s\n", path);
  898. /* does it exist ? */
  899. int res;
  900. res = access(path, F_OK);
  901. if (res)
  902. {
  903. const char *dot = strrchr(symbol, '.');
  904. if (dot)
  905. {
  906. char *symbol2 = strdup(symbol);
  907. symbol2[dot-symbol] = '\0';
  908. int ret;
  909. _STARPU_DISP("note: loading history from %s instead of %s\n", symbol2, symbol);
  910. ret = starpu_perfmodel_load_symbol(symbol2,model);
  911. free(symbol2);
  912. return ret;
  913. }
  914. _STARPU_DISP("There is no performance model for symbol %s\n", symbol);
  915. return 1;
  916. }
  917. FILE *f = fopen(path, "r");
  918. STARPU_ASSERT(f);
  919. starpu_perfmodel_init_with_file(f, model);
  920. rewind(f);
  921. parse_model_file(f, model, 1);
  922. STARPU_ASSERT(fclose(f) == 0);
  923. return 0;
  924. }
  925. int starpu_perfmodel_unload_model(struct starpu_perfmodel *model)
  926. {
  927. free((char *)model->symbol);
  928. _starpu_deinitialize_performance_model(model);
  929. return 0;
  930. }
  931. char* starpu_perfmodel_get_archtype_name(enum starpu_worker_archtype archtype)
  932. {
  933. switch(archtype)
  934. {
  935. case(STARPU_CPU_WORKER):
  936. return "cpu";
  937. break;
  938. case(STARPU_CUDA_WORKER):
  939. return "cuda";
  940. break;
  941. case(STARPU_OPENCL_WORKER):
  942. return "opencl";
  943. break;
  944. case(STARPU_MIC_WORKER):
  945. return "mic";
  946. break;
  947. case(STARPU_SCC_WORKER):
  948. return "scc";
  949. break;
  950. default:
  951. STARPU_ABORT();
  952. break;
  953. }
  954. }
  955. void starpu_perfmodel_get_arch_name(struct starpu_perfmodel_arch* arch, char *archname, size_t maxlen,unsigned nimpl)
  956. {
  957. snprintf(archname, maxlen, "%s%d_parallel%d_impl%u",
  958. starpu_perfmodel_get_archtype_name(arch->type),
  959. arch->devid,
  960. arch->ncore + 1,
  961. nimpl);
  962. }
  963. void starpu_perfmodel_debugfilepath(struct starpu_perfmodel *model,
  964. struct starpu_perfmodel_arch* arch, char *path, size_t maxlen, unsigned nimpl)
  965. {
  966. char archname[32];
  967. starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);
  968. STARPU_ASSERT(path);
  969. get_model_debug_path(model, archname, path, maxlen);
  970. }
  971. double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j, unsigned nimpl)
  972. {
  973. double exp = NAN;
  974. size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
  975. struct starpu_perfmodel_regression_model *regmodel;
  976. regmodel = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].regression;
  977. if (regmodel->valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
  978. exp = regmodel->alpha*pow((double)size, regmodel->beta);
  979. return exp;
  980. }
/* Estimate the execution time of job J on ARCH with implementation NIMPL
 * using the non-linear regression model a * size^b + c.  When that model is
 * not usable (not calibrated, or the size falls outside the calibrated
 * range widened by 10%), fall back to the per-footprint history mean.
 * Returns NAN when no estimation is available; in that case calibration is
 * forced for the rest of this run. */
double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j,unsigned nimpl)
{
	double exp = NAN;
	size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
	struct starpu_perfmodel_regression_model *regmodel;

	regmodel = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].regression;

	/* Only trust the non-linear fit within [0.9*minx, 1.1*maxx] */
	if (regmodel->nl_valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
		exp = regmodel->a*pow((double)size, regmodel->b) + regmodel->c;
	else
	{
		/* Fall back to the history entry matching the job footprint */
		uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
		struct starpu_perfmodel_per_arch *per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
		struct starpu_perfmodel_history_table *history;
		struct starpu_perfmodel_history_table *entry;

		/* Only the hash lookup is done under the lock */
		STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
		history = per_arch_model->history;
		HASH_FIND_UINT32_T(history, &key, entry);
		STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

		/* Here helgrind would shout that this is unprotected access.
		 * We do not care about racing access to the mean, we only want
		 * a good-enough estimation */

		if (entry && entry->history_entry && entry->history_entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
			exp = entry->history_entry->mean;

		STARPU_HG_DISABLE_CHECKING(model->benchmarking);
		if (isnan(exp) && !model->benchmarking)
		{
			/* No reliable estimation yet: warn and force calibration
			 * so that future runs can be predicted. */
			char archname[32];

			starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
			_STARPU_DISP("Warning: model %s is not calibrated enough for %s, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname);
			_starpu_set_calibrate_flag(1);
			model->benchmarking = 1;
		}
	}

	return exp;
}
/* Estimate the execution time of job J on ARCH with implementation NIMPL
 * from the per-footprint measurement history.  Returns NAN when no entry
 * exists or the entry has too few samples; in that case calibration is
 * forced for the rest of this run. */
double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j,unsigned nimpl)
{
	double exp = NAN;
	struct starpu_perfmodel_per_arch *per_arch_model;
	struct starpu_perfmodel_history_entry *entry;
	struct starpu_perfmodel_history_table *history, *elt;

	/* The footprint identifies the job's buffer configuration */
	uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);

	per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];

	/* Only the hash lookup is done under the lock */
	STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
	history = per_arch_model->history;
	HASH_FIND_UINT32_T(history, &key, elt);
	entry = (elt == NULL) ? NULL : elt->history_entry;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

	/* Here helgrind would shout that this is unprotected access.
	 * We do not care about racing access to the mean, we only want
	 * a good-enough estimation */

	if (entry && entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
		/* TODO: report differently if we've scheduled really enough
		 * of that task and the scheduler should perhaps put it aside */
		/* Calibrated enough */
		exp = entry->mean;

	STARPU_HG_DISABLE_CHECKING(model->benchmarking);
	if (isnan(exp) && !model->benchmarking)
	{
		/* No reliable estimation yet: warn and force calibration so
		 * that future runs can be predicted. */
		char archname[32];

		starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
		_STARPU_DISP("Warning: model %s is not calibrated enough for %s, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname);
		_starpu_set_calibrate_flag(1);
		model->benchmarking = 1;
	}

	return exp;
}
/* Public entry point: history-based estimation for a pre-computed FOOTPRINT
 * (used by external tools that do not have a real job at hand). */
double starpu_permodel_history_based_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, uint32_t footprint)
{
	/* Build a minimal fake job carrying only the footprint.  All other
	 * fields are zero-initialized by the aggregate initializer, so the
	 * lookup is done for implementation number j.nimpl == 0. */
	struct _starpu_job j =
		{
			.footprint = footprint,
			.footprint_is_computed = 1,
		};
	return _starpu_history_based_job_expected_perf(model, arch, &j, j.nimpl);
}
/* Record a MEASURED execution time of job J on ARCH (implementation NIMPL)
 * into MODEL.  Depending on the model type this updates the per-footprint
 * history (mean/deviation, with outlier rejection) and/or the regression
 * sums used for the (non-)linear fits.  No-op when MODEL is NULL.  The
 * whole update is done under the model's write lock.  CPUID is only used
 * by the STARPU_MODEL_DEBUG trace output. */
void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned cpuid STARPU_ATTRIBUTE_UNUSED, double measured, unsigned nimpl)
{
	if (model)
	{
		STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

		struct starpu_perfmodel_per_arch *per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];

		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_perfmodel_history_entry *entry;
			struct starpu_perfmodel_history_table *elt;
			struct starpu_perfmodel_history_list **list;
			uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);

			list = &per_arch_model->list;

			HASH_FIND_UINT32_T(per_arch_model->history, &key, elt);
			entry = (elt == NULL) ? NULL : elt->history_entry;

			if (!entry)
			{
				/* this is the first entry with such a footprint */
				entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
				STARPU_ASSERT(entry);

				/* Tell helgrind that we do not care about
				 * racing access to the sampling, we only want a
				 * good-enough estimation */
				STARPU_HG_DISABLE_CHECKING(entry->nsample);
				STARPU_HG_DISABLE_CHECKING(entry->mean);

				/* Seed the running statistics from the single sample */
				entry->mean = measured;
				entry->sum = measured;

				entry->deviation = 0.0;
				entry->sum2 = measured*measured;

				entry->size = _starpu_job_get_data_size(model, arch, nimpl, j);
				entry->flops = j->task->flops;

				entry->footprint = key;
				entry->nsample = 1;
				entry->nerror = 0;

				insert_history_entry(entry, list, &per_arch_model->history);
			}
			else
			{
				/* There is already an entry with the same footprint */

				/* Ratio of the new measurement to the current mean,
				 * used to detect outliers in either direction */
				double local_deviation = measured/entry->mean;
				int historymaxerror = starpu_get_env_number_default("STARPU_HISTORY_MAX_ERROR", STARPU_HISTORYMAXERROR);

				if (entry->nsample &&
					(100 * local_deviation > (100 + historymaxerror)
					 || (100 / local_deviation > (100 + historymaxerror))))
				{
					/* Outlier: count it but do not fold it into the mean */
					entry->nerror++;

					/* More errors than measurements, we're most probably completely wrong, we flush out all the entries */
					if (entry->nerror >= entry->nsample)
					{
						char archname[32];
						starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
						_STARPU_DISP("Too big deviation for model %s on %s: %f vs average %f, %u such errors against %u samples (%+f%%), flushing the performance model. Use the STARPU_HISTORY_MAX_ERROR environement variable to control the threshold (currently %d%%)\n", model->symbol, archname, measured, entry->mean, entry->nerror, entry->nsample, measured * 100. / entry->mean - 100, historymaxerror);
						entry->sum = 0.0;
						entry->sum2 = 0.0;
						entry->nsample = 0;
						entry->nerror = 0;
						entry->mean = 0.0;
						entry->deviation = 0.0;
					}
				}
				else
				{
					/* Fold the measurement into the running mean and
					 * standard deviation */
					entry->sum += measured;
					entry->sum2 += measured*measured;
					entry->nsample++;
					unsigned n = entry->nsample;
					entry->mean = entry->sum / n;
					entry->deviation = sqrt((entry->sum2 - (entry->sum*entry->sum)/n)/n);
				}

				if (j->task->flops != 0.)
				{
					if (entry->flops == 0.)
						entry->flops = j->task->flops;
					else if (entry->flops != j->task->flops)
						/* Incoherent flops! forget about trying to record flops */
						entry->flops = NAN;
				}
			}

			STARPU_ASSERT(entry);
		}

		if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_perfmodel_regression_model *reg_model;
			reg_model = &per_arch_model->regression;

			/* update the regression model: accumulate the sums needed
			 * for a least-squares fit of ln(y) = ln(alpha) + beta*ln(x) */
			size_t job_size = _starpu_job_get_data_size(model, arch, nimpl, j);
			double logy, logx;
			logx = log((double)job_size);
			logy = log(measured);

			reg_model->sumlnx += logx;
			reg_model->sumlnx2 += logx*logx;
			reg_model->sumlny += logy;
			reg_model->sumlnxlny += logx*logy;

			/* Track the calibrated size range */
			if (reg_model->minx == 0 || job_size < reg_model->minx)
				reg_model->minx = job_size;
			if (reg_model->maxx == 0 || job_size > reg_model->maxx)
				reg_model->maxx = job_size;

			reg_model->nsample++;

			if (VALID_REGRESSION(reg_model))
			{
				/* Enough samples: solve for alpha and beta */
				unsigned n = reg_model->nsample;

				double num = (n*reg_model->sumlnxlny - reg_model->sumlnx*reg_model->sumlny);
				double denom = (n*reg_model->sumlnx2 - reg_model->sumlnx*reg_model->sumlnx);

				reg_model->beta = num/denom;
				reg_model->alpha = exp((reg_model->sumlny - reg_model->beta*reg_model->sumlnx)/n);
				reg_model->valid = 1;
			}
		}

#ifdef STARPU_MODEL_DEBUG
		/* Append the raw measurement and the per-handle descriptions to
		 * the per-arch debug trace file */
		struct starpu_task *task = j->task;
		FILE *f = fopen(per_arch_model->debug_path, "a+");
		if (f == NULL)
		{
			_STARPU_DISP("Error <%s> when opening file <%s>\n", strerror(errno), per_arch_model->debug_path);
			STARPU_ABORT();
		}

		if (!j->footprint_is_computed)
			(void) _starpu_compute_buffers_footprint(model, arch, nimpl, j);

		STARPU_ASSERT(j->footprint_is_computed);

		fprintf(f, "0x%x\t%lu\t%f\t%f\t%f\t%d\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(model, arch, nimpl, j), measured, task->predicted, task->predicted_transfer, cpuid);
		unsigned i;
		for (i = 0; i < task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);

			STARPU_ASSERT(handle->ops);
			STARPU_ASSERT(handle->ops->display);
			handle->ops->display(handle, f);
		}
		fprintf(f, "\n");
		fclose(f);
#endif
		STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
	}
}
/* Public entry point: record that TASK took MEASURED time on ARCH with
 * implementation NIMPL, loading the model first if needed.  Also raises the
 * calibrate flag so the updated model is saved on termination.  Asserts out
 * when compiled for simulation, where no real measurements exist. */
void starpu_perfmodel_update_history(struct starpu_perfmodel *model, struct starpu_task *task, struct starpu_perfmodel_arch * arch, unsigned cpuid, unsigned nimpl, double measured)
{
	struct _starpu_job *job = _starpu_get_job_associated_to_task(task);

#ifdef STARPU_SIMGRID
	STARPU_ASSERT_MSG(0, "We are not supposed to update history when simulating execution");
#endif

	/* Make sure the model structures exist before recording into them */
	_starpu_load_perfmodel(model);
	/* Record measurement */
	_starpu_update_perfmodel_history(job, model, arch, cpuid, measured, nimpl);
	/* and save perfmodel on termination */
	_starpu_set_calibrate_flag(1);
}