perfmodel_history.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2013 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
#include <dirent.h>
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/perfmodel/perfmodel.h>
#include <core/jobs.h>
#include <core/workers.h>
#include <datawizard/datawizard.h>
#include <core/perfmodel/regression.h>
#include <common/config.h>
#include <starpu_parameters.h>
#include <common/uthash.h>
#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif
/* uthash helpers keyed on a 32-bit footprint */
#define HASH_ADD_UINT32_T(head,field,add) HASH_ADD(hh,head,field,sizeof(uint32_t),add)
#define HASH_FIND_UINT32_T(head,find,out) HASH_FIND(hh,head,find,sizeof(uint32_t),out)

/* One bucket of the per-arch history hash table: maps a task footprint
 * (32-bit hash of the task's data layout) to its measurement entry. */
struct starpu_perfmodel_history_table
{
	UT_hash_handle hh;	/* makes this struct hashable by uthash */
	uint32_t footprint;	/* key */
	struct starpu_perfmodel_history_entry *history_entry;	/* value */
};

/* We want more than 10% variance on X to trust regression */
#define VALID_REGRESSION(reg_model) \
((reg_model)->minx < (9*(reg_model)->maxx)/10 && (reg_model)->nsample >= _STARPU_CALIBRATION_MINIMUM)

/* Protects the registered_models list below */
static starpu_pthread_rwlock_t registered_models_rwlock;
/* Linked list of every performance model registered so far */
static struct _starpu_perfmodel_list *registered_models = NULL;
  48. size_t _starpu_job_get_data_size(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned nimpl, struct _starpu_job *j)
  49. {
  50. struct starpu_task *task = j->task;
  51. if (model && model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].size_base)
  52. {
  53. return model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].size_base(task, arch, nimpl);
  54. }
  55. else if (model && model->size_base)
  56. {
  57. return model->size_base(task, nimpl);
  58. }
  59. else
  60. {
  61. unsigned nbuffers = task->cl->nbuffers;
  62. size_t size = 0;
  63. unsigned buffer;
  64. for (buffer = 0; buffer < nbuffers; buffer++)
  65. {
  66. starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, buffer);
  67. size += _starpu_data_get_size(handle);
  68. }
  69. return size;
  70. }
  71. }
  72. /*
  73. * History based model
  74. */
  75. static void insert_history_entry(struct starpu_perfmodel_history_entry *entry, struct starpu_perfmodel_history_list **list, struct starpu_perfmodel_history_table **history_ptr)
  76. {
  77. struct starpu_perfmodel_history_list *link;
  78. struct starpu_perfmodel_history_table *table;
  79. link = (struct starpu_perfmodel_history_list *) malloc(sizeof(struct starpu_perfmodel_history_list));
  80. link->next = *list;
  81. link->entry = entry;
  82. *list = link;
  83. /* detect concurrency issue */
  84. //HASH_FIND_UINT32_T(*history_ptr, &entry->footprint, table);
  85. //STARPU_ASSERT(table == NULL);
  86. table = (struct starpu_perfmodel_history_table*) malloc(sizeof(*table));
  87. STARPU_ASSERT(table != NULL);
  88. table->footprint = entry->footprint;
  89. table->history_entry = entry;
  90. HASH_ADD_UINT32_T(*history_ptr, footprint, table);
  91. }
  92. static void dump_reg_model(FILE *f, struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned nimpl)
  93. {
  94. struct starpu_perfmodel_per_arch *per_arch_model;
  95. per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
  96. struct starpu_perfmodel_regression_model *reg_model;
  97. reg_model = &per_arch_model->regression;
  98. /*
  99. * Linear Regression model
  100. */
  101. /* Unless we have enough measurements, we put NaN in the file to indicate the model is invalid */
  102. double alpha = nan(""), beta = nan("");
  103. if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  104. {
  105. if (reg_model->nsample > 1)
  106. {
  107. alpha = reg_model->alpha;
  108. beta = reg_model->beta;
  109. }
  110. }
  111. fprintf(f, "# sumlnx\tsumlnx2\t\tsumlny\t\tsumlnxlny\talpha\t\tbeta\t\tn\tminx\t\tmaxx\n");
  112. fprintf(f, "%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\t%-15lu\t%-15lu\n", reg_model->sumlnx, reg_model->sumlnx2, reg_model->sumlny, reg_model->sumlnxlny, alpha, beta, reg_model->nsample, reg_model->minx, reg_model->maxx);
  113. /*
  114. * Non-Linear Regression model
  115. */
  116. double a = nan(""), b = nan(""), c = nan("");
  117. if (model->type == STARPU_NL_REGRESSION_BASED)
  118. _starpu_regression_non_linear_power(per_arch_model->list, &a, &b, &c);
  119. fprintf(f, "# a\t\tb\t\tc\n");
  120. fprintf(f, "%-15le\t%-15le\t%-15le\n", a, b, c);
  121. }
/* Read back the regression part of a per-arch model section written by
 * dump_reg_model, and decide whether each regression can be trusted. */
static void scan_reg_model(FILE *f, struct starpu_perfmodel_regression_model *reg_model)
{
	int res;
	/*
	 * Linear Regression model
	 */
	_starpu_drop_comments(f);
	res = fscanf(f, "%le\t%le\t%le\t%le\t%le\t%le\t%u\t%lu\t%lu\n",
		&reg_model->sumlnx, &reg_model->sumlnx2, &reg_model->sumlny,
		&reg_model->sumlnxlny, &reg_model->alpha, &reg_model->beta,
		&reg_model->nsample,
		&reg_model->minx, &reg_model->maxx);
	STARPU_ASSERT_MSG(res == 9, "Incorrect performance model file");
	/* If any of the parameters describing the linear regression model is NaN, the model is invalid */
	unsigned invalid = (isnan(reg_model->alpha)||isnan(reg_model->beta));
	/* Also require enough spread on x and enough samples (VALID_REGRESSION). */
	reg_model->valid = !invalid && VALID_REGRESSION(reg_model);
	/*
	 * Non-Linear Regression model
	 */
	_starpu_drop_comments(f);
	res = fscanf(f, "%le\t%le\t%le\n", &reg_model->a, &reg_model->b, &reg_model->c);
	STARPU_ASSERT_MSG(res == 3, "Incorrect performance model file");
	/* If any of the parameters describing the non-linear regression model is NaN, the model is invalid */
	unsigned nl_invalid = (isnan(reg_model->a)||isnan(reg_model->b)||isnan(reg_model->c));
	reg_model->nl_valid = !nl_invalid && VALID_REGRESSION(reg_model);
}
  148. static void dump_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
  149. {
  150. fprintf(f, "%08x\t%-15lu\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\n", entry->footprint, (unsigned long) entry->size, entry->flops, entry->mean, entry->deviation, entry->sum, entry->sum2, entry->nsample);
  151. }
  152. static void scan_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
  153. {
  154. int res;
  155. _starpu_drop_comments(f);
  156. /* In case entry is NULL, we just drop these values */
  157. unsigned nsample;
  158. uint32_t footprint;
  159. unsigned long size; /* in bytes */
  160. double flops;
  161. double mean;
  162. double deviation;
  163. double sum;
  164. double sum2;
  165. char line[256];
  166. char *ret;
  167. ret = fgets(line, sizeof(line), f);
  168. STARPU_ASSERT(ret);
  169. STARPU_ASSERT(strchr(line, '\n'));
  170. /* Read the values from the file */
  171. res = sscanf(line, "%x\t%lu\t%le\t%le\t%le\t%le\t%le\t%u", &footprint, &size, &flops, &mean, &deviation, &sum, &sum2, &nsample);
  172. if (res != 8)
  173. {
  174. flops = 0.;
  175. /* Read the values from the file */
  176. res = sscanf(line, "%x\t%lu\t%le\t%le\t%le\t%le\t%u", &footprint, &size, &mean, &deviation, &sum, &sum2, &nsample);
  177. STARPU_ASSERT_MSG(res == 7, "Incorrect performance model file");
  178. }
  179. if (entry)
  180. {
  181. entry->footprint = footprint;
  182. entry->size = size;
  183. entry->flops = flops;
  184. entry->mean = mean;
  185. entry->deviation = deviation;
  186. entry->sum = sum;
  187. entry->sum2 = sum2;
  188. entry->nsample = nsample;
  189. }
  190. }
  191. static void parse_per_arch_model_file(FILE *f, struct starpu_perfmodel_per_arch *per_arch_model, unsigned scan_history)
  192. {
  193. unsigned nentries;
  194. _starpu_drop_comments(f);
  195. int res = fscanf(f, "%u\n", &nentries);
  196. STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
  197. _STARPU_DEBUG("nentries:%u\n", nentries);
  198. scan_reg_model(f, &per_arch_model->regression);
  199. /* parse entries */
  200. unsigned i;
  201. for (i = 0; i < nentries; i++)
  202. {
  203. struct starpu_perfmodel_history_entry *entry = NULL;
  204. if (scan_history)
  205. {
  206. entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
  207. STARPU_ASSERT(entry);
  208. }
  209. scan_history_entry(f, entry);
  210. /* insert the entry in the hashtable and the list structures */
  211. /* TODO: Insert it at the end of the list, to avoid reversing
  212. * the order... But efficiently! We may have a lot of entries */
  213. if (scan_history)
  214. insert_history_entry(entry, &per_arch_model->list, &per_arch_model->history);
  215. }
  216. }
/* Parse one (archtype, devid, ncore) section of a model file: the number of
 * implementations followed by one per-implementation block.  When 'model'
 * (or the target per-arch slot) is NULL, the data is parsed into a dummy
 * structure and discarded. */
static void parse_arch(FILE *f, struct starpu_perfmodel *model, unsigned scan_history,struct starpu_perfmodel_arch* arch)
{
	struct starpu_perfmodel_per_arch dummy;
	unsigned nimpls, implmax, impl, i, ret;
	_STARPU_DEBUG("Parsing %s_%u_ncore_%u\n",
			starpu_perfmodel_get_archtype_name(arch->type),
			arch->devid,
			arch->ncore);
	/* Parsing number of implementation */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &nimpls);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
	if( model != NULL &&
		model->per_arch != NULL &&
		model->per_arch[arch->type] != NULL &&
		model->per_arch[arch->type][arch->devid] != NULL &&
		model->per_arch[arch->type][arch->devid][arch->ncore] != NULL)
	{
		/* Parsing each implementation */
		implmax = STARPU_MIN(nimpls, STARPU_MAXIMPLEMENTATIONS);
		for (impl = 0; impl < implmax; impl++)
			parse_per_arch_model_file(f, &model->per_arch[arch->type][arch->devid][arch->ncore][impl], scan_history);
	}
	else
	{
		/* No storage for this arch: consume every implementation below. */
		impl = 0;
	}
	/* if the number of implementation is greater than STARPU_MAXIMPLEMENTATIONS
	 * we skip the last implementation */
	for (i = impl; i < nimpls; i++)
		parse_per_arch_model_file(f, &dummy, 0);
}
  249. static void parse_device(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, enum starpu_worker_archtype archtype, unsigned devid)
  250. {
  251. unsigned maxncore, ncore, ret;
  252. struct starpu_perfmodel_arch arch;
  253. arch.type = archtype;
  254. arch.devid = devid;
  255. _STARPU_DEBUG("Parsing device %s_%u arch\n",
  256. starpu_perfmodel_get_archtype_name(archtype),
  257. devid);
  258. /* Parsing maximun number of worker for this device */
  259. _starpu_drop_comments(f);
  260. ret = fscanf(f, "%u\n", &maxncore);
  261. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  262. /* Parsing each arch */
  263. for(ncore=0; ncore < maxncore; ncore++)
  264. {
  265. arch.ncore = ncore;
  266. parse_arch(f,model,scan_history,&arch);
  267. }
  268. }
  269. static void parse_archtype(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, enum starpu_worker_archtype archtype)
  270. {
  271. unsigned ndevice, devid, ret;
  272. _STARPU_DEBUG("Parsing %s arch\n", starpu_perfmodel_get_archtype_name(archtype));
  273. /* Parsing number of device for this archtype */
  274. _starpu_drop_comments(f);
  275. ret = fscanf(f, "%u\n", &ndevice);
  276. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  277. /* Parsing each device for this archtype*/
  278. for(devid=0; devid < ndevice; devid++)
  279. parse_device(f,model,scan_history,archtype,devid);
  280. }
  281. static void parse_model_file(FILE *f, struct starpu_perfmodel *model, unsigned scan_history)
  282. {
  283. unsigned archtype;
  284. int ret, version;
  285. _STARPU_DEBUG("Start parsing\n");
  286. /* Parsing performance model version */
  287. _starpu_drop_comments(f);
  288. ret = fscanf(f, "%d\n", &version);
  289. STARPU_ASSERT_MSG(version == _STARPU_PERFMODEL_VERSION, "Incorrect performance model file with a model version %d not being the current model version (%d)\n",
  290. version, _STARPU_PERFMODEL_VERSION);
  291. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  292. /* Parsing each kind of archtype */
  293. for(archtype=0; archtype<STARPU_NARCH; archtype++)
  294. parse_archtype(f, model, scan_history, archtype);
  295. }
  296. static void dump_per_arch_model_file(FILE *f, struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, unsigned nimpl)
  297. {
  298. struct starpu_perfmodel_per_arch *per_arch_model;
  299. per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
  300. /* count the number of elements in the lists */
  301. struct starpu_perfmodel_history_list *ptr = NULL;
  302. unsigned nentries = 0;
  303. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  304. {
  305. /* Dump the list of all entries in the history */
  306. ptr = per_arch_model->list;
  307. while(ptr)
  308. {
  309. nentries++;
  310. ptr = ptr->next;
  311. }
  312. }
  313. /* header */
  314. char archname[32];
  315. starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);
  316. fprintf(f, "#####\n");
  317. fprintf(f, "# Model for %s\n", archname);
  318. fprintf(f, "# number of entries\n%u\n", nentries);
  319. dump_reg_model(f, model, arch, nimpl);
  320. /* Dump the history into the model file in case it is necessary */
  321. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  322. {
  323. fprintf(f, "# hash\t\tsize\t\tflops\t\tmean (us)\tdev (us)\t\tsum\t\tsum2\t\tn\n");
  324. ptr = per_arch_model->list;
  325. while (ptr)
  326. {
  327. dump_history_entry(f, ptr->entry);
  328. ptr = ptr->next;
  329. }
  330. }
  331. fprintf(f, "\n");
  332. }
  333. static unsigned get_n_entries(struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, unsigned impl)
  334. {
  335. struct starpu_perfmodel_per_arch *per_arch_model;
  336. per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][impl];
  337. /* count the number of elements in the lists */
  338. struct starpu_perfmodel_history_list *ptr = NULL;
  339. unsigned nentries = 0;
  340. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  341. {
  342. /* Dump the list of all entries in the history */
  343. ptr = per_arch_model->list;
  344. while(ptr)
  345. {
  346. nentries++;
  347. ptr = ptr->next;
  348. }
  349. }
  350. return nentries;
  351. }
  352. static void dump_model_file(FILE *f, struct starpu_perfmodel *model)
  353. {
  354. struct _starpu_machine_config *conf = _starpu_get_machine_config();
  355. char *name = "unknown";
  356. unsigned archtype, ndevice, *ncore, devid, nc, nimpl;
  357. struct starpu_perfmodel_arch arch;
  358. fprintf(f, "##################\n");
  359. fprintf(f, "# Performance Model Version\n");
  360. fprintf(f, "%d\n\n", _STARPU_PERFMODEL_VERSION);
  361. for(archtype=0; archtype<STARPU_NARCH; archtype++)
  362. {
  363. arch.type = archtype;
  364. switch (archtype)
  365. {
  366. case STARPU_CPU_WORKER:
  367. ndevice = 1;
  368. ncore = &conf->topology.nhwcpus;
  369. name = "CPU";
  370. break;
  371. case STARPU_CUDA_WORKER:
  372. ndevice = conf->topology.nhwcudagpus;
  373. ncore = NULL;
  374. name = "CUDA";
  375. break;
  376. case STARPU_OPENCL_WORKER:
  377. ndevice = conf->topology.nhwopenclgpus;
  378. ncore = NULL;
  379. name = "OPENCL";
  380. break;
  381. case STARPU_MIC_WORKER:
  382. ndevice = conf->topology.nhwmicdevices;
  383. ncore = conf->topology.nhwmiccores;
  384. name = "MIC";
  385. break;
  386. case STARPU_SCC_WORKER:
  387. ndevice = conf->topology.nhwscc;
  388. ncore = NULL;
  389. name = "SCC";
  390. break;
  391. default:
  392. /* Unknown arch */
  393. STARPU_ABORT();
  394. break;
  395. }
  396. fprintf(f, "####################\n");
  397. fprintf(f, "# %ss\n", name);
  398. fprintf(f, "# number of %s devices\n", name);
  399. fprintf(f, "%u\n", ndevice);
  400. for(devid=0; devid<ndevice; devid++)
  401. {
  402. arch.devid = devid;
  403. fprintf(f, "###############\n");
  404. fprintf(f, "# %s_%u\n", name, devid);
  405. fprintf(f, "# number of workers on device %s_%d\n", name, devid);
  406. if(ncore != NULL)
  407. fprintf(f, "%u\n", ncore[devid]);
  408. else
  409. fprintf(f, "1\n");
  410. for(nc=0; model->per_arch[archtype][devid][nc] != NULL; nc++)
  411. {
  412. arch.ncore = nc;
  413. unsigned max_impl = 0;
  414. if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
  415. {
  416. for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
  417. if (get_n_entries(model, &arch, nimpl))
  418. max_impl = nimpl + 1;
  419. }
  420. else if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_PER_ARCH || model->type == STARPU_COMMON)
  421. {
  422. for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
  423. if (model->per_arch[archtype][devid][nc][nimpl].regression.nsample)
  424. max_impl = nimpl + 1;
  425. }
  426. else
  427. STARPU_ASSERT_MSG(0, "Unknown history-based performance model %u", archtype);
  428. fprintf(f, "##########\n");
  429. fprintf(f, "# %u worker(s) in parallel\n", nc+1);
  430. fprintf(f, "# number of implementations\n");
  431. fprintf(f, "%u\n", max_impl);
  432. for (nimpl = 0; nimpl < max_impl; nimpl++)
  433. {
  434. dump_per_arch_model_file(f, model, &arch, nimpl);
  435. }
  436. }
  437. }
  438. }
  439. }
  440. static void initialize_per_arch_model(struct starpu_perfmodel_per_arch *per_arch_model)
  441. {
  442. per_arch_model->history = NULL;
  443. per_arch_model->list = NULL;
  444. per_arch_model->regression.nsample = 0;
  445. per_arch_model->regression.valid = 0;
  446. per_arch_model->regression.nl_valid = 0;
  447. per_arch_model->size_base = NULL;
  448. }
  449. static struct starpu_perfmodel_per_arch*** initialize_arch_model(int maxdevid, unsigned* maxncore_table)
  450. {
  451. int devid, ncore, nimpl;
  452. struct starpu_perfmodel_per_arch *** arch_model = malloc(sizeof(*arch_model)*(maxdevid+1));
  453. arch_model[maxdevid] = NULL;
  454. for(devid=0; devid<maxdevid; devid++)
  455. {
  456. int maxncore;
  457. if(maxncore_table != NULL)
  458. maxncore = maxncore_table[devid];
  459. else
  460. maxncore = 1;
  461. arch_model[devid] = malloc(sizeof(*arch_model[devid])*(maxncore+1));
  462. arch_model[devid][maxncore] = NULL;
  463. for(ncore=0; ncore<maxncore; ncore++)
  464. {
  465. arch_model[devid][ncore] = malloc(sizeof(*arch_model[devid][ncore])*STARPU_MAXIMPLEMENTATIONS);
  466. for(nimpl=0; nimpl<STARPU_MAXIMPLEMENTATIONS; nimpl++)
  467. {
  468. initialize_per_arch_model(&arch_model[devid][ncore][nimpl]);
  469. }
  470. }
  471. }
  472. return arch_model;
  473. }
  474. void initialize_model_without_conf(struct starpu_perfmodel* model, int dev_cpu, unsigned* core_cpu, int dev_cuda, unsigned* core_cuda, int dev_opencl, unsigned* core_opencl, int dev_mic, unsigned* core_mic, int dev_scc, unsigned* core_scc)
  475. {
  476. if(!model->is_init)
  477. {
  478. model->per_arch = malloc(sizeof(*model->per_arch)*(STARPU_NARCH));
  479. model->per_arch[STARPU_CPU_WORKER] = initialize_arch_model(dev_cpu,core_cpu);
  480. model->per_arch[STARPU_CUDA_WORKER] = initialize_arch_model(dev_cuda,core_cuda);
  481. model->per_arch[STARPU_OPENCL_WORKER] = initialize_arch_model(dev_opencl,core_opencl);
  482. model->per_arch[STARPU_MIC_WORKER] = initialize_arch_model(dev_mic,core_mic);
  483. model->per_arch[STARPU_SCC_WORKER] = initialize_arch_model(dev_scc,core_scc);
  484. model->is_init = 1;
  485. }
  486. }
  487. void initialize_model(struct starpu_perfmodel *model)
  488. {
  489. struct _starpu_machine_config *conf = _starpu_get_machine_config();
  490. initialize_model_without_conf(model,1,&conf->topology.nhwcpus,
  491. conf->topology.nhwcudagpus,NULL,
  492. conf->topology.nhwopenclgpus,NULL,
  493. conf->topology.nhwmicdevices,conf->topology.nhwmiccores,
  494. conf->topology.nhwscc,NULL);
  495. }
  496. void initialize_model_with_file(FILE*f, struct starpu_perfmodel *model)
  497. {
  498. unsigned ret, archtype, devid, i, ndevice, * maxncore;
  499. struct starpu_perfmodel_arch arch;
  500. int version;
  501. if(!model->is_init)
  502. {
  503. /* Parsing performance model version */
  504. _starpu_drop_comments(f);
  505. ret = fscanf(f, "%d\n", &version);
  506. STARPU_ASSERT_MSG(version == _STARPU_PERFMODEL_VERSION, "Incorrect performance model file with a model version %d not being the current model version (%d)\n",
  507. version, _STARPU_PERFMODEL_VERSION);
  508. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  509. model->per_arch = malloc(sizeof(*model->per_arch)*(STARPU_NARCH));
  510. for(archtype=0; archtype<STARPU_NARCH; archtype++)
  511. {
  512. arch.type = archtype;
  513. _starpu_drop_comments(f);
  514. ret = fscanf(f, "%u\n", &ndevice);
  515. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  516. if(ndevice != 0)
  517. maxncore = malloc(sizeof((*maxncore)*ndevice));
  518. else
  519. maxncore = NULL;
  520. for(devid=0; devid < ndevice; devid++)
  521. {
  522. arch.devid = devid;
  523. _starpu_drop_comments(f);
  524. ret = fscanf(f, "%u\n", &maxncore[devid]);
  525. STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
  526. for(i=0; i<maxncore[devid]; i++)
  527. {
  528. arch.ncore = i;
  529. parse_arch(f,NULL,0,&arch);
  530. }
  531. }
  532. model->per_arch[archtype] = initialize_arch_model(ndevice,maxncore);
  533. if(maxncore != NULL)
  534. free(maxncore);
  535. }
  536. model->is_init = 1;
  537. }
  538. }
  539. static void get_model_debug_path(struct starpu_perfmodel *model, const char *arch, char *path, size_t maxlen)
  540. {
  541. STARPU_ASSERT(path);
  542. _starpu_get_perf_model_dir_debug(path, maxlen);
  543. strncat(path, model->symbol, maxlen);
  544. char hostname[65];
  545. _starpu_gethostname(hostname, sizeof(hostname));
  546. strncat(path, ".", maxlen);
  547. strncat(path, hostname, maxlen);
  548. strncat(path, ".", maxlen);
  549. strncat(path, arch, maxlen);
  550. strncat(path, ".debug", maxlen);
  551. }
  552. /*
  553. * Returns 0 is the model was already loaded, 1 otherwise.
  554. */
  555. int _starpu_register_model(struct starpu_perfmodel *model)
  556. {
  557. /* If the model has already been loaded, there is nothing to do */
  558. STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
  559. if (model->is_loaded)
  560. {
  561. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  562. return 0;
  563. }
  564. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  565. /* We have to make sure the model has not been loaded since the
  566. * last time we took the lock */
  567. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  568. if (model->is_loaded)
  569. {
  570. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  571. return 0;
  572. }
  573. /* add the model to a linked list */
  574. struct _starpu_perfmodel_list *node = (struct _starpu_perfmodel_list *) malloc(sizeof(struct _starpu_perfmodel_list));
  575. node->model = model;
  576. //model->debug_modelid = debug_modelid++;
  577. /* put this model at the beginning of the list */
  578. node->next = registered_models;
  579. registered_models = node;
  580. #ifdef STARPU_MODEL_DEBUG
  581. _starpu_create_sampling_directory_if_needed();
  582. unsigned archtype, devid, ncore, nimpl;
  583. struct starpu_perfmodel_arch arch;
  584. for (archtype = 0; archtype < STARPU_NARCH; archtype++)
  585. {
  586. arch.type = archtype;
  587. if(model->per_arch[archtype] != NULL)
  588. {
  589. for(devid=0; model->per_arch[archtype][devid] != NULL; devid++)
  590. {
  591. arch.devid = devid;
  592. for(ncore=0; model->per_arch[archtype][devid][ncore] != NULL; ncore++)
  593. {
  594. arch.ncore = ncore;
  595. for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
  596. {
  597. starpu_perfmodel_debugfilepath(model, &arch, model->per_arch[archtype][devid][ncore][nimpl].debug_path, 256, nimpl);
  598. }
  599. }
  600. }
  601. }
  602. }
  603. #endif
  604. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  605. return 1;
  606. }
  607. static void get_model_path(struct starpu_perfmodel *model, char *path, size_t maxlen)
  608. {
  609. _starpu_get_perf_model_dir_codelets(path, maxlen);
  610. strncat(path, model->symbol, maxlen);
  611. char hostname[65];
  612. _starpu_gethostname(hostname, sizeof(hostname));
  613. strncat(path, ".", maxlen);
  614. strncat(path, hostname, maxlen);
  615. }
  616. static void save_history_based_model(struct starpu_perfmodel *model)
  617. {
  618. STARPU_ASSERT(model);
  619. STARPU_ASSERT(model->symbol);
  620. /* TODO checks */
  621. /* filename = $STARPU_PERF_MODEL_DIR/codelets/symbol.hostname */
  622. char path[256];
  623. get_model_path(model, path, 256);
  624. _STARPU_DEBUG("Opening performance model file %s for model %s\n", path, model->symbol);
  625. /* overwrite existing file, or create it */
  626. FILE *f;
  627. f = fopen(path, "w+");
  628. STARPU_ASSERT_MSG(f, "Could not save performance model %s\n", path);
  629. dump_model_file(f, model);
  630. fclose(f);
  631. }
  632. static void _starpu_dump_registered_models(void)
  633. {
  634. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  635. struct _starpu_perfmodel_list *node;
  636. node = registered_models;
  637. _STARPU_DEBUG("DUMP MODELS !\n");
  638. while (node)
  639. {
  640. save_history_based_model(node->model);
  641. node = node->next;
  642. }
  643. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  644. }
  645. void _starpu_initialize_registered_performance_models(void)
  646. {
  647. registered_models = NULL;
  648. STARPU_PTHREAD_RWLOCK_INIT(&registered_models_rwlock, NULL);
  649. }
/* Free every per-arch structure of 'model' (hash tables, history lists and
 * the nested per_arch tables) and mark the model as uninitialized/unloaded.
 * The caller holds model->model_rwlock. */
void _starpu_deinitialize_performance_model(struct starpu_perfmodel *model)
{
	unsigned arch, devid, ncore, nimpl;
	if(model->per_arch != NULL)
	{
		for (arch = 0; arch < STARPU_NARCH; arch++)
		{
			if( model->per_arch[arch] != NULL)
			{
				/* The devid and ncore levels are NULL-terminated arrays. */
				for(devid=0; model->per_arch[arch][devid] != NULL; devid++)
				{
					for(ncore=0; model->per_arch[arch][devid][ncore] != NULL; ncore++)
					{
						for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
						{
							struct starpu_perfmodel_per_arch *archmodel = &model->per_arch[arch][devid][ncore][nimpl];
							struct starpu_perfmodel_history_list *list, *plist;
							struct starpu_perfmodel_history_table *entry, *tmp;
							/* Free the hash-table buckets; the entries
							 * themselves are owned by the list below. */
							HASH_ITER(hh, archmodel->history, entry, tmp)
							{
								HASH_DEL(archmodel->history, entry);
								free(entry);
							}
							archmodel->history = NULL;
							/* Free the list links and the history entries. */
							list = archmodel->list;
							while (list)
							{
								free(list->entry);
								plist = list;
								list = list->next;
								free(plist);
							}
							archmodel->list = NULL;
						}
						free(model->per_arch[arch][devid][ncore]);
						model->per_arch[arch][devid][ncore] = NULL;
					}
					free(model->per_arch[arch][devid]);
					model->per_arch[arch][devid] = NULL;
				}
				free(model->per_arch[arch]);
				model->per_arch[arch] = NULL;
			}
		}
		free(model->per_arch);
		model->per_arch = NULL;
	}
	model->is_init = 0;
	model->is_loaded = 0;
}
  700. void _starpu_deinitialize_registered_performance_models(void)
  701. {
  702. if (_starpu_get_calibrate_flag())
  703. _starpu_dump_registered_models();
  704. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  705. struct _starpu_perfmodel_list *node, *pnode;
  706. node = registered_models;
  707. _STARPU_DEBUG("FREE MODELS !\n");
  708. while (node)
  709. {
  710. struct starpu_perfmodel *model = node->model;
  711. STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
  712. _starpu_deinitialize_performance_model(model);
  713. STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
  714. pnode = node;
  715. node = node->next;
  716. free(pnode);
  717. }
  718. registered_models = NULL;
  719. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  720. STARPU_PTHREAD_RWLOCK_DESTROY(&registered_models_rwlock);
  721. }
  722. /*
  723. * XXX: We should probably factorize the beginning of the _starpu_load_*_model
  724. * functions. This is a bit tricky though, because we must be sure to unlock
  725. * registered_models_rwlock at the right place.
  726. */
  727. void _starpu_load_per_arch_based_model(struct starpu_perfmodel *model)
  728. {
  729. STARPU_ASSERT(model && model->symbol);
  730. int already_loaded;
  731. STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
  732. already_loaded = model->is_loaded;
  733. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  734. if (already_loaded)
  735. return;
  736. /* The model is still not loaded so we grab the lock in write mode, and
  737. * if it's not loaded once we have the lock, we do load it. */
  738. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  739. /* Was the model initialized since the previous test ? */
  740. if (model->is_loaded)
  741. {
  742. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  743. return;
  744. }
  745. STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
  746. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  747. }
  748. void _starpu_load_common_based_model(struct starpu_perfmodel *model)
  749. {
  750. STARPU_ASSERT(model && model->symbol);
  751. int already_loaded;
  752. STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
  753. already_loaded = model->is_loaded;
  754. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  755. if (already_loaded)
  756. return;
  757. /* The model is still not loaded so we grab the lock in write mode, and
  758. * if it's not loaded once we have the lock, we do load it. */
  759. STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
  760. /* Was the model initialized since the previous test ? */
  761. if (model->is_loaded)
  762. {
  763. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  764. return;
  765. }
  766. STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
  767. STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
  768. }
  769. /* We first try to grab the global lock in read mode to check whether the model
  770. * was loaded or not (this is very likely to have been already loaded). If the
  771. * model was not loaded yet, we take the lock in write mode, and if the model
  772. * is still not loaded once we have the lock, we do load it. */
/* Load the history-based model MODEL, reading its file from the sampling
 * directory if one exists (unless calibrate == 2, which forces a fresh
 * start).  scan_history is forwarded to parse_model_file.  Thread-safe:
 * double-checked on registered_models_rwlock so concurrent callers load
 * the model exactly once. */
void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned scan_history)
{
STARPU_ASSERT(model);
STARPU_ASSERT(model->symbol);
int already_loaded;
/* fast check under the read lock: the model is very likely loaded */
STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
already_loaded = model->is_loaded;
STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
if (already_loaded)
return;
/* The model is still not loaded so we grab the lock in write mode, and
 * if it's not loaded once we have the lock, we do load it. */
STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);
/* Was the model initialized since the previous test ? */
if (model->is_loaded)
{
STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
return;
}
/* first use of this model: create and take its own lock before doing
 * the actual file I/O */
STARPU_PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);
STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
/* make sure the performance model directory exists (or create it) */
_starpu_create_sampling_directory_if_needed();
char path[256];
get_model_path(model, path, 256);
_STARPU_DEBUG("Opening performance model file %s for model %s ...\n", path, model->symbol);
unsigned calibrate_flag = _starpu_get_calibrate_flag();
model->benchmarking = calibrate_flag;
/* try to open an existing file and load it */
int res;
res = access(path, F_OK);
if (res == 0)
{
/* calibrate == 2 requests recalibration from scratch */
if (calibrate_flag == 2)
{
/* The user specified that the performance model should
 * be overwritten, so we don't load the existing file !
 * */
_STARPU_DEBUG("Overwrite existing file\n");
initialize_model(model);
}
else
{
/* We load the available file */
_STARPU_DEBUG("File exists\n");
FILE *f;
f = fopen(path, "r");
STARPU_ASSERT(f);
initialize_model(model);
parse_model_file(f, model, scan_history);
fclose(f);
}
}
else
{
/* no file yet: start with an empty model, and force calibration so
 * that measurements get recorded during this run */
_STARPU_DEBUG("File does not exists\n");
if (!calibrate_flag)
{
_STARPU_DISP("Warning: model %s is not calibrated, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol);
_starpu_set_calibrate_flag(1);
model->benchmarking = 1;
}
initialize_model(model);
}
_STARPU_DEBUG("Performance model file %s for model %s is loaded\n", path, model->symbol);
model->is_loaded = 1;
STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
  842. void starpu_perfmodel_directory(FILE *output)
  843. {
  844. char perf_model_dir[256];
  845. _starpu_get_perf_model_dir_codelets(perf_model_dir, 256);
  846. fprintf(output, "directory: <%s>\n", perf_model_dir);
  847. }
  848. /* This function is intended to be used by external tools that should read
  849. * the performance model files */
  850. int starpu_perfmodel_list(FILE *output)
  851. {
  852. char path[256];
  853. DIR *dp;
  854. struct dirent *ep;
  855. char perf_model_dir_codelets[256];
  856. _starpu_get_perf_model_dir_codelets(perf_model_dir_codelets, 256);
  857. strncpy(path, perf_model_dir_codelets, 256);
  858. dp = opendir(path);
  859. if (dp != NULL)
  860. {
  861. while ((ep = readdir(dp)))
  862. {
  863. if (strcmp(ep->d_name, ".") && strcmp(ep->d_name, ".."))
  864. fprintf(output, "file: <%s>\n", ep->d_name);
  865. }
  866. closedir (dp);
  867. }
  868. else
  869. {
  870. _STARPU_DISP("Could not open the perfmodel directory <%s>: %s\n", path, strerror(errno));
  871. }
  872. return 0;
  873. }
  874. /* This function is intended to be used by external tools that should read the
  875. * performance model files */
/* TODO: write a clear function, to free symbol and history */
  877. int starpu_perfmodel_load_symbol(const char *symbol, struct starpu_perfmodel *model)
  878. {
  879. model->symbol = strdup(symbol);
  880. /* where is the file if it exists ? */
  881. char path[256];
  882. get_model_path(model, path, 256);
  883. // _STARPU_DEBUG("get_model_path -> %s\n", path);
  884. /* does it exist ? */
  885. int res;
  886. res = access(path, F_OK);
  887. if (res)
  888. {
  889. const char *dot = strrchr(symbol, '.');
  890. if (dot)
  891. {
  892. char *symbol2 = strdup(symbol);
  893. symbol2[dot-symbol] = '\0';
  894. int ret;
  895. _STARPU_DISP("note: loading history from %s instead of %s\n", symbol2, symbol);
  896. ret = starpu_perfmodel_load_symbol(symbol2,model);
  897. free(symbol2);
  898. return ret;
  899. }
  900. _STARPU_DISP("There is no performance model for symbol %s\n", symbol);
  901. return 1;
  902. }
  903. FILE *f = fopen(path, "r");
  904. STARPU_ASSERT(f);
  905. if(_starpu_is_initialized())
  906. {
  907. initialize_model(model);
  908. }
  909. else
  910. {
  911. initialize_model_with_file(f, model);
  912. rewind(f);
  913. }
  914. parse_model_file(f, model, 1);
  915. STARPU_ASSERT(fclose(f) == 0);
  916. return 0;
  917. }
  918. int starpu_perfmodel_unload_model(struct starpu_perfmodel *model)
  919. {
  920. free((char *)model->symbol);
  921. _starpu_deinitialize_performance_model(model);
  922. return 0;
  923. }
  924. char* starpu_perfmodel_get_archtype_name(enum starpu_worker_archtype archtype)
  925. {
  926. switch(archtype)
  927. {
  928. case(STARPU_CPU_WORKER):
  929. return "cpu";
  930. break;
  931. case(STARPU_CUDA_WORKER):
  932. return "cuda";
  933. break;
  934. case(STARPU_OPENCL_WORKER):
  935. return "opencl";
  936. break;
  937. case(STARPU_MIC_WORKER):
  938. return "mic";
  939. break;
  940. case(STARPU_SCC_WORKER):
  941. return "scc";
  942. break;
  943. default:
  944. STARPU_ABORT();
  945. break;
  946. }
  947. }
/* Format into ARCHNAME (at most MAXLEN bytes, NUL-terminated by snprintf)
 * a name identifying (arch, nimpl), built from the archtype short name,
 * the device id, the core count and the implementation number. */
void starpu_perfmodel_get_arch_name(struct starpu_perfmodel_arch* arch, char *archname, size_t maxlen,unsigned nimpl)
{
snprintf(archname, maxlen, "%s_%dncore_%dimpl_%u",
starpu_perfmodel_get_archtype_name(arch->type),
arch->devid,
arch->ncore,
nimpl);
}
  956. void starpu_perfmodel_debugfilepath(struct starpu_perfmodel *model,
  957. struct starpu_perfmodel_arch* arch, char *path, size_t maxlen, unsigned nimpl)
  958. {
  959. char archname[32];
  960. starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);
  961. STARPU_ASSERT(path);
  962. get_model_debug_path(model, archname, path, maxlen);
  963. }
  964. double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j, unsigned nimpl)
  965. {
  966. double exp = NAN;
  967. size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
  968. struct starpu_perfmodel_regression_model *regmodel;
  969. regmodel = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].regression;
  970. if (regmodel->valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
  971. exp = regmodel->alpha*pow((double)size, regmodel->beta);
  972. return exp;
  973. }
/* Expected duration of job J on (arch, nimpl) using the non-linear
 * regression fit a * size^b + c.  When the fit is not valid or the data
 * size is outside the calibrated range, falls back to the per-footprint
 * history; if that is not calibrated enough either, returns NAN and
 * forces calibration mode for the rest of the run. */
double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j,unsigned nimpl)
{
double exp = NAN;
size_t size = _starpu_job_get_data_size(model, arch, nimpl, j);
struct starpu_perfmodel_regression_model *regmodel;
regmodel = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl].regression;
/* trust the fit only within (roughly) the calibrated size range */
if (regmodel->nl_valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
exp = regmodel->a*pow((double)size, regmodel->b) + regmodel->c;
else
{
/* fall back to the per-footprint history table */
uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
struct starpu_perfmodel_per_arch *per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
struct starpu_perfmodel_history_table *history;
struct starpu_perfmodel_history_table *entry;
STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
history = per_arch_model->history;
HASH_FIND_UINT32_T(history, &key, entry);
STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
/* We do not care about racing access to the mean, we only want a
 * good-enough estimation, thus simulate taking the rdlock */
ANNOTATE_RWLOCK_ACQUIRED(&model->model_rwlock, 0);
if (entry && entry->history_entry && entry->history_entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
exp = entry->history_entry->mean;
else if (!model->benchmarking)
{
/* not enough samples: switch the whole run to calibration mode */
char archname[32];
starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
_STARPU_DISP("Warning: model %s is not calibrated enough for %s, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname);
_starpu_set_calibrate_flag(1);
model->benchmarking = 1;
}
ANNOTATE_RWLOCK_RELEASED(&model->model_rwlock, 0);
}
return exp;
}
/* Expected duration of job J on (arch, nimpl) from the per-footprint
 * history: the mean of the recorded samples for J's buffer footprint.
 * Returns NAN when there is no entry or fewer than
 * _STARPU_CALIBRATION_MINIMUM samples; in that case calibration mode is
 * forced for the rest of the run. */
double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j,unsigned nimpl)
{
double exp;
struct starpu_perfmodel_per_arch *per_arch_model;
struct starpu_perfmodel_history_entry *entry;
struct starpu_perfmodel_history_table *history, *elt;
uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
/* look the footprint up in the hash table under the read lock */
STARPU_PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
history = per_arch_model->history;
HASH_FIND_UINT32_T(history, &key, elt);
entry = (elt == NULL) ? NULL : elt->history_entry;
STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
/* We do not care about racing access to the mean, we only want a
 * good-enough estimation, thus simulate taking the rdlock */
ANNOTATE_RWLOCK_ACQUIRED(&model->model_rwlock, 0);
exp = entry?entry->mean:NAN;
if (entry && entry->nsample < _STARPU_CALIBRATION_MINIMUM)
/* TODO: report differently if we've scheduled really enough
 * of that task and the scheduler should perhaps put it aside */
/* Not calibrated enough */
exp = NAN;
if (isnan(exp) && !model->benchmarking)
{
/* no usable estimation: switch the run to calibration mode */
char archname[32];
starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
_STARPU_DISP("Warning: model %s is not calibrated enough for %s, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname);
_starpu_set_calibrate_flag(1);
model->benchmarking = 1;
}
ANNOTATE_RWLOCK_RELEASED(&model->model_rwlock, 0);
return exp;
}
/* Public wrapper: history-based estimation for a precomputed FOOTPRINT.
 * A minimal fake job is built; every field not named in the designated
 * initializer (including nimpl) is zero-initialized, so j.nimpl passes
 * implementation 0 to the internal helper. */
double starpu_permodel_history_based_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, uint32_t footprint)
{
struct _starpu_job j =
{
.footprint = footprint,
.footprint_is_computed = 1,
};
return _starpu_history_based_job_expected_perf(model, arch, &j, j.nimpl);
}
/* Record one measured execution time for job J on (arch, nimpl) into
 * MODEL.  Depending on model->type this updates the per-footprint
 * history (mean / standard deviation) and/or the regression sums.
 * Holds model->model_rwlock in write mode for the whole update.
 * No-op when MODEL is NULL. */
void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned cpuid STARPU_ATTRIBUTE_UNUSED, double measured, unsigned nimpl)
{
if (model)
{
STARPU_PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);
struct starpu_perfmodel_per_arch *per_arch_model = &model->per_arch[arch->type][arch->devid][arch->ncore][nimpl];
if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
{
struct starpu_perfmodel_history_entry *entry;
struct starpu_perfmodel_history_table *elt;
struct starpu_perfmodel_history_list **list;
uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
list = &per_arch_model->list;
HASH_FIND_UINT32_T(per_arch_model->history, &key, elt);
entry = (elt == NULL) ? NULL : elt->history_entry;
if (!entry)
{
/* this is the first entry with such a footprint */
entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
STARPU_ASSERT(entry);
entry->mean = measured;
entry->sum = measured;
entry->deviation = 0.0;
entry->sum2 = measured*measured;
entry->size = _starpu_job_get_data_size(model, arch, nimpl, j);
entry->flops = j->task->flops;
entry->footprint = key;
entry->nsample = 1;
insert_history_entry(entry, list, &per_arch_model->history);
}
else
{
/* there is already some entry with the same footprint */
entry->sum += measured;
entry->sum2 += measured*measured;
entry->nsample++;
unsigned n = entry->nsample;
entry->mean = entry->sum / n;
/* population standard deviation from the running sums */
entry->deviation = sqrt((entry->sum2 - (entry->sum*entry->sum)/n)/n);
if (j->task->flops != 0.)
{
if (entry->flops == 0.)
entry->flops = j->task->flops;
else if (entry->flops != j->task->flops)
/* Incoherent flops! forget about trying to record flops */
entry->flops = NAN;
}
}
STARPU_ASSERT(entry);
}
if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
{
struct starpu_perfmodel_regression_model *reg_model;
reg_model = &per_arch_model->regression;
/* update the regression model: accumulate the sums used for a
 * least-squares fit in log-log space */
size_t job_size = _starpu_job_get_data_size(model, arch, nimpl, j);
double logy, logx;
logx = log((double)job_size);
logy = log(measured);
reg_model->sumlnx += logx;
reg_model->sumlnx2 += logx*logx;
reg_model->sumlny += logy;
reg_model->sumlnxlny += logx*logy;
/* track the calibrated size range, used to bound extrapolation */
if (reg_model->minx == 0 || job_size < reg_model->minx)
reg_model->minx = job_size;
if (reg_model->maxx == 0 || job_size > reg_model->maxx)
reg_model->maxx = job_size;
reg_model->nsample++;
if (VALID_REGRESSION(reg_model))
{
/* least-squares solution of ln(y) = ln(alpha) + beta*ln(x) */
unsigned n = reg_model->nsample;
double num = (n*reg_model->sumlnxlny - reg_model->sumlnx*reg_model->sumlny);
double denom = (n*reg_model->sumlnx2 - reg_model->sumlnx*reg_model->sumlnx);
reg_model->beta = num/denom;
reg_model->alpha = exp((reg_model->sumlny - reg_model->beta*reg_model->sumlnx)/n);
reg_model->valid = 1;
}
}
#ifdef STARPU_MODEL_DEBUG
/* append the raw measurement to the per-arch debug file */
struct starpu_task *task = j->task;
FILE *f = fopen(per_arch_model->debug_path, "a+");
if (f == NULL)
{
_STARPU_DISP("Error <%s> when opening file <%s>\n", strerror(errno), per_arch_model->debug_path);
STARPU_ABORT();
}
if (!j->footprint_is_computed)
(void) _starpu_compute_buffers_footprint(model, arch, nimpl, j);
STARPU_ASSERT(j->footprint_is_computed);
fprintf(f, "0x%x\t%lu\t%f\t%f\t%f\t%d\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(model, arch, nimpl, j), measured, task->predicted, task->predicted_transfer, cpuid);
unsigned i;
for (i = 0; i < task->cl->nbuffers; i++)
{
starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
STARPU_ASSERT(handle->ops);
STARPU_ASSERT(handle->ops->display);
handle->ops->display(handle, f);
}
fprintf(f, "\n");
fclose(f);
#endif
STARPU_PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
}
}
  1155. void starpu_perfmodel_update_history(struct starpu_perfmodel *model, struct starpu_task *task, struct starpu_perfmodel_arch * arch, unsigned cpuid, unsigned nimpl, double measured)
  1156. {
  1157. struct _starpu_job *job = _starpu_get_job_associated_to_task(task);
  1158. _starpu_load_perfmodel(model);
  1159. /* Record measurement */
  1160. _starpu_update_perfmodel_history(job, model, arch, cpuid, measured, nimpl);
  1161. /* and save perfmodel on termination */
  1162. _starpu_set_calibrate_flag(1);
  1163. }