/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2014  Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013, 2014  Centre National de la Recherche Scientifique
 * Copyright (C) 2011  Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#if !defined(_WIN32) || defined(__MINGW32__) || defined(__CYGWIN__)
#include <dirent.h>
#include <sys/stat.h>
#endif
#include <errno.h>
#include <common/config.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <common/utils.h>
#include <core/perfmodel/perfmodel.h>
#include <core/jobs.h>
#include <core/workers.h>
#include <datawizard/datawizard.h>
#include <core/perfmodel/regression.h>
#include <common/config.h>
#include <starpu_parameters.h>
#include <common/uthash.h>
#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#define HASH_ADD_UINT32_T(head,field,add) HASH_ADD(hh,head,field,sizeof(uint32_t),add)
#define HASH_FIND_UINT32_T(head,find,out) HASH_FIND(hh,head,find,sizeof(uint32_t),out)

static struct starpu_perfmodel_arch **arch_combs;
static int current_arch_comb;
static int nb_arch_combs;
static starpu_pthread_mutex_t arch_combs_mutex;

struct starpu_perfmodel_history_table
{
	UT_hash_handle hh;
	uint32_t footprint;
	struct starpu_perfmodel_history_entry *history_entry;
};

/* We want more than 10% variance on X to trust regression */
#define VALID_REGRESSION(reg_model) \
	((reg_model)->minx < (9*(reg_model)->maxx)/10 && (reg_model)->nsample >= _STARPU_CALIBRATION_MINIMUM)
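/* Both regression flavours model the execution time T as a power law of the
 * data size S. The linear ("log-log") model fits T = alpha * S^beta by least
 * squares over the accumulated sums of ln(S) and ln(T), i.e. (see
 * _starpu_update_perfmodel_history() below):
 *
 *	beta  = (n*sumlnxlny - sumlnx*sumlny) / (n*sumlnx2 - sumlnx*sumlnx)
 *	alpha = exp((sumlny - beta*sumlnx) / n)
 *
 * The non-linear model T = a * S^b + c is fitted separately by
 * _starpu_regression_non_linear_power() over the whole history list. */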
static starpu_pthread_rwlock_t registered_models_rwlock;
static struct _starpu_perfmodel_list *registered_models = NULL;

void _starpu_perfmodel_malloc_per_arch(struct starpu_perfmodel *model, int comb, int nb_impl)
{
	int i;

	model->state->per_arch[comb] = (struct starpu_perfmodel_per_arch*)malloc(nb_impl*sizeof(struct starpu_perfmodel_per_arch));
	for(i = 0; i < nb_impl; i++)
	{
		memset(&model->state->per_arch[comb][i], 0, sizeof(struct starpu_perfmodel_per_arch));
	}
	model->state->nimpls_set[comb] = nb_impl;
}

void _starpu_perfmodel_malloc_per_arch_is_set(struct starpu_perfmodel *model, int comb, int nb_impl)
{
	int i;

	model->state->per_arch_is_set[comb] = (int*)malloc(nb_impl*sizeof(int));
	for(i = 0; i < nb_impl; i++)
	{
		model->state->per_arch_is_set[comb][i] = 0;
	}
}

void _starpu_perfmodel_arch_combs_realloc(int new_nb_arch_combs)
{
	STARPU_PTHREAD_MUTEX_LOCK(&arch_combs_mutex);
	nb_arch_combs = new_nb_arch_combs;
	arch_combs = (struct starpu_perfmodel_arch**) realloc(arch_combs, nb_arch_combs*sizeof(struct starpu_perfmodel_arch*));
	STARPU_PTHREAD_MUTEX_UNLOCK(&arch_combs_mutex);
}

int starpu_perfmodel_arch_comb_add(int ndevices, struct starpu_perfmodel_device* devices)
{
	if (current_arch_comb >= nb_arch_combs)
	{
		// We need to allocate more arch_combs
		_starpu_perfmodel_arch_combs_realloc(nb_arch_combs + 10);
	}
	STARPU_PTHREAD_MUTEX_LOCK(&arch_combs_mutex);
	arch_combs[current_arch_comb] = (struct starpu_perfmodel_arch*)malloc(sizeof(struct starpu_perfmodel_arch));
	arch_combs[current_arch_comb]->devices = (struct starpu_perfmodel_device*)malloc(ndevices*sizeof(struct starpu_perfmodel_device));
	arch_combs[current_arch_comb]->ndevices = ndevices;
	int dev;
	for(dev = 0; dev < ndevices; dev++)
	{
		arch_combs[current_arch_comb]->devices[dev].type = devices[dev].type;
		arch_combs[current_arch_comb]->devices[dev].devid = devices[dev].devid;
		arch_combs[current_arch_comb]->devices[dev].ncores = devices[dev].ncores;
	}
	/* Remember the new index while we still hold the lock */
	int comb = current_arch_comb++;
	STARPU_PTHREAD_MUTEX_UNLOCK(&arch_combs_mutex);
	return comb;
}

int starpu_perfmodel_arch_comb_get(int ndevices, struct starpu_perfmodel_device *devices)
{
	int comb;
	for(comb = 0; comb < current_arch_comb; comb++)
	{
		int found = 0;
		if(arch_combs[comb]->ndevices == ndevices)
		{
			int dev1, dev2;
			int nfound = 0;
			for(dev1 = 0; dev1 < arch_combs[comb]->ndevices; dev1++)
			{
				for(dev2 = 0; dev2 < ndevices; dev2++)
				{
					if(arch_combs[comb]->devices[dev1].type == devices[dev2].type &&
					   arch_combs[comb]->devices[dev1].devid == devices[dev2].devid &&
					   arch_combs[comb]->devices[dev1].ncores == devices[dev2].ncores)
						nfound++;
				}
			}
			if(nfound == ndevices)
				found = 1;
		}
		if (found)
			return comb;
	}
	return -1;
}
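/* Illustrative lookup-or-register of a combination (a sketch), e.g. for a
 * single CUDA device:
 *
 *	struct starpu_perfmodel_device dev = { .type = STARPU_CUDA_WORKER, .devid = 0, .ncores = 1 };
 *	int comb = starpu_perfmodel_arch_comb_get(1, &dev);
 *	if (comb == -1)
 *		comb = starpu_perfmodel_arch_comb_add(1, &dev);
 */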
static void _free_arch_combs(void)
{
	int i;

	STARPU_PTHREAD_MUTEX_LOCK(&arch_combs_mutex);
	for(i = 0; i < current_arch_comb; i++)
	{
		free(arch_combs[i]->devices);
		free(arch_combs[i]);
	}
	current_arch_comb = 0;
	free(arch_combs);
	STARPU_PTHREAD_MUTEX_UNLOCK(&arch_combs_mutex);
	STARPU_PTHREAD_MUTEX_DESTROY(&arch_combs_mutex);
}

int starpu_get_narch_combs(void)
{
	return current_arch_comb;
}

struct starpu_perfmodel_arch *_starpu_arch_comb_get(int comb)
{
	return arch_combs[comb];
}

size_t _starpu_job_get_data_size(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned impl, struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	int comb = starpu_perfmodel_arch_comb_get(arch->ndevices, arch->devices);

	if (model && model->state->per_arch && comb != -1 && model->state->per_arch[comb] && model->state->per_arch[comb][impl].size_base)
	{
		return model->state->per_arch[comb][impl].size_base(task, arch, impl);
	}
	else if (model && model->size_base)
	{
		return model->size_base(task, impl);
	}
	else
	{
		/* Default: sum the size of all the task's data buffers */
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
		size_t size = 0;

		unsigned buffer;
		for (buffer = 0; buffer < nbuffers; buffer++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, buffer);
			size += _starpu_data_get_size(handle);
		}
		return size;
	}
}

/*
 * History based model
 */
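/* Measurements are kept twice per (comb, impl): in a singly-linked list,
 * which is walked when dumping the model file and when fitting the
 * non-linear regression, and in a uthash table keyed by the 32-bit task
 * footprint for O(1) lookup on the scheduling hot path.
 * insert_history_entry() keeps the two structures in sync. */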
static void insert_history_entry(struct starpu_perfmodel_history_entry *entry, struct starpu_perfmodel_history_list **list, struct starpu_perfmodel_history_table **history_ptr)
{
	struct starpu_perfmodel_history_list *link;
	struct starpu_perfmodel_history_table *table;

	link = (struct starpu_perfmodel_history_list *) malloc(sizeof(struct starpu_perfmodel_history_list));
	link->next = *list;
	link->entry = entry;
	*list = link;

	/* detect concurrency issue */
	//HASH_FIND_UINT32_T(*history_ptr, &entry->footprint, table);
	//STARPU_ASSERT(table == NULL);

	table = (struct starpu_perfmodel_history_table*) malloc(sizeof(*table));
	STARPU_ASSERT(table != NULL);
	table->footprint = entry->footprint;
	table->history_entry = entry;
	HASH_ADD_UINT32_T(*history_ptr, footprint, table);
}

static void dump_reg_model(FILE *f, struct starpu_perfmodel *model, int comb, int impl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;
	per_arch_model = &model->state->per_arch[comb][impl];
	struct starpu_perfmodel_regression_model *reg_model;
	reg_model = &per_arch_model->regression;

	/*
	 * Linear Regression model
	 */

	/* Unless we have enough measurements, we put NaN in the file to indicate the model is invalid */
	double alpha = nan(""), beta = nan("");
	if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		if (reg_model->nsample > 1)
		{
			alpha = reg_model->alpha;
			beta = reg_model->beta;
		}
	}

	fprintf(f, "# sumlnx\tsumlnx2\t\tsumlny\t\tsumlnxlny\talpha\t\tbeta\t\tn\tminx\t\tmaxx\n");
	fprintf(f, "%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\t%-15lu\t%-15lu\n", reg_model->sumlnx, reg_model->sumlnx2, reg_model->sumlny, reg_model->sumlnxlny, alpha, beta, reg_model->nsample, reg_model->minx, reg_model->maxx);

	/*
	 * Non-Linear Regression model
	 */
	double a = nan(""), b = nan(""), c = nan("");
	if (model->type == STARPU_NL_REGRESSION_BASED)
		_starpu_regression_non_linear_power(per_arch_model->list, &a, &b, &c);

	fprintf(f, "# a\t\tb\t\tc\n");
	fprintf(f, "%-15le\t%-15le\t%-15le\n", a, b, c);
}

static void scan_reg_model(FILE *f, struct starpu_perfmodel_regression_model *reg_model)
{
	int res;

	/*
	 * Linear Regression model
	 */
	_starpu_drop_comments(f);

	res = fscanf(f, "%le\t%le\t%le\t%le", &reg_model->sumlnx, &reg_model->sumlnx2, &reg_model->sumlny, &reg_model->sumlnxlny);
	STARPU_ASSERT_MSG(res == 4, "Incorrect performance model file");
	res = _starpu_read_double(f, "\t%le", &reg_model->alpha);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "\t%le", &reg_model->beta);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = fscanf(f, "\t%u\t%lu\t%lu\n", &reg_model->nsample, &reg_model->minx, &reg_model->maxx);
	STARPU_ASSERT_MSG(res == 3, "Incorrect performance model file");

	/* If any of the parameters describing the linear regression model is NaN, the model is invalid */
	unsigned invalid = (isnan(reg_model->alpha)||isnan(reg_model->beta));
	reg_model->valid = !invalid && VALID_REGRESSION(reg_model);

	/*
	 * Non-Linear Regression model
	 */
	_starpu_drop_comments(f);

	res = _starpu_read_double(f, "%le\t", &reg_model->a);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "%le\t", &reg_model->b);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");
	res = _starpu_read_double(f, "%le\n", &reg_model->c);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");

	/* If any of the parameters describing the non-linear regression model is NaN, the model is invalid */
	unsigned nl_invalid = (isnan(reg_model->a)||isnan(reg_model->b)||isnan(reg_model->c));
	reg_model->nl_valid = !nl_invalid && VALID_REGRESSION(reg_model);
}
static void dump_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
{
	fprintf(f, "%08x\t%-15lu\t%-15le\t%-15le\t%-15le\t%-15le\t%-15le\t%u\n", entry->footprint, (unsigned long) entry->size, entry->flops, entry->mean, entry->deviation, entry->sum, entry->sum2, entry->nsample);
}

static void scan_history_entry(FILE *f, struct starpu_perfmodel_history_entry *entry)
{
	int res;

	_starpu_drop_comments(f);

	/* In case entry is NULL, we just drop these values */
	unsigned nsample;
	uint32_t footprint;
	unsigned long size; /* in bytes */
	double flops;
	double mean;
	double deviation;
	double sum;
	double sum2;

	char line[256];
	char *ret;

	ret = fgets(line, sizeof(line), f);
	STARPU_ASSERT(ret);
	STARPU_ASSERT(strchr(line, '\n'));

	/* Read the values from the file */
	res = sscanf(line, "%x\t%lu\t%le\t%le\t%le\t%le\t%le\t%u", &footprint, &size, &flops, &mean, &deviation, &sum, &sum2, &nsample);
	if (res != 8)
	{
		/* Fall back to the old format, which did not record flops */
		flops = 0.;
		res = sscanf(line, "%x\t%lu\t%le\t%le\t%le\t%le\t%u", &footprint, &size, &mean, &deviation, &sum, &sum2, &nsample);
		STARPU_ASSERT_MSG(res == 7, "Incorrect performance model file");
	}

	if (entry)
	{
		entry->footprint = footprint;
		entry->size = size;
		entry->flops = flops;
		entry->mean = mean;
		entry->deviation = deviation;
		entry->sum = sum;
		entry->sum2 = sum2;
		entry->nsample = nsample;
	}
}

static void parse_per_arch_model_file(FILE *f, struct starpu_perfmodel_per_arch *per_arch_model, unsigned scan_history)
{
	unsigned nentries;

	_starpu_drop_comments(f);

	int res = fscanf(f, "%u\n", &nentries);
	STARPU_ASSERT_MSG(res == 1, "Incorrect performance model file");

	scan_reg_model(f, &per_arch_model->regression);

	/* parse entries */
	unsigned i;
	for (i = 0; i < nentries; i++)
	{
		struct starpu_perfmodel_history_entry *entry = NULL;
		if (scan_history)
		{
			entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
			STARPU_ASSERT(entry);

			/* Tell helgrind that we do not care about
			 * racing access to the sampling, we only want a
			 * good-enough estimation */
			STARPU_HG_DISABLE_CHECKING(entry->nsample);
			STARPU_HG_DISABLE_CHECKING(entry->mean);
			entry->nerror = 0;
		}

		scan_history_entry(f, entry);

		/* insert the entry in the hashtable and the list structures */
		/* TODO: Insert it at the end of the list, to avoid reversing
		 * the order... But efficiently! We may have a lot of entries */
		if (scan_history)
			insert_history_entry(entry, &per_arch_model->list, &per_arch_model->history);
	}
}

static void parse_arch(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, int comb)
{
	struct starpu_perfmodel_per_arch dummy;
	unsigned nimpls, implmax, impl, i, ret;

	/* Parse the number of implementations */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%u\n", &nimpls);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	if (model != NULL)
	{
		/* Parse each implementation */
		implmax = STARPU_MIN(nimpls, STARPU_MAXIMPLEMENTATIONS);
		model->state->nimpls[comb] = implmax;
		if (!model->state->per_arch[comb])
		{
			_starpu_perfmodel_malloc_per_arch(model, comb, STARPU_MAXIMPLEMENTATIONS);
		}
		if (!model->state->per_arch_is_set[comb])
		{
			_starpu_perfmodel_malloc_per_arch_is_set(model, comb, STARPU_MAXIMPLEMENTATIONS);
		}

		for (impl = 0; impl < implmax; impl++)
		{
			struct starpu_perfmodel_per_arch *per_arch_model = &model->state->per_arch[comb][impl];
			model->state->per_arch_is_set[comb][impl] = 1;
			parse_per_arch_model_file(f, per_arch_model, scan_history);
		}
	}
	else
	{
		impl = 0;
	}

	/* If the number of implementations is greater than
	 * STARPU_MAXIMPLEMENTATIONS, we skip the remaining implementations */
	for (i = impl; i < nimpls; i++)
		parse_per_arch_model_file(f, &dummy, 0);
}
static enum starpu_worker_archtype _get_enum_type(int type)
{
	switch(type)
	{
	case 0:
		return STARPU_CPU_WORKER;
	case 1:
		return STARPU_CUDA_WORKER;
	case 2:
		return STARPU_OPENCL_WORKER;
	case 3:
		return STARPU_MIC_WORKER;
	case 4:
		return STARPU_SCC_WORKER;
	default:
		STARPU_ABORT();
	}
}

static void parse_comb(FILE *f, struct starpu_perfmodel *model, unsigned scan_history, int comb)
{
	int ndevices = 0;

	_starpu_drop_comments(f);
	int ret = fscanf(f, "%d\n", &ndevices);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

	struct starpu_perfmodel_device devices[ndevices];

	int dev;
	for(dev = 0; dev < ndevices; dev++)
	{
		enum starpu_worker_archtype dev_type;
		_starpu_drop_comments(f);
		int type;
		ret = fscanf(f, "%d\n", &type);
		STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
		dev_type = _get_enum_type(type);

		int dev_id;
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\n", &dev_id);
		STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

		int ncores;
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\n", &ncores);
		STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");

		devices[dev].type = dev_type;
		devices[dev].devid = dev_id;
		devices[dev].ncores = ncores;
	}
	int id_comb = starpu_perfmodel_arch_comb_get(ndevices, devices);
	if(id_comb == -1)
		id_comb = starpu_perfmodel_arch_comb_add(ndevices, devices);

	model->state->combs[comb] = id_comb;
	parse_arch(f, model, scan_history, id_comb);
}

static void parse_model_file(FILE *f, struct starpu_perfmodel *model, unsigned scan_history)
{
	int ret, version=0;

	/* Parsing performance model version */
	_starpu_drop_comments(f);
	ret = fscanf(f, "%d\n", &version);
	/* Check that the read succeeded before inspecting the version number */
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
	STARPU_ASSERT_MSG(version == _STARPU_PERFMODEL_VERSION, "Incorrect performance model file with a model version %d not being the current model version (%d)\n",
			  version, _STARPU_PERFMODEL_VERSION);

	int ncombs = 0;
	_starpu_drop_comments(f);
	ret = fscanf(f, "%d\n", &ncombs);
	STARPU_ASSERT_MSG(ret == 1, "Incorrect performance model file");
	if(ncombs > 0)
	{
		model->state->ncombs = ncombs;
	}

	if (ncombs > nb_arch_combs)
	{
		// The model has more combs than the original number of arch_combs, we need to reallocate
		_starpu_perfmodel_arch_combs_realloc(ncombs);
		_starpu_perfmodel_realloc(model, nb_arch_combs);
	}

	int comb;
	for(comb = 0; comb < ncombs; comb++)
		parse_comb(f, model, scan_history, comb);
}
static void dump_per_arch_model_file(FILE *f, struct starpu_perfmodel *model, int comb, unsigned impl)
{
	struct starpu_perfmodel_per_arch *per_arch_model;

	per_arch_model = &model->state->per_arch[comb][impl];

	/* count the number of elements in the lists */
	struct starpu_perfmodel_history_list *ptr = NULL;
	unsigned nentries = 0;

	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		/* Dump the list of all entries in the history */
		ptr = per_arch_model->list;
		while(ptr)
		{
			nentries++;
			ptr = ptr->next;
		}
	}

	/* header */
	char archname[32];
	starpu_perfmodel_get_arch_name(arch_combs[comb], archname, 32, impl);
	fprintf(f, "#####\n");
	fprintf(f, "# Model for %s\n", archname);
	fprintf(f, "# number of entries\n%u\n", nentries);

	dump_reg_model(f, model, comb, impl);

	/* Dump the history into the model file in case it is necessary */
	if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
	{
		fprintf(f, "# hash\t\tsize\t\tflops\t\tmean (us)\tdev (us)\tsum\t\tsum2\t\tn\n");
		ptr = per_arch_model->list;
		while (ptr)
		{
			dump_history_entry(f, ptr->entry);
			ptr = ptr->next;
		}
	}

	fprintf(f, "\n");
}

static void dump_model_file(FILE *f, struct starpu_perfmodel *model)
{
	fprintf(f, "##################\n");
	fprintf(f, "# Performance Model Version\n");
	fprintf(f, "%d\n\n", _STARPU_PERFMODEL_VERSION);

	int ncombs = model->state->ncombs;

	fprintf(f, "####################\n");
	fprintf(f, "# COMBs\n");
	fprintf(f, "# number of combinations\n");
	fprintf(f, "%d\n", ncombs);

	int i, impl, dev;
	for(i = 0; i < ncombs; i++)
	{
		int comb = model->state->combs[i];
		int ndevices = arch_combs[comb]->ndevices;

		fprintf(f, "####################\n");
		fprintf(f, "# COMB_%d\n", comb);
		fprintf(f, "# number of devices\n");
		fprintf(f, "%d\n", ndevices);

		for(dev = 0; dev < ndevices; dev++)
		{
			fprintf(f, "####################\n");
			fprintf(f, "# DEV_%d\n", dev);
			fprintf(f, "# device type (CPU - 0, CUDA - 1, OPENCL - 2, MIC - 3, SCC - 4)\n");
			fprintf(f, "%d\n", arch_combs[comb]->devices[dev].type);

			fprintf(f, "####################\n");
			fprintf(f, "# DEV_%d\n", dev);
			fprintf(f, "# device id\n");
			fprintf(f, "%d\n", arch_combs[comb]->devices[dev].devid);

			fprintf(f, "####################\n");
			fprintf(f, "# DEV_%d\n", dev);
			fprintf(f, "# number of cores\n");
			fprintf(f, "%d\n", arch_combs[comb]->devices[dev].ncores);
		}

		int nimpls = model->state->nimpls[comb];
		fprintf(f, "##########\n");
		fprintf(f, "# number of implementations\n");
		fprintf(f, "%d\n", nimpls);
		for (impl = 0; impl < nimpls; impl++)
		{
			dump_per_arch_model_file(f, model, comb, impl);
		}
	}
}
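/* For reference, the overall layout produced by dump_model_file() (and read
 * back by parse_model_file()) looks as follows; values are illustrative, and
 * "#" lines are comments skipped by _starpu_drop_comments():
 *
 *	44	<- performance model version (_STARPU_PERFMODEL_VERSION)
 *	1	<- number of combinations
 *	1	<- COMB_0: number of devices
 *	1	<- device type (1 = CUDA)
 *	0	<- device id
 *	1	<- number of cores
 *	1	<- number of implementations
 *	3	<- per implementation: number of history entries
 *	<linear regression sums, alpha, beta, n, minx, maxx>
 *	<non-linear regression a, b, c>
 *	<one history entry per line: hash, size, flops, mean, dev, sum, sum2, n>
 */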
void _starpu_perfmodel_realloc(struct starpu_perfmodel *model, int nb)
{
	int i;

	STARPU_ASSERT(nb > model->state->ncombs_set);
	model->state->per_arch = (struct starpu_perfmodel_per_arch**) realloc(model->state->per_arch, nb*sizeof(struct starpu_perfmodel_per_arch*));
	model->state->per_arch_is_set = (int**) realloc(model->state->per_arch_is_set, nb*sizeof(int*));
	model->state->nimpls = (int *)realloc(model->state->nimpls, nb*sizeof(int));
	model->state->nimpls_set = (int *)realloc(model->state->nimpls_set, nb*sizeof(int));
	model->state->combs = (int*)realloc(model->state->combs, nb*sizeof(int));
	for(i = model->state->ncombs_set; i < nb; i++)
	{
		model->state->per_arch[i] = NULL;
		model->state->per_arch_is_set[i] = NULL;
		model->state->nimpls[i] = 0;
		model->state->nimpls_set[i] = 0;
	}
	model->state->ncombs_set = nb;
}

void starpu_perfmodel_init(FILE *f, struct starpu_perfmodel *model)
{
	int already_init;
	int i;

	STARPU_ASSERT(model);

	STARPU_PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
	already_init = model->is_init;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

	if (already_init)
		return;

	/* The model is still not loaded so we grab the lock in write mode, and
	 * if it's not loaded once we have the lock, we do load it. */
	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	/* Was the model initialized since the previous test ? */
	if (model->is_init)
	{
		STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
		return;
	}

	model->state = malloc(sizeof(struct _starpu_perfmodel_state));
	STARPU_PTHREAD_RWLOCK_INIT(&model->state->model_rwlock, NULL);

	model->state->per_arch = (struct starpu_perfmodel_per_arch**) malloc(nb_arch_combs*sizeof(struct starpu_perfmodel_per_arch*));
	model->state->per_arch_is_set = (int**) malloc(nb_arch_combs*sizeof(int*));
	model->state->nimpls = (int *)malloc(nb_arch_combs*sizeof(int));
	model->state->nimpls_set = (int *)malloc(nb_arch_combs*sizeof(int));
	model->state->combs = (int*)malloc(nb_arch_combs*sizeof(int));
	model->state->ncombs = 0;
	model->state->ncombs_set = nb_arch_combs;

	for(i = 0; i < nb_arch_combs; i++)
	{
		model->state->per_arch[i] = NULL;
		model->state->per_arch_is_set[i] = NULL;
		model->state->nimpls[i] = 0;
		model->state->nimpls_set[i] = 0;
	}

	if(f)
		parse_model_file(f, model, 0);

	/* add the model to a linked list */
	struct _starpu_perfmodel_list *node = (struct _starpu_perfmodel_list *) malloc(sizeof(struct _starpu_perfmodel_list));

	node->model = model;
	//model->debug_modelid = debug_modelid++;

	/* put this model at the beginning of the list */
	node->next = registered_models;
	registered_models = node;

	model->is_init = 1;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}

static void get_model_debug_path(struct starpu_perfmodel *model, const char *arch, char *path, size_t maxlen)
{
	STARPU_ASSERT(path);

	_starpu_get_perf_model_dir_debug(path, maxlen);
	/* Note: strncat()'s size argument is the number of characters that may
	 * be appended, not the total buffer size, hence the remaining-space
	 * computations below */
	strncat(path, model->symbol, maxlen - strlen(path) - 1);

	char hostname[65];
	_starpu_gethostname(hostname, sizeof(hostname));
	strncat(path, ".", maxlen - strlen(path) - 1);
	strncat(path, hostname, maxlen - strlen(path) - 1);
	strncat(path, ".", maxlen - strlen(path) - 1);
	strncat(path, arch, maxlen - strlen(path) - 1);
	strncat(path, ".debug", maxlen - strlen(path) - 1);
}
static void get_model_path(struct starpu_perfmodel *model, char *path, size_t maxlen)
{
	_starpu_get_perf_model_dir_codelets(path, maxlen);
	strncat(path, model->symbol, maxlen - strlen(path) - 1);

	const char *dot = strrchr(model->symbol, '.');
	if (dot == NULL)
	{
		char hostname[65];
		_starpu_gethostname(hostname, sizeof(hostname));
		strncat(path, ".", maxlen - strlen(path) - 1);
		strncat(path, hostname, maxlen - strlen(path) - 1);
	}
}

static void save_history_based_model(struct starpu_perfmodel *model)
{
	STARPU_ASSERT(model);
	STARPU_ASSERT(model->symbol);

	/* TODO checks */

	/* filename = $STARPU_PERF_MODEL_DIR/codelets/symbol.hostname */
	char path[256];
	get_model_path(model, path, 256);

	_STARPU_DEBUG("Opening performance model file %s for model %s\n", path, model->symbol);

	/* overwrite existing file, or create it */
	FILE *f;
	f = fopen(path, "w+");
	STARPU_ASSERT_MSG(f, "Could not save performance model %s\n", path);

	_starpu_fwrlock(f);
	_starpu_ftruncate(f);
	dump_model_file(f, model);
	_starpu_fwrunlock(f);

	fclose(f);
}

static void _starpu_dump_registered_models(void)
{
#ifndef STARPU_SIMGRID
	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	struct _starpu_perfmodel_list *node;
	node = registered_models;

	_STARPU_DEBUG("DUMP MODELS !\n");

	while (node)
	{
		if (node->model->is_init)
			save_history_based_model(node->model);
		node = node->next;
	}

	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
#endif
}
void _starpu_initialize_registered_performance_models(void)
{
	/* make sure the performance model directory exists (or create it) */
	_starpu_create_sampling_directory_if_needed();

	registered_models = NULL;

	STARPU_PTHREAD_RWLOCK_INIT(&registered_models_rwlock, NULL);

	struct _starpu_machine_config *conf = _starpu_get_machine_config();
	unsigned ncores = conf->topology.nhwcpus;
	unsigned ncuda = conf->topology.nhwcudagpus;
	unsigned nopencl = conf->topology.nhwopenclgpus;
	unsigned nmic = 0;
	unsigned i;
	for(i = 0; i < conf->topology.nhwmicdevices; i++)
		nmic += conf->topology.nhwmiccores[i];
	unsigned nscc = conf->topology.nhwscc;

	// We used to allocate 2**(ncores + ncuda + nopencl + nmic + nscc), this is too big
	// We now allocate only 2*(ncores + ncuda + nopencl + nmic + nscc), and reallocate when necessary in starpu_perfmodel_arch_comb_add
	nb_arch_combs = 2 * (ncores + ncuda + nopencl + nmic + nscc);
	arch_combs = (struct starpu_perfmodel_arch**) malloc(nb_arch_combs*sizeof(struct starpu_perfmodel_arch*));
	current_arch_comb = 0;
	STARPU_PTHREAD_MUTEX_INIT(&arch_combs_mutex, NULL);
}

void _starpu_deinitialize_performance_model(struct starpu_perfmodel *model)
{
	if(model->is_init && model->state && model->state->per_arch != NULL)
	{
		int i;
		for(i=0 ; i<model->state->ncombs_set ; i++)
		{
			if (model->state->per_arch[i])
			{
				int impl;
				for(impl=0 ; impl<model->state->nimpls_set[i] ; impl++)
				{
					struct starpu_perfmodel_per_arch *archmodel = &model->state->per_arch[i][impl];
					if (archmodel->history)
					{
						struct starpu_perfmodel_history_list *list, *plist;
						struct starpu_perfmodel_history_table *entry, *tmp;

						HASH_ITER(hh, archmodel->history, entry, tmp)
						{
							HASH_DEL(archmodel->history, entry);
							free(entry);
						}
						archmodel->history = NULL;

						list = archmodel->list;
						while (list)
						{
							free(list->entry);
							plist = list;
							list = list->next;
							free(plist);
						}
						archmodel->list = NULL;
					}
				}
				free(model->state->per_arch[i]);
				model->state->per_arch[i] = NULL;
				free(model->state->per_arch_is_set[i]);
				model->state->per_arch_is_set[i] = NULL;
			}
		}
		free(model->state->per_arch);
		model->state->per_arch = NULL;
		free(model->state->per_arch_is_set);
		model->state->per_arch_is_set = NULL;
		free(model->state->nimpls);
		model->state->nimpls = NULL;
		free(model->state->nimpls_set);
		model->state->nimpls_set = NULL;
		free(model->state->combs);
		model->state->combs = NULL;
		model->state->ncombs = 0;
	}
	model->is_init = 0;
	model->is_loaded = 0;
}

void _starpu_deinitialize_registered_performance_models(void)
{
	if (_starpu_get_calibrate_flag())
		_starpu_dump_registered_models();

	STARPU_PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

	struct _starpu_perfmodel_list *node, *pnode;
	node = registered_models;

	_STARPU_DEBUG("FREE MODELS !\n");

	while (node)
	{
		struct starpu_perfmodel *model = node->model;

		STARPU_PTHREAD_RWLOCK_WRLOCK(&model->state->model_rwlock);
		_starpu_deinitialize_performance_model(model);
		STARPU_PTHREAD_RWLOCK_UNLOCK(&model->state->model_rwlock);

		free(node->model->state);
		node->model->state = NULL;

		pnode = node;
		node = node->next;
		free(pnode);
	}
	registered_models = NULL;

	STARPU_PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
	STARPU_PTHREAD_RWLOCK_DESTROY(&registered_models_rwlock);
	_free_arch_combs();
}
/*
 * XXX: We should probably factorize the beginning of the _starpu_load_*_model
 * functions. This is a bit tricky though, because we must be sure to unlock
 * registered_models_rwlock at the right place.
 */
void _starpu_load_per_arch_based_model(struct starpu_perfmodel *model)
{
	starpu_perfmodel_init(NULL, model);
}

void _starpu_load_common_based_model(struct starpu_perfmodel *model)
{
	starpu_perfmodel_init(NULL, model);
}

/* We first try to grab the global lock in read mode to check whether the model
 * was loaded or not (this is very likely to have been already loaded). If the
 * model was not loaded yet, we take the lock in write mode, and if the model
 * is still not loaded once we have the lock, we do load it. */
void _starpu_load_history_based_model(struct starpu_perfmodel *model, unsigned scan_history)
{
	starpu_perfmodel_init(NULL, model);

	STARPU_PTHREAD_RWLOCK_WRLOCK(&model->state->model_rwlock);

	if(!model->is_loaded)
	{
		char path[256];
		get_model_path(model, path, 256);

		_STARPU_DEBUG("Opening performance model file %s for model %s ...\n", path, model->symbol);

		unsigned calibrate_flag = _starpu_get_calibrate_flag();
		model->benchmarking = calibrate_flag;

		/* try to open an existing file and load it */
		int res;
		res = access(path, F_OK);
		if (res == 0)
		{
			if (calibrate_flag == 2)
			{
				/* The user specified that the performance model should
				 * be overwritten, so we don't load the existing file ! */
				_STARPU_DEBUG("Overwrite existing file\n");
			}
			else
			{
				/* We load the available file */
				_STARPU_DEBUG("File exists\n");
				FILE *f;
				f = fopen(path, "r");
				STARPU_ASSERT(f);

				_starpu_frdlock(f);
				parse_model_file(f, model, scan_history);
				_starpu_frdunlock(f);

				fclose(f);
			}
			_STARPU_DEBUG("Performance model file %s for model %s is loaded\n", path, model->symbol);
		}
		else
		{
			_STARPU_DEBUG("Performance model file %s does not exist\n", path);
		}
		model->is_loaded = 1;
	}

	STARPU_PTHREAD_RWLOCK_UNLOCK(&model->state->model_rwlock);
}

void starpu_perfmodel_directory(FILE *output)
{
	char perf_model_dir[256];
	_starpu_get_perf_model_dir_codelets(perf_model_dir, 256);
	fprintf(output, "directory: <%s>\n", perf_model_dir);
}
/* This function is intended to be used by external tools that should read
 * the performance model files */
int starpu_perfmodel_list(FILE *output)
{
#if !defined(_WIN32) || defined(__MINGW32__) || defined(__CYGWIN__)
	char path[256];
	DIR *dp;
	struct dirent *ep;

	char perf_model_dir_codelets[256];
	_starpu_get_perf_model_dir_codelets(perf_model_dir_codelets, 256);

	strncpy(path, perf_model_dir_codelets, 256);
	dp = opendir(path);
	if (dp != NULL)
	{
		while ((ep = readdir(dp)))
		{
			if (strcmp(ep->d_name, ".") && strcmp(ep->d_name, ".."))
				fprintf(output, "file: <%s>\n", ep->d_name);
		}
		closedir (dp);
	}
	else
	{
		_STARPU_DISP("Could not open the perfmodel directory <%s>: %s\n", path, strerror(errno));
	}
	return 0;
#else
	fprintf(stderr, "Listing perfmodels is not implemented on pure Windows yet\n");
	return 1;
#endif
}

/* This function is intended to be used by external tools that should read the
 * performance model files */
/* TODO: write a cleanup function to free the symbol and the history */
int starpu_perfmodel_load_symbol(const char *symbol, struct starpu_perfmodel *model)
{
	model->symbol = strdup(symbol);

	/* where is the file if it exists ? */
	char path[256];
	get_model_path(model, path, 256);
	// _STARPU_DEBUG("get_model_path -> %s\n", path);

	/* does it exist ? */
	int res;
	res = access(path, F_OK);
	if (res)
	{
		const char *dot = strrchr(symbol, '.');
		if (dot)
		{
			/* strip the suffix after the last dot and retry */
			char *symbol2 = strdup(symbol);
			symbol2[dot-symbol] = '\0';
			int ret;
			_STARPU_DISP("note: loading history from %s instead of %s\n", symbol2, symbol);
			ret = starpu_perfmodel_load_symbol(symbol2, model);
			free(symbol2);
			return ret;
		}
		_STARPU_DISP("There is no performance model for symbol %s\n", symbol);
		return 1;
	}

	FILE *f = fopen(path, "r");
	STARPU_ASSERT(f);

	_starpu_frdlock(f);
	starpu_perfmodel_init(NULL, model);
	rewind(f);
	parse_model_file(f, model, 1);
	_starpu_frdunlock(f);

	STARPU_ASSERT(fclose(f) == 0);
	return 0;
}

int starpu_perfmodel_unload_model(struct starpu_perfmodel *model)
{
	free((char *)model->symbol);
	_starpu_deinitialize_performance_model(model);
	return 0;
}
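/* Illustrative use from an external tool (a sketch, with a hypothetical
 * "my_codelet" symbol): load the model saved for a codelet symbol, print its
 * combinations, then release it:
 *
 *	struct starpu_perfmodel model = { 0 };
 *	if (starpu_perfmodel_load_symbol("my_codelet", &model) == 0)
 *	{
 *		starpu_perfmodel_list_combs(stdout, &model);
 *		starpu_perfmodel_unload_model(&model);
 *	}
 */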
char* starpu_perfmodel_get_archtype_name(enum starpu_worker_archtype archtype)
{
	switch(archtype)
	{
	case(STARPU_CPU_WORKER):
		return "cpu";
	case(STARPU_CUDA_WORKER):
		return "cuda";
	case(STARPU_OPENCL_WORKER):
		return "opencl";
	case(STARPU_MIC_WORKER):
		return "mic";
	case(STARPU_SCC_WORKER):
		return "scc";
	default:
		STARPU_ABORT();
	}
}

void starpu_perfmodel_get_arch_name(struct starpu_perfmodel_arch* arch, char *archname, size_t maxlen, unsigned impl)
{
	int comb = starpu_perfmodel_arch_comb_get(arch->ndevices, arch->devices);
	STARPU_ASSERT(comb != -1);

	snprintf(archname, maxlen, "Comb%d_impl%u", comb, impl);
}

void starpu_perfmodel_debugfilepath(struct starpu_perfmodel *model,
				    struct starpu_perfmodel_arch* arch, char *path, size_t maxlen, unsigned nimpl)
{
	int comb = starpu_perfmodel_arch_comb_get(arch->ndevices, arch->devices);
	STARPU_ASSERT(comb != -1);

	char archname[32];
	starpu_perfmodel_get_arch_name(arch, archname, 32, nimpl);

	STARPU_ASSERT(path);
	get_model_debug_path(model, archname, path, maxlen);
}

double _starpu_regression_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j, unsigned nimpl)
{
	int comb;
	double exp = NAN;
	size_t size;
	struct starpu_perfmodel_regression_model *regmodel;

	comb = starpu_perfmodel_arch_comb_get(arch->ndevices, arch->devices);
	if(comb == -1)
		return NAN;
	if (model->state->per_arch[comb] == NULL)
		// The model has not been executed on this combination
		return NAN;

	regmodel = &model->state->per_arch[comb][nimpl].regression;
	size = _starpu_job_get_data_size(model, arch, nimpl, j);

	/* Only trust the regression within (roughly) the calibrated size range */
	if (regmodel->valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
		exp = regmodel->alpha*pow((double)size, regmodel->beta);

	return exp;
}

double _starpu_non_linear_regression_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j, unsigned nimpl)
{
	int comb;
	double exp = NAN;
	size_t size;
	struct starpu_perfmodel_regression_model *regmodel;

	comb = starpu_perfmodel_arch_comb_get(arch->ndevices, arch->devices);
	if(comb == -1)
		return NAN;
	if (model->state->per_arch[comb] == NULL)
		// The model has not been executed on this combination
		return NAN;

	regmodel = &model->state->per_arch[comb][nimpl].regression;
	size = _starpu_job_get_data_size(model, arch, nimpl, j);

	if (regmodel->nl_valid && size >= regmodel->minx * 0.9 && size <= regmodel->maxx * 1.1)
		exp = regmodel->a*pow((double)size, regmodel->b) + regmodel->c;
	else
	{
		/* Fall back to the history entry for this exact footprint */
		uint32_t key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);
		struct starpu_perfmodel_per_arch *per_arch_model = &model->state->per_arch[comb][nimpl];
		struct starpu_perfmodel_history_table *history;
		struct starpu_perfmodel_history_table *entry;

		STARPU_PTHREAD_RWLOCK_RDLOCK(&model->state->model_rwlock);
		history = per_arch_model->history;
		HASH_FIND_UINT32_T(history, &key, entry);
		STARPU_PTHREAD_RWLOCK_UNLOCK(&model->state->model_rwlock);

		/* Here helgrind would shout that this is unprotected access.
		 * We do not care about racing access to the mean, we only want
		 * a good-enough estimation */
		if (entry && entry->history_entry && entry->history_entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
			exp = entry->history_entry->mean;

		STARPU_HG_DISABLE_CHECKING(model->benchmarking);
		if (isnan(exp) && !model->benchmarking)
		{
			char archname[32];

			starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
			_STARPU_DISP("Warning: model %s is not calibrated enough for %s (only %u measurements), forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname, entry && entry->history_entry ? entry->history_entry->nsample : 0);
			_starpu_set_calibrate_flag(1);
			model->benchmarking = 1;
		}
	}

	return exp;
}
double _starpu_history_based_job_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, struct _starpu_job *j, unsigned nimpl)
{
	int comb;
	double exp = NAN;
	struct starpu_perfmodel_per_arch *per_arch_model;
	struct starpu_perfmodel_history_entry *entry;
	struct starpu_perfmodel_history_table *history, *elt;
	uint32_t key;

	comb = starpu_perfmodel_arch_comb_get(arch->ndevices, arch->devices);
	if(comb == -1)
		return NAN;
	if (model->state->per_arch[comb] == NULL)
		// The model has not been executed on this combination
		return NAN;

	per_arch_model = &model->state->per_arch[comb][nimpl];

	key = _starpu_compute_buffers_footprint(model, arch, nimpl, j);

	STARPU_PTHREAD_RWLOCK_RDLOCK(&model->state->model_rwlock);
	history = per_arch_model->history;
	HASH_FIND_UINT32_T(history, &key, elt);
	entry = (elt == NULL) ? NULL : elt->history_entry;
	STARPU_PTHREAD_RWLOCK_UNLOCK(&model->state->model_rwlock);

	/* Here helgrind would shout that this is unprotected access.
	 * We do not care about racing access to the mean, we only want
	 * a good-enough estimation */
	if (entry && entry->nsample >= _STARPU_CALIBRATION_MINIMUM)
		/* TODO: report differently if we've scheduled really enough
		 * of that task and the scheduler should perhaps put it aside */
		/* Calibrated enough */
		exp = entry->mean;

	STARPU_HG_DISABLE_CHECKING(model->benchmarking);
	if (isnan(exp) && !model->benchmarking)
	{
		char archname[32];

		starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), nimpl);
		_STARPU_DISP("Warning: model %s is not calibrated enough for %s (only %u measurements), forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol, archname, entry ? entry->nsample : 0);
		_starpu_set_calibrate_flag(1);
		model->benchmarking = 1;
	}

	return exp;
}

/* Note: "permodel" (sic) in the public name */
double starpu_permodel_history_based_expected_perf(struct starpu_perfmodel *model, struct starpu_perfmodel_arch * arch, uint32_t footprint)
{
	/* Build a dummy job with just the footprint filled in; the remaining
	 * fields are zero-initialized, so implementation 0 is queried */
	struct _starpu_job j =
	{
		.footprint = footprint,
		.footprint_is_computed = 1,
	};
	return _starpu_history_based_job_expected_perf(model, arch, &j, j.nimpl);
}
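/* Illustrative query (a sketch): given a footprint previously computed by
 * _starpu_compute_buffers_footprint(), ask for the mean measured duration
 * (in µs) on a given architecture combination:
 *
 *	double us = starpu_permodel_history_based_expected_perf(&model, arch, footprint);
 *	if (!isnan(us))
 *		... use the estimation ...
 */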
int _starpu_perfmodel_create_comb_if_needed(struct starpu_perfmodel_arch* arch)
{
	int comb = starpu_perfmodel_arch_comb_get(arch->ndevices, arch->devices);
	if(comb == -1)
		comb = starpu_perfmodel_arch_comb_add(arch->ndevices, arch->devices);
	return comb;
}

void _starpu_update_perfmodel_history(struct _starpu_job *j, struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned cpuid STARPU_ATTRIBUTE_UNUSED, double measured, unsigned impl)
{
	if (model)
	{
		int c;
		unsigned found = 0;
		int comb = _starpu_perfmodel_create_comb_if_needed(arch);

		for(c = 0; c < model->state->ncombs; c++)
		{
			if(model->state->combs[c] == comb)
			{
				found = 1;
				break;
			}
		}

		if(!found)
		{
			if (model->state->ncombs + 1 >= model->state->ncombs_set)
			{
				// The number of combinations is bigger than what was initially allocated, we need to reallocate.
				// Do not reallocate just 1 extra comb, rather reallocate 5 to avoid too frequent calls to _starpu_perfmodel_realloc.
				_starpu_perfmodel_realloc(model, model->state->ncombs_set+5);
			}
			model->state->combs[model->state->ncombs++] = comb;
		}

		STARPU_PTHREAD_RWLOCK_WRLOCK(&model->state->model_rwlock);

		if(!model->state->per_arch[comb])
		{
			_starpu_perfmodel_malloc_per_arch(model, comb, STARPU_MAXIMPLEMENTATIONS);
			_starpu_perfmodel_malloc_per_arch_is_set(model, comb, STARPU_MAXIMPLEMENTATIONS);
		}

		struct starpu_perfmodel_per_arch *per_arch_model = &model->state->per_arch[comb][impl];
		if (model->state->per_arch_is_set[comb][impl] == 0)
		{
			// We are adding a new implementation for the given comb and the given impl
			model->state->nimpls[comb]++;
			model->state->per_arch_is_set[comb][impl] = 1;
		}

		if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_perfmodel_history_entry *entry;
			struct starpu_perfmodel_history_table *elt;
			struct starpu_perfmodel_history_list **list;
			uint32_t key = _starpu_compute_buffers_footprint(model, arch, impl, j);

			list = &per_arch_model->list;

			HASH_FIND_UINT32_T(per_arch_model->history, &key, elt);
			entry = (elt == NULL) ? NULL : elt->history_entry;

			if (!entry)
			{
				/* this is the first entry with such a footprint */
				entry = (struct starpu_perfmodel_history_entry *) malloc(sizeof(struct starpu_perfmodel_history_entry));
				STARPU_ASSERT(entry);

				/* Tell helgrind that we do not care about
				 * racing access to the sampling, we only want a
				 * good-enough estimation */
				STARPU_HG_DISABLE_CHECKING(entry->nsample);
				STARPU_HG_DISABLE_CHECKING(entry->mean);

				/* Do not take the first measurement into account, it is very often quite bogus */
				/* TODO: it'd be good to use a better estimation heuristic, like the median, or latest n values, etc. */
				entry->mean = 0;
				entry->sum = 0;
				entry->deviation = 0.0;
				entry->sum2 = 0;

				entry->size = _starpu_job_get_data_size(model, arch, impl, j);
				entry->flops = j->task->flops;

				entry->footprint = key;
				entry->nsample = 0;
				entry->nerror = 0;

				insert_history_entry(entry, list, &per_arch_model->history);
			}
			else
			{
				/* There is already an entry with the same footprint */
				double local_deviation = measured/entry->mean;
				int historymaxerror = starpu_get_env_number_default("STARPU_HISTORY_MAX_ERROR", STARPU_HISTORYMAXERROR);

				if (entry->nsample &&
					(100 * local_deviation > (100 + historymaxerror)
					 || (100 / local_deviation > (100 + historymaxerror))))
				{
					entry->nerror++;

					/* More errors than measurements, we're most probably completely wrong, we flush out all the entries */
					if (entry->nerror >= entry->nsample)
					{
						char archname[32];
						starpu_perfmodel_get_arch_name(arch, archname, sizeof(archname), impl);
						_STARPU_DISP("Too big deviation for model %s on %s: %f vs average %f, %u such errors against %u samples (%+f%%), flushing the performance model. Use the STARPU_HISTORY_MAX_ERROR environment variable to control the threshold (currently %d%%)\n", model->symbol, archname, measured, entry->mean, entry->nerror, entry->nsample, measured * 100. / entry->mean - 100, historymaxerror);
						entry->sum = 0.0;
						entry->sum2 = 0.0;
						entry->nsample = 0;
						entry->nerror = 0;
						entry->mean = 0.0;
						entry->deviation = 0.0;
					}
				}
				else
				{
					entry->sum += measured;
					entry->sum2 += measured*measured;
					entry->nsample++;
					unsigned n = entry->nsample;
					entry->mean = entry->sum / n;
					entry->deviation = sqrt((entry->sum2 - (entry->sum*entry->sum)/n)/n);
				}

				if (j->task->flops != 0.)
				{
					if (entry->flops == 0.)
						entry->flops = j->task->flops;
					else if (entry->flops != j->task->flops)
						/* Incoherent flops! forget about trying to record flops */
						entry->flops = NAN;
				}
			}
			STARPU_ASSERT(entry);
		}

		if (model->type == STARPU_REGRESSION_BASED || model->type == STARPU_NL_REGRESSION_BASED)
		{
			struct starpu_perfmodel_regression_model *reg_model;
			reg_model = &per_arch_model->regression;

			/* update the regression model */
			size_t job_size = _starpu_job_get_data_size(model, arch, impl, j);
			double logy, logx;
			logx = log((double)job_size);
			logy = log(measured);

			reg_model->sumlnx += logx;
			reg_model->sumlnx2 += logx*logx;
			reg_model->sumlny += logy;
			reg_model->sumlnxlny += logx*logy;

			if (reg_model->minx == 0 || job_size < reg_model->minx)
				reg_model->minx = job_size;
			if (reg_model->maxx == 0 || job_size > reg_model->maxx)
				reg_model->maxx = job_size;

			reg_model->nsample++;

			if (VALID_REGRESSION(reg_model))
			{
				unsigned n = reg_model->nsample;

				double num = (n*reg_model->sumlnxlny - reg_model->sumlnx*reg_model->sumlny);
				double denom = (n*reg_model->sumlnx2 - reg_model->sumlnx*reg_model->sumlnx);

				reg_model->beta = num/denom;
				reg_model->alpha = exp((reg_model->sumlny - reg_model->beta*reg_model->sumlnx)/n);
				reg_model->valid = 1;
			}
		}

#ifdef STARPU_MODEL_DEBUG
		struct starpu_task *task = j->task;
		starpu_perfmodel_debugfilepath(model, arch_combs[comb], per_arch_model->debug_path, 256, impl);
		FILE *f = fopen(per_arch_model->debug_path, "a+");
		if (f == NULL)
		{
			_STARPU_DISP("Error <%s> when opening file <%s>\n", strerror(errno), per_arch_model->debug_path);
			STARPU_ABORT();
		}
		_starpu_fwrlock(f);

		if (!j->footprint_is_computed)
			(void) _starpu_compute_buffers_footprint(model, arch, impl, j);

		STARPU_ASSERT(j->footprint_is_computed);

		fprintf(f, "0x%x\t%lu\t%f\t%f\t%f\t%d\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(model, arch, impl, j), measured, task->predicted, task->predicted_transfer, cpuid);
		unsigned i;
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);

		for (i = 0; i < nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);

			STARPU_ASSERT(handle->ops);
			STARPU_ASSERT(handle->ops->display);
			handle->ops->display(handle, f);
		}
		fprintf(f, "\n");
		_starpu_fwrunlock(f);
		fclose(f);
#endif
		STARPU_PTHREAD_RWLOCK_UNLOCK(&model->state->model_rwlock);
	}
}
void starpu_perfmodel_update_history(struct starpu_perfmodel *model, struct starpu_task *task, struct starpu_perfmodel_arch * arch, unsigned cpuid, unsigned nimpl, double measured)
{
	struct _starpu_job *job = _starpu_get_job_associated_to_task(task);

#ifdef STARPU_SIMGRID
	STARPU_ASSERT_MSG(0, "We are not supposed to update history when simulating execution");
#endif

	_starpu_init_and_load_perfmodel(model);

	/* Record measurement */
	_starpu_update_perfmodel_history(job, model, arch, cpuid, measured, nimpl);
	/* and save perfmodel on termination */
	_starpu_set_calibrate_flag(1);
}
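/* Illustrative explicit feedback (a sketch; assumes the usual worker helper
 * starpu_worker_get_perf_archtype() to retrieve the arch of worker 0): push
 * a 12.5µs measurement for implementation 0 of a task:
 *
 *	struct starpu_perfmodel_arch *arch = starpu_worker_get_perf_archtype(0, STARPU_NMAX_SCHED_CTXS);
 *	starpu_perfmodel_update_history(&model, task, arch, 0, 0, 12.5);
 */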
int starpu_perfmodel_list_combs(FILE *output, struct starpu_perfmodel *model)
{
	int comb;

	fprintf(output, "Model <%s>\n", model->symbol);
	for(comb = 0; comb < model->state->ncombs; comb++)
	{
		struct starpu_perfmodel_arch *arch;
		int device;

		arch = _starpu_arch_comb_get(model->state->combs[comb]);
		fprintf(output, "\tComb %d: %d device%s\n", model->state->combs[comb], arch->ndevices, arch->ndevices>1?"s":"");
		for(device=0 ; device<arch->ndevices ; device++)
		{
			char *name = starpu_perfmodel_get_archtype_name(arch->devices[device].type);
			fprintf(output, "\t\tDevice %d: type: %s - devid: %d - ncores: %d\n", device, name, arch->devices[device].devid, arch->devices[device].ncores);
		}
	}
	return 0;
}

struct starpu_perfmodel_per_arch *starpu_perfmodel_get_model_per_arch(struct starpu_perfmodel *model, struct starpu_perfmodel_arch *arch, unsigned impl)
{
	int comb = starpu_perfmodel_arch_comb_get(arch->ndevices, arch->devices);
	if(comb == -1)
		return NULL;

	return &model->state->per_arch[comb][impl];
}

struct starpu_perfmodel_per_arch *_starpu_perfmodel_get_model_per_devices(struct starpu_perfmodel *model, int impl, va_list varg_list)
{
	struct starpu_perfmodel_arch arch;
	va_list varg_list_copy;
	int i, arg_type;
	int is_cpu_set = 0;

	// We first count the number of devices
	arch.ndevices = 0;
	va_copy(varg_list_copy, varg_list);
	while ((arg_type = va_arg(varg_list_copy, int)) != -1)
	{
		int devid = va_arg(varg_list_copy, int);
		int ncores = va_arg(varg_list_copy, int);

		arch.ndevices++;
		if (arg_type == STARPU_CPU_WORKER)
		{
			STARPU_ASSERT_MSG(is_cpu_set == 0, "STARPU_CPU_WORKER can only be specified once\n");
			STARPU_ASSERT_MSG(devid == 0, "STARPU_CPU_WORKER must be followed by a value 0 for the device id");
			is_cpu_set = 1;
		}
		else
		{
			STARPU_ASSERT_MSG(ncores == 1, "%s must be followed by a value 1 for ncores", starpu_worker_get_type_as_string(arg_type));
		}
	}
	va_end(varg_list_copy);

	// We set the devices
	arch.devices = (struct starpu_perfmodel_device*)malloc(arch.ndevices * sizeof(struct starpu_perfmodel_device));
	va_copy(varg_list_copy, varg_list);
	for(i=0 ; i<arch.ndevices ; i++)
	{
		arch.devices[i].type = va_arg(varg_list_copy, int);
		arch.devices[i].devid = va_arg(varg_list_copy, int);
		arch.devices[i].ncores = va_arg(varg_list_copy, int);
	}
	va_end(varg_list_copy);

	// Get the combination for this set of devices
	int comb = starpu_perfmodel_arch_comb_get(arch.ndevices, arch.devices);
	if (comb == -1)
		comb = starpu_perfmodel_arch_comb_add(arch.ndevices, arch.devices);
	free(arch.devices);

	// Realloc if necessary
	if (comb >= model->state->ncombs_set)
		_starpu_perfmodel_realloc(model, comb+1);

	// Get the per_arch object
	if (model->state->per_arch[comb] == NULL)
	{
		_starpu_perfmodel_malloc_per_arch(model, comb, impl+1);
		_starpu_perfmodel_malloc_per_arch_is_set(model, comb, impl+1);
		model->state->nimpls[comb] = 0;
	}
	model->state->per_arch_is_set[comb][impl] = 1;
	model->state->nimpls[comb]++;

	return &model->state->per_arch[comb][impl];
}

struct starpu_perfmodel_per_arch *starpu_perfmodel_get_model_per_devices(struct starpu_perfmodel *model, int impl, ...)
{
	va_list varg_list;
	struct starpu_perfmodel_per_arch *per_arch;

	va_start(varg_list, impl);
	per_arch = _starpu_perfmodel_get_model_per_devices(model, impl, varg_list);
	va_end(varg_list);

	return per_arch;
}

int starpu_perfmodel_set_per_devices_cost_function(struct starpu_perfmodel *model, int impl, starpu_perfmodel_per_arch_cost_function func, ...)
{
	va_list varg_list;
	struct starpu_perfmodel_per_arch *per_arch;

	va_start(varg_list, func);
	per_arch = _starpu_perfmodel_get_model_per_devices(model, impl, varg_list);
	per_arch->cost_function = func;
	va_end(varg_list);

	return 0;
}
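/* Illustrative registration of a hand-written cost function (a sketch; the
 * exact starpu_perfmodel_per_arch_cost_function signature is assumed here).
 * The variadic device list is made of (type, devid, ncores) triplets and is
 * terminated by -1, as consumed by _starpu_perfmodel_get_model_per_devices():
 *
 *	static double cpu_cost(struct starpu_task *task, struct starpu_perfmodel_arch *arch, unsigned nimpl)
 *	{
 *		return 10.0; // constant 10µs estimation
 *	}
 *
 *	starpu_perfmodel_set_per_devices_cost_function(&model, 0, cpu_cost, STARPU_CPU_WORKER, 0, 1, -1);
 */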
int starpu_perfmodel_set_per_devices_size_base(struct starpu_perfmodel *model, int impl, starpu_perfmodel_per_arch_size_base func, ...)
{
	va_list varg_list;
	struct starpu_perfmodel_per_arch *per_arch;

	va_start(varg_list, func);
	per_arch = _starpu_perfmodel_get_model_per_devices(model, impl, varg_list);
	per_arch->size_base = func;
	va_end(varg_list);

	return 0;
}