perfmodel_bus.c
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2011-2014,2016,2017 Inria
  4. * Copyright (C) 2009-2019 Université de Bordeaux
  5. * Copyright (C) 2010-2017,2019 CNRS
  6. * Copyright (C) 2013 Corentin Salingue
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #ifdef STARPU_USE_CUDA
  20. #ifndef _GNU_SOURCE
  21. #define _GNU_SOURCE 1
  22. #endif
  23. #include <sched.h>
  24. #endif
  25. #include <stdlib.h>
  26. #include <math.h>
  27. #include <starpu.h>
  28. #include <starpu_cuda.h>
  29. #include <starpu_opencl.h>
  30. #include <common/config.h>
  31. #ifdef HAVE_UNISTD_H
  32. #include <unistd.h>
  33. #endif
  34. #include <core/workers.h>
  35. #include <core/perfmodel/perfmodel.h>
  36. #include <core/simgrid.h>
  37. #include <core/topology.h>
  38. #include <common/utils.h>
  39. #include <drivers/mpi/driver_mpi_common.h>
  40. #ifdef STARPU_USE_OPENCL
  41. #include <starpu_opencl.h>
  42. #endif
  43. #ifdef STARPU_HAVE_WINDOWS
  44. #include <windows.h>
  45. #endif
  46. #ifdef STARPU_HAVE_HWLOC
  47. #include <hwloc.h>
  48. #ifndef HWLOC_API_VERSION
  49. #define HWLOC_OBJ_PU HWLOC_OBJ_PROC
  50. #endif
  51. #if HWLOC_API_VERSION < 0x00010b00
  52. #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
  53. #endif
  54. #endif
  55. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  56. #include <hwloc/cuda.h>
  57. #endif
  58. #define SIZE (32*1024*1024*sizeof(char))
  59. #define NITER 32
  60. #define PATH_LENGTH 256
  61. #ifndef STARPU_SIMGRID
  62. static void _starpu_bus_force_sampling(void);
  63. #endif
  64. /* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
  65. struct dev_timing
  66. {
  67. int numa_id;
  68. double timing_htod;
  69. double latency_htod;
  70. double timing_dtoh;
  71. double latency_dtoh;
  72. };
  73. /* TODO: measure latency */
  74. static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
  75. static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
  76. static unsigned was_benchmarked = 0;
  77. #ifndef STARPU_SIMGRID
  78. static unsigned ncpus = 0;
  79. #endif
  80. static unsigned nnumas = 0;
  81. static unsigned ncuda = 0;
  82. static unsigned nopencl = 0;
  83. #ifndef STARPU_SIMGRID
  84. static unsigned nmic = 0;
  85. static unsigned nmpi_ms = 0;
  86. /* Benchmarking the performance of the bus */
  87. static double numa_latency[STARPU_MAXNUMANODES][STARPU_MAXNUMANODES];
  88. static double numa_timing[STARPU_MAXNUMANODES][STARPU_MAXNUMANODES];
  89. static uint64_t cuda_size[STARPU_MAXCUDADEVS];
  90. #endif
  91. #ifdef STARPU_USE_CUDA
  92. /* preference order of NUMA nodes (logical indexes) */
  93. static unsigned cuda_affinity_matrix[STARPU_MAXCUDADEVS][STARPU_MAXNUMANODES];
  94. #ifndef STARPU_SIMGRID
  95. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  96. static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
  97. static double cudadev_latency_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
  98. #endif
  99. #endif
  100. static struct dev_timing cudadev_timing_per_numa[STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES];
  101. static char cudadev_direct[STARPU_MAXNODES][STARPU_MAXNODES];
  102. #endif
  103. #ifndef STARPU_SIMGRID
  104. static uint64_t opencl_size[STARPU_MAXOPENCLDEVS];
  105. #endif
  106. #ifdef STARPU_USE_FPGA
  107. /* preference order of cores (logical indexes) */
  108. static int fpga_affinity_matrix[STARPU_MAXFPGADEVS][STARPU_MAXCPUS];
  109. static double fpgadev_timing_htod[STARPU_MAXNODES] = {0.0};
  110. static double fpgadev_latency_htod[STARPU_MAXNODES] = {0.0};
  111. static double fpgadev_timing_dtoh[STARPU_MAXNODES] = {0.0};
  112. static double fpgadev_latency_dtoh[STARPU_MAXNODES] = {0.0};
  113. static struct dev_timing fpgadev_timing_per_cpu[STARPU_MAXNODES*STARPU_MAXCPUS];
  114. #endif
  115. #ifdef STARPU_USE_OPENCL
  116. /* preference order of NUMA nodes (logical indexes) */
  117. static unsigned opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][STARPU_MAXNUMANODES];
  118. static struct dev_timing opencldev_timing_per_numa[STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES];
  119. #endif
  120. #ifdef STARPU_USE_MIC
  121. static double mic_time_host_to_device[STARPU_MAXNODES] = {0.0};
  122. static double mic_time_device_to_host[STARPU_MAXNODES] = {0.0};
  123. #endif /* STARPU_USE_MIC */
  124. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  125. static double mpi_time_device_to_device[STARPU_MAXMPIDEVS][STARPU_MAXMPIDEVS] = {{0.0}};
  126. static double mpi_latency_device_to_device[STARPU_MAXMPIDEVS][STARPU_MAXMPIDEVS] = {{0.0}};
  127. #endif
  128. #ifdef STARPU_HAVE_HWLOC
  129. static hwloc_topology_t hwtopology;
  130. hwloc_topology_t _starpu_perfmodel_get_hwtopology()
  131. {
  132. return hwtopology;
  133. }
  134. #endif
  135. #if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && !defined(STARPU_SIMGRID)
  136. #ifdef STARPU_USE_CUDA
  137. static void measure_bandwidth_between_host_and_dev_on_numa_with_cuda(int dev, int numa, int cpu, struct dev_timing *dev_timing_per_cpu)
  138. {
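/* Measurement protocol: bind the calling thread to `cpu`, allocate one buffer
 * bound to NUMA node `numa` and one on CUDA device `dev`, then time NITER
 * full-size copies (bandwidth, stored as µs per byte) and NITER one-byte
 * copies (latency, stored as µs) in each direction. */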
  139. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  140. size_t size = SIZE;
  141. /* Initialize CUDA context on the device */
  142. /* We do not need to enable OpenGL interoperability at this point,
  143. * since we cleanly shut down CUDA before returning. */
  144. cudaSetDevice(dev);
  145. /* hack to keep third-party libs from rebinding the thread */
  146. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  147. /* hack to force the CUDA context initialization */
  148. cudaFree(0);
  149. /* hack to keep third-party libs from rebinding the thread */
  150. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  151. /* Get the maximum size which can be allocated on the device */
  152. struct cudaDeviceProp prop;
  153. cudaError_t cures;
  154. cures = cudaGetDeviceProperties(&prop, dev);
  155. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  156. cuda_size[dev] = prop.totalGlobalMem;
  157. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  158. /* Allocate a buffer on the device */
  159. unsigned char *d_buffer;
  160. cures = cudaMalloc((void **)&d_buffer, size);
  161. STARPU_ASSERT(cures == cudaSuccess);
  162. /* hack to keep third-party libs from rebinding the thread */
  163. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  164. /* Allocate a buffer on the host */
  165. unsigned char *h_buffer;
  166. #if defined(STARPU_HAVE_HWLOC)
  167. struct _starpu_machine_config *config = _starpu_get_machine_config();
  168. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  169. if (nnuma_nodes > 1)
  170. {
  171. /* NUMA mode activated */
  172. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa);
  173. #if HWLOC_API_VERSION >= 0x00020000
  174. h_buffer = hwloc_alloc_membind(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
  175. #else
  176. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, 0);
  177. #endif
  178. }
  179. else
  180. #endif
  181. {
  182. /* we use STARPU_MAIN_RAM */
  183. _STARPU_MALLOC(h_buffer, size);
  184. cudaHostRegister((void *)h_buffer, size, 0);
  185. }
  186. STARPU_ASSERT(cures == cudaSuccess);
  187. /* hack to keep third-party libs from rebinding the thread */
  188. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  189. /* Fill them */
  190. memset(h_buffer, 0, size);
  191. cudaMemset(d_buffer, 0, size);
  192. cudaDeviceSynchronize();
  193. /* hack to keep third-party libs from rebinding the thread */
  194. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
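/* Results for each (device, NUMA node) pair are stored in a flat array
 * indexed by dev*STARPU_MAXNUMANODES + numa. */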
  195. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa;
  196. unsigned iter;
  197. double timing;
  198. double start;
  199. double end;
  200. /* Measure upload bandwidth */
  201. start = starpu_timing_now();
  202. for (iter = 0; iter < NITER; iter++)
  203. {
  204. cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
  205. cudaDeviceSynchronize();
  206. }
  207. end = starpu_timing_now();
  208. timing = end - start;
  209. dev_timing_per_cpu[timing_numa_index].timing_htod = timing/NITER/size;
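/* timing/NITER/size is the average cost per byte in µs, i.e. the inverse of
 * the host-to-device bandwidth: smaller means faster. */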
  210. /* Measure download bandwidth */
  211. start = starpu_timing_now();
  212. for (iter = 0; iter < NITER; iter++)
  213. {
  214. cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
  215. cudaDeviceSynchronize();
  216. }
  217. end = starpu_timing_now();
  218. timing = end - start;
  219. dev_timing_per_cpu[timing_numa_index].timing_dtoh = timing/NITER/size;
  220. /* Measure upload latency */
  221. start = starpu_timing_now();
  222. for (iter = 0; iter < NITER; iter++)
  223. {
  224. cudaMemcpy(d_buffer, h_buffer, 1, cudaMemcpyHostToDevice);
  225. cudaDeviceSynchronize();
  226. }
  227. end = starpu_timing_now();
  228. timing = end - start;
  229. dev_timing_per_cpu[timing_numa_index].latency_htod = timing/NITER;
  230. /* Measure download latency */
  231. start = starpu_timing_now();
  232. for (iter = 0; iter < NITER; iter++)
  233. {
  234. cudaMemcpy(h_buffer, d_buffer, 1, cudaMemcpyDeviceToHost);
  235. cudaDeviceSynchronize();
  236. }
  237. end = starpu_timing_now();
  238. timing = end - start;
  239. dev_timing_per_cpu[timing_numa_index].latency_dtoh = timing/NITER;
  240. /* Free buffers */
  241. cudaHostUnregister(h_buffer);
  242. #if defined(STARPU_HAVE_HWLOC)
  243. if (nnuma_nodes > 1)
  244. {
  245. /* NUMA mode activated */
  246. hwloc_free(hwtopology, h_buffer, size);
  247. }
  248. else
  249. #endif
  250. {
  251. free(h_buffer);
  252. }
  253. cudaFree(d_buffer);
  254. cudaThreadExit();
  255. }
  256. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  257. static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
  258. {
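/* Device-to-device protocol: optionally enable peer access in both directions
 * (GPU-Direct), then time NITER cudaMemcpyPeer() transfers of the full buffer
 * (bandwidth) and of a single byte (latency) from src to dst. */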
  259. size_t size = SIZE;
  260. int can;
  261. /* Get the maximum size which can be allocated on the device */
  262. struct cudaDeviceProp prop;
  263. cudaError_t cures;
  264. cures = cudaGetDeviceProperties(&prop, src);
  265. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  266. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  267. cures = cudaGetDeviceProperties(&prop, dst);
  268. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  269. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  270. /* Initialize CUDA context on the source */
  271. /* We do not need to enable OpenGL interoperability at this point,
  272. * since we cleanly shutdown CUDA before returning. */
  273. cudaSetDevice(src);
  274. if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
  275. {
  276. cures = cudaDeviceCanAccessPeer(&can, src, dst);
  277. if (!cures && can)
  278. {
  279. cures = cudaDeviceEnablePeerAccess(dst, 0);
  280. if (!cures)
  281. {
  282. _STARPU_DISP("GPU-Direct %d -> %d\n", dst, src);
  283. cudadev_direct[src][dst] = 1;
  284. }
  285. }
  286. }
  287. /* Allocate a buffer on the device */
  288. unsigned char *s_buffer;
  289. cures = cudaMalloc((void **)&s_buffer, size);
  290. STARPU_ASSERT(cures == cudaSuccess);
  291. cudaMemset(s_buffer, 0, size);
  292. cudaDeviceSynchronize();
  293. /* Initialize CUDA context on the destination */
  294. /* We do not need to enable OpenGL interoperability at this point,
  295. * since we cleanly shutdown CUDA before returning. */
  296. cudaSetDevice(dst);
  297. if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
  298. {
  299. cures = cudaDeviceCanAccessPeer(&can, dst, src);
  300. if (!cures && can)
  301. {
  302. cures = cudaDeviceEnablePeerAccess(src, 0);
  303. if (!cures)
  304. {
  305. _STARPU_DISP("GPU-Direct %d -> %d\n", src, dst);
  306. cudadev_direct[dst][src] = 1;
  307. }
  308. }
  309. }
  310. /* Allocate a buffer on the device */
  311. unsigned char *d_buffer;
  312. cures = cudaMalloc((void **)&d_buffer, size);
  313. STARPU_ASSERT(cures == cudaSuccess);
  314. cudaMemset(d_buffer, 0, size);
  315. cudaDeviceSynchronize();
  316. unsigned iter;
  317. double timing;
  318. double start;
  319. double end;
  320. /* Measure upload bandwidth */
  321. start = starpu_timing_now();
  322. for (iter = 0; iter < NITER; iter++)
  323. {
  324. cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
  325. cudaDeviceSynchronize();
  326. }
  327. end = starpu_timing_now();
  328. timing = end - start;
  329. cudadev_timing_dtod[src][dst] = timing/NITER/size;
  330. /* Measure upload latency */
  331. start = starpu_timing_now();
  332. for (iter = 0; iter < NITER; iter++)
  333. {
  334. cudaMemcpyPeer(d_buffer, dst, s_buffer, src, 1);
  335. cudaDeviceSynchronize();
  336. }
  337. end = starpu_timing_now();
  338. timing = end - start;
  339. cudadev_latency_dtod[src][dst] = timing/NITER;
  340. /* Free buffers */
  341. cudaFree(d_buffer);
  342. cudaSetDevice(src);
  343. cudaFree(s_buffer);
  344. cudaThreadExit();
  345. }
  346. #endif
  347. #endif
  348. #ifdef STARPU_USE_OPENCL
  349. static void measure_bandwidth_between_host_and_dev_on_numa_with_opencl(int dev, int numa, int cpu, struct dev_timing *dev_timing_per_cpu)
  350. {
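/* Same protocol as the CUDA benchmark, but through an OpenCL buffer: blocking
 * clEnqueueWriteBuffer()/clEnqueueReadBuffer() calls followed by clFinish()
 * ensure each transfer has completed before the timer is read. */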
  351. cl_context context;
  352. cl_command_queue queue;
  353. cl_int err=0;
  354. size_t size = SIZE;
  355. int not_initialized;
  356. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  357. /* Is the context already initialised? */
  358. starpu_opencl_get_context(dev, &context);
  359. not_initialized = (context == NULL);
  360. if (not_initialized == 1)
  361. _starpu_opencl_init_context(dev);
  362. /* Get context and queue */
  363. starpu_opencl_get_context(dev, &context);
  364. starpu_opencl_get_queue(dev, &queue);
  365. /* Get the maximum size which can be allocated on the device */
  366. cl_device_id device;
  367. cl_ulong maxMemAllocSize, totalGlobalMem;
  368. starpu_opencl_get_device(dev, &device);
  369. err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
  370. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  371. if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;
  372. err = clGetDeviceInfo(device, CL_DEVICE_GLOBAL_MEM_SIZE , sizeof(totalGlobalMem), &totalGlobalMem, NULL);
  373. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  374. opencl_size[dev] = totalGlobalMem;
  375. if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
  376. {
  377. /* Let's not use too much RAM when running OpenCL on a CPU: it
  378. * would make the OS swap like crazy. */
  379. size /= 2;
  380. }
  381. /* hack to keep third-party libs from rebinding the thread */
  382. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  383. /* Allocate a buffer on the device */
  384. cl_mem d_buffer;
  385. d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
  386. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  387. /* hack to keep third-party libs from rebinding the thread */
  388. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  389. /* Allocate a buffer on the host */
  390. unsigned char *h_buffer;
  391. #if defined(STARPU_HAVE_HWLOC)
  392. struct _starpu_machine_config *config = _starpu_get_machine_config();
  393. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  394. if (nnuma_nodes > 1)
  395. {
  396. /* NUMA mode activated */
  397. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa);
  398. #if HWLOC_API_VERSION >= 0x00020000
  399. h_buffer = hwloc_alloc_membind(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
  400. #else
  401. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, 0);
  402. #endif
  403. }
  404. else
  405. #endif
  406. {
  407. /* we use STARPU_MAIN_RAM */
  408. _STARPU_MALLOC(h_buffer, size);
  409. }
  410. /* hack to keep third-party libs from rebinding the thread */
  411. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  412. /* Fill them */
  413. memset(h_buffer, 0, size);
  414. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  415. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  416. clFinish(queue);
  417. /* hack to keep third-party libs from rebinding the thread */
  418. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  419. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa;
  420. unsigned iter;
  421. double timing;
  422. double start;
  423. double end;
  424. /* Measure upload bandwidth */
  425. start = starpu_timing_now();
  426. for (iter = 0; iter < NITER; iter++)
  427. {
  428. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  429. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  430. clFinish(queue);
  431. }
  432. end = starpu_timing_now();
  433. timing = end - start;
  434. dev_timing_per_cpu[timing_numa_index].timing_htod = timing/NITER/size;
  435. /* Measure download bandwidth */
  436. start = starpu_timing_now();
  437. for (iter = 0; iter < NITER; iter++)
  438. {
  439. err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  440. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  441. clFinish(queue);
  442. }
  443. end = starpu_timing_now();
  444. timing = end - start;
  445. dev_timing_per_cpu[timing_numa_index].timing_dtoh = timing/NITER/size;
  446. /* Measure upload latency */
  447. start = starpu_timing_now();
  448. for (iter = 0; iter < NITER; iter++)
  449. {
  450. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, 1, h_buffer, 0, NULL, NULL);
  451. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  452. clFinish(queue);
  453. }
  454. end = starpu_timing_now();
  455. timing = end - start;
  456. dev_timing_per_cpu[timing_numa_index].latency_htod = timing/NITER;
  457. /* Measure download latency */
  458. start = starpu_timing_now();
  459. for (iter = 0; iter < NITER; iter++)
  460. {
  461. err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, 1, h_buffer, 0, NULL, NULL);
  462. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  463. clFinish(queue);
  464. }
  465. end = starpu_timing_now();
  466. timing = end - start;
  467. dev_timing_per_cpu[timing_numa_index].latency_dtoh = timing/NITER;
  468. /* Free buffers */
  469. err = clReleaseMemObject(d_buffer);
  470. if (STARPU_UNLIKELY(err != CL_SUCCESS))
  471. STARPU_OPENCL_REPORT_ERROR(err);
  472. #if defined(STARPU_HAVE_HWLOC)
  473. if (nnuma_nodes > 1)
  474. {
  475. /* NUMA mode activated */
  476. hwloc_free(hwtopology, h_buffer, size);
  477. }
  478. else
  479. #endif
  480. {
  481. free(h_buffer);
  482. }
  483. /* Uninitialize the OpenCL context on the device */
  484. if (not_initialized == 1)
  485. _starpu_opencl_deinit_context(dev);
  486. }
  487. #endif
  488. /* NB: we want to sort the bandwidths in DECREASING order */
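/* The comparison key is the sum of squares of the dtoh and htod slownesses:
 * the NUMA node with the smallest combined slowness (i.e. the highest
 * combined bandwidth) sorts first. */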
  489. static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
  490. {
  491. const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
  492. const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;
  493. double left_dtoh = left->timing_dtoh;
  494. double left_htod = left->timing_htod;
  495. double right_dtoh = right->timing_dtoh;
  496. double right_htod = right->timing_htod;
  497. double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
  498. double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;
  499. /* it's for a decreasing sorting */
  500. return (timing_sum2_left > timing_sum2_right);
  501. }
  502. #ifdef STARPU_HAVE_HWLOC
  503. static int find_cpu_from_numa_node(hwloc_obj_t obj)
  504. {
  505. STARPU_ASSERT(obj);
  506. hwloc_obj_t current = obj;
  507. while (current->type != HWLOC_OBJ_PU)
  508. {
  509. current = current->first_child;
  510. /* If we don't find a "PU" obj before reaching a leaf, perhaps we are
  511. * just not allowed to use it. */
  512. if (!current)
  513. return -1;
  514. }
  515. STARPU_ASSERT(current->type == HWLOC_OBJ_PU);
  516. return current->logical_index;
  517. }
  518. #endif
  519. static void measure_bandwidth_between_numa_nodes_and_dev(int dev, struct dev_timing *dev_timing_per_numanode, char *type)
  520. {
  521. /* We measure the bandwidth between each GPU and each NUMA node */
  522. struct _starpu_machine_config * config = _starpu_get_machine_config();
  523. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  524. unsigned numa_id;
  525. for (numa_id = 0; numa_id < nnuma_nodes; numa_id++)
  526. {
  527. /* Store results by starpu id */
  528. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa_id;
  529. /* Store STARPU_memnode for later */
  530. dev_timing_per_numanode[timing_numa_index].numa_id = numa_id;
  531. /* Choose one CPU connected to this NUMA node */
  532. int cpu_id = 0;
  533. #ifdef STARPU_HAVE_HWLOC
  534. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_id);
  535. if (obj)
  536. {
  537. #if HWLOC_API_VERSION >= 0x00020000
  538. /* Since hwloc 2.0, NUMA node objects do not contain CPUs; they are contained in a group which contains the CPUs. */
  539. obj = obj->parent;
  540. #endif
  541. cpu_id = find_cpu_from_numa_node(obj);
  542. }
  543. else
  544. /* No such NUMA node, probably hwloc 1.x with no NUMA
  545. * node, just take one CPU from the whole system */
  546. cpu_id = find_cpu_from_numa_node(hwloc_get_root_obj(hwtopology));
  547. #endif
  548. if (cpu_id < 0)
  549. continue;
  550. #ifdef STARPU_USE_CUDA
  551. if (strncmp(type, "CUDA", 4) == 0)
  552. measure_bandwidth_between_host_and_dev_on_numa_with_cuda(dev, numa_id, cpu_id, dev_timing_per_numanode);
  553. #endif
  554. #ifdef STARPU_USE_OPENCL
  555. if (strncmp(type, "OpenCL", 6) == 0)
  556. measure_bandwidth_between_host_and_dev_on_numa_with_opencl(dev, numa_id, cpu_id, dev_timing_per_numanode);
  557. #endif
  558. }
  559. }
  560. static void measure_bandwidth_between_host_and_dev(int dev, struct dev_timing *dev_timing_per_numa, char *type)
  561. {
  562. measure_bandwidth_between_numa_nodes_and_dev(dev, dev_timing_per_numa, type);
  563. #ifdef STARPU_VERBOSE
  564. struct _starpu_machine_config * config = _starpu_get_machine_config();
  565. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  566. unsigned numa_id;
  567. for (numa_id = 0; numa_id < nnuma_nodes; numa_id++)
  568. {
  569. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa_id;
  570. double bandwidth_dtoh = dev_timing_per_numa[timing_numa_index].timing_dtoh;
  571. double bandwidth_htod = dev_timing_per_numa[timing_numa_index].timing_htod;
  572. double bandwidth_sum2 = bandwidth_dtoh*bandwidth_dtoh + bandwidth_htod*bandwidth_htod;
  573. _STARPU_DISP("(%10s) BANDWIDTH GPU %d NUMA %u - htod %f - dtoh %f - %f\n", type, dev, numa_id, bandwidth_htod, bandwidth_dtoh, sqrt(bandwidth_sum2));
  574. }
  575. #endif
  576. }
  577. #endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
  578. #if !defined(STARPU_SIMGRID)
  579. static void measure_bandwidth_latency_between_numa(int numa_src, int numa_dst)
  580. {
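/* NUMA-to-NUMA protocol: allocate one buffer bound to each of the two NUMA
 * nodes with hwloc, then time NITER memcpy() calls of SIZE bytes (bandwidth)
 * and of one byte (latency).  Without hwloc, or with a single NUMA node,
 * arbitrary default values are used instead. */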
  581. #if defined(STARPU_HAVE_HWLOC)
  582. if (nnumas > 1)
  583. {
  584. /* NUMA mode activated */
  585. double start, end, timing;
  586. unsigned iter;
  587. unsigned char *h_buffer;
  588. hwloc_obj_t obj_src = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_src);
  589. #if HWLOC_API_VERSION >= 0x00020000
  590. h_buffer = hwloc_alloc_membind(hwtopology, SIZE, obj_src->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
  591. #else
  592. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, SIZE, obj_src->nodeset, HWLOC_MEMBIND_BIND, 0);
  593. #endif
  594. unsigned char *d_buffer;
  595. hwloc_obj_t obj_dst = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_dst);
  596. #if HWLOC_API_VERSION >= 0x00020000
  597. d_buffer = hwloc_alloc_membind(hwtopology, SIZE, obj_dst->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
  598. #else
  599. d_buffer = hwloc_alloc_membind_nodeset(hwtopology, SIZE, obj_dst->nodeset, HWLOC_MEMBIND_BIND, 0);
  600. #endif
  601. memset(h_buffer, 0, SIZE);
  602. start = starpu_timing_now();
  603. for (iter = 0; iter < NITER; iter++)
  604. {
  605. memcpy(d_buffer, h_buffer, SIZE);
  606. }
  607. end = starpu_timing_now();
  608. timing = end - start;
  609. numa_timing[numa_src][numa_dst] = timing/NITER/SIZE;
  610. start = starpu_timing_now();
  611. for (iter = 0; iter < NITER; iter++)
  612. {
  613. memcpy(d_buffer, h_buffer, 1);
  614. }
  615. end = starpu_timing_now();
  616. timing = end - start;
  617. numa_latency[numa_src][numa_dst] = timing/NITER;
  618. hwloc_free(hwtopology, h_buffer, SIZE);
  619. hwloc_free(hwtopology, d_buffer, SIZE);
  620. }
  621. else
  622. #endif
  623. {
  624. /* Cannot make a real calibration */
  625. numa_timing[numa_src][numa_dst] = 0.01;
  626. numa_latency[numa_src][numa_dst] = 0;
  627. }
  628. }
  629. #endif
  630. static void benchmark_all_gpu_devices(void)
  631. {
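/* Calibration sequence: save the current CPU binding, measure NUMA<->NUMA
 * transfers, then host<->device (and, when supported, device<->device)
 * transfers for every CUDA and OpenCL device, and finally restore the
 * original binding.  Results are kept in the static timing arrays above. */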
  632. #ifdef STARPU_SIMGRID
  633. _STARPU_DISP("Can not measure bus in simgrid mode, please run starpu_calibrate_bus in non-simgrid mode to make sure the bus performance model was calibrated\n");
  634. STARPU_ABORT();
  635. #else /* !SIMGRID */
  636. unsigned i, j;
  637. _STARPU_DEBUG("Benchmarking the speed of the bus\n");
  638. #ifdef STARPU_HAVE_HWLOC
  639. hwloc_topology_init(&hwtopology);
  640. _starpu_topology_filter(hwtopology);
  641. hwloc_topology_load(hwtopology);
  642. #endif
  643. #ifdef STARPU_HAVE_HWLOC
  644. hwloc_bitmap_t former_cpuset = hwloc_bitmap_alloc();
  645. hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
  646. #elif __linux__
  647. /* Save the current cpu binding */
  648. cpu_set_t former_process_affinity;
  649. int ret;
  650. ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
  651. if (ret)
  652. {
  653. perror("sched_getaffinity");
  654. STARPU_ABORT();
  655. }
  656. #else
  657. #warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
  658. #endif
  659. struct _starpu_machine_config *config = _starpu_get_machine_config();
  660. ncpus = _starpu_topology_get_nhwcpu(config);
  661. nnumas = _starpu_topology_get_nnumanodes(config);
  662. for (i = 0; i < nnumas; i++)
  663. for (j = 0; j < nnumas; j++)
  664. if (i != j)
  665. {
  666. _STARPU_DISP("NUMA %d -> %d...\n", i, j);
  667. measure_bandwidth_latency_between_numa(i, j);
  668. }
  669. #ifdef STARPU_USE_CUDA
  670. ncuda = _starpu_get_cuda_device_count();
  671. for (i = 0; i < ncuda; i++)
  672. {
  673. _STARPU_DISP("CUDA %u...\n", i);
  674. /* measure bandwidth between Host and Device i */
  675. measure_bandwidth_between_host_and_dev(i, cudadev_timing_per_numa, "CUDA");
  676. }
  677. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  678. for (i = 0; i < ncuda; i++)
  679. {
  680. for (j = 0; j < ncuda; j++)
  681. if (i != j)
  682. {
  683. _STARPU_DISP("CUDA %u -> %u...\n", i, j);
  684. /* measure bandwidth between Device i and Device j */
  685. measure_bandwidth_between_dev_and_dev_cuda(i, j);
  686. }
  687. }
  688. #endif
  689. #endif
  690. #ifdef STARPU_USE_OPENCL
  691. nopencl = _starpu_opencl_get_device_count();
  692. for (i = 0; i < nopencl; i++)
  693. {
  694. _STARPU_DISP("OpenCL %u...\n", i);
  695. /* measure bandwidth between Host and Device i */
  696. measure_bandwidth_between_host_and_dev(i, opencldev_timing_per_numa, "OpenCL");
  697. }
  698. #endif
  699. #ifdef STARPU_USE_MIC
  700. /* TODO: implement real calibration! For now we only set an arbitrary
  701. * value for each device at declaration time as a workaround, otherwise
  702. * we get problems with the heft scheduler */
  703. nmic = _starpu_mic_src_get_device_count();
  704. for (i = 0; i < STARPU_MAXNODES; i++)
  705. {
  706. mic_time_host_to_device[i] = 0.1;
  707. mic_time_device_to_host[i] = 0.1;
  708. }
  709. #endif /* STARPU_USE_MIC */
  710. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  711. _starpu_mpi_common_measure_bandwidth_latency(mpi_time_device_to_device, mpi_latency_device_to_device);
  712. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  713. #ifdef STARPU_HAVE_HWLOC
  714. hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
  715. hwloc_bitmap_free(former_cpuset);
  716. #elif __linux__
  717. /* Restore the former affinity */
  718. ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
  719. if (ret)
  720. {
  721. perror("sched_setaffinity");
  722. STARPU_ABORT();
  723. }
  724. #endif
  725. #ifdef STARPU_HAVE_HWLOC
  726. hwloc_topology_destroy(hwtopology);
  727. #endif
  728. _STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");
  729. was_benchmarked = 1;
  730. #endif /* !SIMGRID */
  731. }
  732. static void get_bus_path(const char *type, char *path, size_t maxlen)
  733. {
  734. char hostname[65];
  735. _starpu_gethostname(hostname, sizeof(hostname));
  736. snprintf(path, maxlen, "%s%s.%s", _starpu_get_perf_model_dir_bus(), hostname, type);
  737. }
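/* Bus sampling files are stored per host as <bus perf model dir><hostname>.<type>,
 * typically under $HOME/.starpu/sampling/bus/ unless the performance model
 * directory has been overridden (e.g. via STARPU_PERF_MODEL_DIR). */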
  738. /*
  739. * Affinity
  740. */
  741. static void get_affinity_path(char *path, size_t maxlen)
  742. {
  743. get_bus_path("affinity", path, maxlen);
  744. }
  745. #ifndef STARPU_SIMGRID
  746. static void load_bus_affinity_file_content(void)
  747. {
  748. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  749. FILE *f;
  750. int locked;
  751. char path[PATH_LENGTH];
  752. get_affinity_path(path, sizeof(path));
  753. _STARPU_DEBUG("loading affinities from %s\n", path);
  754. f = fopen(path, "r");
  755. STARPU_ASSERT(f);
  756. locked = _starpu_frdlock(f) == 0;
  757. unsigned gpu;
  758. #ifdef STARPU_USE_CUDA
  759. ncuda = _starpu_get_cuda_device_count();
  760. for (gpu = 0; gpu < ncuda; gpu++)
  761. {
  762. int ret;
  763. unsigned dummy;
  764. _starpu_drop_comments(f);
  765. ret = fscanf(f, "%u\t", &dummy);
  766. STARPU_ASSERT(ret == 1);
  767. STARPU_ASSERT(dummy == gpu);
  768. unsigned numa;
  769. for (numa = 0; numa < nnumas; numa++)
  770. {
  771. ret = fscanf(f, "%u\t", &cuda_affinity_matrix[gpu][numa]);
  772. STARPU_ASSERT(ret == 1);
  773. }
  774. ret = fscanf(f, "\n");
  775. STARPU_ASSERT(ret == 0);
  776. }
  777. #endif /* !STARPU_USE_CUDA */
  778. #ifdef STARPU_USE_OPENCL
  779. nopencl = _starpu_opencl_get_device_count();
  780. for (gpu = 0; gpu < nopencl; gpu++)
  781. {
  782. int ret;
  783. unsigned dummy;
  784. _starpu_drop_comments(f);
  785. ret = fscanf(f, "%u\t", &dummy);
  786. STARPU_ASSERT(ret == 1);
  787. STARPU_ASSERT(dummy == gpu);
  788. unsigned numa;
  789. for (numa = 0; numa < nnumas; numa++)
  790. {
  791. ret = fscanf(f, "%u\t", &opencl_affinity_matrix[gpu][numa]);
  792. STARPU_ASSERT(ret == 1);
  793. }
  794. ret = fscanf(f, "\n");
  795. STARPU_ASSERT(ret == 0);
  796. }
  797. #endif /* !STARPU_USE_OPENCL */
  798. if (locked)
  799. _starpu_frdunlock(f);
  800. fclose(f);
  801. #endif /* !(STARPU_USE_CUDA || STARPU_USE_OPENCL) */
  802. }
  803. #ifndef STARPU_SIMGRID
  804. static void write_bus_affinity_file_content(void)
  805. {
  806. STARPU_ASSERT(was_benchmarked);
  807. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  808. FILE *f;
  809. char path[PATH_LENGTH];
  810. int locked;
  811. get_affinity_path(path, sizeof(path));
  812. _STARPU_DEBUG("writing affinities to %s\n", path);
  813. f = fopen(path, "w+");
  814. if (!f)
  815. {
  816. perror("fopen write_buf_affinity_file_content");
  817. _STARPU_DISP("path '%s'\n", path);
  818. fflush(stderr);
  819. STARPU_ABORT();
  820. }
  821. locked = _starpu_frdlock(f) == 0;
  822. unsigned numa;
  823. unsigned gpu;
  824. fprintf(f, "# GPU\t");
  825. for (numa = 0; numa < nnumas; numa++)
  826. fprintf(f, "NUMA%u\t", numa);
  827. fprintf(f, "\n");
  828. #ifdef STARPU_USE_CUDA
  829. {
  830. /* Use another array to sort bandwidths */
  831. struct dev_timing cudadev_timing_per_numa_sorted[STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES];
  832. memcpy(cudadev_timing_per_numa_sorted, cudadev_timing_per_numa, STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES*sizeof(struct dev_timing));
  833. for (gpu = 0; gpu < ncuda; gpu++)
  834. {
  835. fprintf(f, "%u\t", gpu);
  836. qsort(&(cudadev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES]), nnumas, sizeof(struct dev_timing), compar_dev_timing);
  837. for (numa = 0; numa < nnumas; numa++)
  838. {
  839. fprintf(f, "%d\t", cudadev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES+numa].numa_id);
  840. }
  841. fprintf(f, "\n");
  842. }
  843. }
  844. #endif
  845. #ifdef STARPU_USE_OPENCL
  846. {
  847. /* Use another array to sort bandwidths */
  848. struct dev_timing opencldev_timing_per_numa_sorted[STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES];
  849. memcpy(opencldev_timing_per_numa_sorted, opencldev_timing_per_numa, STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES*sizeof(struct dev_timing));
  850. for (gpu = 0; gpu < nopencl; gpu++)
  851. {
  852. fprintf(f, "%u\t", gpu);
  853. qsort(&(opencldev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES]), nnumas, sizeof(struct dev_timing), compar_dev_timing);
  854. for (numa = 0; numa < nnumas; numa++)
  855. {
  856. fprintf(f, "%d\t", opencldev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES+numa].numa_id);
  857. }
  858. fprintf(f, "\n");
  859. }
  860. }
  861. #endif
  862. if (locked)
  863. _starpu_frdunlock(f);
  864. fclose(f);
  865. #endif
  866. }
  867. #endif /* STARPU_SIMGRID */
  868. static void generate_bus_affinity_file(void)
  869. {
  870. if (!was_benchmarked)
  871. benchmark_all_gpu_devices();
  872. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  873. /* Slaves don't write files */
  874. if (!_starpu_mpi_common_is_src_node())
  875. return;
  876. #endif
  877. write_bus_affinity_file_content();
  878. }
  879. static int check_bus_affinity_file(void)
  880. {
  881. int ret = 1;
  882. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  883. FILE *f;
  884. int locked;
  885. unsigned dummy;
  886. char path[PATH_LENGTH];
  887. get_affinity_path(path, sizeof(path));
  888. _STARPU_DEBUG("loading affinities from %s\n", path);
  889. f = fopen(path, "r");
  890. STARPU_ASSERT(f);
  891. locked = _starpu_frdlock(f) == 0;
  892. ret = fscanf(f, "# GPU\t");
  893. STARPU_ASSERT(ret == 0);
  894. ret = fscanf(f, "NUMA%u\t", &dummy);
  895. if (locked)
  896. _starpu_frdunlock(f);
  897. fclose(f);
  898. #endif
  899. return ret == 1;
  900. }
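/* check_bus_affinity_file() returns 1 only if the header of the existing file
 * advertises NUMA columns; any other layout makes the caller regenerate it. */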
  901. static void load_bus_affinity_file(void)
  902. {
  903. int exist, check = 1;
  904. char path[PATH_LENGTH];
  905. get_affinity_path(path, sizeof(path));
  906. /* access returns 0 if the file exists */
  907. exist = access(path, F_OK);
  908. if (exist == 0)
  909. /* check_bus_affinity_file returns 0 if the file is not usable */
  910. check = check_bus_affinity_file();
  911. if (check == 0)
  912. _STARPU_DISP("Affinity file is too old for this version of StarPU! Rebuilding it...\n");
  913. if (check == 0 || exist != 0)
  914. {
  915. /* File does not exist yet */
  916. generate_bus_affinity_file();
  917. }
  918. load_bus_affinity_file_content();
  919. }
  920. #ifdef STARPU_USE_CUDA
  921. unsigned *_starpu_get_cuda_affinity_vector(unsigned gpuid)
  922. {
  923. return cuda_affinity_matrix[gpuid];
  924. }
  925. #endif /* STARPU_USE_CUDA */
  926. #ifdef STARPU_USE_FPGA
  927. int *_starpu_get_fpga_affinity_vector(unsigned fpgaid)
  928. {
  929. return fpga_affinity_matrix[fpgaid];
  930. }
  931. #endif /* STARPU_USE_FPGA */
  932. #ifdef STARPU_USE_OPENCL
  933. unsigned *_starpu_get_opencl_affinity_vector(unsigned gpuid)
  934. {
  935. return opencl_affinity_matrix[gpuid];
  936. }
  937. #endif /* STARPU_USE_OPENCL */
  938. void starpu_bus_print_affinity(FILE *f)
  939. {
  940. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  941. unsigned numa;
  942. unsigned gpu;
  943. #endif
  944. fprintf(f, "# GPU\tNUMA in preference order (logical index)\n");
  945. #ifdef STARPU_USE_CUDA
  946. fprintf(f, "# CUDA\n");
  947. for(gpu = 0 ; gpu<ncuda ; gpu++)
  948. {
  949. fprintf(f, "%u\t", gpu);
  950. for (numa = 0; numa < nnumas; numa++)
  951. {
  952. fprintf(f, "%u\t", cuda_affinity_matrix[gpu][numa]);
  953. }
  954. fprintf(f, "\n");
  955. }
  956. #endif
  957. #ifdef STARPU_USE_OPENCL
  958. fprintf(f, "# OpenCL\n");
  959. for(gpu = 0 ; gpu<nopencl ; gpu++)
  960. {
  961. fprintf(f, "%u\t", gpu);
  962. for (numa = 0; numa < nnumas; numa++)
  963. {
  964. fprintf(f, "%u\t", opencl_affinity_matrix[gpu][numa]);
  965. }
  966. fprintf(f, "\n");
  967. }
  968. #endif
  969. }
  970. #endif /* STARPU_SIMGRID */
  971. /*
  972. * Latency
  973. */
  974. static void get_latency_path(char *path, size_t maxlen)
  975. {
  976. get_bus_path("latency", path, maxlen);
  977. }
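/* The latency file holds a tab-separated STARPU_MAXNODES x STARPU_MAXNODES
 * matrix of latencies in µs; missing entries are loaded as NAN, and the
 * loader returns 0 when the file does not match this configuration so that
 * it can be regenerated. */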
  978. static int load_bus_latency_file_content(void)
  979. {
  980. int n;
  981. unsigned src, dst;
  982. FILE *f;
  983. double latency;
  984. int locked;
  985. char path[PATH_LENGTH];
  986. get_latency_path(path, sizeof(path));
  987. _STARPU_DEBUG("loading latencies from %s\n", path);
  988. f = fopen(path, "r");
  989. if (!f)
  990. {
  991. perror("fopen load_bus_latency_file_content");
  992. _STARPU_DISP("path '%s'\n", path);
  993. fflush(stderr);
  994. STARPU_ABORT();
  995. }
  996. locked = _starpu_frdlock(f) == 0;
  997. for (src = 0; src < STARPU_MAXNODES; src++)
  998. {
  999. _starpu_drop_comments(f);
  1000. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1001. {
  1002. n = _starpu_read_double(f, "%le", &latency);
  1003. if (n != 1)
  1004. {
  1005. _STARPU_DISP("Error while reading latency file <%s>. Expected a number. Did you change the maximum number of GPUs at ./configure time?\n", path);
  1006. fclose(f);
  1007. return 0;
  1008. }
  1009. n = getc(f);
  1010. if (n == '\n')
  1011. break;
  1012. if (n != '\t')
  1013. {
  1014. _STARPU_DISP("bogus character '%c' (%d) in latency file %s\n", n, n, path);
  1015. fclose(f);
  1016. return 0;
  1017. }
  1018. latency_matrix[src][dst] = latency;
  1019. /* Look out for \t\n */
  1020. n = getc(f);
  1021. if (n == '\n')
  1022. break;
  1023. ungetc(n, f);
  1024. n = '\t';
  1025. }
  1026. /* No more values, take NAN */
  1027. for ( ; dst < STARPU_MAXNODES; dst++)
  1028. latency_matrix[src][dst] = NAN;
  1029. while (n == '\t')
  1030. {
  1031. /* Look out for \t\n */
  1032. n = getc(f);
  1033. if (n == '\n')
  1034. break;
  1035. ungetc(n, f);
  1036. n = _starpu_read_double(f, "%le", &latency);
  1037. if (n && !isnan(latency))
  1038. {
  1039. _STARPU_DISP("Too many nodes in latency file %s for this configuration (%d). Did you change the maximum number of GPUs at ./configure time?\n", path, STARPU_MAXNODES);
  1040. fclose(f);
  1041. return 0;
  1042. }
  1043. n = getc(f);
  1044. }
  1045. if (n != '\n')
  1046. {
  1047. _STARPU_DISP("Bogus character '%c' (%d) in latency file %s\n", n, n, path);
  1048. fclose(f);
  1049. return 0;
  1050. }
  1051. /* Look out for EOF */
  1052. n = getc(f);
  1053. if (n == EOF)
  1054. break;
  1055. ungetc(n, f);
  1056. }
  1057. if (locked)
  1058. _starpu_frdunlock(f);
  1059. fclose(f);
  1060. /* No more values, take NAN */
  1061. for ( ; src < STARPU_MAXNODES; src++)
  1062. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1063. latency_matrix[src][dst] = NAN;
  1064. return 1;
  1065. }
  1066. #if !defined(STARPU_SIMGRID) && (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL))
  1067. static double search_bus_best_latency(int src, char * type, int htod)
  1068. {
  1069. /* Search the best latency for this node */
  1070. double best = 0.0;
  1071. double actual = 0.0;
  1072. unsigned check = 0;
  1073. unsigned numa;
  1074. for (numa = 0; numa < nnumas; numa++)
  1075. {
  1076. #ifdef STARPU_USE_CUDA
  1077. if (strncmp(type, "CUDA", 4) == 0)
  1078. {
  1079. if (htod)
  1080. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_htod;
  1081. else
  1082. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_dtoh;
  1083. }
  1084. #endif
  1085. #ifdef STARPU_USE_OPENCL
  1086. if (strncmp(type, "OpenCL", 6) == 0)
  1087. {
  1088. if (htod)
  1089. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_htod;
  1090. else
  1091. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_dtoh;
  1092. }
  1093. #endif
  1094. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1095. if (!check || actual < best)
  1096. {
  1097. best = actual;
  1098. check = 1;
  1099. }
  1100. #endif
  1101. }
  1102. return best;
  1103. }
  1104. #endif
  1105. #if !defined(STARPU_SIMGRID)
  1106. static void write_bus_latency_file_content(void)
  1107. {
  1108. unsigned src, dst, maxnode;
  1109. /* Boundaries to check if src or dst are inside the interval */
  1110. unsigned b_low, b_up;
  1111. FILE *f;
  1112. int locked;
  1113. STARPU_ASSERT(was_benchmarked);
  1114. char path[PATH_LENGTH];
  1115. get_latency_path(path, sizeof(path));
  1116. _STARPU_DEBUG("writing latencies to %s\n", path);
  1117. f = fopen(path, "w+");
  1118. if (!f)
  1119. {
  1120. perror("fopen write_bus_latency_file_content");
  1121. _STARPU_DISP("path '%s'\n", path);
  1122. fflush(stderr);
  1123. STARPU_ABORT();
  1124. }
  1125. locked = _starpu_fwrlock(f) == 0;
  1126. _starpu_fftruncate(f, 0);
  1127. fprintf(f, "# ");
  1128. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1129. fprintf(f, "to %u\t\t", dst);
  1130. fprintf(f, "\n");
  1131. maxnode = nnumas;
  1132. #ifdef STARPU_USE_CUDA
  1133. maxnode += ncuda;
  1134. #endif
  1135. #ifdef STARPU_USE_OPENCL
  1136. maxnode += nopencl;
  1137. #endif
  1138. #ifdef STARPU_USE_MIC
  1139. maxnode += nmic;
  1140. #endif
  1141. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1142. maxnode += nmpi_ms;
  1143. #endif
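/* Global memory-node numbering used in this file: NUMA nodes first, then CUDA
 * devices, OpenCL devices, MIC devices and MPI slaves.  The b_low/b_up bounds
 * below walk through these consecutive index ranges. */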
  1144. for (src = 0; src < STARPU_MAXNODES; src++)
  1145. {
  1146. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1147. {
  1148. /* µs */
  1149. double latency = 0.0;
  1150. if ((src >= maxnode) || (dst >= maxnode))
  1151. {
  1152. /* convention */
  1153. latency = NAN;
  1154. }
  1155. else if (src == dst)
  1156. {
  1157. latency = 0.0;
  1158. }
  1159. else
  1160. {
  1161. b_low = b_up = 0;
  1162. /* ---- Begin NUMA ---- */
  1163. b_up += nnumas;
  1164. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1165. latency += numa_latency[src-b_low][dst-b_low];
  1166. /* copy interval to check numa index later */
  1167. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1168. unsigned numa_low = b_low;
  1169. unsigned numa_up = b_up;
  1170. #endif
  1171. b_low += nnumas;
  1172. /* ---- End NUMA ---- */
  1173. #ifdef STARPU_USE_CUDA
  1174. b_up += ncuda;
  1175. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  1176. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1177. latency += cudadev_latency_dtod[src-b_low][dst-b_low];
  1178. else
  1179. #endif
  1180. {
  1181. /* Check if it's CUDA <-> NUMA link */
  1182. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1183. latency += cudadev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].latency_dtoh;
  1184. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1185. latency += cudadev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].latency_htod;
  1186. /* To other devices, take the best latency */
  1187. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1188. latency += search_bus_best_latency(src-b_low, "CUDA", 0);
  1189. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1190. latency += search_bus_best_latency(dst-b_low, "CUDA", 1);
  1191. }
  1192. b_low += ncuda;
  1193. #endif
  1194. #ifdef STARPU_USE_OPENCL
  1195. b_up += nopencl;
  1196. /* Check if it's OpenCL <-> NUMA link */
  1197. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1198. latency += opencldev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].latency_dtoh;
  1199. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1200. latency += opencldev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].latency_htod;
  1201. /* To other devices, take the best latency */
  1202. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1203. latency += search_bus_best_latency(src-b_low, "OpenCL", 0);
1204. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1205. latency += search_bus_best_latency(dst-b_low, "OpenCL", 1);
  1206. b_low += nopencl;
  1207. #endif
  1208. #ifdef STARPU_USE_MIC
  1209. b_up += nmic;
  1210. /* TODO Latency MIC */
  1211. b_low += nmic;
  1212. #endif
  1213. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1214. b_up += nmpi_ms;
1215. /* Shift mpi_src and mpi_dst past the master rank when needed,
1216. * because the memory nodes only correspond to slaves */
  1217. int mpi_master = _starpu_mpi_common_get_src_node();
  1218. int mpi_src = src - b_low;
  1219. mpi_src = (mpi_master <= mpi_src) ? mpi_src+1 : mpi_src;
  1220. int mpi_dst = dst - b_low;
  1221. mpi_dst = (mpi_master <= mpi_dst) ? mpi_dst+1 : mpi_dst;
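/* For instance (hypothetical ranks): if the master is MPI rank 1, slave device 0
 * maps to rank 0 and slave device 1 maps to rank 2, which is what the +1 shift
 * above achieves once the device index reaches the master's rank. */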
  1222. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1223. latency += mpi_latency_device_to_device[mpi_src][mpi_dst];
  1224. else
  1225. {
  1226. if (src >= b_low && src < b_up)
  1227. latency += mpi_latency_device_to_device[mpi_src][mpi_master];
  1228. if (dst >= b_low && dst < b_up)
  1229. latency += mpi_latency_device_to_device[mpi_master][mpi_dst];
  1230. }
  1231. b_low += nmpi_ms;
  1232. #endif
  1233. }
  1234. if (dst > 0)
  1235. fputc('\t', f);
  1236. _starpu_write_double(f, "%e", latency);
  1237. }
  1238. fprintf(f, "\n");
  1239. }
  1240. if (locked)
  1241. _starpu_fwrunlock(f);
  1242. fclose(f);
  1243. }
  1244. #endif
  1245. static void generate_bus_latency_file(void)
  1246. {
  1247. if (!was_benchmarked)
  1248. benchmark_all_gpu_devices();
  1249. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1250. /* Slaves don't write files */
  1251. if (!_starpu_mpi_common_is_src_node())
  1252. return;
  1253. #endif
  1254. #ifndef STARPU_SIMGRID
  1255. write_bus_latency_file_content();
  1256. #endif
  1257. }
  1258. static void load_bus_latency_file(void)
  1259. {
  1260. int res;
  1261. char path[PATH_LENGTH];
  1262. get_latency_path(path, sizeof(path));
  1263. res = access(path, F_OK);
  1264. if (res || !load_bus_latency_file_content())
  1265. {
  1266. /* File does not exist yet or is bogus */
  1267. generate_bus_latency_file();
  1268. res = load_bus_latency_file_content();
  1269. STARPU_ASSERT(res);
  1270. }
  1271. }
  1272. /*
  1273. * Bandwidth
  1274. */
  1275. static void get_bandwidth_path(char *path, size_t maxlen)
  1276. {
  1277. get_bus_path("bandwidth", path, maxlen);
  1278. }
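/* The bandwidth file is a STARPU_MAXNODES x STARPU_MAXNODES tab-separated matrix
 * preceded by a comment header, e.g. (truncated, hypothetical values):
 *   # to 0		to 1		...
 *   0.000000e+00	5.000000e+03	...
 *   5.100000e+03	0.000000e+00	...
 * with 0 on the diagonal and NaN entries for nodes absent from the machine. */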
  1279. static int load_bus_bandwidth_file_content(void)
  1280. {
  1281. int n;
  1282. unsigned src, dst;
  1283. FILE *f;
  1284. double bandwidth;
  1285. int locked;
  1286. char path[PATH_LENGTH];
  1287. get_bandwidth_path(path, sizeof(path));
  1288. _STARPU_DEBUG("loading bandwidth from %s\n", path);
  1289. f = fopen(path, "r");
  1290. if (!f)
  1291. {
  1292. perror("fopen load_bus_bandwidth_file_content");
  1293. _STARPU_DISP("path '%s'\n", path);
  1294. fflush(stderr);
  1295. STARPU_ABORT();
  1296. }
  1297. locked = _starpu_frdlock(f) == 0;
  1298. for (src = 0; src < STARPU_MAXNODES; src++)
  1299. {
  1300. _starpu_drop_comments(f);
  1301. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1302. {
  1303. n = _starpu_read_double(f, "%le", &bandwidth);
  1304. if (n != 1)
  1305. {
  1306. _STARPU_DISP("Error while reading bandwidth file <%s>. Expected a number\n", path);
  1307. fclose(f);
  1308. return 0;
  1309. }
  1310. n = getc(f);
  1311. if (n == '\n')
  1312. break;
  1313. if (n != '\t')
  1314. {
  1315. _STARPU_DISP("bogus character '%c' (%d) in bandwidth file %s\n", n, n, path);
  1316. fclose(f);
  1317. return 0;
  1318. }
  1319. bandwidth_matrix[src][dst] = bandwidth;
  1320. /* Look out for \t\n */
  1321. n = getc(f);
  1322. if (n == '\n')
  1323. break;
  1324. ungetc(n, f);
  1325. n = '\t';
  1326. }
  1327. /* No more values, take NAN */
  1328. for ( ; dst < STARPU_MAXNODES; dst++)
  1329. bandwidth_matrix[src][dst] = NAN;
  1330. while (n == '\t')
  1331. {
  1332. /* Look out for \t\n */
  1333. n = getc(f);
  1334. if (n == '\n')
  1335. break;
  1336. ungetc(n, f);
  1337. n = _starpu_read_double(f, "%le", &bandwidth);
  1338. if (n && !isnan(bandwidth))
  1339. {
  1340. _STARPU_DISP("Too many nodes in bandwidth file %s for this configuration (%d)\n", path, STARPU_MAXNODES);
  1341. fclose(f);
  1342. return 0;
  1343. }
  1344. n = getc(f);
  1345. }
  1346. if (n != '\n')
  1347. {
  1348. _STARPU_DISP("Bogus character '%c' (%d) in bandwidth file %s\n", n, n, path);
  1349. fclose(f);
  1350. return 0;
  1351. }
  1352. /* Look out for EOF */
  1353. n = getc(f);
  1354. if (n == EOF)
  1355. break;
  1356. ungetc(n, f);
  1357. }
  1358. if (locked)
  1359. _starpu_frdunlock(f);
  1360. fclose(f);
  1361. /* No more values, take NAN */
  1362. for ( ; src < STARPU_MAXNODES; src++)
  1363. for (dst = 0; dst < STARPU_MAXNODES; dst++)
1364. bandwidth_matrix[src][dst] = NAN;
  1365. return 1;
  1366. }
  1367. #if !defined(STARPU_SIMGRID) && (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL))
  1368. static double search_bus_best_timing(int src, char * type, int htod)
  1369. {
1370. /* Search the best timing for this node */
  1371. double best = 0.0;
  1372. double actual = 0.0;
  1373. unsigned check = 0;
  1374. unsigned numa;
  1375. for (numa = 0; numa < nnumas; numa++)
  1376. {
  1377. #ifdef STARPU_USE_CUDA
  1378. if (strncmp(type, "CUDA", 4) == 0)
  1379. {
  1380. if (htod)
  1381. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_htod;
  1382. else
  1383. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_dtoh;
  1384. }
  1385. #endif
  1386. #ifdef STARPU_USE_OPENCL
  1387. if (strncmp(type, "OpenCL", 6) == 0)
  1388. {
  1389. if (htod)
  1390. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_htod;
  1391. else
  1392. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_dtoh;
  1393. }
  1394. #endif
  1395. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1396. if (!check || actual < best)
  1397. {
  1398. best = actual;
  1399. check = 1;
  1400. }
  1401. #endif
  1402. }
  1403. return best;
  1404. }
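/* Note: the value returned above is a per-byte transfer time (smaller is better),
 * so callers accumulate it as a slowness and take the reciprocal of the sum to
 * obtain a bandwidth; the NUMA node offering the fastest link is used for
 * transfers whose other endpoint is not a NUMA node. */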
  1405. #endif
  1406. #if !defined(STARPU_SIMGRID)
  1407. static void write_bus_bandwidth_file_content(void)
  1408. {
  1409. unsigned src, dst, maxnode;
  1410. unsigned b_low, b_up;
  1411. FILE *f;
  1412. int locked;
  1413. STARPU_ASSERT(was_benchmarked);
  1414. char path[PATH_LENGTH];
  1415. get_bandwidth_path(path, sizeof(path));
  1416. _STARPU_DEBUG("writing bandwidth to %s\n", path);
  1417. f = fopen(path, "w+");
  1418. STARPU_ASSERT(f);
  1419. locked = _starpu_fwrlock(f) == 0;
  1420. _starpu_fftruncate(f, 0);
  1421. fprintf(f, "# ");
  1422. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1423. fprintf(f, "to %u\t\t", dst);
  1424. fprintf(f, "\n");
  1425. maxnode = nnumas;
  1426. #ifdef STARPU_USE_CUDA
  1427. maxnode += ncuda;
  1428. #endif
  1429. #ifdef STARPU_USE_OPENCL
  1430. maxnode += nopencl;
  1431. #endif
  1432. #ifdef STARPU_USE_MIC
  1433. maxnode += nmic;
  1434. #endif
  1435. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1436. maxnode += nmpi_ms;
  1437. #endif
  1438. for (src = 0; src < STARPU_MAXNODES; src++)
  1439. {
  1440. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1441. {
  1442. double bandwidth;
  1443. if ((src >= maxnode) || (dst >= maxnode))
  1444. {
  1445. bandwidth = NAN;
  1446. }
  1447. else if (src != dst)
  1448. {
  1449. double slowness = 0.0;
1450. /* The total bandwidth is obtained by summing the slownesses (1/bandwidth) of the hops and inverting the sum */
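/* For example (hypothetical numbers): a src->RAM link at 8 GB/s and a RAM->dst
 * link at 4 GB/s give 1/(1/8 + 1/4) ~= 2.7 GB/s end to end. */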
  1451. b_low = b_up = 0;
  1452. /* Begin NUMA */
  1453. b_up += nnumas;
  1454. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1455. slowness += numa_timing[src-b_low][dst-b_low];
1456. /* remember the NUMA interval so we can check indices against it below */
  1457. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1458. unsigned numa_low = b_low;
  1459. unsigned numa_up = b_up;
  1460. #endif
  1461. b_low += nnumas;
  1462. /* End NUMA */
  1463. #ifdef STARPU_USE_CUDA
  1464. b_up += ncuda;
  1465. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  1466. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
1467. /* Direct GPU-GPU transfer */
  1468. slowness += cudadev_timing_dtod[src-b_low][dst-b_low];
  1469. else
  1470. #endif
  1471. {
  1472. /* Check if it's CUDA <-> NUMA link */
  1473. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1474. slowness += cudadev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].timing_dtoh;
1475. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1476. slowness += cudadev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].timing_htod;
  1477. /* To other devices, take the best slowness */
  1478. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1479. slowness += search_bus_best_timing(src-b_low, "CUDA", 0);
1480. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1481. slowness += search_bus_best_timing(dst-b_low, "CUDA", 1);
  1482. }
  1483. b_low += ncuda;
  1484. #endif
  1485. #ifdef STARPU_USE_OPENCL
  1486. b_up += nopencl;
  1487. /* Check if it's OpenCL <-> NUMA link */
  1488. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1489. slowness += opencldev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].timing_dtoh;
1490. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1491. slowness += opencldev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].timing_htod;
  1492. /* To other devices, take the best slowness */
  1493. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1494. slowness += search_bus_best_timing(src-b_low, "OpenCL", 0);
1495. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1496. slowness += search_bus_best_timing(dst-b_low, "OpenCL", 1);
  1497. b_low += nopencl;
  1498. #endif
  1499. #ifdef STARPU_USE_MIC
  1500. b_up += nmic;
  1501. if (src >= b_low && src < b_up)
  1502. slowness += mic_time_device_to_host[src-b_low];
  1503. if (dst >= b_low && dst < b_up)
  1504. slowness += mic_time_host_to_device[dst-b_low];
  1505. b_low += nmic;
  1506. #endif
  1507. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1508. b_up += nmpi_ms;
1509. /* Shift mpi_src and mpi_dst past the master rank when needed,
1510. * because the memory nodes only correspond to slaves */
  1511. int mpi_master = _starpu_mpi_common_get_src_node();
  1512. int mpi_src = src - b_low;
  1513. mpi_src = (mpi_master <= mpi_src) ? mpi_src+1 : mpi_src;
  1514. int mpi_dst = dst - b_low;
  1515. mpi_dst = (mpi_master <= mpi_dst) ? mpi_dst+1 : mpi_dst;
  1516. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1517. slowness += mpi_time_device_to_device[mpi_src][mpi_dst];
  1518. else
  1519. {
  1520. if (src >= b_low && src < b_up)
  1521. slowness += mpi_time_device_to_device[mpi_src][mpi_master];
  1522. if (dst >= b_low && dst < b_up)
  1523. slowness += mpi_time_device_to_device[mpi_master][mpi_dst];
  1524. }
  1525. b_low += nmpi_ms;
  1526. #endif
  1527. bandwidth = 1.0/slowness;
  1528. }
  1529. else
  1530. {
  1531. /* convention */
  1532. bandwidth = 0.0;
  1533. }
  1534. if (dst)
  1535. fputc('\t', f);
  1536. _starpu_write_double(f, "%e", bandwidth);
  1537. }
  1538. fprintf(f, "\n");
  1539. }
  1540. if (locked)
  1541. _starpu_fwrunlock(f);
  1542. fclose(f);
  1543. }
1544. #endif /* !STARPU_SIMGRID */
  1545. void starpu_bus_print_filenames(FILE *output)
  1546. {
  1547. char bandwidth_path[PATH_LENGTH];
  1548. char affinity_path[PATH_LENGTH];
  1549. char latency_path[PATH_LENGTH];
  1550. get_bandwidth_path(bandwidth_path, sizeof(bandwidth_path));
  1551. get_affinity_path(affinity_path, sizeof(affinity_path));
  1552. get_latency_path(latency_path, sizeof(latency_path));
  1553. fprintf(output, "bandwidth: <%s>\n", bandwidth_path);
  1554. fprintf(output, " affinity: <%s>\n", affinity_path);
  1555. fprintf(output, " latency: <%s>\n", latency_path);
  1556. }
  1557. void starpu_bus_print_bandwidth(FILE *f)
  1558. {
  1559. unsigned src, dst, maxnode = starpu_memory_nodes_get_count();
  1560. fprintf(f, "from/to\t");
  1561. for (dst = 0; dst < maxnode; dst++)
  1562. {
  1563. char name[128];
  1564. starpu_memory_node_get_name(dst, name, sizeof(name));
  1565. fprintf(f, "%s\t", name);
  1566. }
  1567. fprintf(f, "\n");
  1568. for (src = 0; src < maxnode; src++)
  1569. {
  1570. char name[128];
  1571. starpu_memory_node_get_name(src, name, sizeof(name));
  1572. fprintf(f, "%s\t", name);
  1573. for (dst = 0; dst < maxnode; dst++)
  1574. fprintf(f, "%.0f\t", bandwidth_matrix[src][dst]);
  1575. fprintf(f, "\n");
  1576. }
  1577. fprintf(f, "\n");
  1578. for (src = 0; src < maxnode; src++)
  1579. {
  1580. char name[128];
  1581. starpu_memory_node_get_name(src, name, sizeof(name));
  1582. fprintf(f, "%s\t", name);
  1583. for (dst = 0; dst < maxnode; dst++)
  1584. fprintf(f, "%.0f\t", latency_matrix[src][dst]);
  1585. fprintf(f, "\n");
  1586. }
  1587. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1588. if (ncuda != 0 || nopencl != 0)
  1589. fprintf(f, "\nGPU\tNUMA in preference order (logical index), host-to-device, device-to-host\n");
  1590. for (src = 0; src < ncuda + nopencl; src++)
  1591. {
  1592. struct dev_timing *timing;
  1593. struct _starpu_machine_config * config = _starpu_get_machine_config();
  1594. unsigned config_nnumas = _starpu_topology_get_nnumanodes(config);
  1595. unsigned numa;
  1596. #ifdef STARPU_USE_CUDA
  1597. if (src < ncuda)
  1598. {
  1599. fprintf(f, "CUDA_%u\t", src);
  1600. for (numa = 0; numa < config_nnumas; numa++)
  1601. {
  1602. timing = &cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa];
  1603. if (timing->timing_htod)
  1604. fprintf(f, "%2d %.0f %.0f\t", timing->numa_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  1605. else
  1606. fprintf(f, "%2u\t", cuda_affinity_matrix[src][numa]);
  1607. }
  1608. }
  1609. #ifdef STARPU_USE_OPENCL
  1610. else
  1611. #endif
  1612. #endif
  1613. #ifdef STARPU_USE_OPENCL
  1614. {
  1615. fprintf(f, "OpenCL%u\t", src-ncuda);
  1616. for (numa = 0; numa < config_nnumas; numa++)
  1617. {
  1618. timing = &opencldev_timing_per_numa[(src-ncuda)*STARPU_MAXNUMANODES+numa];
  1619. if (timing->timing_htod)
  1620. fprintf(f, "%2d %.0f %.0f\t", timing->numa_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  1621. else
  1622. fprintf(f, "%2u\t", opencl_affinity_matrix[src][numa]);
  1623. }
  1624. }
  1625. #endif
  1626. fprintf(f, "\n");
  1627. }
  1628. #endif
  1629. }
  1630. static void generate_bus_bandwidth_file(void)
  1631. {
  1632. if (!was_benchmarked)
  1633. benchmark_all_gpu_devices();
  1634. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1635. /* Slaves don't write files */
  1636. if (!_starpu_mpi_common_is_src_node())
  1637. return;
  1638. #endif
  1639. #ifndef STARPU_SIMGRID
  1640. write_bus_bandwidth_file_content();
  1641. #endif
  1642. }
  1643. static void load_bus_bandwidth_file(void)
  1644. {
  1645. int res;
  1646. char path[PATH_LENGTH];
  1647. get_bandwidth_path(path, sizeof(path));
  1648. res = access(path, F_OK);
  1649. if (res || !load_bus_bandwidth_file_content())
  1650. {
  1651. /* File does not exist yet or is bogus */
  1652. generate_bus_bandwidth_file();
  1653. res = load_bus_bandwidth_file_content();
  1654. STARPU_ASSERT(res);
  1655. }
  1656. }
  1657. #ifndef STARPU_SIMGRID
  1658. /*
  1659. * Config
  1660. */
  1661. static void get_config_path(char *path, size_t maxlen)
  1662. {
  1663. get_bus_path("config", path, maxlen);
  1664. }
  1665. #if defined(STARPU_USE_MPI_MASTER_SLAVE)
  1666. /* check if the master or one slave has to recalibrate */
  1667. static int mpi_check_recalibrate(int my_recalibrate)
  1668. {
  1669. int nb_mpi = _starpu_mpi_src_get_device_count() + 1;
  1670. int mpi_recalibrate[nb_mpi];
  1671. int i;
  1672. MPI_Allgather(&my_recalibrate, 1, MPI_INT, mpi_recalibrate, 1, MPI_INT, MPI_COMM_WORLD);
  1673. for (i = 0; i < nb_mpi; i++)
  1674. {
  1675. if (mpi_recalibrate[i])
  1676. {
  1677. return 1;
  1678. }
  1679. }
  1680. return 0;
  1681. }
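/* Since MPI_Allgather is a collective call, every process ends up with the flags
 * of all the others, so the master and the slaves always reach the same decision
 * about whether to recalibrate. */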
  1682. #endif
  1683. static void compare_value_and_recalibrate(char * msg, unsigned val_file, unsigned val_detected)
  1684. {
  1685. int recalibrate = 0;
  1686. if (val_file != val_detected)
  1687. recalibrate = 1;
  1688. #ifdef STARPU_USE_MPI_MASTER_SLAVE
1689. // Exchange the flag with the other processes: if any of them does not have the correct value in its config file, everybody has to recalibrate
  1690. recalibrate = mpi_check_recalibrate(recalibrate);
  1691. #endif
  1692. if (recalibrate)
  1693. {
  1694. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1695. /* Only the master prints the message */
  1696. if (_starpu_mpi_common_is_src_node())
  1697. #endif
  1698. _STARPU_DISP("Current configuration does not match the bus performance model (%s: (stored) %d != (current) %d), recalibrating...\n", msg, val_file, val_detected);
  1699. _starpu_bus_force_sampling();
  1700. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1701. if (_starpu_mpi_common_is_src_node())
  1702. #endif
  1703. _STARPU_DISP("... done\n");
  1704. }
  1705. }
  1706. static void check_bus_config_file(void)
  1707. {
  1708. int res;
  1709. char path[PATH_LENGTH];
  1710. struct _starpu_machine_config *config = _starpu_get_machine_config();
  1711. int recalibrate = 0;
  1712. get_config_path(path, sizeof(path));
  1713. res = access(path, F_OK);
  1714. if (res || config->conf.bus_calibrate > 0)
  1715. recalibrate = 1;
  1716. #if defined(STARPU_USE_MPI_MASTER_SLAVE)
1717. // Exchange the flag with the other processes: if any of them is missing the config file, everybody has to recalibrate
  1718. recalibrate = mpi_check_recalibrate(recalibrate);
  1719. #endif
  1720. if (recalibrate)
  1721. {
  1722. if (res)
  1723. _STARPU_DISP("No performance model for the bus, calibrating...\n");
  1724. _starpu_bus_force_sampling();
  1725. if (res)
  1726. _STARPU_DISP("... done\n");
  1727. }
  1728. else
  1729. {
  1730. FILE *f;
  1731. int ret;
  1732. unsigned read_cuda = -1, read_opencl = -1, read_mic = -1, read_mpi_ms = -1;
  1733. unsigned read_cpus = -1, read_numa = -1;
  1734. int locked;
  1735. // Loading configuration from file
  1736. f = fopen(path, "r");
  1737. STARPU_ASSERT(f);
  1738. locked = _starpu_frdlock(f) == 0;
  1739. _starpu_drop_comments(f);
  1740. ret = fscanf(f, "%u\t", &read_cpus);
  1741. STARPU_ASSERT(ret == 1);
  1742. _starpu_drop_comments(f);
  1743. ret = fscanf(f, "%u\t", &read_numa);
  1744. STARPU_ASSERT(ret == 1);
  1745. _starpu_drop_comments(f);
  1746. ret = fscanf(f, "%u\t", &read_cuda);
  1747. STARPU_ASSERT(ret == 1);
  1748. _starpu_drop_comments(f);
  1749. ret = fscanf(f, "%u\t", &read_opencl);
  1750. STARPU_ASSERT(ret == 1);
  1751. _starpu_drop_comments(f);
  1752. ret = fscanf(f, "%u\t", &read_mic);
  1753. if (ret == 0)
  1754. read_mic = 0;
  1755. _starpu_drop_comments(f);
  1756. ret = fscanf(f, "%u\t", &read_mpi_ms);
  1757. if (ret == 0)
  1758. read_mpi_ms = 0;
  1759. _starpu_drop_comments(f);
  1760. if (locked)
  1761. _starpu_frdunlock(f);
  1762. fclose(f);
  1763. // Loading current configuration
  1764. ncpus = _starpu_topology_get_nhwcpu(config);
  1765. nnumas = _starpu_topology_get_nnumanodes(config);
  1766. #ifdef STARPU_USE_CUDA
  1767. ncuda = _starpu_get_cuda_device_count();
  1768. #endif
  1769. #ifdef STARPU_USE_OPENCL
  1770. nopencl = _starpu_opencl_get_device_count();
  1771. #endif
  1772. #ifdef STARPU_USE_MIC
  1773. nmic = _starpu_mic_src_get_device_count();
  1774. #endif /* STARPU_USE_MIC */
  1775. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1776. nmpi_ms = _starpu_mpi_src_get_device_count();
  1777. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  1778. // Checking if both configurations match
  1779. compare_value_and_recalibrate("CPUS", read_cpus, ncpus);
  1780. compare_value_and_recalibrate("NUMA", read_numa, nnumas);
  1781. compare_value_and_recalibrate("CUDA", read_cuda, ncuda);
  1782. compare_value_and_recalibrate("OpenCL", read_opencl, nopencl);
  1783. compare_value_and_recalibrate("MIC", read_mic, nmic);
  1784. compare_value_and_recalibrate("MPI Master-Slave", read_mpi_ms, nmpi_ms);
  1785. }
  1786. }
  1787. static void write_bus_config_file_content(void)
  1788. {
  1789. FILE *f;
  1790. char path[PATH_LENGTH];
  1791. int locked;
  1792. STARPU_ASSERT(was_benchmarked);
  1793. get_config_path(path, sizeof(path));
  1794. _STARPU_DEBUG("writing config to %s\n", path);
  1795. f = fopen(path, "w+");
  1796. STARPU_ASSERT(f);
  1797. locked = _starpu_fwrlock(f) == 0;
  1798. _starpu_fftruncate(f, 0);
  1799. fprintf(f, "# Current configuration\n");
  1800. fprintf(f, "%u # Number of CPUs\n", ncpus);
  1801. fprintf(f, "%u # Number of NUMA nodes\n", nnumas);
  1802. fprintf(f, "%u # Number of CUDA devices\n", ncuda);
  1803. fprintf(f, "%u # Number of OpenCL devices\n", nopencl);
  1804. fprintf(f, "%u # Number of MIC devices\n", nmic);
  1805. fprintf(f, "%u # Number of MPI devices\n", nmpi_ms);
  1806. if (locked)
  1807. _starpu_fwrunlock(f);
  1808. fclose(f);
  1809. }
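/* The resulting config file looks like (hypothetical counts):
 *   # Current configuration
 *   8 # Number of CPUs
 *   2 # Number of NUMA nodes
 *   1 # Number of CUDA devices
 *   0 # Number of OpenCL devices
 *   0 # Number of MIC devices
 *   0 # Number of MPI devices
 */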
  1810. static void generate_bus_config_file(void)
  1811. {
  1812. if (!was_benchmarked)
  1813. benchmark_all_gpu_devices();
  1814. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1815. /* Slaves don't write files */
  1816. if (!_starpu_mpi_common_is_src_node())
  1817. return;
  1818. #endif
  1819. write_bus_config_file_content();
  1820. }
  1821. #endif /* !SIMGRID */
  1822. void _starpu_simgrid_get_platform_path(int version, char *path, size_t maxlen)
  1823. {
  1824. if (version == 3)
  1825. get_bus_path("platform.xml", path, maxlen);
  1826. else
  1827. get_bus_path("platform.v4.xml", path, maxlen);
  1828. }
  1829. #ifndef STARPU_SIMGRID
  1830. /*
  1831. * Compute the precise PCI tree bandwidth and link shares
  1832. *
  1833. * We only have measurements from one leaf to another. We assume that the
  1834. * available bandwidth is greater at lower levels, and thus measurements from
  1835. * increasingly far GPUs provide the PCI bridges bandwidths at each level.
  1836. *
  1837. * The bandwidth of a PCI bridge is thus computed as the maximum of the speed
  1838. * of the various transfers that we have achieved through it. We thus browse
  1839. * the PCI tree three times:
  1840. *
  1841. * - first through all CUDA-CUDA possible transfers to compute the maximum
  1842. * measured bandwidth on each PCI link and hub used for that.
  1843. * - then through the whole tree to emit links for each PCI link and hub.
  1844. * - then through all CUDA-CUDA possible transfers again to emit routes.
  1845. */
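/* For instance (hypothetical figures): if a bridge carried one CUDA-CUDA transfer
 * measured at 10 GB/s and another at 6 GB/s, it is assumed to be capable of at
 * least 10 GB/s, which is the value recorded for that link/hub below. */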
  1846. #if defined(STARPU_USE_CUDA) && defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
  1847. /* Records, for each PCI link and hub, the maximum bandwidth seen through it */
  1848. struct pci_userdata
  1849. {
  1850. /* Uplink max measurement */
  1851. double bw_up;
  1852. double bw_down;
  1853. /* Hub max measurement */
  1854. double bw;
  1855. };
  1856. /* Allocate a pci_userdata structure for the given object */
  1857. static void allocate_userdata(hwloc_obj_t obj)
  1858. {
  1859. struct pci_userdata *data;
  1860. if (obj->userdata)
  1861. return;
  1862. _STARPU_MALLOC(obj->userdata, sizeof(*data));
  1863. data = obj->userdata;
  1864. data->bw_up = 0.0;
  1865. data->bw_down = 0.0;
  1866. data->bw = 0.0;
  1867. }
  1868. /* Update the maximum bandwidth seen going to upstream */
  1869. static void update_bandwidth_up(hwloc_obj_t obj, double bandwidth)
  1870. {
  1871. struct pci_userdata *data;
  1872. if (obj->type != HWLOC_OBJ_BRIDGE && obj->type != HWLOC_OBJ_PCI_DEVICE)
  1873. return;
  1874. allocate_userdata(obj);
  1875. data = obj->userdata;
  1876. if (data->bw_up < bandwidth)
  1877. data->bw_up = bandwidth;
  1878. }
  1879. /* Update the maximum bandwidth seen going from upstream */
  1880. static void update_bandwidth_down(hwloc_obj_t obj, double bandwidth)
  1881. {
  1882. struct pci_userdata *data;
  1883. if (obj->type != HWLOC_OBJ_BRIDGE && obj->type != HWLOC_OBJ_PCI_DEVICE)
  1884. return;
  1885. allocate_userdata(obj);
  1886. data = obj->userdata;
  1887. if (data->bw_down < bandwidth)
  1888. data->bw_down = bandwidth;
  1889. }
  1890. /* Update the maximum bandwidth seen going through this Hub */
  1891. static void update_bandwidth_through(hwloc_obj_t obj, double bandwidth)
  1892. {
  1893. struct pci_userdata *data;
  1894. allocate_userdata(obj);
  1895. data = obj->userdata;
  1896. if (data->bw < bandwidth)
  1897. data->bw = bandwidth;
  1898. }
  1899. /* find_* functions perform the first step: computing maximum bandwidths */
1900. /* Our traffic has to go through the host: walk back from the target up to the host,
1901. * updating uplink downstream bandwidth along the way */
  1902. static void find_platform_backward_path(hwloc_obj_t obj, double bandwidth)
  1903. {
  1904. if (!obj)
  1905. /* Oops, we should have seen a host bridge. Well, too bad. */
  1906. return;
  1907. /* Update uplink bandwidth of PCI Hub */
  1908. update_bandwidth_down(obj, bandwidth);
  1909. /* Update internal bandwidth of PCI Hub */
  1910. update_bandwidth_through(obj, bandwidth);
  1911. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1912. /* Finished */
  1913. return;
  1914. /* Continue up */
  1915. find_platform_backward_path(obj->parent, bandwidth);
  1916. }
  1917. /* Same, but update uplink upstream bandwidth */
  1918. static void find_platform_forward_path(hwloc_obj_t obj, double bandwidth)
  1919. {
  1920. if (!obj)
  1921. /* Oops, we should have seen a host bridge. Well, too bad. */
  1922. return;
  1923. /* Update uplink bandwidth of PCI Hub */
  1924. update_bandwidth_up(obj, bandwidth);
  1925. /* Update internal bandwidth of PCI Hub */
  1926. update_bandwidth_through(obj, bandwidth);
  1927. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1928. /* Finished */
  1929. return;
  1930. /* Continue up */
  1931. find_platform_forward_path(obj->parent, bandwidth);
  1932. }
  1933. /* Find the path from obj1 through parent down to obj2 (without ever going up),
  1934. * and update the maximum bandwidth along the path */
  1935. static int find_platform_path_down(hwloc_obj_t parent, hwloc_obj_t obj1, hwloc_obj_t obj2, double bandwidth)
  1936. {
  1937. unsigned i;
  1938. /* Base case, path is empty */
  1939. if (parent == obj2)
  1940. return 1;
  1941. /* Try to go down from parent */
  1942. for (i = 0; i < parent->arity; i++)
  1943. if (parent->children[i] != obj1 && find_platform_path_down(parent->children[i], NULL, obj2, bandwidth))
  1944. {
  1945. /* Found it down there, update bandwidth of parent */
  1946. update_bandwidth_down(parent->children[i], bandwidth);
  1947. update_bandwidth_through(parent, bandwidth);
  1948. return 1;
  1949. }
  1950. #if HWLOC_API_VERSION >= 0x00020000
  1951. hwloc_obj_t io;
  1952. for (io = parent->io_first_child; io; io = io->next_sibling)
  1953. if (io != obj1 && find_platform_path_down(io, NULL, obj2, bandwidth))
  1954. {
  1955. /* Found it down there, update bandwidth of parent */
  1956. update_bandwidth_down(io, bandwidth);
  1957. update_bandwidth_through(parent, bandwidth);
  1958. return 1;
  1959. }
  1960. #endif
  1961. return 0;
  1962. }
  1963. /* Find the path from obj1 to obj2, and update the maximum bandwidth along the
  1964. * path */
  1965. static int find_platform_path_up(hwloc_obj_t obj1, hwloc_obj_t obj2, double bandwidth)
  1966. {
  1967. int ret;
  1968. hwloc_obj_t parent = obj1->parent;
  1969. if (!parent)
  1970. {
  1971. /* Oops, we should have seen a host bridge. Act as if we had seen it. */
  1972. find_platform_backward_path(obj2, bandwidth);
  1973. return 1;
  1974. }
  1975. if (find_platform_path_down(parent, obj1, obj2, bandwidth))
  1976. /* obj2 was a mere (sub)child of our parent */
  1977. return 1;
  1978. /* obj2 is not a (sub)child of our parent, we have to go up through the parent */
  1979. if (parent->type == HWLOC_OBJ_BRIDGE && parent->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1980. {
  1981. /* We have to go up to the Host, so obj2 is not in the same PCI
1982. * tree, so we are done for obj1 up to the Host, and just find the path
  1983. * from obj2 to Host too.
  1984. */
  1985. find_platform_backward_path(obj2, bandwidth);
  1986. update_bandwidth_up(parent, bandwidth);
  1987. update_bandwidth_through(parent, bandwidth);
  1988. return 1;
  1989. }
  1990. /* Not at host yet, just go up */
  1991. ret = find_platform_path_up(parent, obj2, bandwidth);
  1992. update_bandwidth_up(parent, bandwidth);
  1993. update_bandwidth_through(parent, bandwidth);
  1994. return ret;
  1995. }
  1996. /* find the path between cuda i and cuda j, and update the maximum bandwidth along the path */
  1997. static int find_platform_cuda_path(hwloc_topology_t topology, unsigned i, unsigned j, double bandwidth)
  1998. {
  1999. hwloc_obj_t cudai, cudaj;
  2000. cudai = hwloc_cuda_get_device_osdev_by_index(topology, i);
  2001. cudaj = hwloc_cuda_get_device_osdev_by_index(topology, j);
  2002. if (!cudai || !cudaj)
  2003. return 0;
  2004. return find_platform_path_up(cudai, cudaj, bandwidth);
  2005. }
  2006. /* emit_topology_bandwidths performs the second step: emitting link names */
  2007. /* Emit the link name of the object */
  2008. static void emit_pci_hub(FILE *f, hwloc_obj_t obj)
  2009. {
  2010. STARPU_ASSERT(obj->type == HWLOC_OBJ_BRIDGE);
  2011. fprintf(f, "PCI:%04x:[%02x-%02x]", obj->attr->bridge.downstream.pci.domain, obj->attr->bridge.downstream.pci.secondary_bus, obj->attr->bridge.downstream.pci.subordinate_bus);
  2012. }
  2013. static void emit_pci_dev(FILE *f, struct hwloc_pcidev_attr_s *pcidev)
  2014. {
  2015. fprintf(f, "PCI:%04x:%02x:%02x.%1x", pcidev->domain, pcidev->bus, pcidev->dev, pcidev->func);
  2016. }
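/* These produce link ids such as "PCI:0000:[02-04]" for a bridge (domain and
 * secondary/subordinate bus range) and "PCI:0000:03:00.0" for a device
 * (domain:bus:dev.func); the sample ids are hypothetical, the format strings
 * above are authoritative. */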
  2017. /* Emit the links of the object */
  2018. static void emit_topology_bandwidths(FILE *f, hwloc_obj_t obj, const char *Bps, const char *s)
  2019. {
  2020. unsigned i;
  2021. if (obj->userdata)
  2022. {
  2023. struct pci_userdata *data = obj->userdata;
  2024. if (obj->type == HWLOC_OBJ_BRIDGE)
  2025. {
  2026. /* Uplink */
  2027. fprintf(f, " <link id=\"");
  2028. emit_pci_hub(f, obj);
  2029. fprintf(f, " up\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_up, Bps, s);
  2030. fprintf(f, " <link id=\"");
  2031. emit_pci_hub(f, obj);
  2032. fprintf(f, " down\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_down, Bps, s);
  2033. /* PCI Switches are assumed to have infinite internal bandwidth */
  2034. if (!obj->name || !strstr(obj->name, "Switch"))
  2035. {
  2036. /* We assume that PCI Hubs have double bandwidth in
  2037. * order to support full duplex but not more */
  2038. fprintf(f, " <link id=\"");
  2039. emit_pci_hub(f, obj);
  2040. fprintf(f, " through\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw * 2, Bps, s);
  2041. }
  2042. }
  2043. else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
  2044. {
  2045. fprintf(f, " <link id=\"");
  2046. emit_pci_dev(f, &obj->attr->pcidev);
  2047. fprintf(f, " up\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_up, Bps, s);
  2048. fprintf(f, " <link id=\"");
  2049. emit_pci_dev(f, &obj->attr->pcidev);
  2050. fprintf(f, " down\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_down, Bps, s);
  2051. }
  2052. }
  2053. for (i = 0; i < obj->arity; i++)
  2054. emit_topology_bandwidths(f, obj->children[i], Bps, s);
  2055. #if HWLOC_API_VERSION >= 0x00020000
  2056. hwloc_obj_t io;
  2057. for (io = obj->io_first_child; io; io = io->next_sibling)
  2058. emit_topology_bandwidths(f, io, Bps, s);
  2059. #endif
  2060. }
  2061. /* emit_pci_link_* functions perform the third step: emitting the routes */
  2062. static void emit_pci_link(FILE *f, hwloc_obj_t obj, const char *suffix)
  2063. {
  2064. if (obj->type == HWLOC_OBJ_BRIDGE)
  2065. {
  2066. fprintf(f, " <link_ctn id=\"");
  2067. emit_pci_hub(f, obj);
  2068. fprintf(f, " %s\"/>\n", suffix);
  2069. }
  2070. else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
  2071. {
  2072. fprintf(f, " <link_ctn id=\"");
  2073. emit_pci_dev(f, &obj->attr->pcidev);
  2074. fprintf(f, " %s\"/>\n", suffix);
  2075. }
  2076. }
  2077. /* Go to upstream */
  2078. static void emit_pci_link_up(FILE *f, hwloc_obj_t obj)
  2079. {
  2080. emit_pci_link(f, obj, "up");
  2081. }
  2082. /* Go from upstream */
  2083. static void emit_pci_link_down(FILE *f, hwloc_obj_t obj)
  2084. {
  2085. emit_pci_link(f, obj, "down");
  2086. }
  2087. /* Go through PCI hub */
  2088. static void emit_pci_link_through(FILE *f, hwloc_obj_t obj)
  2089. {
2090. /* We don't care about traffic going through PCI switches */
  2091. if (obj->type == HWLOC_OBJ_BRIDGE)
  2092. {
  2093. if (!obj->name || !strstr(obj->name, "Switch"))
  2094. emit_pci_link(f, obj, "through");
  2095. else
  2096. {
  2097. fprintf(f, " <!-- Switch ");
  2098. emit_pci_hub(f, obj);
  2099. fprintf(f, " through -->\n");
  2100. }
  2101. }
  2102. }
2103. /* Our traffic has to go through the host: walk back from the target up to the host,
2104. * using uplink downstream along the way */
  2105. static void emit_platform_backward_path(FILE *f, hwloc_obj_t obj)
  2106. {
  2107. if (!obj)
  2108. /* Oops, we should have seen a host bridge. Well, too bad. */
  2109. return;
  2110. /* Go through PCI Hub */
  2111. emit_pci_link_through(f, obj);
  2112. /* Go through uplink */
  2113. emit_pci_link_down(f, obj);
  2114. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2115. {
  2116. /* Finished, go through host */
  2117. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2118. return;
  2119. }
  2120. /* Continue up */
  2121. emit_platform_backward_path(f, obj->parent);
  2122. }
  2123. /* Same, but use upstream link */
  2124. static void emit_platform_forward_path(FILE *f, hwloc_obj_t obj)
  2125. {
  2126. if (!obj)
  2127. /* Oops, we should have seen a host bridge. Well, too bad. */
  2128. return;
  2129. /* Go through PCI Hub */
  2130. emit_pci_link_through(f, obj);
  2131. /* Go through uplink */
  2132. emit_pci_link_up(f, obj);
  2133. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2134. {
  2135. /* Finished, go through host */
  2136. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2137. return;
  2138. }
  2139. /* Continue up */
  2140. emit_platform_forward_path(f, obj->parent);
  2141. }
  2142. /* Find the path from obj1 through parent down to obj2 (without ever going up),
  2143. * and use the links along the path */
  2144. static int emit_platform_path_down(FILE *f, hwloc_obj_t parent, hwloc_obj_t obj1, hwloc_obj_t obj2)
  2145. {
  2146. unsigned i;
  2147. /* Base case, path is empty */
  2148. if (parent == obj2)
  2149. return 1;
  2150. /* Try to go down from parent */
  2151. for (i = 0; i < parent->arity; i++)
  2152. if (parent->children[i] != obj1 && emit_platform_path_down(f, parent->children[i], NULL, obj2))
  2153. {
  2154. /* Found it down there, path goes through this hub */
  2155. emit_pci_link_down(f, parent->children[i]);
  2156. emit_pci_link_through(f, parent);
  2157. return 1;
  2158. }
  2159. #if HWLOC_API_VERSION >= 0x00020000
  2160. hwloc_obj_t io;
  2161. for (io = parent->io_first_child; io; io = io->next_sibling)
  2162. if (io != obj1 && emit_platform_path_down(f, io, NULL, obj2))
  2163. {
  2164. /* Found it down there, path goes through this hub */
  2165. emit_pci_link_down(f, io);
  2166. emit_pci_link_through(f, parent);
  2167. return 1;
  2168. }
  2169. #endif
  2170. return 0;
  2171. }
  2172. /* Find the path from obj1 to obj2, and use the links along the path */
  2173. static int emit_platform_path_up(FILE *f, hwloc_obj_t obj1, hwloc_obj_t obj2)
  2174. {
  2175. int ret;
  2176. hwloc_obj_t parent = obj1->parent;
  2177. if (!parent)
  2178. {
  2179. /* Oops, we should have seen a host bridge. Act as if we had seen it. */
  2180. emit_platform_backward_path(f, obj2);
  2181. return 1;
  2182. }
  2183. if (emit_platform_path_down(f, parent, obj1, obj2))
  2184. /* obj2 was a mere (sub)child of our parent */
  2185. return 1;
  2186. /* obj2 is not a (sub)child of our parent, we have to go up through the parent */
  2187. if (parent->type == HWLOC_OBJ_BRIDGE && parent->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2188. {
  2189. /* We have to go up to the Host, so obj2 is not in the same PCI
2190. * tree, so we are done for obj1 up to the Host, and just find the path
  2191. * from obj2 to Host too.
  2192. */
  2193. emit_platform_backward_path(f, obj2);
  2194. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2195. emit_pci_link_up(f, parent);
  2196. emit_pci_link_through(f, parent);
  2197. return 1;
  2198. }
  2199. /* Not at host yet, just go up */
  2200. ret = emit_platform_path_up(f, parent, obj2);
  2201. emit_pci_link_up(f, parent);
  2202. emit_pci_link_through(f, parent);
  2203. return ret;
  2204. }
  2205. /* Clean our mess in the topology before destroying it */
  2206. static void clean_topology(hwloc_obj_t obj)
  2207. {
  2208. unsigned i;
  2209. if (obj->userdata)
  2210. {
  2211. free(obj->userdata);
  2212. obj->userdata = NULL;
  2213. }
  2214. for (i = 0; i < obj->arity; i++)
  2215. clean_topology(obj->children[i]);
  2216. #if HWLOC_API_VERSION >= 0x00020000
  2217. hwloc_obj_t io;
  2218. for (io = obj->io_first_child; io; io = io->next_sibling)
  2219. clean_topology(io);
  2220. #endif
  2221. }
  2222. #endif
  2223. static void write_bus_platform_file_content(int version)
  2224. {
  2225. FILE *f;
  2226. char path[PATH_LENGTH];
  2227. unsigned i;
  2228. const char *speed, *flops, *Bps, *s;
  2229. char dash;
  2230. int locked;
  2231. if (version == 3)
  2232. {
  2233. speed = "power";
  2234. flops = "";
  2235. Bps = "";
  2236. s = "";
  2237. dash = '_';
  2238. }
  2239. else
  2240. {
  2241. speed = "speed";
  2242. flops = "f";
  2243. Bps = "Bps";
  2244. s = "s";
  2245. dash = '-';
  2246. }
  2247. STARPU_ASSERT(was_benchmarked);
  2248. _starpu_simgrid_get_platform_path(version, path, sizeof(path));
  2249. _STARPU_DEBUG("writing platform to %s\n", path);
  2250. f = fopen(path, "w+");
  2251. if (!f)
  2252. {
  2253. perror("fopen write_bus_platform_file_content");
  2254. _STARPU_DISP("path '%s'\n", path);
  2255. fflush(stderr);
  2256. STARPU_ABORT();
  2257. }
  2258. locked = _starpu_fwrlock(f) == 0;
  2259. _starpu_fftruncate(f, 0);
  2260. fprintf(f,
  2261. "<?xml version='1.0'?>\n"
  2262. "<!DOCTYPE platform SYSTEM '%s'>\n"
  2263. " <platform version=\"%d\">\n"
  2264. " <config id=\"General\">\n"
  2265. " <prop id=\"network/TCP%cgamma\" value=\"-1\"></prop>\n"
  2266. " <prop id=\"network/latency%cfactor\" value=\"1\"></prop>\n"
  2267. " <prop id=\"network/bandwidth%cfactor\" value=\"1\"></prop>\n"
  2268. " </config>\n"
  2269. " <AS id=\"AS0\" routing=\"Full\">\n"
  2270. " <host id=\"MAIN\" %s=\"1%s\"/>\n",
  2271. version == 3
  2272. ? "http://simgrid.gforge.inria.fr/simgrid.dtd"
  2273. : "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd",
  2274. version, dash, dash, dash, speed, flops);
  2275. for (i = 0; i < ncpus; i++)
  2276. /* TODO: host memory for out-of-core simulation */
  2277. fprintf(f, " <host id=\"CPU%u\" %s=\"2000000000%s\"/>\n", i, speed, flops);
  2278. for (i = 0; i < ncuda; i++)
  2279. {
  2280. fprintf(f, " <host id=\"CUDA%u\" %s=\"2000000000%s\">\n", i, speed, flops);
  2281. fprintf(f, " <prop id=\"memsize\" value=\"%llu\"/>\n", (unsigned long long) cuda_size[i]);
  2282. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2283. fprintf(f, " <prop id=\"memcpy_peer\" value=\"1\"/>\n");
  2284. #endif
  2285. /* TODO: record cudadev_direct instead of assuming it's NUMA nodes */
  2286. fprintf(f, " </host>\n");
  2287. }
  2288. for (i = 0; i < nopencl; i++)
  2289. {
  2290. fprintf(f, " <host id=\"OpenCL%u\" %s=\"2000000000%s\">\n", i, speed, flops);
  2291. fprintf(f, " <prop id=\"memsize\" value=\"%llu\"/>\n", (unsigned long long) opencl_size[i]);
  2292. fprintf(f, " </host>\n");
  2293. }
  2294. fprintf(f, "\n <host id=\"RAM\" %s=\"1%s\"/>\n", speed, flops);
  2295. /*
  2296. * Compute maximum bandwidth, taken as host bandwidth
  2297. */
  2298. double max_bandwidth = 0;
  2299. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  2300. unsigned numa;
  2301. #endif
  2302. #ifdef STARPU_USE_CUDA
  2303. for (i = 0; i < ncuda; i++)
  2304. {
  2305. for (numa = 0; numa < nnumas; numa++)
  2306. {
  2307. double down_bw = 1.0 / cudadev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_dtoh;
  2308. double up_bw = 1.0 / cudadev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_htod;
  2309. if (max_bandwidth < down_bw)
  2310. max_bandwidth = down_bw;
  2311. if (max_bandwidth < up_bw)
  2312. max_bandwidth = up_bw;
  2313. }
  2314. }
  2315. #endif
  2316. #ifdef STARPU_USE_OPENCL
  2317. for (i = 0; i < nopencl; i++)
  2318. {
  2319. for (numa = 0; numa < nnumas; numa++)
  2320. {
  2321. double down_bw = 1.0 / opencldev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_dtoh;
  2322. double up_bw = 1.0 / opencldev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_htod;
  2323. if (max_bandwidth < down_bw)
  2324. max_bandwidth = down_bw;
  2325. if (max_bandwidth < up_bw)
  2326. max_bandwidth = up_bw;
  2327. }
  2328. }
  2329. #endif
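/* The timings accumulated above are per-byte transfer times in microseconds, so
 * 1/timing is a bandwidth in MB/s and the 1e6 factor below converts it to bytes
 * per second for the platform file. */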
  2330. fprintf(f, "\n <link id=\"Host\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n\n", max_bandwidth*1000000, Bps, s);
  2331. /*
  2332. * OpenCL links
  2333. */
  2334. #ifdef STARPU_USE_OPENCL
  2335. for (i = 0; i < nopencl; i++)
  2336. {
  2337. char i_name[16];
  2338. snprintf(i_name, sizeof(i_name), "OpenCL%u", i);
  2339. fprintf(f, " <link id=\"RAM-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2340. i_name,
  2341. 1000000 / search_bus_best_timing(i, "OpenCL", 1), Bps,
  2342. search_bus_best_latency(i, "OpenCL", 1)/1000000., s);
  2343. fprintf(f, " <link id=\"%s-RAM\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2344. i_name,
  2345. 1000000 / search_bus_best_timing(i, "OpenCL", 0), Bps,
  2346. search_bus_best_latency(i, "OpenCL", 0)/1000000., s);
  2347. }
  2348. fprintf(f, "\n");
  2349. #endif
  2350. /*
  2351. * CUDA links and routes
  2352. */
  2353. #ifdef STARPU_USE_CUDA
  2354. /* Write RAM/CUDA bandwidths and latencies */
  2355. for (i = 0; i < ncuda; i++)
  2356. {
  2357. char i_name[16];
  2358. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2359. fprintf(f, " <link id=\"RAM-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2360. i_name,
  2361. 1000000. / search_bus_best_timing(i, "CUDA", 1), Bps,
  2362. search_bus_best_latency(i, "CUDA", 1)/1000000., s);
  2363. fprintf(f, " <link id=\"%s-RAM\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2364. i_name,
  2365. 1000000. / search_bus_best_timing(i, "CUDA", 0), Bps,
  2366. search_bus_best_latency(i, "CUDA", 0)/1000000., s);
  2367. }
  2368. fprintf(f, "\n");
  2369. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2370. /* Write CUDA/CUDA bandwidths and latencies */
  2371. for (i = 0; i < ncuda; i++)
  2372. {
  2373. unsigned j;
  2374. char i_name[16];
  2375. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2376. for (j = 0; j < ncuda; j++)
  2377. {
  2378. char j_name[16];
  2379. if (j == i)
  2380. continue;
  2381. snprintf(j_name, sizeof(j_name), "CUDA%u", j);
  2382. fprintf(f, " <link id=\"%s-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2383. i_name, j_name,
  2384. 1000000. / cudadev_timing_dtod[i][j], Bps,
  2385. cudadev_latency_dtod[i][j]/1000000., s);
  2386. }
  2387. }
  2388. #endif
  2389. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
  2390. #ifdef STARPU_DEVEL
  2391. #warning TODO: use libnvml to get NVLink links, otherwise numbers will be bogusly propagated through PCI topology
  2392. #endif
  2393. /* If we have enough hwloc information, write PCI bandwidths and routes */
  2394. if (!starpu_get_env_number_default("STARPU_PCI_FLAT", 0))
  2395. {
  2396. hwloc_topology_t topology;
  2397. hwloc_topology_init(&topology);
  2398. _starpu_topology_filter(topology);
  2399. hwloc_topology_load(topology);
  2400. /* First find paths and record measured bandwidth along the path */
  2401. for (i = 0; i < ncuda; i++)
  2402. {
  2403. unsigned j;
  2404. for (j = 0; j < ncuda; j++)
  2405. if (i != j)
  2406. if (!find_platform_cuda_path(topology, i, j, 1000000. / cudadev_timing_dtod[i][j]))
  2407. {
  2408. clean_topology(hwloc_get_root_obj(topology));
  2409. hwloc_topology_destroy(topology);
  2410. goto flat_cuda;
  2411. }
  2412. /* Record RAM/CUDA bandwidths */
  2413. find_platform_forward_path(hwloc_cuda_get_device_osdev_by_index(topology, i), 1000000. / search_bus_best_timing(i, "CUDA", 0));
  2414. find_platform_backward_path(hwloc_cuda_get_device_osdev_by_index(topology, i), 1000000. / search_bus_best_timing(i, "CUDA", 1));
  2415. }
  2416. /* Ok, found path in all cases, can emit advanced platform routes */
  2417. fprintf(f, "\n");
  2418. emit_topology_bandwidths(f, hwloc_get_root_obj(topology), Bps, s);
  2419. fprintf(f, "\n");
  2420. for (i = 0; i < ncuda; i++)
  2421. {
  2422. unsigned j;
  2423. for (j = 0; j < ncuda; j++)
  2424. if (i != j)
  2425. {
  2426. fprintf(f, " <route src=\"CUDA%u\" dst=\"CUDA%u\" symmetrical=\"NO\">\n", i, j);
  2427. fprintf(f, " <link_ctn id=\"CUDA%u-CUDA%u\"/>\n", i, j);
  2428. emit_platform_path_up(f,
  2429. hwloc_cuda_get_device_osdev_by_index(topology, i),
  2430. hwloc_cuda_get_device_osdev_by_index(topology, j));
  2431. fprintf(f, " </route>\n");
  2432. }
  2433. fprintf(f, " <route src=\"CUDA%u\" dst=\"RAM\" symmetrical=\"NO\">\n", i);
  2434. fprintf(f, " <link_ctn id=\"CUDA%u-RAM\"/>\n", i);
  2435. emit_platform_forward_path(f, hwloc_cuda_get_device_osdev_by_index(topology, i));
  2436. fprintf(f, " </route>\n");
  2437. fprintf(f, " <route src=\"RAM\" dst=\"CUDA%u\" symmetrical=\"NO\">\n", i);
  2438. fprintf(f, " <link_ctn id=\"RAM-CUDA%u\"/>\n", i);
  2439. emit_platform_backward_path(f, hwloc_cuda_get_device_osdev_by_index(topology, i));
  2440. fprintf(f, " </route>\n");
  2441. }
  2442. clean_topology(hwloc_get_root_obj(topology));
  2443. hwloc_topology_destroy(topology);
  2444. }
  2445. else
  2446. {
  2447. flat_cuda:
  2448. #else
  2449. {
  2450. #endif
  2451. /* If we don't have enough hwloc information, write trivial routes always through host */
  2452. for (i = 0; i < ncuda; i++)
  2453. {
  2454. char i_name[16];
  2455. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2456. fprintf(f, " <route src=\"RAM\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"RAM-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2457. fprintf(f, " <route src=\"%s\" dst=\"RAM\" symmetrical=\"NO\"><link_ctn id=\"%s-RAM\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2458. }
  2459. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2460. for (i = 0; i < ncuda; i++)
  2461. {
  2462. unsigned j;
  2463. char i_name[16];
  2464. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2465. for (j = 0; j < ncuda; j++)
  2466. {
  2467. char j_name[16];
  2468. if (j == i)
  2469. continue;
  2470. snprintf(j_name, sizeof(j_name), "CUDA%u", j);
  2471. fprintf(f, " <route src=\"%s\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"%s-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, j_name, i_name, j_name);
  2472. }
  2473. }
  2474. #endif
  2475. } /* defined(STARPU_HAVE_HWLOC) && defined(STARPU_HAVE_CUDA_MEMCPY_PEER) */
  2476. fprintf(f, "\n");
  2477. #endif /* STARPU_USE_CUDA */
  2478. /*
  2479. * OpenCL routes
  2480. */
  2481. #ifdef STARPU_USE_OPENCL
  2482. for (i = 0; i < nopencl; i++)
  2483. {
  2484. char i_name[16];
  2485. snprintf(i_name, sizeof(i_name), "OpenCL%u", i);
  2486. fprintf(f, " <route src=\"RAM\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"RAM-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2487. fprintf(f, " <route src=\"%s\" dst=\"RAM\" symmetrical=\"NO\"><link_ctn id=\"%s-RAM\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2488. }
  2489. #endif
  2490. fprintf(f,
  2491. " </AS>\n"
  2492. " </platform>\n"
  2493. );
  2494. if (locked)
  2495. _starpu_fwrunlock(f);
  2496. fclose(f);
  2497. }
  2498. static void generate_bus_platform_file(void)
  2499. {
  2500. if (!was_benchmarked)
  2501. benchmark_all_gpu_devices();
  2502. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2503. /* Slaves don't write files */
  2504. if (!_starpu_mpi_common_is_src_node())
  2505. return;
  2506. #endif
  2507. write_bus_platform_file_content(3);
  2508. write_bus_platform_file_content(4);
  2509. }
  2510. static void check_bus_platform_file(void)
  2511. {
  2512. int res;
  2513. char path[PATH_LENGTH];
  2514. _starpu_simgrid_get_platform_path(4, path, sizeof(path));
  2515. res = access(path, F_OK);
  2516. if (!res)
  2517. {
  2518. _starpu_simgrid_get_platform_path(3, path, sizeof(path));
  2519. res = access(path, F_OK);
  2520. }
  2521. if (res)
  2522. {
  2523. /* File does not exist yet */
  2524. generate_bus_platform_file();
  2525. }
  2526. }
  2527. /*
  2528. * Generic
  2529. */
  2530. static void _starpu_bus_force_sampling(void)
  2531. {
  2532. _STARPU_DEBUG("Force bus sampling ...\n");
  2533. _starpu_create_sampling_directory_if_needed();
  2534. generate_bus_affinity_file();
  2535. generate_bus_latency_file();
  2536. generate_bus_bandwidth_file();
  2537. generate_bus_config_file();
  2538. generate_bus_platform_file();
  2539. }
  2540. #endif /* !SIMGRID */
  2541. void _starpu_load_bus_performance_files(void)
  2542. {
  2543. _starpu_create_sampling_directory_if_needed();
  2544. struct _starpu_machine_config * config = _starpu_get_machine_config();
  2545. nnumas = _starpu_topology_get_nnumanodes(config);
  2546. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_SIMGRID)
  2547. ncuda = _starpu_get_cuda_device_count();
  2548. #endif
  2549. #if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SIMGRID)
  2550. nopencl = _starpu_opencl_get_device_count();
  2551. #endif
  2552. #if defined(STARPU_USE_MPI_MASTER_SLAVE) || defined(STARPU_USE_SIMGRID)
  2553. nmpi_ms = _starpu_mpi_src_get_device_count();
  2554. #endif
  2555. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_SIMGRID)
  2556. nmic = _starpu_mic_src_get_device_count();
  2557. #endif
  2558. #ifndef STARPU_SIMGRID
  2559. check_bus_config_file();
  2560. #endif
  2561. #ifdef STARPU_USE_MPI_MASTER_SLAVE
2562. /* make sure the master has written the perf files */
  2563. _starpu_mpi_common_barrier();
  2564. #endif
  2565. #ifndef STARPU_SIMGRID
  2566. load_bus_affinity_file();
  2567. #endif
  2568. load_bus_latency_file();
  2569. load_bus_bandwidth_file();
  2570. #ifndef STARPU_SIMGRID
  2571. check_bus_platform_file();
  2572. #endif
  2573. }
  2574. /* (in MB/s) */
  2575. double starpu_transfer_bandwidth(unsigned src_node, unsigned dst_node)
  2576. {
  2577. return bandwidth_matrix[src_node][dst_node];
  2578. }
  2579. /* (in µs) */
  2580. double starpu_transfer_latency(unsigned src_node, unsigned dst_node)
  2581. {
  2582. return latency_matrix[src_node][dst_node];
  2583. }
  2584. /* (in µs) */
  2585. double starpu_transfer_predict(unsigned src_node, unsigned dst_node, size_t size)
  2586. {
  2587. if (src_node == dst_node)
  2588. return 0;
  2589. double bandwidth = bandwidth_matrix[src_node][dst_node];
  2590. double latency = latency_matrix[src_node][dst_node];
  2591. struct _starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
  2592. #if 0
  2593. int busid = starpu_bus_get_id(src_node, dst_node);
  2594. int direct = starpu_bus_get_direct(busid);
  2595. #endif
  2596. float ngpus = topology->ncudagpus+topology->nopenclgpus;
  2597. #ifdef STARPU_DEVEL
  2598. #warning FIXME: ngpus should not be used e.g. for slow disk transfers...
  2599. #endif
  2600. #if 0
  2601. /* Ideally we should take into account that some GPUs are directly
  2602. * connected through a PCI switch, which has less contention that the
  2603. * Host bridge, but doing that seems to *decrease* performance... */
  2604. if (direct)
  2605. {
  2606. float neighbours = starpu_bus_get_ngpus(busid);
  2607. /* Count transfers of these GPUs, and count transfers between
  2608. * other GPUs and these GPUs */
  2609. ngpus = neighbours + (ngpus - neighbours) * neighbours / ngpus;
  2610. }
  2611. #endif
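/* Worked example of the estimate below (hypothetical numbers): latency = 10 us,
 * bandwidth = 5000 MB/s (i.e. 5000 bytes/us), size = 10 MB and ngpus = 2 give
 * 10 + (10000000 / 5000) * 2 * 2 = 8010 us. */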
  2612. return latency + (size/bandwidth)*2*ngpus;
  2613. }
2614. /* compute and save the bandwidth and latency to/from a disk node */
  2615. /* bandwidth in MB/s - latency in µs */
  2616. void _starpu_save_bandwidth_and_latency_disk(double bandwidth_write, double bandwidth_read, double latency_write, double latency_read, unsigned node, const char *name)
  2617. {
  2618. unsigned int i, j;
  2619. double slowness_disk_between_main_ram, slowness_main_ram_between_node;
  2620. int print_stats = starpu_get_env_number_default("STARPU_BUS_STATS", 0);
  2621. if (print_stats)
  2622. {
  2623. fprintf(stderr, "\n#---------------------\n");
  2624. fprintf(stderr, "Data transfer speed for %s (node %u):\n", name, node);
  2625. }
2626. /* save bandwidth */
  2627. for(i = 0; i < STARPU_MAXNODES; ++i)
  2628. {
  2629. for(j = 0; j < STARPU_MAXNODES; ++j)
  2630. {
  2631. if (i == j && j == node) /* source == destination == node */
  2632. {
  2633. bandwidth_matrix[i][j] = 0;
  2634. }
  2635. else if (i == node) /* source == disk */
  2636. {
2637. /* convert to slowness */
  2638. if(bandwidth_read != 0)
  2639. slowness_disk_between_main_ram = 1/bandwidth_read;
  2640. else
  2641. slowness_disk_between_main_ram = 0;
  2642. if(bandwidth_matrix[STARPU_MAIN_RAM][j] != 0)
  2643. slowness_main_ram_between_node = 1/bandwidth_matrix[STARPU_MAIN_RAM][j];
  2644. else
  2645. slowness_main_ram_between_node = 0;
  2646. bandwidth_matrix[i][j] = 1/(slowness_disk_between_main_ram+slowness_main_ram_between_node);
  2647. if (!isnan(bandwidth_matrix[i][j]) && print_stats)
  2648. fprintf(stderr,"%u -> %u: %.0f MB/s\n", i, j, bandwidth_matrix[i][j]);
  2649. }
  2650. else if (j == node) /* destination == disk */
  2651. {
2652. /* convert to slowness */
  2653. if(bandwidth_write != 0)
  2654. slowness_disk_between_main_ram = 1/bandwidth_write;
  2655. else
  2656. slowness_disk_between_main_ram = 0;
  2657. if(bandwidth_matrix[i][STARPU_MAIN_RAM] != 0)
  2658. slowness_main_ram_between_node = 1/bandwidth_matrix[i][STARPU_MAIN_RAM];
  2659. else
  2660. slowness_main_ram_between_node = 0;
  2661. bandwidth_matrix[i][j] = 1/(slowness_disk_between_main_ram+slowness_main_ram_between_node);
  2662. if (!isnan(bandwidth_matrix[i][j]) && print_stats)
  2663. fprintf(stderr,"%u -> %u: %.0f MB/s\n", i, j, bandwidth_matrix[i][j]);
  2664. }
  2665. else if (j > node || i > node) /* not affected by the node */
  2666. {
  2667. bandwidth_matrix[i][j] = NAN;
  2668. }
  2669. }
  2670. }
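/* For illustration (hypothetical numbers): a disk read at 500 MB/s combined with
 * a RAM->node link at 5000 MB/s yields 1/(1/500 + 1/5000) ~= 455 MB/s for the
 * disk->node entry computed above. */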
  2671. /* save latency */
  2672. for(i = 0; i < STARPU_MAXNODES; ++i)
  2673. {
  2674. for(j = 0; j < STARPU_MAXNODES; ++j)
  2675. {
  2676. if (i == j && j == node) /* source == destination == node */
  2677. {
  2678. latency_matrix[i][j] = 0;
  2679. }
  2680. else if (i == node) /* source == disk */
  2681. {
  2682. latency_matrix[i][j] = (latency_write+latency_matrix[STARPU_MAIN_RAM][j]);
  2683. if (!isnan(latency_matrix[i][j]) && print_stats)
  2684. fprintf(stderr,"%u -> %u: %.0f us\n", i, j, latency_matrix[i][j]);
  2685. }
  2686. else if (j == node) /* destination == disk */
  2687. {
  2688. latency_matrix[i][j] = (latency_read+latency_matrix[i][STARPU_MAIN_RAM]);
  2689. if (!isnan(latency_matrix[i][j]) && print_stats)
  2690. fprintf(stderr,"%u -> %u: %.0f us\n", i, j, latency_matrix[i][j]);
  2691. }
  2692. else if (j > node || i > node) /* not affected by the node */
  2693. {
  2694. latency_matrix[i][j] = NAN;
  2695. }
  2696. }
  2697. }
  2698. if (print_stats)
  2699. fprintf(stderr, "\n#---------------------\n");
  2700. }