  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. * Copyright (C) 2013 Corentin Salingue
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #ifdef STARPU_USE_CUDA
  18. #ifndef _GNU_SOURCE
  19. #define _GNU_SOURCE 1
  20. #endif
  21. #include <sched.h>
  22. #endif
  23. #include <stdlib.h>
  24. #include <math.h>
  25. #include <starpu.h>
  26. #include <starpu_cuda.h>
  27. #include <starpu_opencl.h>
  28. #include <common/config.h>
  29. #ifdef HAVE_UNISTD_H
  30. #include <unistd.h>
  31. #endif
  32. #include <core/workers.h>
  33. #include <core/perfmodel/perfmodel.h>
  34. #include <core/simgrid.h>
  35. #include <core/topology.h>
  36. #include <common/utils.h>
  37. #include <drivers/mpi/driver_mpi_common.h>
  38. #include <datawizard/memory_nodes.h>
  39. #ifdef STARPU_USE_OPENCL
  40. #include <starpu_opencl.h>
  41. #endif
  42. #ifdef STARPU_HAVE_WINDOWS
  43. #include <windows.h>
  44. #endif
  45. #ifdef STARPU_HAVE_HWLOC
  46. #include <hwloc.h>
  47. #ifdef STARPU_HAVE_LIBNVIDIA_ML
  48. #include <hwloc/nvml.h>
  49. #endif
  50. #ifndef HWLOC_API_VERSION
  51. #define HWLOC_OBJ_PU HWLOC_OBJ_PROC
  52. #endif
  53. #if HWLOC_API_VERSION < 0x00010b00
  54. #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
  55. #endif
  56. #endif
  57. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  58. #include <hwloc/cuda.h>
  59. #endif
  60. #define SIZE (32*1024*1024*sizeof(char))
  61. #define NITER 32
  62. #define PATH_LENGTH 256
  63. #ifndef STARPU_SIMGRID
  64. static void _starpu_bus_force_sampling(void);
  65. #endif
  66. /* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
  67. struct dev_timing
  68. {
  69. int numa_id;
  70. double timing_htod;
  71. double latency_htod;
  72. double timing_dtoh;
  73. double latency_dtoh;
  74. };
  75. /* TODO: measure latency */
  76. static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES]; /* MB/s */
  77. static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES]; /* µs */
  78. static unsigned was_benchmarked = 0;
  79. #ifndef STARPU_SIMGRID
  80. static unsigned ncpus = 0;
  81. #endif
  82. static unsigned nnumas = 0;
  83. static unsigned ncuda = 0;
  84. static unsigned nopencl = 0;
  85. #ifndef STARPU_SIMGRID
  86. static unsigned nmpi_ms = 0;
  87. /* Benchmarking the performance of the bus */
  88. static double numa_latency[STARPU_MAXNUMANODES][STARPU_MAXNUMANODES];
  89. static double numa_timing[STARPU_MAXNUMANODES][STARPU_MAXNUMANODES];
  90. static uint64_t cuda_size[STARPU_MAXCUDADEVS];
  91. #endif
  92. #ifdef STARPU_USE_CUDA
  93. /* preference order of NUMA nodes (logical indexes) */
  94. static unsigned cuda_affinity_matrix[STARPU_MAXCUDADEVS][STARPU_MAXNUMANODES];
  95. #ifndef STARPU_SIMGRID
  96. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  97. static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
  98. static double cudadev_latency_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
  99. #endif
  100. #endif
  101. static struct dev_timing cudadev_timing_per_numa[STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES];
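/* cudadev_timing_per_numa is indexed as dev*STARPU_MAXNUMANODES + numa (filled by
 * measure_bandwidth_between_numa_nodes_and_dev()); cudadev_direct[src][dst] records
 * whether GPU-GPU peer (GPU-Direct) access could be enabled between the two devices. */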
  102. static char cudadev_direct[STARPU_MAXNODES][STARPU_MAXNODES];
  103. #endif
  104. #ifndef STARPU_SIMGRID
  105. static uint64_t opencl_size[STARPU_MAXOPENCLDEVS];
  106. #endif
  107. #ifdef STARPU_USE_OPENCL
  108. /* preference order of NUMA nodes (logical indexes) */
  109. static unsigned opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][STARPU_MAXNUMANODES];
  110. static struct dev_timing opencldev_timing_per_numa[STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES];
  111. #endif
  112. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  113. static double mpi_time_device_to_device[STARPU_MAXMPIDEVS][STARPU_MAXMPIDEVS] = {{0.0}};
  114. static double mpi_latency_device_to_device[STARPU_MAXMPIDEVS][STARPU_MAXMPIDEVS] = {{0.0}};
  115. #endif
  116. #ifdef STARPU_HAVE_HWLOC
  117. static hwloc_topology_t hwtopology;
  118. hwloc_topology_t _starpu_perfmodel_get_hwtopology()
  119. {
  120. return hwtopology;
  121. }
  122. #endif
  123. #if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && !defined(STARPU_SIMGRID)
  124. #ifdef STARPU_USE_CUDA
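/* Measurement protocol (summary): pin the calling thread on a CPU of the target NUMA
 * node, allocate one buffer on the CUDA device and one host buffer bound to that NUMA
 * node, then time NITER full-size copies in each direction for the bandwidth figures
 * (stored as µs per byte) and NITER 1-byte copies for the latency figures (µs). */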
  125. static void measure_bandwidth_between_host_and_dev_on_numa_with_cuda(int dev, int numa, int cpu, struct dev_timing *dev_timing_per_cpu)
  126. {
  127. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  128. size_t size = SIZE;
  129. /* Initialize CUDA context on the device */
  130. /* We do not need to enable OpenGL interoperability at this point,
  131. * since we cleanly shut down CUDA before returning. */
  132. cudaSetDevice(dev);
  133. /* hack to keep third-party libs from rebinding the thread */
  134. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  135. /* hack to force the initialization */
  136. cudaFree(0);
  137. /* hack to keep third-party libs from rebinding the thread */
  138. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  139. /* Get the maximum size which can be allocated on the device */
  140. struct cudaDeviceProp prop;
  141. cudaError_t cures;
  142. cures = cudaGetDeviceProperties(&prop, dev);
  143. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  144. cuda_size[dev] = prop.totalGlobalMem;
  145. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  146. /* Allocate a buffer on the device */
  147. unsigned char *d_buffer;
  148. cures = cudaMalloc((void **)&d_buffer, size);
  149. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  150. /* hack to keep third-party libs from rebinding the thread */
  151. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  152. /* Allocate a buffer on the host */
  153. unsigned char *h_buffer;
  154. #if defined(STARPU_HAVE_HWLOC)
  155. struct _starpu_machine_config *config = _starpu_get_machine_config();
  156. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  157. if (nnuma_nodes > 1)
  158. {
  159. /* NUMA mode activated */
  160. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa);
  161. #if HWLOC_API_VERSION >= 0x00020000
  162. h_buffer = hwloc_alloc_membind(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
  163. #else
  164. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, 0);
  165. #endif
  166. }
  167. else
  168. #endif
  169. {
  170. /* we use STARPU_MAIN_RAM */
  171. _STARPU_MALLOC(h_buffer, size);
  172. cures = cudaHostRegister((void *)h_buffer, size, 0);
  173. }
  174. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  175. /* hack to keep third-party libs from rebinding the thread */
  176. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  177. /* Fill them */
  178. memset(h_buffer, 0, size);
  179. cudaMemset(d_buffer, 0, size);
  180. cudaDeviceSynchronize();
  181. /* hack to keep third-party libs from rebinding the thread */
  182. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  183. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa;
  184. unsigned iter;
  185. double timing;
  186. double start;
  187. double end;
  188. /* Measure upload bandwidth */
  189. start = starpu_timing_now();
  190. for (iter = 0; iter < NITER; iter++)
  191. {
  192. cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
  193. cudaDeviceSynchronize();
  194. }
  195. end = starpu_timing_now();
  196. timing = end - start;
  197. dev_timing_per_cpu[timing_numa_index].timing_htod = timing/NITER/size;
  198. /* Measure download bandwidth */
  199. start = starpu_timing_now();
  200. for (iter = 0; iter < NITER; iter++)
  201. {
  202. cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
  203. cudaDeviceSynchronize();
  204. }
  205. end = starpu_timing_now();
  206. timing = end - start;
  207. dev_timing_per_cpu[timing_numa_index].timing_dtoh = timing/NITER/size;
  208. /* Measure upload latency */
  209. start = starpu_timing_now();
  210. for (iter = 0; iter < NITER; iter++)
  211. {
  212. cudaMemcpy(d_buffer, h_buffer, 1, cudaMemcpyHostToDevice);
  213. cudaDeviceSynchronize();
  214. }
  215. end = starpu_timing_now();
  216. timing = end - start;
  217. dev_timing_per_cpu[timing_numa_index].latency_htod = timing/NITER;
  218. /* Measure download latency */
  219. start = starpu_timing_now();
  220. for (iter = 0; iter < NITER; iter++)
  221. {
  222. cudaMemcpy(h_buffer, d_buffer, 1, cudaMemcpyDeviceToHost);
  223. cudaDeviceSynchronize();
  224. }
  225. end = starpu_timing_now();
  226. timing = end - start;
  227. dev_timing_per_cpu[timing_numa_index].latency_dtoh = timing/NITER;
  228. /* Free buffers */
  229. cudaHostUnregister(h_buffer);
  230. #if defined(STARPU_HAVE_HWLOC)
  231. if (nnuma_nodes > 1)
  232. {
  233. /* NUMA mode activated */
  234. hwloc_free(hwtopology, h_buffer, size);
  235. }
  236. else
  237. #endif
  238. {
  239. free(h_buffer);
  240. }
  241. cudaFree(d_buffer);
  242. #if CUDART_VERSION >= 4000
  243. cudaDeviceReset();
  244. #else
  245. cudaThreadExit();
  246. #endif
  247. }
  248. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  249. static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
  250. {
  251. size_t size = SIZE;
  252. int can;
  253. /* Get the maximum size which can be allocated on the device */
  254. struct cudaDeviceProp prop;
  255. cudaError_t cures;
  256. cures = cudaGetDeviceProperties(&prop, src);
  257. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  258. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  259. cures = cudaGetDeviceProperties(&prop, dst);
  260. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  261. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  262. /* Initialize CUDA context on the source */
  263. /* We do not need to enable OpenGL interoperability at this point,
  264. * since we cleanly shut down CUDA before returning. */
  265. cudaSetDevice(src);
  266. if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
  267. {
  268. cures = cudaDeviceCanAccessPeer(&can, src, dst);
  269. (void) cudaGetLastError();
  270. if (!cures && can)
  271. {
  272. cures = cudaDeviceEnablePeerAccess(dst, 0);
  273. (void) cudaGetLastError();
  274. if (!cures)
  275. {
  276. _STARPU_DISP("GPU-Direct %d -> %d\n", dst, src);
  277. cudadev_direct[src][dst] = 1;
  278. }
  279. }
  280. }
  281. /* Allocate a buffer on the device */
  282. unsigned char *s_buffer;
  283. cures = cudaMalloc((void **)&s_buffer, size);
  284. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  285. cudaMemset(s_buffer, 0, size);
  286. cudaDeviceSynchronize();
  287. /* Initialize CUDA context on the destination */
  288. /* We do not need to enable OpenGL interoperability at this point,
  289. * since we cleanly shut down CUDA before returning. */
  290. cudaSetDevice(dst);
  291. if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
  292. {
  293. cures = cudaDeviceCanAccessPeer(&can, dst, src);
  294. (void) cudaGetLastError();
  295. if (!cures && can)
  296. {
  297. cures = cudaDeviceEnablePeerAccess(src, 0);
  298. (void) cudaGetLastError();
  299. if (!cures)
  300. {
  301. _STARPU_DISP("GPU-Direct %d -> %d\n", src, dst);
  302. cudadev_direct[dst][src] = 1;
  303. }
  304. }
  305. }
  306. /* Allocate a buffer on the device */
  307. unsigned char *d_buffer;
  308. cures = cudaMalloc((void **)&d_buffer, size);
  309. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  310. cudaMemset(d_buffer, 0, size);
  311. cudaDeviceSynchronize();
  312. unsigned iter;
  313. double timing;
  314. double start;
  315. double end;
  316. /* Measure upload bandwidth */
  317. start = starpu_timing_now();
  318. for (iter = 0; iter < NITER; iter++)
  319. {
  320. cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
  321. cudaDeviceSynchronize();
  322. }
  323. end = starpu_timing_now();
  324. timing = end - start;
  325. cudadev_timing_dtod[src][dst] = timing/NITER/size;
  326. /* Measure upload latency */
  327. start = starpu_timing_now();
  328. for (iter = 0; iter < NITER; iter++)
  329. {
  330. cudaMemcpyPeer(d_buffer, dst, s_buffer, src, 1);
  331. cudaDeviceSynchronize();
  332. }
  333. end = starpu_timing_now();
  334. timing = end - start;
  335. cudadev_latency_dtod[src][dst] = timing/NITER;
  336. /* Free buffers */
  337. cudaFree(d_buffer);
  338. cudaSetDevice(src);
  339. cudaFree(s_buffer);
  340. #if CUDART_VERSION >= 4000
  341. cudaDeviceReset();
  342. #else
  343. cudaThreadExit();
  344. #endif
  345. }
  346. #endif
  347. #endif
  348. #ifdef STARPU_USE_OPENCL
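/* Same measurement protocol as the CUDA variant above, using
 * clEnqueueWriteBuffer()/clEnqueueReadBuffer() against a device-side cl_mem buffer. */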
  349. static void measure_bandwidth_between_host_and_dev_on_numa_with_opencl(int dev, int numa, int cpu, struct dev_timing *dev_timing_per_cpu)
  350. {
  351. cl_context context;
  352. cl_command_queue queue;
  353. cl_int err=0;
  354. size_t size = SIZE;
  355. int not_initialized;
  356. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  357. /* Is the context already initialised ? */
  358. starpu_opencl_get_context(dev, &context);
  359. not_initialized = (context == NULL);
  360. if (not_initialized == 1)
  361. _starpu_opencl_init_context(dev);
  362. /* Get context and queue */
  363. starpu_opencl_get_context(dev, &context);
  364. starpu_opencl_get_queue(dev, &queue);
  365. /* Get the maximum size which can be allocated on the device */
  366. cl_device_id device;
  367. cl_ulong maxMemAllocSize, totalGlobalMem;
  368. starpu_opencl_get_device(dev, &device);
  369. err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
  370. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  371. if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;
  372. err = clGetDeviceInfo(device, CL_DEVICE_GLOBAL_MEM_SIZE , sizeof(totalGlobalMem), &totalGlobalMem, NULL);
  373. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  374. opencl_size[dev] = totalGlobalMem;
  375. if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
  376. {
  377. /* Let's not use too much RAM when running OpenCL on a CPU: it
  378. * would make the OS swap like crazy. */
  379. size /= 2;
  380. }
  381. /* hack to keep third-party libs from rebinding the thread */
  382. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  383. /* Allocate a buffer on the device */
  384. cl_mem d_buffer;
  385. d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
  386. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  387. /* hack to keep third-party libs from rebinding the thread */
  388. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  389. /* Allocate a buffer on the host */
  390. unsigned char *h_buffer;
  391. #if defined(STARPU_HAVE_HWLOC)
  392. struct _starpu_machine_config *config = _starpu_get_machine_config();
  393. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  394. if (nnuma_nodes > 1)
  395. {
  396. /* NUMA mode activated */
  397. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa);
  398. #if HWLOC_API_VERSION >= 0x00020000
  399. h_buffer = hwloc_alloc_membind(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
  400. #else
  401. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, 0);
  402. #endif
  403. }
  404. else
  405. #endif
  406. {
  407. /* we use STARPU_MAIN_RAM */
  408. _STARPU_MALLOC(h_buffer, size);
  409. }
  410. /* hack to keep third-party libs from rebinding the thread */
  411. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  412. /* Fill them */
  413. memset(h_buffer, 0, size);
  414. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  415. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  416. clFinish(queue);
  417. /* hack to keep third-party libs from rebinding the thread */
  418. _starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
  419. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa;
  420. unsigned iter;
  421. double timing;
  422. double start;
  423. double end;
  424. /* Measure upload bandwidth */
  425. start = starpu_timing_now();
  426. for (iter = 0; iter < NITER; iter++)
  427. {
  428. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  429. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  430. clFinish(queue);
  431. }
  432. end = starpu_timing_now();
  433. timing = end - start;
  434. dev_timing_per_cpu[timing_numa_index].timing_htod = timing/NITER/size;
  435. /* Measure download bandwidth */
  436. start = starpu_timing_now();
  437. for (iter = 0; iter < NITER; iter++)
  438. {
  439. err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  440. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  441. clFinish(queue);
  442. }
  443. end = starpu_timing_now();
  444. timing = end - start;
  445. dev_timing_per_cpu[timing_numa_index].timing_dtoh = timing/NITER/size;
  446. /* Measure upload latency */
  447. start = starpu_timing_now();
  448. for (iter = 0; iter < NITER; iter++)
  449. {
  450. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, 1, h_buffer, 0, NULL, NULL);
  451. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  452. clFinish(queue);
  453. }
  454. end = starpu_timing_now();
  455. timing = end - start;
  456. dev_timing_per_cpu[timing_numa_index].latency_htod = timing/NITER;
  457. /* Measure download latency */
  458. start = starpu_timing_now();
  459. for (iter = 0; iter < NITER; iter++)
  460. {
  461. err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, 1, h_buffer, 0, NULL, NULL);
  462. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  463. clFinish(queue);
  464. }
  465. end = starpu_timing_now();
  466. timing = end - start;
  467. dev_timing_per_cpu[timing_numa_index].latency_dtoh = timing/NITER;
  468. /* Free buffers */
  469. err = clReleaseMemObject(d_buffer);
  470. if (STARPU_UNLIKELY(err != CL_SUCCESS))
  471. STARPU_OPENCL_REPORT_ERROR(err);
  472. #if defined(STARPU_HAVE_HWLOC)
  473. if (nnuma_nodes > 1)
  474. {
  475. /* NUMA mode activated */
  476. hwloc_free(hwtopology, h_buffer, size);
  477. }
  478. else
  479. #endif
  480. {
  481. free(h_buffer);
  482. }
  483. /* Deinitialize the OpenCL context on the device */
  484. if (not_initialized == 1)
  485. _starpu_opencl_deinit_context(dev);
  486. }
  487. #endif
  488. /* NB: we want to sort the bandwidths in DECREASING order */
  489. static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
  490. {
  491. const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
  492. const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;
  493. double left_dtoh = left->timing_dtoh;
  494. double left_htod = left->timing_htod;
  495. double right_dtoh = right->timing_dtoh;
  496. double right_htod = right->timing_htod;
  497. double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
  498. double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;
  499. /* sort by increasing timing, i.e. decreasing bandwidth */
  500. return (timing_sum2_left > timing_sum2_right);
  501. }
  502. #ifdef STARPU_HAVE_HWLOC
  503. static int find_cpu_from_numa_node(hwloc_obj_t obj)
  504. {
  505. STARPU_ASSERT(obj);
  506. hwloc_obj_t current = obj;
  507. while (current->type != HWLOC_OBJ_PU)
  508. {
  509. current = current->first_child;
  510. /* If we don't find a "PU" obj before reaching the leaves, perhaps we are
  511. * just not allowed to use it. */
  512. if (!current)
  513. return -1;
  514. }
  515. STARPU_ASSERT(current->type == HWLOC_OBJ_PU);
  516. return current->logical_index;
  517. }
  518. #endif
  519. static void measure_bandwidth_between_numa_nodes_and_dev(int dev, struct dev_timing *dev_timing_per_numanode, char *type)
  520. {
  521. /* We measure the bandwidth between each GPU and each NUMA node */
  522. struct _starpu_machine_config * config = _starpu_get_machine_config();
  523. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  524. unsigned numa_id;
  525. for (numa_id = 0; numa_id < nnuma_nodes; numa_id++)
  526. {
  527. /* Store results by starpu id */
  528. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa_id;
  529. /* Store STARPU_memnode for later */
  530. dev_timing_per_numanode[timing_numa_index].numa_id = numa_id;
  531. /* Choose one CPU connected to this NUMA node */
  532. int cpu_id = 0;
  533. #ifdef STARPU_HAVE_HWLOC
  534. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_id);
  535. if (obj)
  536. {
  537. #if HWLOC_API_VERSION >= 0x00020000
  538. /* Since hwloc 2.0, NUMAnode objects do not contain CPUs; they are contained in a group which contains the CPUs. */
  539. obj = obj->parent;
  540. #endif
  541. cpu_id = find_cpu_from_numa_node(obj);
  542. }
  543. else
  544. /* No such NUMA node, probably hwloc 1.x with no NUMA
  545. * node, just take one CPU from the whole system */
  546. cpu_id = find_cpu_from_numa_node(hwloc_get_root_obj(hwtopology));
  547. #endif
  548. if (cpu_id < 0)
  549. continue;
  550. #ifdef STARPU_USE_CUDA
  551. if (strncmp(type, "CUDA", 4) == 0)
  552. measure_bandwidth_between_host_and_dev_on_numa_with_cuda(dev, numa_id, cpu_id, dev_timing_per_numanode);
  553. #endif
  554. #ifdef STARPU_USE_OPENCL
  555. if (strncmp(type, "OpenCL", 6) == 0)
  556. measure_bandwidth_between_host_and_dev_on_numa_with_opencl(dev, numa_id, cpu_id, dev_timing_per_numanode);
  557. #endif
  558. }
  559. }
  560. static void measure_bandwidth_between_host_and_dev(int dev, struct dev_timing *dev_timing_per_numa, char *type)
  561. {
  562. measure_bandwidth_between_numa_nodes_and_dev(dev, dev_timing_per_numa, type);
  563. #ifdef STARPU_VERBOSE
  564. struct _starpu_machine_config * config = _starpu_get_machine_config();
  565. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  566. unsigned numa_id;
  567. for (numa_id = 0; numa_id < nnuma_nodes; numa_id++)
  568. {
  569. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa_id;
  570. double bandwidth_dtoh = dev_timing_per_numa[timing_numa_index].timing_dtoh;
  571. double bandwidth_htod = dev_timing_per_numa[timing_numa_index].timing_htod;
  572. double bandwidth_sum2 = bandwidth_dtoh*bandwidth_dtoh + bandwidth_htod*bandwidth_htod;
  573. _STARPU_DISP("(%10s) BANDWIDTH GPU %d NUMA %u - htod %f - dtoh %f - %f\n", type, dev, numa_id, bandwidth_htod, bandwidth_dtoh, sqrt(bandwidth_sum2));
  574. }
  575. #endif
  576. }
  577. #endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
  578. #if !defined(STARPU_SIMGRID)
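/* Measure RAM-to-RAM bandwidth and latency between two NUMA nodes by memcpy()ing
 * between hwloc-bound buffers; with a single NUMA node (or without hwloc) no real
 * calibration is possible and a fixed estimate is used instead. */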
  579. static void measure_bandwidth_latency_between_numa(int numa_src, int numa_dst)
  580. {
  581. #if defined(STARPU_HAVE_HWLOC)
  582. if (nnumas > 1)
  583. {
  584. /* NUMA mode activated */
  585. double start, end, timing;
  586. unsigned iter;
  587. unsigned char *h_buffer;
  588. hwloc_obj_t obj_src = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_src);
  589. #if HWLOC_API_VERSION >= 0x00020000
  590. h_buffer = hwloc_alloc_membind(hwtopology, SIZE, obj_src->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
  591. #else
  592. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, SIZE, obj_src->nodeset, HWLOC_MEMBIND_BIND, 0);
  593. #endif
  594. unsigned char *d_buffer;
  595. hwloc_obj_t obj_dst = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_dst);
  596. #if HWLOC_API_VERSION >= 0x00020000
  597. d_buffer = hwloc_alloc_membind(hwtopology, SIZE, obj_dst->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
  598. #else
  599. d_buffer = hwloc_alloc_membind_nodeset(hwtopology, SIZE, obj_dst->nodeset, HWLOC_MEMBIND_BIND, 0);
  600. #endif
  601. memset(h_buffer, 0, SIZE);
  602. start = starpu_timing_now();
  603. for (iter = 0; iter < NITER; iter++)
  604. {
  605. memcpy(d_buffer, h_buffer, SIZE);
  606. }
  607. end = starpu_timing_now();
  608. timing = end - start;
  609. numa_timing[numa_src][numa_dst] = timing/NITER/SIZE;
  610. start = starpu_timing_now();
  611. for (iter = 0; iter < NITER; iter++)
  612. {
  613. memcpy(d_buffer, h_buffer, 1);
  614. }
  615. end = starpu_timing_now();
  616. timing = end - start;
  617. numa_latency[numa_src][numa_dst] = timing/NITER;
  618. hwloc_free(hwtopology, h_buffer, SIZE);
  619. hwloc_free(hwtopology, d_buffer, SIZE);
  620. }
  621. else
  622. #endif
  623. {
  624. /* Cannot make a real calibration */
  625. numa_timing[numa_src][numa_dst] = 0.01;
  626. numa_latency[numa_src][numa_dst] = 0;
  627. }
  628. }
  629. #endif
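/* Run the full set of bus benchmarks: NUMA<->NUMA, host<->CUDA, CUDA<->CUDA (when
 * peer copies are supported), host<->OpenCL and MPI master-slave links. The calling
 * thread's CPU binding is saved beforehand and restored afterwards. */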
  630. static void benchmark_all_gpu_devices(void)
  631. {
  632. #ifdef STARPU_SIMGRID
  633. _STARPU_DISP("Can not measure bus in simgrid mode, please run starpu_calibrate_bus in non-simgrid mode to make sure the bus performance model was calibrated\n");
  634. STARPU_ABORT();
  635. #else /* !SIMGRID */
  636. unsigned i, j;
  637. _STARPU_DEBUG("Benchmarking the speed of the bus\n");
  638. #ifdef STARPU_HAVE_HWLOC
  639. hwloc_topology_init(&hwtopology);
  640. _starpu_topology_filter(hwtopology);
  641. hwloc_topology_load(hwtopology);
  642. #endif
  643. #ifdef STARPU_HAVE_HWLOC
  644. hwloc_bitmap_t former_cpuset = hwloc_bitmap_alloc();
  645. hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
  646. #elif __linux__
  647. /* Save the current cpu binding */
  648. cpu_set_t former_process_affinity;
  649. int ret;
  650. ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
  651. if (ret)
  652. {
  653. perror("sched_getaffinity");
  654. STARPU_ABORT();
  655. }
  656. #else
  657. #warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
  658. #endif
  659. struct _starpu_machine_config *config = _starpu_get_machine_config();
  660. ncpus = _starpu_topology_get_nhwcpu(config);
  661. nnumas = _starpu_topology_get_nnumanodes(config);
  662. for (i = 0; i < nnumas; i++)
  663. for (j = 0; j < nnumas; j++)
  664. if (i != j)
  665. {
  666. _STARPU_DISP("NUMA %d -> %d...\n", i, j);
  667. measure_bandwidth_latency_between_numa(i, j);
  668. }
  669. #ifdef STARPU_USE_CUDA
  670. ncuda = _starpu_get_cuda_device_count();
  671. for (i = 0; i < ncuda; i++)
  672. {
  673. _STARPU_DISP("CUDA %u...\n", i);
  674. /* measure bandwidth between Host and Device i */
  675. measure_bandwidth_between_host_and_dev(i, cudadev_timing_per_numa, "CUDA");
  676. }
  677. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  678. for (i = 0; i < ncuda; i++)
  679. {
  680. for (j = 0; j < ncuda; j++)
  681. if (i != j)
  682. {
  683. _STARPU_DISP("CUDA %u -> %u...\n", i, j);
  684. /* measure bandwidth between Device i and Device j */
  685. measure_bandwidth_between_dev_and_dev_cuda(i, j);
  686. }
  687. }
  688. #endif
  689. #endif
  690. #ifdef STARPU_USE_OPENCL
  691. nopencl = _starpu_opencl_get_device_count();
  692. for (i = 0; i < nopencl; i++)
  693. {
  694. _STARPU_DISP("OpenCL %u...\n", i);
  695. /* measure bandwidth between Host and Device i */
  696. measure_bandwidth_between_host_and_dev(i, opencldev_timing_per_numa, "OpenCL");
  697. }
  698. #endif
  699. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  700. _starpu_mpi_common_measure_bandwidth_latency(mpi_time_device_to_device, mpi_latency_device_to_device);
  701. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  702. #ifdef STARPU_HAVE_HWLOC
  703. hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
  704. hwloc_bitmap_free(former_cpuset);
  705. #elif __linux__
  706. /* Restore the former affinity */
  707. ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
  708. if (ret)
  709. {
  710. perror("sched_setaffinity");
  711. STARPU_ABORT();
  712. }
  713. #endif
  714. #ifdef STARPU_HAVE_HWLOC
  715. hwloc_topology_destroy(hwtopology);
  716. #endif
  717. _STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");
  718. was_benchmarked = 1;
  719. #endif /* !SIMGRID */
  720. }
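/* Sampling files are stored per host: the path is <dir><hostname>.<type>, where
 * <dir> comes from _starpu_get_perf_model_dir_bus() and <type> is "affinity",
 * "latency" or "bandwidth". */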
  721. static void get_bus_path(const char *type, char *path, size_t maxlen)
  722. {
  723. char hostname[65];
  724. _starpu_gethostname(hostname, sizeof(hostname));
  725. snprintf(path, maxlen, "%s%s.%s", _starpu_get_perf_model_dir_bus(), hostname, type);
  726. }
  727. /*
  728. * Affinity
  729. */
  730. static void get_affinity_path(char *path, size_t maxlen)
  731. {
  732. get_bus_path("affinity", path, maxlen);
  733. }
  734. #ifndef STARPU_SIMGRID
  735. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  736. static void load_bus_affinity_file_content(void)
  737. {
  738. FILE *f;
  739. int locked;
  740. char path[PATH_LENGTH];
  741. get_affinity_path(path, sizeof(path));
  742. _STARPU_DEBUG("loading affinities from %s\n", path);
  743. f = fopen(path, "r");
  744. STARPU_ASSERT_MSG(f, "Error when reading from file '%s'", path);
  745. locked = _starpu_frdlock(f) == 0;
  746. unsigned gpu;
  747. #ifdef STARPU_USE_CUDA
  748. ncuda = _starpu_get_cuda_device_count();
  749. for (gpu = 0; gpu < ncuda; gpu++)
  750. {
  751. int ret;
  752. unsigned dummy;
  753. _starpu_drop_comments(f);
  754. ret = fscanf(f, "%u\t", &dummy);
  755. STARPU_ASSERT_MSG(ret == 1, "Error when reading from file '%s'", path);
  756. STARPU_ASSERT(dummy == gpu);
  757. unsigned numa;
  758. for (numa = 0; numa < nnumas; numa++)
  759. {
  760. ret = fscanf(f, "%u\t", &cuda_affinity_matrix[gpu][numa]);
  761. STARPU_ASSERT_MSG(ret == 1, "Error when reading from file '%s'", path);
  762. }
  763. ret = fscanf(f, "\n");
  764. STARPU_ASSERT_MSG(ret == 0, "Error when reading from file '%s'", path);
  765. }
  766. #endif /* STARPU_USE_CUDA */
  767. #ifdef STARPU_USE_OPENCL
  768. nopencl = _starpu_opencl_get_device_count();
  769. for (gpu = 0; gpu < nopencl; gpu++)
  770. {
  771. int ret;
  772. unsigned dummy;
  773. _starpu_drop_comments(f);
  774. ret = fscanf(f, "%u\t", &dummy);
  775. STARPU_ASSERT_MSG(ret == 1, "Error when reading from file '%s'", path);
  776. STARPU_ASSERT(dummy == gpu);
  777. unsigned numa;
  778. for (numa = 0; numa < nnumas; numa++)
  779. {
  780. ret = fscanf(f, "%u\t", &opencl_affinity_matrix[gpu][numa]);
  781. STARPU_ASSERT_MSG(ret == 1, "Error when reading from file '%s'", path);
  782. }
  783. ret = fscanf(f, "\n");
  784. STARPU_ASSERT_MSG(ret == 0, "Error when reading from file '%s'", path);
  785. }
  786. #endif /* STARPU_USE_OPENCL */
  787. if (locked)
  788. _starpu_frdunlock(f);
  789. fclose(f);
  790. }
  791. #endif /* STARPU_USE_CUDA || STARPU_USE_OPENCL */
  792. #ifndef STARPU_SIMGRID
  793. static void write_bus_affinity_file_content(void)
  794. {
  795. STARPU_ASSERT(was_benchmarked);
  796. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  797. FILE *f;
  798. char path[PATH_LENGTH];
  799. int locked;
  800. get_affinity_path(path, sizeof(path));
  801. _STARPU_DEBUG("writing affinities to %s\n", path);
  802. f = fopen(path, "w+");
  803. if (!f)
  804. {
  805. perror("fopen write_buf_affinity_file_content");
  806. _STARPU_DISP("path '%s'\n", path);
  807. fflush(stderr);
  808. STARPU_ABORT();
  809. }
  810. locked = _starpu_frdlock(f) == 0;
  811. unsigned numa;
  812. unsigned gpu;
  813. fprintf(f, "# GPU\t");
  814. for (numa = 0; numa < nnumas; numa++)
  815. fprintf(f, "NUMA%u\t", numa);
  816. fprintf(f, "\n");
  817. #ifdef STARPU_USE_CUDA
  818. {
  819. /* Use another array to sort the bandwidths */
  820. struct dev_timing cudadev_timing_per_numa_sorted[STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES];
  821. memcpy(cudadev_timing_per_numa_sorted, cudadev_timing_per_numa, STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES*sizeof(struct dev_timing));
  822. for (gpu = 0; gpu < ncuda; gpu++)
  823. {
  824. fprintf(f, "%u\t", gpu);
  825. qsort(&(cudadev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES]), nnumas, sizeof(struct dev_timing), compar_dev_timing);
  826. for (numa = 0; numa < nnumas; numa++)
  827. {
  828. fprintf(f, "%d\t", cudadev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES+numa].numa_id);
  829. }
  830. fprintf(f, "\n");
  831. }
  832. }
  833. #endif
  834. #ifdef STARPU_USE_OPENCL
  835. {
  836. /* Use another array to sort the bandwidths */
  837. struct dev_timing opencldev_timing_per_numa_sorted[STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES];
  838. memcpy(opencldev_timing_per_numa_sorted, opencldev_timing_per_numa, STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES*sizeof(struct dev_timing));
  839. for (gpu = 0; gpu < nopencl; gpu++)
  840. {
  841. fprintf(f, "%u\t", gpu);
  842. qsort(&(opencldev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES]), nnumas, sizeof(struct dev_timing), compar_dev_timing);
  843. for (numa = 0; numa < nnumas; numa++)
  844. {
  845. fprintf(f, "%d\t", opencldev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES+numa].numa_id);
  846. }
  847. fprintf(f, "\n");
  848. }
  849. }
  850. #endif
  851. if (locked)
  852. _starpu_frdunlock(f);
  853. fclose(f);
  854. #endif
  855. }
  856. #endif /* STARPU_SIMGRID */
  857. static void generate_bus_affinity_file(void)
  858. {
  859. if (!was_benchmarked)
  860. benchmark_all_gpu_devices();
  861. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  862. /* Slaves don't write files */
  863. if (!_starpu_mpi_common_is_src_node())
  864. return;
  865. #endif
  866. write_bus_affinity_file_content();
  867. }
  868. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  869. static int check_bus_affinity_file(void)
  870. {
  871. int ret = 1;
  872. FILE *f;
  873. int locked;
  874. unsigned dummy;
  875. char path[PATH_LENGTH];
  876. get_affinity_path(path, sizeof(path));
  877. _STARPU_DEBUG("loading affinities from %s\n", path);
  878. f = fopen(path, "r");
  879. STARPU_ASSERT_MSG(f, "Error when reading from file '%s'", path);
  880. locked = _starpu_frdlock(f) == 0;
  881. ret = fscanf(f, "# GPU\t");
  882. STARPU_ASSERT_MSG(ret == 0, "Error when reading from file '%s'", path);
  883. ret = fscanf(f, "NUMA%u\t", &dummy);
  884. if (locked)
  885. _starpu_frdunlock(f);
  886. fclose(f);
  887. return ret == 1;
  888. }
  889. #endif
  890. static void load_bus_affinity_file(void)
  891. {
  892. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  893. int exist, check = 1;
  894. char path[PATH_LENGTH];
  895. get_affinity_path(path, sizeof(path));
  896. /* access() returns 0 if the file exists */
  897. exist = access(path, F_OK);
  898. if (exist == 0)
  899. /* check_bus_affinity_file() returns 0 if the file is outdated */
  900. check = check_bus_affinity_file();
  901. if (check == 0)
  902. _STARPU_DISP("Affinity File is too old for this version of StarPU ! Rebuilding it...\n");
  903. if (check == 0 || exist != 0)
  904. {
  905. /* File does not exist yet */
  906. generate_bus_affinity_file();
  907. }
  908. load_bus_affinity_file_content();
  909. #endif
  910. }
  911. #ifdef STARPU_USE_CUDA
  912. unsigned *_starpu_get_cuda_affinity_vector(unsigned gpuid)
  913. {
  914. return cuda_affinity_matrix[gpuid];
  915. }
  916. #endif /* STARPU_USE_CUDA */
  917. #ifdef STARPU_USE_OPENCL
  918. unsigned *_starpu_get_opencl_affinity_vector(unsigned gpuid)
  919. {
  920. return opencl_affinity_matrix[gpuid];
  921. }
  922. #endif /* STARPU_USE_OPENCL */
  923. void starpu_bus_print_affinity(FILE *f)
  924. {
  925. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  926. unsigned numa;
  927. unsigned gpu;
  928. #endif
  929. fprintf(f, "# GPU\tNUMA in preference order (logical index)\n");
  930. #ifdef STARPU_USE_CUDA
  931. fprintf(f, "# CUDA\n");
  932. for(gpu = 0 ; gpu<ncuda ; gpu++)
  933. {
  934. fprintf(f, "%u\t", gpu);
  935. for (numa = 0; numa < nnumas; numa++)
  936. {
  937. fprintf(f, "%u\t", cuda_affinity_matrix[gpu][numa]);
  938. }
  939. fprintf(f, "\n");
  940. }
  941. #endif
  942. #ifdef STARPU_USE_OPENCL
  943. fprintf(f, "# OpenCL\n");
  944. for(gpu = 0 ; gpu<nopencl ; gpu++)
  945. {
  946. fprintf(f, "%u\t", gpu);
  947. for (numa = 0; numa < nnumas; numa++)
  948. {
  949. fprintf(f, "%u\t", opencl_affinity_matrix[gpu][numa]);
  950. }
  951. fprintf(f, "\n");
  952. }
  953. #endif
  954. }
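/* Usage sketch (in an application, once starpu_init() has succeeded):
 *   starpu_bus_print_affinity(stderr);
 * prints, for each GPU, the NUMA nodes in preference order. */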
  955. #endif /* STARPU_SIMGRID */
  956. /*
  957. * Latency
  958. */
  959. static void get_latency_path(char *path, size_t maxlen)
  960. {
  961. get_bus_path("latency", path, maxlen);
  962. }
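/* On-disk latency file format: a comment header line, then one line per source node
 * holding STARPU_MAXNODES tab-separated values in µs; NAN marks node slots that do
 * not exist on this machine (see write_bus_latency_file_content() below). */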
  963. static int load_bus_latency_file_content(void)
  964. {
  965. int n;
  966. unsigned src, dst;
  967. FILE *f;
  968. double latency;
  969. int locked;
  970. char path[PATH_LENGTH];
  971. get_latency_path(path, sizeof(path));
  972. _STARPU_DEBUG("loading latencies from %s\n", path);
  973. f = fopen(path, "r");
  974. if (!f)
  975. {
  976. perror("fopen load_bus_latency_file_content");
  977. _STARPU_DISP("path '%s'\n", path);
  978. fflush(stderr);
  979. STARPU_ABORT();
  980. }
  981. locked = _starpu_frdlock(f) == 0;
  982. for (src = 0; src < STARPU_MAXNODES; src++)
  983. {
  984. _starpu_drop_comments(f);
  985. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  986. {
  987. n = _starpu_read_double(f, "%le", &latency);
  988. if (n != 1)
  989. {
  990. _STARPU_DISP("Error while reading latency file <%s>. Expected a number. Did you change the maximum number of GPUs at ./configure time?\n", path);
  991. fclose(f);
  992. return 0;
  993. }
  994. n = getc(f);
  995. if (n == '\n')
  996. break;
  997. if (n != '\t')
  998. {
  999. _STARPU_DISP("bogus character '%c' (%d) in latency file %s\n", n, n, path);
  1000. fclose(f);
  1001. return 0;
  1002. }
  1003. latency_matrix[src][dst] = latency;
  1004. /* Look out for \t\n */
  1005. n = getc(f);
  1006. if (n == '\n')
  1007. break;
  1008. ungetc(n, f);
  1009. n = '\t';
  1010. }
  1011. /* No more values, take NAN */
  1012. for ( ; dst < STARPU_MAXNODES; dst++)
  1013. latency_matrix[src][dst] = NAN;
  1014. while (n == '\t')
  1015. {
  1016. /* Look out for \t\n */
  1017. n = getc(f);
  1018. if (n == '\n')
  1019. break;
  1020. ungetc(n, f);
  1021. n = _starpu_read_double(f, "%le", &latency);
  1022. if (n && !isnan(latency))
  1023. {
  1024. _STARPU_DISP("Too many nodes in latency file %s for this configuration (%d). Did you change the maximum number of GPUs at ./configure time?\n", path, STARPU_MAXNODES);
  1025. fclose(f);
  1026. return 0;
  1027. }
  1028. n = getc(f);
  1029. }
  1030. if (n != '\n')
  1031. {
  1032. _STARPU_DISP("Bogus character '%c' (%d) in latency file %s\n", n, n, path);
  1033. fclose(f);
  1034. return 0;
  1035. }
  1036. /* Look out for EOF */
  1037. n = getc(f);
  1038. if (n == EOF)
  1039. break;
  1040. ungetc(n, f);
  1041. }
  1042. if (locked)
  1043. _starpu_frdunlock(f);
  1044. fclose(f);
  1045. /* No more values, take NAN */
  1046. for ( ; src < STARPU_MAXNODES; src++)
  1047. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1048. latency_matrix[src][dst] = NAN;
  1049. return 1;
  1050. }
  1051. #if !defined(STARPU_SIMGRID) && (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL))
  1052. static double search_bus_best_latency(int src, char * type, int htod)
  1053. {
  1054. /* Search the best latency for this node */
  1055. double best = 0.0;
  1056. double actual = 0.0;
  1057. unsigned check = 0;
  1058. unsigned numa;
  1059. for (numa = 0; numa < nnumas; numa++)
  1060. {
  1061. #ifdef STARPU_USE_CUDA
  1062. if (strncmp(type, "CUDA", 4) == 0)
  1063. {
  1064. if (htod)
  1065. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_htod;
  1066. else
  1067. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_dtoh;
  1068. }
  1069. #endif
  1070. #ifdef STARPU_USE_OPENCL
  1071. if (strncmp(type, "OpenCL", 6) == 0)
  1072. {
  1073. if (htod)
  1074. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_htod;
  1075. else
  1076. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_dtoh;
  1077. }
  1078. #endif
  1079. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1080. if (!check || actual < best)
  1081. {
  1082. best = actual;
  1083. check = 1;
  1084. }
  1085. #endif
  1086. }
  1087. return best;
  1088. }
  1089. #endif
  1090. #if !defined(STARPU_SIMGRID)
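/* Nodes are laid out in the performance matrices in a fixed order: NUMA nodes first,
 * then CUDA devices, then OpenCL devices, then MPI master-slave devices. b_low/b_up
 * delimit the index interval of the category currently being handled. */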
  1091. static void write_bus_latency_file_content(void)
  1092. {
  1093. unsigned src, dst, maxnode;
  1094. /* Boundaries to check if src or dst are inside the interval */
  1095. unsigned b_low, b_up;
  1096. FILE *f;
  1097. int locked;
  1098. STARPU_ASSERT(was_benchmarked);
  1099. char path[PATH_LENGTH];
  1100. get_latency_path(path, sizeof(path));
  1101. _STARPU_DEBUG("writing latencies to %s\n", path);
  1102. f = fopen(path, "a+");
  1103. if (!f)
  1104. {
  1105. perror("fopen write_bus_latency_file_content");
  1106. _STARPU_DISP("path '%s'\n", path);
  1107. fflush(stderr);
  1108. STARPU_ABORT();
  1109. }
  1110. locked = _starpu_fwrlock(f) == 0;
  1111. fseek(f, 0, SEEK_SET);
  1112. _starpu_fftruncate(f, 0);
  1113. fprintf(f, "# ");
  1114. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1115. fprintf(f, "to %u\t\t", dst);
  1116. fprintf(f, "\n");
  1117. maxnode = nnumas;
  1118. #ifdef STARPU_USE_CUDA
  1119. maxnode += ncuda;
  1120. #endif
  1121. #ifdef STARPU_USE_OPENCL
  1122. maxnode += nopencl;
  1123. #endif
  1124. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1125. maxnode += nmpi_ms;
  1126. #endif
  1127. for (src = 0; src < STARPU_MAXNODES; src++)
  1128. {
  1129. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1130. {
  1131. /* µs */
  1132. double latency = 0.0;
  1133. if ((src >= maxnode) || (dst >= maxnode))
  1134. {
  1135. /* convention */
  1136. latency = NAN;
  1137. }
  1138. else if (src == dst)
  1139. {
  1140. latency = 0.0;
  1141. }
  1142. else
  1143. {
  1144. b_low = b_up = 0;
  1145. /* ---- Begin NUMA ---- */
  1146. b_up += nnumas;
  1147. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1148. latency += numa_latency[src-b_low][dst-b_low];
  1149. /* copy interval to check numa index later */
  1150. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1151. unsigned numa_low = b_low;
  1152. unsigned numa_up = b_up;
  1153. #endif
  1154. b_low += nnumas;
  1155. /* ---- End NUMA ---- */
  1156. #ifdef STARPU_USE_CUDA
  1157. b_up += ncuda;
  1158. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  1159. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1160. latency += cudadev_latency_dtod[src-b_low][dst-b_low];
  1161. else
  1162. #endif
  1163. {
  1164. /* Check if it's CUDA <-> NUMA link */
  1165. if (src >=b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1166. latency += cudadev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].latency_dtoh;
  1167. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1168. latency += cudadev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].latency_htod;
  1169. /* To other devices, take the best latency */
  1170. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1171. latency += search_bus_best_latency(src-b_low, "CUDA", 0);
  1172. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1173. latency += search_bus_best_latency(dst-b_low, "CUDA", 1);
  1174. }
  1175. b_low += ncuda;
  1176. #endif
  1177. #ifdef STARPU_USE_OPENCL
  1178. b_up += nopencl;
  1179. /* Check if it's OpenCL <-> NUMA link */
  1180. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1181. latency += opencldev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].latency_dtoh;
  1182. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1183. latency += opencldev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].latency_htod;
  1184. /* To other devices, take the best latency */
  1185. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1186. latency += search_bus_best_latency(src-b_low, "OpenCL", 0);
1187. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1188. latency += search_bus_best_latency(dst-b_low, "OpenCL", 1);
  1189. b_low += nopencl;
  1190. #endif
  1191. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1192. b_up += nmpi_ms;
1193. /* Shift the MPI src and dst device indices past the master node,
1194. * since only the slaves are enumerated as devices here */
  1195. int mpi_master = _starpu_mpi_common_get_src_node();
  1196. int mpi_src = src - b_low;
  1197. mpi_src = (mpi_master <= mpi_src) ? mpi_src+1 : mpi_src;
  1198. int mpi_dst = dst - b_low;
  1199. mpi_dst = (mpi_master <= mpi_dst) ? mpi_dst+1 : mpi_dst;
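/* The MPI timing tables include the master, so slave device indices at or
 * beyond the master's slot are shifted by one to address the right entry. */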
  1200. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1201. latency += mpi_latency_device_to_device[mpi_src][mpi_dst];
  1202. else
  1203. {
  1204. if (src >= b_low && src < b_up)
  1205. latency += mpi_latency_device_to_device[mpi_src][mpi_master];
  1206. if (dst >= b_low && dst < b_up)
  1207. latency += mpi_latency_device_to_device[mpi_master][mpi_dst];
  1208. }
  1209. b_low += nmpi_ms;
  1210. #endif
  1211. }
  1212. if (dst > 0)
  1213. fputc('\t', f);
  1214. _starpu_write_double(f, "%e", latency);
  1215. }
  1216. fprintf(f, "\n");
  1217. }
  1218. if (locked)
  1219. _starpu_fwrunlock(f);
  1220. fclose(f);
  1221. }
  1222. #endif
  1223. static void generate_bus_latency_file(void)
  1224. {
  1225. if (!was_benchmarked)
  1226. benchmark_all_gpu_devices();
  1227. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1228. /* Slaves don't write files */
  1229. if (!_starpu_mpi_common_is_src_node())
  1230. return;
  1231. #endif
  1232. #ifndef STARPU_SIMGRID
  1233. write_bus_latency_file_content();
  1234. #endif
  1235. }
  1236. static void load_bus_latency_file(void)
  1237. {
  1238. int res;
  1239. char path[PATH_LENGTH];
  1240. get_latency_path(path, sizeof(path));
  1241. res = access(path, F_OK);
  1242. if (res || !load_bus_latency_file_content())
  1243. {
  1244. /* File does not exist yet or is bogus */
  1245. generate_bus_latency_file();
  1246. res = load_bus_latency_file_content();
  1247. STARPU_ASSERT(res);
  1248. }
  1249. }
  1250. /*
  1251. * Bandwidth
  1252. */
  1253. static void get_bandwidth_path(char *path, size_t maxlen)
  1254. {
  1255. get_bus_path("bandwidth", path, maxlen);
  1256. }
  1257. static int load_bus_bandwidth_file_content(void)
  1258. {
  1259. int n;
  1260. unsigned src, dst;
  1261. FILE *f;
  1262. double bandwidth;
  1263. int locked;
  1264. char path[PATH_LENGTH];
  1265. get_bandwidth_path(path, sizeof(path));
  1266. _STARPU_DEBUG("loading bandwidth from %s\n", path);
  1267. f = fopen(path, "r");
  1268. if (!f)
  1269. {
  1270. perror("fopen load_bus_bandwidth_file_content");
  1271. _STARPU_DISP("path '%s'\n", path);
  1272. fflush(stderr);
  1273. STARPU_ABORT();
  1274. }
  1275. locked = _starpu_frdlock(f) == 0;
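/* The file layout mirrors write_bus_bandwidth_file_content(): a header
 * comment line, then one row of tab-separated values per source node, with
 * NAN for nodes absent from the benchmarked configuration. */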
  1276. for (src = 0; src < STARPU_MAXNODES; src++)
  1277. {
  1278. _starpu_drop_comments(f);
  1279. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1280. {
  1281. n = _starpu_read_double(f, "%le", &bandwidth);
  1282. if (n != 1)
  1283. {
  1284. _STARPU_DISP("Error while reading bandwidth file <%s>. Expected a number\n", path);
  1285. fclose(f);
  1286. return 0;
  1287. }
  1288. n = getc(f);
  1289. if (n == '\n')
  1290. break;
  1291. if (n != '\t')
  1292. {
  1293. _STARPU_DISP("bogus character '%c' (%d) in bandwidth file %s\n", n, n, path);
  1294. fclose(f);
  1295. return 0;
  1296. }
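/* STARPU_LIMIT_BANDWIDTH optionally overrides every measured link bandwidth;
 * it is only honoured in SimGrid builds with HAVE_SG_LINK_BANDWIDTH_SET and
 * is reported as ignored otherwise. */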
  1297. int limit_bandwidth = starpu_get_env_number("STARPU_LIMIT_BANDWIDTH");
  1298. if (limit_bandwidth >= 0)
  1299. {
  1300. #ifndef STARPU_SIMGRID
  1301. _STARPU_DISP("Warning: STARPU_LIMIT_BANDWIDTH set to %d but simgrid not enabled, thus ignored\n", limit_bandwidth);
  1302. #else
  1303. #ifdef HAVE_SG_LINK_BANDWIDTH_SET
  1304. bandwidth = limit_bandwidth;
  1305. #else
  1306. _STARPU_DISP("Warning: STARPU_LIMIT_BANDWIDTH set to %d but this requires simgrid 3.26\n", limit_bandwidth);
  1307. #endif
  1308. #endif
  1309. }
  1310. bandwidth_matrix[src][dst] = bandwidth;
  1311. /* Look out for \t\n */
  1312. n = getc(f);
  1313. if (n == '\n')
  1314. break;
  1315. ungetc(n, f);
  1316. n = '\t';
  1317. }
  1318. /* No more values, take NAN */
  1319. for ( ; dst < STARPU_MAXNODES; dst++)
  1320. bandwidth_matrix[src][dst] = NAN;
  1321. while (n == '\t')
  1322. {
  1323. /* Look out for \t\n */
  1324. n = getc(f);
  1325. if (n == '\n')
  1326. break;
  1327. ungetc(n, f);
  1328. n = _starpu_read_double(f, "%le", &bandwidth);
  1329. if (n && !isnan(bandwidth))
  1330. {
  1331. _STARPU_DISP("Too many nodes in bandwidth file %s for this configuration (%d)\n", path, STARPU_MAXNODES);
  1332. fclose(f);
  1333. return 0;
  1334. }
  1335. n = getc(f);
  1336. }
  1337. if (n != '\n')
  1338. {
  1339. _STARPU_DISP("Bogus character '%c' (%d) in bandwidth file %s\n", n, n, path);
  1340. fclose(f);
  1341. return 0;
  1342. }
  1343. /* Look out for EOF */
  1344. n = getc(f);
  1345. if (n == EOF)
  1346. break;
  1347. ungetc(n, f);
  1348. }
  1349. if (locked)
  1350. _starpu_frdunlock(f);
  1351. fclose(f);
  1352. /* No more values, take NAN */
  1353. for ( ; src < STARPU_MAXNODES; src++)
  1354. for (dst = 0; dst < STARPU_MAXNODES; dst++)
1355. bandwidth_matrix[src][dst] = NAN;
  1356. return 1;
  1357. }
  1358. #if !defined(STARPU_SIMGRID) && (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL))
  1359. static double search_bus_best_timing(int src, char * type, int htod)
  1360. {
1361. /* Search the best (lowest) transfer time between this device and any NUMA node */
  1362. double best = 0.0;
  1363. double actual = 0.0;
  1364. unsigned check = 0;
  1365. unsigned numa;
  1366. for (numa = 0; numa < nnumas; numa++)
  1367. {
  1368. #ifdef STARPU_USE_CUDA
  1369. if (strncmp(type, "CUDA", 4) == 0)
  1370. {
  1371. if (htod)
  1372. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_htod;
  1373. else
  1374. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_dtoh;
  1375. }
  1376. #endif
  1377. #ifdef STARPU_USE_OPENCL
  1378. if (strncmp(type, "OpenCL", 6) == 0)
  1379. {
  1380. if (htod)
  1381. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_htod;
  1382. else
  1383. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_dtoh;
  1384. }
  1385. #endif
  1386. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1387. if (!check || actual < best)
  1388. {
  1389. best = actual;
  1390. check = 1;
  1391. }
  1392. #endif
  1393. }
  1394. return best;
  1395. }
  1396. #endif
  1397. #if !defined(STARPU_SIMGRID)
  1398. static void write_bus_bandwidth_file_content(void)
  1399. {
  1400. unsigned src, dst, maxnode;
  1401. unsigned b_low, b_up;
  1402. FILE *f;
  1403. int locked;
  1404. STARPU_ASSERT(was_benchmarked);
  1405. char path[PATH_LENGTH];
  1406. get_bandwidth_path(path, sizeof(path));
  1407. _STARPU_DEBUG("writing bandwidth to %s\n", path);
  1408. f = fopen(path, "a+");
  1409. STARPU_ASSERT_MSG(f, "Error when opening file (writing) '%s'", path);
  1410. locked = _starpu_fwrlock(f) == 0;
  1411. fseek(f, 0, SEEK_SET);
  1412. _starpu_fftruncate(f, 0);
  1413. fprintf(f, "# ");
  1414. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1415. fprintf(f, "to %u\t\t", dst);
  1416. fprintf(f, "\n");
  1417. maxnode = nnumas;
  1418. #ifdef STARPU_USE_CUDA
  1419. maxnode += ncuda;
  1420. #endif
  1421. #ifdef STARPU_USE_OPENCL
  1422. maxnode += nopencl;
  1423. #endif
  1424. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1425. maxnode += nmpi_ms;
  1426. #endif
  1427. for (src = 0; src < STARPU_MAXNODES; src++)
  1428. {
  1429. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1430. {
  1431. double bandwidth;
  1432. if ((src >= maxnode) || (dst >= maxnode))
  1433. {
  1434. bandwidth = NAN;
  1435. }
  1436. else if (src != dst)
  1437. {
  1438. double slowness = 0.0;
1439. /* Links are traversed in sequence, so their per-byte times (slownesses) add up; the resulting bandwidth is the reciprocal of the total slowness */
  1440. b_low = b_up = 0;
  1441. /* Begin NUMA */
  1442. b_up += nnumas;
  1443. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1444. slowness += numa_timing[src-b_low][dst-b_low];
  1445. /* copy interval to check numa index later */
  1446. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1447. unsigned numa_low = b_low;
  1448. unsigned numa_up = b_up;
  1449. #endif
  1450. b_low += nnumas;
  1451. /* End NUMA */
  1452. #ifdef STARPU_USE_CUDA
  1453. b_up += ncuda;
  1454. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  1455. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
1456. /* Direct GPU-GPU transfer */
  1457. slowness += cudadev_timing_dtod[src-b_low][dst-b_low];
  1458. else
  1459. #endif
  1460. {
  1461. /* Check if it's CUDA <-> NUMA link */
  1462. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1463. slowness += cudadev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].timing_dtoh;
1464. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1465. slowness += cudadev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].timing_htod;
  1466. /* To other devices, take the best slowness */
  1467. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1468. slowness += search_bus_best_timing(src-b_low, "CUDA", 0);
1469. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1470. slowness += search_bus_best_timing(dst-b_low, "CUDA", 1);
  1471. }
  1472. b_low += ncuda;
  1473. #endif
  1474. #ifdef STARPU_USE_OPENCL
  1475. b_up += nopencl;
  1476. /* Check if it's OpenCL <-> NUMA link */
  1477. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1478. slowness += opencldev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].timing_dtoh;
1479. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1480. slowness += opencldev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].timing_htod;
  1481. /* To other devices, take the best slowness */
  1482. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1483. slowness += search_bus_best_timing(src-b_low, "OpenCL", 0);
1484. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1485. slowness += search_bus_best_timing(dst-b_low, "OpenCL", 1);
  1486. b_low += nopencl;
  1487. #endif
  1488. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1489. b_up += nmpi_ms;
1490. /* Shift the MPI src and dst device indices past the master node,
1491. * since only the slaves are enumerated as devices here */
  1492. int mpi_master = _starpu_mpi_common_get_src_node();
  1493. int mpi_src = src - b_low;
  1494. mpi_src = (mpi_master <= mpi_src) ? mpi_src+1 : mpi_src;
  1495. int mpi_dst = dst - b_low;
  1496. mpi_dst = (mpi_master <= mpi_dst) ? mpi_dst+1 : mpi_dst;
  1497. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1498. slowness += mpi_time_device_to_device[mpi_src][mpi_dst];
  1499. else
  1500. {
  1501. if (src >= b_low && src < b_up)
  1502. slowness += mpi_time_device_to_device[mpi_src][mpi_master];
  1503. if (dst >= b_low && dst < b_up)
  1504. slowness += mpi_time_device_to_device[mpi_master][mpi_dst];
  1505. }
  1506. b_low += nmpi_ms;
  1507. #endif
  1508. bandwidth = 1.0/slowness;
  1509. }
  1510. else
  1511. {
  1512. /* convention */
  1513. bandwidth = 0.0;
  1514. }
  1515. if (dst)
  1516. fputc('\t', f);
  1517. _starpu_write_double(f, "%e", bandwidth);
  1518. }
  1519. fprintf(f, "\n");
  1520. }
  1521. if (locked)
  1522. _starpu_fwrunlock(f);
  1523. fclose(f);
  1524. }
1525. #endif /* !STARPU_SIMGRID */
  1526. void starpu_bus_print_filenames(FILE *output)
  1527. {
  1528. char bandwidth_path[PATH_LENGTH];
  1529. char affinity_path[PATH_LENGTH];
  1530. char latency_path[PATH_LENGTH];
  1531. get_bandwidth_path(bandwidth_path, sizeof(bandwidth_path));
  1532. get_affinity_path(affinity_path, sizeof(affinity_path));
  1533. get_latency_path(latency_path, sizeof(latency_path));
  1534. fprintf(output, "bandwidth: <%s>\n", bandwidth_path);
  1535. fprintf(output, " affinity: <%s>\n", affinity_path);
  1536. fprintf(output, " latency: <%s>\n", latency_path);
  1537. }
  1538. void starpu_bus_print_bandwidth(FILE *f)
  1539. {
  1540. unsigned src, dst, maxnode = starpu_memory_nodes_get_count();
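/* Two tables are printed: first the bandwidth matrix (MB/s), then the
 * latency matrix (µs), one row and one column per memory node, followed,
 * when GPUs are present, by their per-NUMA preference table. */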
  1541. fprintf(f, "from/to\t");
  1542. for (dst = 0; dst < maxnode; dst++)
  1543. {
  1544. char name[128];
  1545. starpu_memory_node_get_name(dst, name, sizeof(name));
  1546. fprintf(f, "%s\t", name);
  1547. }
  1548. fprintf(f, "\n");
  1549. for (src = 0; src < maxnode; src++)
  1550. {
  1551. char name[128];
  1552. starpu_memory_node_get_name(src, name, sizeof(name));
  1553. fprintf(f, "%s\t", name);
  1554. for (dst = 0; dst < maxnode; dst++)
  1555. fprintf(f, "%.0f\t", bandwidth_matrix[src][dst]);
  1556. fprintf(f, "\n");
  1557. }
  1558. fprintf(f, "\n");
  1559. for (src = 0; src < maxnode; src++)
  1560. {
  1561. char name[128];
  1562. starpu_memory_node_get_name(src, name, sizeof(name));
  1563. fprintf(f, "%s\t", name);
  1564. for (dst = 0; dst < maxnode; dst++)
  1565. fprintf(f, "%.0f\t", latency_matrix[src][dst]);
  1566. fprintf(f, "\n");
  1567. }
  1568. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1569. if (ncuda != 0 || nopencl != 0)
  1570. fprintf(f, "\nGPU\tNUMA in preference order (logical index), host-to-device, device-to-host\n");
  1571. for (src = 0; src < ncuda + nopencl; src++)
  1572. {
  1573. struct dev_timing *timing;
  1574. struct _starpu_machine_config * config = _starpu_get_machine_config();
  1575. unsigned config_nnumas = _starpu_topology_get_nnumanodes(config);
  1576. unsigned numa;
  1577. #ifdef STARPU_USE_CUDA
  1578. if (src < ncuda)
  1579. {
  1580. fprintf(f, "CUDA_%u\t", src);
  1581. for (numa = 0; numa < config_nnumas; numa++)
  1582. {
  1583. timing = &cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa];
  1584. if (timing->timing_htod)
  1585. fprintf(f, "%2d %.0f %.0f\t", timing->numa_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  1586. else
  1587. fprintf(f, "%2u\t", cuda_affinity_matrix[src][numa]);
  1588. }
  1589. }
  1590. #ifdef STARPU_USE_OPENCL
  1591. else
  1592. #endif
  1593. #endif
  1594. #ifdef STARPU_USE_OPENCL
  1595. {
  1596. fprintf(f, "OpenCL%u\t", src-ncuda);
  1597. for (numa = 0; numa < config_nnumas; numa++)
  1598. {
  1599. timing = &opencldev_timing_per_numa[(src-ncuda)*STARPU_MAXNUMANODES+numa];
  1600. if (timing->timing_htod)
  1601. fprintf(f, "%2d %.0f %.0f\t", timing->numa_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  1602. else
  1603. fprintf(f, "%2u\t", opencl_affinity_matrix[src][numa]);
  1604. }
  1605. }
  1606. #endif
  1607. fprintf(f, "\n");
  1608. }
  1609. #endif
  1610. }
  1611. static void generate_bus_bandwidth_file(void)
  1612. {
  1613. if (!was_benchmarked)
  1614. benchmark_all_gpu_devices();
  1615. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1616. /* Slaves don't write files */
  1617. if (!_starpu_mpi_common_is_src_node())
  1618. return;
  1619. #endif
  1620. #ifndef STARPU_SIMGRID
  1621. write_bus_bandwidth_file_content();
  1622. #endif
  1623. }
  1624. static void load_bus_bandwidth_file(void)
  1625. {
  1626. int res;
  1627. char path[PATH_LENGTH];
  1628. get_bandwidth_path(path, sizeof(path));
  1629. res = access(path, F_OK);
  1630. if (res || !load_bus_bandwidth_file_content())
  1631. {
  1632. /* File does not exist yet or is bogus */
  1633. generate_bus_bandwidth_file();
  1634. res = load_bus_bandwidth_file_content();
  1635. STARPU_ASSERT(res);
  1636. }
  1637. }
  1638. #ifndef STARPU_SIMGRID
  1639. /*
  1640. * Config
  1641. */
  1642. static void get_config_path(char *path, size_t maxlen)
  1643. {
  1644. get_bus_path("config", path, maxlen);
  1645. }
  1646. #if defined(STARPU_USE_MPI_MASTER_SLAVE)
  1647. /* check if the master or one slave has to recalibrate */
  1648. static int mpi_check_recalibrate(int my_recalibrate)
  1649. {
  1650. int nb_mpi = _starpu_mpi_src_get_device_count() + 1;
  1651. int mpi_recalibrate[nb_mpi];
  1652. int i;
  1653. MPI_Allgather(&my_recalibrate, 1, MPI_INT, mpi_recalibrate, 1, MPI_INT, MPI_COMM_WORLD);
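/* Every process now holds the flag of the master and of each slave;
 * if any of them asked for recalibration, all of them redo it. */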
  1654. for (i = 0; i < nb_mpi; i++)
  1655. {
  1656. if (mpi_recalibrate[i])
  1657. {
  1658. return 1;
  1659. }
  1660. }
  1661. return 0;
  1662. }
  1663. #endif
  1664. static void compare_value_and_recalibrate(char * msg, unsigned val_file, unsigned val_detected)
  1665. {
  1666. int recalibrate = 0;
  1667. if (val_file != val_detected)
  1668. recalibrate = 1;
  1669. #ifdef STARPU_USE_MPI_MASTER_SLAVE
1670. // Exchange the flag with all MPI processes: recalibrate everywhere if any process sees a value that does not match its config file
  1671. recalibrate = mpi_check_recalibrate(recalibrate);
  1672. #endif
  1673. if (recalibrate)
  1674. {
  1675. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1676. /* Only the master prints the message */
  1677. if (_starpu_mpi_common_is_src_node())
  1678. #endif
  1679. _STARPU_DISP("Current configuration does not match the bus performance model (%s: (stored) %d != (current) %d), recalibrating...\n", msg, val_file, val_detected);
  1680. _starpu_bus_force_sampling();
  1681. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1682. if (_starpu_mpi_common_is_src_node())
  1683. #endif
  1684. _STARPU_DISP("... done\n");
  1685. }
  1686. }
  1687. static void check_bus_config_file(void)
  1688. {
  1689. int res;
  1690. char path[PATH_LENGTH];
  1691. struct _starpu_machine_config *config = _starpu_get_machine_config();
  1692. int recalibrate = 0;
  1693. get_config_path(path, sizeof(path));
  1694. res = access(path, F_OK);
  1695. if (res || config->conf.bus_calibrate > 0)
  1696. recalibrate = 1;
  1697. #if defined(STARPU_USE_MPI_MASTER_SLAVE)
1698. // Exchange the flag with all MPI processes: recalibrate everywhere if any process is missing the config file
  1699. recalibrate = mpi_check_recalibrate(recalibrate);
  1700. #endif
  1701. if (recalibrate)
  1702. {
  1703. if (res)
  1704. _STARPU_DISP("No performance model for the bus, calibrating...\n");
  1705. _starpu_bus_force_sampling();
  1706. if (res)
  1707. _STARPU_DISP("... done\n");
  1708. }
  1709. else
  1710. {
  1711. FILE *f;
  1712. int ret;
  1713. unsigned read_cuda = -1, read_opencl = -1, read_mpi_ms = -1;
  1714. unsigned read_cpus = -1, read_numa = -1;
  1715. int locked;
  1716. // Loading configuration from file
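// The file is expected to match what write_bus_config_file_content() writes:
// one "<value> # <label>" line per entry, e.g. (values are illustrative):
//   # Current configuration
//   8 # Number of CPUs
//   2 # Number of NUMA nodes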
  1717. f = fopen(path, "r");
  1718. STARPU_ASSERT_MSG(f, "Error when reading from file '%s'", path);
  1719. locked = _starpu_frdlock(f) == 0;
  1720. _starpu_drop_comments(f);
  1721. ret = fscanf(f, "%u\t", &read_cpus);
  1722. STARPU_ASSERT_MSG(ret == 1, "Error when reading from file '%s'", path);
  1723. _starpu_drop_comments(f);
  1724. ret = fscanf(f, "%u\t", &read_numa);
  1725. STARPU_ASSERT_MSG(ret == 1, "Error when reading from file '%s'", path);
  1726. _starpu_drop_comments(f);
  1727. ret = fscanf(f, "%u\t", &read_cuda);
  1728. STARPU_ASSERT_MSG(ret == 1, "Error when reading from file '%s'", path);
  1729. _starpu_drop_comments(f);
  1730. ret = fscanf(f, "%u\t", &read_opencl);
  1731. STARPU_ASSERT_MSG(ret == 1, "Error when reading from file '%s'", path);
  1732. _starpu_drop_comments(f);
  1733. ret = fscanf(f, "%u\t", &read_mpi_ms);
  1734. if (ret == 0)
  1735. read_mpi_ms = 0;
  1736. _starpu_drop_comments(f);
  1737. if (locked)
  1738. _starpu_frdunlock(f);
  1739. fclose(f);
  1740. // Loading current configuration
  1741. ncpus = _starpu_topology_get_nhwcpu(config);
  1742. nnumas = _starpu_topology_get_nnumanodes(config);
  1743. #ifdef STARPU_USE_CUDA
  1744. ncuda = _starpu_get_cuda_device_count();
  1745. #endif
  1746. #ifdef STARPU_USE_OPENCL
  1747. nopencl = _starpu_opencl_get_device_count();
  1748. #endif
  1749. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1750. nmpi_ms = _starpu_mpi_src_get_device_count();
  1751. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  1752. // Checking if both configurations match
  1753. compare_value_and_recalibrate("CPUS", read_cpus, ncpus);
  1754. compare_value_and_recalibrate("NUMA", read_numa, nnumas);
  1755. compare_value_and_recalibrate("CUDA", read_cuda, ncuda);
  1756. compare_value_and_recalibrate("OpenCL", read_opencl, nopencl);
  1757. compare_value_and_recalibrate("MPI Master-Slave", read_mpi_ms, nmpi_ms);
  1758. }
  1759. }
  1760. static void write_bus_config_file_content(void)
  1761. {
  1762. FILE *f;
  1763. char path[PATH_LENGTH];
  1764. int locked;
  1765. STARPU_ASSERT(was_benchmarked);
  1766. get_config_path(path, sizeof(path));
  1767. _STARPU_DEBUG("writing config to %s\n", path);
  1768. f = fopen(path, "a+");
  1769. STARPU_ASSERT_MSG(f, "Error when opening file (writing) '%s'", path);
  1770. locked = _starpu_fwrlock(f) == 0;
  1771. fseek(f, 0, SEEK_SET);
  1772. _starpu_fftruncate(f, 0);
  1773. fprintf(f, "# Current configuration\n");
  1774. fprintf(f, "%u # Number of CPUs\n", ncpus);
  1775. fprintf(f, "%u # Number of NUMA nodes\n", nnumas);
  1776. fprintf(f, "%u # Number of CUDA devices\n", ncuda);
  1777. fprintf(f, "%u # Number of OpenCL devices\n", nopencl);
  1778. fprintf(f, "%u # Number of MPI devices\n", nmpi_ms);
  1779. if (locked)
  1780. _starpu_fwrunlock(f);
  1781. fclose(f);
  1782. }
  1783. static void generate_bus_config_file(void)
  1784. {
  1785. if (!was_benchmarked)
  1786. benchmark_all_gpu_devices();
  1787. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1788. /* Slaves don't write files */
  1789. if (!_starpu_mpi_common_is_src_node())
  1790. return;
  1791. #endif
  1792. write_bus_config_file_content();
  1793. }
  1794. #endif /* !SIMGRID */
  1795. void _starpu_simgrid_get_platform_path(int version, char *path, size_t maxlen)
  1796. {
  1797. if (version == 3)
  1798. get_bus_path("platform.xml", path, maxlen);
  1799. else
  1800. get_bus_path("platform.v4.xml", path, maxlen);
  1801. }
  1802. #ifndef STARPU_SIMGRID
  1803. /*
  1804. * Compute the precise PCI tree bandwidth and link shares
  1805. *
  1806. * We only have measurements from one leaf to another. We assume that the
  1807. * available bandwidth is greater at lower levels, and thus measurements from
  1808. * increasingly far GPUs provide the PCI bridges bandwidths at each level.
  1809. *
  1810. * The bandwidth of a PCI bridge is thus computed as the maximum of the speed
  1811. * of the various transfers that we have achieved through it. We thus browse
  1812. * the PCI tree three times:
  1813. *
  1814. * - first through all CUDA-CUDA possible transfers to compute the maximum
  1815. * measured bandwidth on each PCI link and hub used for that.
  1816. * - then through the whole tree to emit links for each PCI link and hub.
  1817. * - then through all CUDA-CUDA possible transfers again to emit routes.
  1818. */
  1819. #if defined(STARPU_USE_CUDA) && defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
  1820. /* Records, for each PCI link and hub, the maximum bandwidth seen through it */
  1821. struct pci_userdata
  1822. {
  1823. /* Uplink max measurement */
  1824. double bw_up;
  1825. double bw_down;
  1826. /* Hub max measurement */
  1827. double bw;
  1828. };
  1829. /* Allocate a pci_userdata structure for the given object */
  1830. static void allocate_userdata(hwloc_obj_t obj)
  1831. {
  1832. struct pci_userdata *data;
  1833. if (obj->userdata)
  1834. return;
  1835. _STARPU_MALLOC(obj->userdata, sizeof(*data));
  1836. data = obj->userdata;
  1837. data->bw_up = 0.0;
  1838. data->bw_down = 0.0;
  1839. data->bw = 0.0;
  1840. }
  1841. /* Update the maximum bandwidth seen going to upstream */
  1842. static void update_bandwidth_up(hwloc_obj_t obj, double bandwidth)
  1843. {
  1844. struct pci_userdata *data;
  1845. if (obj->type != HWLOC_OBJ_BRIDGE && obj->type != HWLOC_OBJ_PCI_DEVICE)
  1846. return;
  1847. allocate_userdata(obj);
  1848. data = obj->userdata;
  1849. if (data->bw_up < bandwidth)
  1850. data->bw_up = bandwidth;
  1851. }
  1852. /* Update the maximum bandwidth seen going from upstream */
  1853. static void update_bandwidth_down(hwloc_obj_t obj, double bandwidth)
  1854. {
  1855. struct pci_userdata *data;
  1856. if (obj->type != HWLOC_OBJ_BRIDGE && obj->type != HWLOC_OBJ_PCI_DEVICE)
  1857. return;
  1858. allocate_userdata(obj);
  1859. data = obj->userdata;
  1860. if (data->bw_down < bandwidth)
  1861. data->bw_down = bandwidth;
  1862. }
  1863. /* Update the maximum bandwidth seen going through this Hub */
  1864. static void update_bandwidth_through(hwloc_obj_t obj, double bandwidth)
  1865. {
  1866. struct pci_userdata *data;
  1867. allocate_userdata(obj);
  1868. data = obj->userdata;
  1869. if (data->bw < bandwidth)
  1870. data->bw = bandwidth;
  1871. }
  1872. /* find_* functions perform the first step: computing maximum bandwidths */
1873. /* Our traffic has to go through the host: go back from the target up to the host,
  1874. * updating uplink downstream bandwidth along the way */
  1875. static void find_platform_backward_path(hwloc_obj_t obj, double bandwidth)
  1876. {
  1877. if (!obj)
  1878. /* Oops, we should have seen a host bridge. Well, too bad. */
  1879. return;
  1880. /* Update uplink bandwidth of PCI Hub */
  1881. update_bandwidth_down(obj, bandwidth);
  1882. /* Update internal bandwidth of PCI Hub */
  1883. update_bandwidth_through(obj, bandwidth);
  1884. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1885. /* Finished */
  1886. return;
  1887. /* Continue up */
  1888. find_platform_backward_path(obj->parent, bandwidth);
  1889. }
  1890. /* Same, but update uplink upstream bandwidth */
  1891. static void find_platform_forward_path(hwloc_obj_t obj, double bandwidth)
  1892. {
  1893. if (!obj)
  1894. /* Oops, we should have seen a host bridge. Well, too bad. */
  1895. return;
  1896. /* Update uplink bandwidth of PCI Hub */
  1897. update_bandwidth_up(obj, bandwidth);
  1898. /* Update internal bandwidth of PCI Hub */
  1899. update_bandwidth_through(obj, bandwidth);
  1900. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1901. /* Finished */
  1902. return;
  1903. /* Continue up */
  1904. find_platform_forward_path(obj->parent, bandwidth);
  1905. }
  1906. /* Find the path from obj1 through parent down to obj2 (without ever going up),
  1907. * and update the maximum bandwidth along the path */
  1908. static int find_platform_path_down(hwloc_obj_t parent, hwloc_obj_t obj1, hwloc_obj_t obj2, double bandwidth)
  1909. {
  1910. unsigned i;
  1911. /* Base case, path is empty */
  1912. if (parent == obj2)
  1913. return 1;
  1914. /* Try to go down from parent */
  1915. for (i = 0; i < parent->arity; i++)
  1916. if (parent->children[i] != obj1 && find_platform_path_down(parent->children[i], NULL, obj2, bandwidth))
  1917. {
  1918. /* Found it down there, update bandwidth of parent */
  1919. update_bandwidth_down(parent->children[i], bandwidth);
  1920. update_bandwidth_through(parent, bandwidth);
  1921. return 1;
  1922. }
  1923. #if HWLOC_API_VERSION >= 0x00020000
  1924. hwloc_obj_t io;
  1925. for (io = parent->io_first_child; io; io = io->next_sibling)
  1926. if (io != obj1 && find_platform_path_down(io, NULL, obj2, bandwidth))
  1927. {
  1928. /* Found it down there, update bandwidth of parent */
  1929. update_bandwidth_down(io, bandwidth);
  1930. update_bandwidth_through(parent, bandwidth);
  1931. return 1;
  1932. }
  1933. #endif
  1934. return 0;
  1935. }
  1936. /* Find the path from obj1 to obj2, and update the maximum bandwidth along the
  1937. * path */
  1938. static int find_platform_path_up(hwloc_obj_t obj1, hwloc_obj_t obj2, double bandwidth)
  1939. {
  1940. int ret;
  1941. hwloc_obj_t parent = obj1->parent;
  1942. if (!parent)
  1943. {
  1944. /* Oops, we should have seen a host bridge. Act as if we had seen it. */
  1945. find_platform_backward_path(obj2, bandwidth);
  1946. return 1;
  1947. }
  1948. if (find_platform_path_down(parent, obj1, obj2, bandwidth))
  1949. /* obj2 was a mere (sub)child of our parent */
  1950. return 1;
  1951. /* obj2 is not a (sub)child of our parent, we have to go up through the parent */
  1952. if (parent->type == HWLOC_OBJ_BRIDGE && parent->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1953. {
  1954. /* We have to go up to the Host, so obj2 is not in the same PCI
1955. * tree, so we are done with the obj1-to-Host part, and just find the path
  1956. * from obj2 to Host too.
  1957. */
  1958. find_platform_backward_path(obj2, bandwidth);
  1959. update_bandwidth_up(parent, bandwidth);
  1960. update_bandwidth_through(parent, bandwidth);
  1961. return 1;
  1962. }
  1963. /* Not at host yet, just go up */
  1964. ret = find_platform_path_up(parent, obj2, bandwidth);
  1965. update_bandwidth_up(parent, bandwidth);
  1966. update_bandwidth_through(parent, bandwidth);
  1967. return ret;
  1968. }
  1969. static hwloc_obj_t get_hwloc_cuda_obj(hwloc_topology_t topology, unsigned devid)
  1970. {
  1971. hwloc_obj_t res;
  1972. struct cudaDeviceProp props;
  1973. cudaError_t cures;
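/* Try the hwloc CUDA OS device first, then fall back to the PCI device
 * matching the CUDA properties, and finally to the NVML OS device. */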
  1974. res = hwloc_cuda_get_device_osdev_by_index(topology, devid);
  1975. if (res)
  1976. return res;
  1977. cures = cudaGetDeviceProperties(&props, devid);
  1978. if (cures == cudaSuccess)
  1979. {
  1980. res = hwloc_get_pcidev_by_busid(topology, props.pciDomainID, props.pciBusID, props.pciDeviceID, 0);
  1981. if (res)
  1982. return res;
  1983. #ifdef STARPU_HAVE_LIBNVIDIA_ML
  1984. nvmlDevice_t nvmldev = _starpu_cuda_get_nvmldev(&props);
  1985. if (nvmldev)
  1986. {
  1987. unsigned int index;
  1988. if (nvmlDeviceGetIndex(nvmldev, &index) == NVML_SUCCESS)
  1989. {
  1990. res = hwloc_nvml_get_device_osdev_by_index(topology, index);
  1991. if (res)
  1992. return res;
  1993. }
  1994. res = hwloc_nvml_get_device_osdev(topology, nvmldev);
  1995. if (res)
  1996. return res;
  1997. }
  1998. #endif
  1999. }
  2000. return NULL;
  2001. }
  2002. /* find the path between cuda i and cuda j, and update the maximum bandwidth along the path */
  2003. static int find_platform_cuda_path(hwloc_topology_t topology, unsigned i, unsigned j, double bandwidth)
  2004. {
  2005. hwloc_obj_t cudai, cudaj;
  2006. cudai = get_hwloc_cuda_obj(topology, i);
  2007. cudaj = get_hwloc_cuda_obj(topology, j);
  2008. if (!cudai || !cudaj)
  2009. return 0;
  2010. return find_platform_path_up(cudai, cudaj, bandwidth);
  2011. }
  2012. /* emit_topology_bandwidths performs the second step: emitting link names */
  2013. /* Emit the link name of the object */
  2014. static void emit_pci_hub(FILE *f, hwloc_obj_t obj)
  2015. {
  2016. STARPU_ASSERT(obj->type == HWLOC_OBJ_BRIDGE);
  2017. fprintf(f, "PCI:%04x:[%02x-%02x]", obj->attr->bridge.downstream.pci.domain, obj->attr->bridge.downstream.pci.secondary_bus, obj->attr->bridge.downstream.pci.subordinate_bus);
  2018. }
  2019. static void emit_pci_dev(FILE *f, struct hwloc_pcidev_attr_s *pcidev)
  2020. {
  2021. fprintf(f, "PCI:%04x:%02x:%02x.%1x", pcidev->domain, pcidev->bus, pcidev->dev, pcidev->func);
  2022. }
  2023. /* Emit the links of the object */
  2024. static void emit_topology_bandwidths(FILE *f, hwloc_obj_t obj, const char *Bps, const char *s)
  2025. {
  2026. unsigned i;
  2027. if (obj->userdata)
  2028. {
  2029. struct pci_userdata *data = obj->userdata;
  2030. if (obj->type == HWLOC_OBJ_BRIDGE)
  2031. {
  2032. /* Uplink */
  2033. fprintf(f, " <link id=\"");
  2034. emit_pci_hub(f, obj);
  2035. fprintf(f, " up\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_up, Bps, s);
  2036. fprintf(f, " <link id=\"");
  2037. emit_pci_hub(f, obj);
  2038. fprintf(f, " down\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_down, Bps, s);
  2039. /* PCI Switches are assumed to have infinite internal bandwidth */
  2040. if (!obj->name || !strstr(obj->name, "Switch"))
  2041. {
  2042. /* We assume that PCI Hubs have double bandwidth in
  2043. * order to support full duplex but not more */
  2044. fprintf(f, " <link id=\"");
  2045. emit_pci_hub(f, obj);
  2046. fprintf(f, " through\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw * 2, Bps, s);
  2047. }
  2048. }
  2049. else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
  2050. {
  2051. fprintf(f, " <link id=\"");
  2052. emit_pci_dev(f, &obj->attr->pcidev);
  2053. fprintf(f, " up\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_up, Bps, s);
  2054. fprintf(f, " <link id=\"");
  2055. emit_pci_dev(f, &obj->attr->pcidev);
  2056. fprintf(f, " down\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_down, Bps, s);
  2057. }
  2058. }
  2059. for (i = 0; i < obj->arity; i++)
  2060. emit_topology_bandwidths(f, obj->children[i], Bps, s);
  2061. #if HWLOC_API_VERSION >= 0x00020000
  2062. hwloc_obj_t io;
  2063. for (io = obj->io_first_child; io; io = io->next_sibling)
  2064. emit_topology_bandwidths(f, io, Bps, s);
  2065. #endif
  2066. }
  2067. /* emit_pci_link_* functions perform the third step: emitting the routes */
  2068. static void emit_pci_link(FILE *f, hwloc_obj_t obj, const char *suffix)
  2069. {
  2070. if (obj->type == HWLOC_OBJ_BRIDGE)
  2071. {
  2072. fprintf(f, " <link_ctn id=\"");
  2073. emit_pci_hub(f, obj);
  2074. fprintf(f, " %s\"/>\n", suffix);
  2075. }
  2076. else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
  2077. {
  2078. fprintf(f, " <link_ctn id=\"");
  2079. emit_pci_dev(f, &obj->attr->pcidev);
  2080. fprintf(f, " %s\"/>\n", suffix);
  2081. }
  2082. }
  2083. /* Go to upstream */
  2084. static void emit_pci_link_up(FILE *f, hwloc_obj_t obj)
  2085. {
  2086. emit_pci_link(f, obj, "up");
  2087. }
  2088. /* Go from upstream */
  2089. static void emit_pci_link_down(FILE *f, hwloc_obj_t obj)
  2090. {
  2091. emit_pci_link(f, obj, "down");
  2092. }
  2093. /* Go through PCI hub */
  2094. static void emit_pci_link_through(FILE *f, hwloc_obj_t obj)
  2095. {
2096. /* We don't care about traffic going through PCI switches */
  2097. if (obj->type == HWLOC_OBJ_BRIDGE)
  2098. {
  2099. if (!obj->name || !strstr(obj->name, "Switch"))
  2100. emit_pci_link(f, obj, "through");
  2101. else
  2102. {
  2103. fprintf(f, " <!-- Switch ");
  2104. emit_pci_hub(f, obj);
  2105. fprintf(f, " through -->\n");
  2106. }
  2107. }
  2108. }
2109. /* Our traffic has to go through the host: go back from the target up to the host,
  2110. * using uplink downstream along the way */
  2111. static void emit_platform_backward_path(FILE *f, hwloc_obj_t obj)
  2112. {
  2113. if (!obj)
  2114. /* Oops, we should have seen a host bridge. Well, too bad. */
  2115. return;
  2116. /* Go through PCI Hub */
  2117. emit_pci_link_through(f, obj);
  2118. /* Go through uplink */
  2119. emit_pci_link_down(f, obj);
  2120. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2121. {
  2122. /* Finished, go through host */
  2123. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2124. return;
  2125. }
  2126. /* Continue up */
  2127. emit_platform_backward_path(f, obj->parent);
  2128. }
  2129. /* Same, but use upstream link */
  2130. static void emit_platform_forward_path(FILE *f, hwloc_obj_t obj)
  2131. {
  2132. if (!obj)
  2133. /* Oops, we should have seen a host bridge. Well, too bad. */
  2134. return;
  2135. /* Go through PCI Hub */
  2136. emit_pci_link_through(f, obj);
  2137. /* Go through uplink */
  2138. emit_pci_link_up(f, obj);
  2139. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2140. {
  2141. /* Finished, go through host */
  2142. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2143. return;
  2144. }
  2145. /* Continue up */
  2146. emit_platform_forward_path(f, obj->parent);
  2147. }
  2148. /* Find the path from obj1 through parent down to obj2 (without ever going up),
  2149. * and use the links along the path */
  2150. static int emit_platform_path_down(FILE *f, hwloc_obj_t parent, hwloc_obj_t obj1, hwloc_obj_t obj2)
  2151. {
  2152. unsigned i;
  2153. /* Base case, path is empty */
  2154. if (parent == obj2)
  2155. return 1;
  2156. /* Try to go down from parent */
  2157. for (i = 0; i < parent->arity; i++)
  2158. if (parent->children[i] != obj1 && emit_platform_path_down(f, parent->children[i], NULL, obj2))
  2159. {
  2160. /* Found it down there, path goes through this hub */
  2161. emit_pci_link_down(f, parent->children[i]);
  2162. emit_pci_link_through(f, parent);
  2163. return 1;
  2164. }
  2165. #if HWLOC_API_VERSION >= 0x00020000
  2166. hwloc_obj_t io;
  2167. for (io = parent->io_first_child; io; io = io->next_sibling)
  2168. if (io != obj1 && emit_platform_path_down(f, io, NULL, obj2))
  2169. {
  2170. /* Found it down there, path goes through this hub */
  2171. emit_pci_link_down(f, io);
  2172. emit_pci_link_through(f, parent);
  2173. return 1;
  2174. }
  2175. #endif
  2176. return 0;
  2177. }
  2178. /* Find the path from obj1 to obj2, and use the links along the path */
  2179. static int emit_platform_path_up(FILE *f, hwloc_obj_t obj1, hwloc_obj_t obj2)
  2180. {
  2181. int ret;
  2182. hwloc_obj_t parent = obj1->parent;
  2183. if (!parent)
  2184. {
  2185. /* Oops, we should have seen a host bridge. Act as if we had seen it. */
  2186. emit_platform_backward_path(f, obj2);
  2187. return 1;
  2188. }
  2189. if (emit_platform_path_down(f, parent, obj1, obj2))
  2190. /* obj2 was a mere (sub)child of our parent */
  2191. return 1;
  2192. /* obj2 is not a (sub)child of our parent, we have to go up through the parent */
  2193. if (parent->type == HWLOC_OBJ_BRIDGE && parent->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2194. {
  2195. /* We have to go up to the Host, so obj2 is not in the same PCI
2196. * tree, so we are done with the obj1-to-Host part, and just find the path
  2197. * from obj2 to Host too.
  2198. */
  2199. emit_platform_backward_path(f, obj2);
  2200. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2201. emit_pci_link_up(f, parent);
  2202. emit_pci_link_through(f, parent);
  2203. return 1;
  2204. }
  2205. /* Not at host yet, just go up */
  2206. ret = emit_platform_path_up(f, parent, obj2);
  2207. emit_pci_link_up(f, parent);
  2208. emit_pci_link_through(f, parent);
  2209. return ret;
  2210. }
  2211. /* Clean our mess in the topology before destroying it */
  2212. static void clean_topology(hwloc_obj_t obj)
  2213. {
  2214. unsigned i;
  2215. if (obj->userdata)
  2216. {
  2217. free(obj->userdata);
  2218. obj->userdata = NULL;
  2219. }
  2220. for (i = 0; i < obj->arity; i++)
  2221. clean_topology(obj->children[i]);
  2222. #if HWLOC_API_VERSION >= 0x00020000
  2223. hwloc_obj_t io;
  2224. for (io = obj->io_first_child; io; io = io->next_sibling)
  2225. clean_topology(io);
  2226. #endif
  2227. }
  2228. #endif
  2229. static void write_bus_platform_file_content(int version)
  2230. {
  2231. FILE *f;
  2232. char path[PATH_LENGTH];
  2233. unsigned i;
  2234. const char *speed, *flops, *Bps, *s;
  2235. char dash;
  2236. int locked;
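/* Platform version 3 targets the old SimGrid DTD ("power" attribute, bare
 * numbers, '_' in property names); version 4 uses "speed" and explicit
 * f/Bps/s unit suffixes with '-' separators. */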
  2237. if (version == 3)
  2238. {
  2239. speed = "power";
  2240. flops = "";
  2241. Bps = "";
  2242. s = "";
  2243. dash = '_';
  2244. }
  2245. else
  2246. {
  2247. speed = "speed";
  2248. flops = "f";
  2249. Bps = "Bps";
  2250. s = "s";
  2251. dash = '-';
  2252. }
  2253. STARPU_ASSERT(was_benchmarked);
  2254. _starpu_simgrid_get_platform_path(version, path, sizeof(path));
  2255. _STARPU_DEBUG("writing platform to %s\n", path);
  2256. f = fopen(path, "a+");
  2257. if (!f)
  2258. {
  2259. perror("fopen write_bus_platform_file_content");
  2260. _STARPU_DISP("path '%s'\n", path);
  2261. fflush(stderr);
  2262. STARPU_ABORT();
  2263. }
  2264. locked = _starpu_fwrlock(f) == 0;
  2265. fseek(f, 0, SEEK_SET);
  2266. _starpu_fftruncate(f, 0);
  2267. fprintf(f,
  2268. "<?xml version='1.0'?>\n"
  2269. "<!DOCTYPE platform SYSTEM '%s'>\n"
  2270. " <platform version=\"%d\">\n"
  2271. " <config id=\"General\">\n"
  2272. " <prop id=\"network/TCP%cgamma\" value=\"-1\"></prop>\n"
  2273. " <prop id=\"network/latency%cfactor\" value=\"1\"></prop>\n"
  2274. " <prop id=\"network/bandwidth%cfactor\" value=\"1\"></prop>\n"
  2275. " <prop id=\"network/crosstraffic\" value=\"0\"></prop>\n"
  2276. " <prop id=\"network/weight%cS\" value=\"0.0\"></prop>\n"
  2277. " </config>\n"
  2278. " <AS id=\"AS0\" routing=\"Full\">\n"
  2279. " <host id=\"MAIN\" %s=\"1%s\"/>\n",
  2280. version == 3
  2281. ? "http://simgrid.gforge.inria.fr/simgrid.dtd"
  2282. : "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd",
  2283. version, dash, dash, dash, dash, speed, flops);
  2284. for (i = 0; i < ncpus; i++)
  2285. /* TODO: host memory for out-of-core simulation */
  2286. fprintf(f, " <host id=\"CPU%u\" %s=\"2000000000%s\"/>\n", i, speed, flops);
  2287. for (i = 0; i < ncuda; i++)
  2288. {
  2289. fprintf(f, " <host id=\"CUDA%u\" %s=\"2000000000%s\">\n", i, speed, flops);
  2290. fprintf(f, " <prop id=\"memsize\" value=\"%llu\"/>\n", (unsigned long long) cuda_size[i]);
  2291. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2292. fprintf(f, " <prop id=\"memcpy_peer\" value=\"1\"/>\n");
  2293. #endif
  2294. /* TODO: record cudadev_direct instead of assuming it's NUMA nodes */
  2295. fprintf(f, " </host>\n");
  2296. }
  2297. for (i = 0; i < nopencl; i++)
  2298. {
  2299. fprintf(f, " <host id=\"OpenCL%u\" %s=\"2000000000%s\">\n", i, speed, flops);
  2300. fprintf(f, " <prop id=\"memsize\" value=\"%llu\"/>\n", (unsigned long long) opencl_size[i]);
  2301. fprintf(f, " </host>\n");
  2302. }
  2303. fprintf(f, "\n <host id=\"RAM\" %s=\"1%s\"/>\n", speed, flops);
  2304. /*
  2305. * Compute maximum bandwidth, taken as host bandwidth
  2306. */
  2307. double max_bandwidth = 0;
  2308. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  2309. unsigned numa;
  2310. #endif
  2311. #ifdef STARPU_USE_CUDA
  2312. for (i = 0; i < ncuda; i++)
  2313. {
  2314. for (numa = 0; numa < nnumas; numa++)
  2315. {
  2316. double down_bw = 1.0 / cudadev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_dtoh;
  2317. double up_bw = 1.0 / cudadev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_htod;
  2318. if (max_bandwidth < down_bw)
  2319. max_bandwidth = down_bw;
  2320. if (max_bandwidth < up_bw)
  2321. max_bandwidth = up_bw;
  2322. }
  2323. }
  2324. #endif
  2325. #ifdef STARPU_USE_OPENCL
  2326. for (i = 0; i < nopencl; i++)
  2327. {
  2328. for (numa = 0; numa < nnumas; numa++)
  2329. {
  2330. double down_bw = 1.0 / opencldev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_dtoh;
  2331. double up_bw = 1.0 / opencldev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_htod;
  2332. if (max_bandwidth < down_bw)
  2333. max_bandwidth = down_bw;
  2334. if (max_bandwidth < up_bw)
  2335. max_bandwidth = up_bw;
  2336. }
  2337. }
  2338. #endif
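/* Measured timings are per-byte transfer times in µs, so 1/t is in bytes/µs;
 * the 1e6 factor below yields the bytes-per-second figure SimGrid expects
 * (this assumes the usual units of the StarPU bus benchmarks). */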
  2339. fprintf(f, "\n <link id=\"Host\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n\n", max_bandwidth*1000000, Bps, s);
  2340. /*
  2341. * OpenCL links
  2342. */
  2343. #ifdef STARPU_USE_OPENCL
  2344. for (i = 0; i < nopencl; i++)
  2345. {
  2346. char i_name[16];
  2347. snprintf(i_name, sizeof(i_name), "OpenCL%u", i);
  2348. fprintf(f, " <link id=\"RAM-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2349. i_name,
  2350. 1000000 / search_bus_best_timing(i, "OpenCL", 1), Bps,
  2351. search_bus_best_latency(i, "OpenCL", 1)/1000000., s);
  2352. fprintf(f, " <link id=\"%s-RAM\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2353. i_name,
  2354. 1000000 / search_bus_best_timing(i, "OpenCL", 0), Bps,
  2355. search_bus_best_latency(i, "OpenCL", 0)/1000000., s);
  2356. }
  2357. fprintf(f, "\n");
  2358. #endif
  2359. /*
  2360. * CUDA links and routes
  2361. */
  2362. #ifdef STARPU_USE_CUDA
  2363. /* Write RAM/CUDA bandwidths and latencies */
  2364. for (i = 0; i < ncuda; i++)
  2365. {
  2366. char i_name[16];
  2367. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2368. fprintf(f, " <link id=\"RAM-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2369. i_name,
  2370. 1000000. / search_bus_best_timing(i, "CUDA", 1), Bps,
  2371. search_bus_best_latency(i, "CUDA", 1)/1000000., s);
  2372. fprintf(f, " <link id=\"%s-RAM\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2373. i_name,
  2374. 1000000. / search_bus_best_timing(i, "CUDA", 0), Bps,
  2375. search_bus_best_latency(i, "CUDA", 0)/1000000., s);
  2376. }
  2377. fprintf(f, "\n");
  2378. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2379. /* Write CUDA/CUDA bandwidths and latencies */
  2380. for (i = 0; i < ncuda; i++)
  2381. {
  2382. unsigned j;
  2383. char i_name[16];
  2384. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2385. for (j = 0; j < ncuda; j++)
  2386. {
  2387. char j_name[16];
  2388. if (j == i)
  2389. continue;
  2390. snprintf(j_name, sizeof(j_name), "CUDA%u", j);
  2391. fprintf(f, " <link id=\"%s-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2392. i_name, j_name,
  2393. 1000000. / cudadev_timing_dtod[i][j], Bps,
  2394. cudadev_latency_dtod[i][j]/1000000., s);
  2395. }
  2396. }
  2397. #endif
  2398. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
  2399. #ifdef STARPU_DEVEL
  2400. #warning TODO: use libnvml to get NVLink links, otherwise numbers will be bogusly propagated through PCI topology
  2401. #endif
  2402. /* If we have enough hwloc information, write PCI bandwidths and routes */
  2403. if (!starpu_get_env_number_default("STARPU_PCI_FLAT", 0) && ncuda > 0)
  2404. {
  2405. hwloc_topology_t topology;
  2406. hwloc_topology_init(&topology);
  2407. _starpu_topology_filter(topology);
  2408. hwloc_topology_load(topology);
  2409. char nvlink[ncuda][ncuda];
  2410. char nvlinkhost[ncuda];
  2411. memset(nvlink, 0, sizeof(nvlink));
  2412. memset(nvlinkhost, 0, sizeof(nvlinkhost));
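/* nvlink[i][j] records a direct GPU-GPU NVLink between CUDA devices i and j;
 * nvlinkhost[i] records a direct CPU-GPU NVLink, in which case the route
 * bypasses the PCI tree and goes through the "Host" link instead. */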
  2413. #ifdef STARPU_HAVE_LIBNVIDIA_ML
  2414. /* First find NVLinks */
  2415. struct cudaDeviceProp props[ncuda];
  2416. for (i = 0; i < ncuda; i++)
  2417. {
  2418. cudaError_t cures = cudaGetDeviceProperties(&props[i], i);
  2419. if (cures != cudaSuccess)
  2420. props[i].name[0] = 0;
  2421. }
  2422. for (i = 0; i < ncuda; i++)
  2423. {
  2424. unsigned j;
  2425. if (!props[i].name[0])
  2426. continue;
  2427. nvmlDevice_t nvmldev;
  2428. nvmldev = _starpu_cuda_get_nvmldev(&props[i]);
  2429. if (!nvmldev)
  2430. continue;
  2431. for (j = 0; j < NVML_NVLINK_MAX_LINKS; j++)
  2432. {
  2433. nvmlEnableState_t active;
  2434. nvmlReturn_t ret;
  2435. nvmlPciInfo_t pci;
  2436. unsigned k;
  2437. ret = nvmlDeviceGetNvLinkState(nvmldev, j, &active);
  2438. if (ret != NVML_SUCCESS)
  2439. continue;
  2440. if (active != NVML_FEATURE_ENABLED)
  2441. continue;
  2442. ret = nvmlDeviceGetNvLinkRemotePciInfo(nvmldev, j, &pci);
  2443. if (ret != NVML_SUCCESS)
  2444. continue;
  2445. hwloc_obj_t obj = hwloc_get_pcidev_by_busid(topology,
  2446. pci.domain, pci.bus, pci.device, 0);
  2447. if (obj && obj->type == HWLOC_OBJ_PCI_DEVICE && (obj->attr->pcidev.class_id >> 8 == 0x06))
  2448. {
  2449. switch (obj->attr->pcidev.vendor_id)
  2450. {
  2451. case 0x1014:
  2452. /* IBM OpenCAPI port, direct CPU-GPU NVLink */
  2453. /* TODO: NUMA affinity */
  2454. nvlinkhost[i] = 1;
  2455. continue;
  2456. case 0x10de:
  2457. /* TODO: NVIDIA NVSwitch */
  2458. continue;
  2459. }
  2460. }
  2461. /* Otherwise, link to another GPU? */
  2462. for (k = i+1; k < ncuda; k++)
  2463. {
  2464. if ((int) pci.domain == props[k].pciDomainID
  2465. && (int) pci.bus == props[k].pciBusID
  2466. && (int) pci.device == props[k].pciDeviceID)
  2467. {
  2468. nvlink[i][k] = 1;
  2469. nvlink[k][i] = 1;
  2470. break;
  2471. }
  2472. }
  2473. }
  2474. }
  2475. #endif
  2476. /* Find paths and record measured bandwidth along the path */
  2477. for (i = 0; i < ncuda; i++)
  2478. {
  2479. unsigned j;
  2480. for (j = 0; j < ncuda; j++)
  2481. if (i != j && !nvlink[i][j] && !nvlinkhost[i] && !nvlinkhost[j])
  2482. if (!find_platform_cuda_path(topology, i, j, 1000000. / cudadev_timing_dtod[i][j]))
  2483. {
  2484. _STARPU_DISP("Warning: could not get CUDA location from hwloc\n");
  2485. clean_topology(hwloc_get_root_obj(topology));
  2486. hwloc_topology_destroy(topology);
  2487. goto flat_cuda;
  2488. }
  2489. /* Record RAM/CUDA bandwidths */
  2490. if (!nvlinkhost[i])
  2491. {
  2492. find_platform_forward_path(get_hwloc_cuda_obj(topology, i), 1000000. / search_bus_best_timing(i, "CUDA", 0));
  2493. find_platform_backward_path(get_hwloc_cuda_obj(topology, i), 1000000. / search_bus_best_timing(i, "CUDA", 1));
  2494. }
  2495. }
  2496. /* Ok, found path in all cases, can emit advanced platform routes */
  2497. fprintf(f, "\n");
  2498. emit_topology_bandwidths(f, hwloc_get_root_obj(topology), Bps, s);
  2499. fprintf(f, "\n");
  2500. for (i = 0; i < ncuda; i++)
  2501. {
  2502. unsigned j;
  2503. for (j = 0; j < ncuda; j++)
  2504. if (i != j)
  2505. {
  2506. fprintf(f, " <route src=\"CUDA%u\" dst=\"CUDA%u\" symmetrical=\"NO\">\n", i, j);
  2507. fprintf(f, " <link_ctn id=\"CUDA%u-CUDA%u\"/>\n", i, j);
  2508. if (!nvlink[i][j])
  2509. {
  2510. if (nvlinkhost[i] && nvlinkhost[j])
  2511. /* TODO: NUMA affinity */
  2512. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2513. else
  2514. emit_platform_path_up(f,
  2515. get_hwloc_cuda_obj(topology, i),
  2516. get_hwloc_cuda_obj(topology, j));
  2517. }
  2518. fprintf(f, " </route>\n");
  2519. }
  2520. fprintf(f, " <route src=\"CUDA%u\" dst=\"RAM\" symmetrical=\"NO\">\n", i);
  2521. fprintf(f, " <link_ctn id=\"CUDA%u-RAM\"/>\n", i);
  2522. if (nvlinkhost[i])
  2523. /* TODO: NUMA affinity */
  2524. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2525. else
  2526. emit_platform_forward_path(f, get_hwloc_cuda_obj(topology, i));
  2527. fprintf(f, " </route>\n");
  2528. fprintf(f, " <route src=\"RAM\" dst=\"CUDA%u\" symmetrical=\"NO\">\n", i);
  2529. fprintf(f, " <link_ctn id=\"RAM-CUDA%u\"/>\n", i);
  2530. if (nvlinkhost[i])
  2531. /* TODO: NUMA affinity */
  2532. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2533. else
  2534. emit_platform_backward_path(f, get_hwloc_cuda_obj(topology, i));
  2535. fprintf(f, " </route>\n");
  2536. }
  2537. clean_topology(hwloc_get_root_obj(topology));
  2538. hwloc_topology_destroy(topology);
  2539. }
  2540. else
  2541. {
  2542. flat_cuda:
  2543. #else
  2544. {
  2545. #endif
  2546. /* If we don't have enough hwloc information, write trivial routes always through host */
  2547. for (i = 0; i < ncuda; i++)
  2548. {
  2549. char i_name[16];
  2550. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2551. fprintf(f, " <route src=\"RAM\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"RAM-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2552. fprintf(f, " <route src=\"%s\" dst=\"RAM\" symmetrical=\"NO\"><link_ctn id=\"%s-RAM\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2553. }
  2554. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2555. for (i = 0; i < ncuda; i++)
  2556. {
  2557. unsigned j;
  2558. char i_name[16];
  2559. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2560. for (j = 0; j < ncuda; j++)
  2561. {
  2562. char j_name[16];
  2563. if (j == i)
  2564. continue;
  2565. snprintf(j_name, sizeof(j_name), "CUDA%u", j);
  2566. fprintf(f, " <route src=\"%s\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"%s-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, j_name, i_name, j_name);
  2567. }
  2568. }
  2569. #endif
2570. } /* HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && STARPU_HAVE_CUDA_MEMCPY_PEER */
  2571. fprintf(f, "\n");
  2572. #endif /* STARPU_USE_CUDA */
  2573. /*
  2574. * OpenCL routes
  2575. */
  2576. #ifdef STARPU_USE_OPENCL
  2577. for (i = 0; i < nopencl; i++)
  2578. {
  2579. char i_name[16];
  2580. snprintf(i_name, sizeof(i_name), "OpenCL%u", i);
  2581. fprintf(f, " <route src=\"RAM\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"RAM-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2582. fprintf(f, " <route src=\"%s\" dst=\"RAM\" symmetrical=\"NO\"><link_ctn id=\"%s-RAM\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2583. }
  2584. #endif
  2585. fprintf(f,
  2586. " </AS>\n"
  2587. " </platform>\n"
  2588. );
  2589. if (locked)
  2590. _starpu_fwrunlock(f);
  2591. fclose(f);
  2592. }
  2593. static void generate_bus_platform_file(void)
  2594. {
  2595. if (!was_benchmarked)
  2596. benchmark_all_gpu_devices();
  2597. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2598. /* Slaves don't write files */
  2599. if (!_starpu_mpi_common_is_src_node())
  2600. return;
  2601. #endif
  2602. write_bus_platform_file_content(3);
  2603. write_bus_platform_file_content(4);
  2604. }
  2605. static void check_bus_platform_file(void)
  2606. {
  2607. int res;
  2608. char path[PATH_LENGTH];
  2609. _starpu_simgrid_get_platform_path(4, path, sizeof(path));
  2610. res = access(path, F_OK);
  2611. if (!res)
  2612. {
  2613. _starpu_simgrid_get_platform_path(3, path, sizeof(path));
  2614. res = access(path, F_OK);
  2615. }
  2616. if (res)
  2617. {
  2618. /* File does not exist yet */
  2619. generate_bus_platform_file();
  2620. }
  2621. }
  2622. /*
  2623. * Generic
  2624. */
  2625. static void _starpu_bus_force_sampling(void)
  2626. {
  2627. _STARPU_DEBUG("Force bus sampling ...\n");
  2628. _starpu_create_sampling_directory_if_needed();
  2629. generate_bus_affinity_file();
  2630. generate_bus_latency_file();
  2631. generate_bus_bandwidth_file();
  2632. generate_bus_config_file();
  2633. generate_bus_platform_file();
  2634. }
  2635. #endif /* !SIMGRID */
  2636. void _starpu_load_bus_performance_files(void)
  2637. {
  2638. _starpu_create_sampling_directory_if_needed();
  2639. struct _starpu_machine_config * config = _starpu_get_machine_config();
  2640. nnumas = _starpu_topology_get_nnumanodes(config);
  2641. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_SIMGRID)
  2642. ncuda = _starpu_get_cuda_device_count();
  2643. #endif
  2644. #if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SIMGRID)
  2645. nopencl = _starpu_opencl_get_device_count();
  2646. #endif
  2647. #if defined(STARPU_USE_MPI_MASTER_SLAVE) || defined(STARPU_USE_SIMGRID)
  2648. nmpi_ms = _starpu_mpi_src_get_device_count();
  2649. #endif
  2650. #ifndef STARPU_SIMGRID
  2651. check_bus_config_file();
  2652. #endif
  2653. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2654. /* be sure that master wrote the perf files */
  2655. _starpu_mpi_common_barrier();
  2656. #endif
  2657. #ifndef STARPU_SIMGRID
  2658. load_bus_affinity_file();
  2659. #endif
  2660. load_bus_latency_file();
  2661. load_bus_bandwidth_file();
  2662. #ifndef STARPU_SIMGRID
  2663. check_bus_platform_file();
  2664. #endif
  2665. }
  2666. /* (in MB/s) */
  2667. double starpu_transfer_bandwidth(unsigned src_node, unsigned dst_node)
  2668. {
  2669. return bandwidth_matrix[src_node][dst_node];
  2670. }
  2671. /* (in µs) */
  2672. double starpu_transfer_latency(unsigned src_node, unsigned dst_node)
  2673. {
  2674. return latency_matrix[src_node][dst_node];
  2675. }
  2676. /* (in µs) */
  2677. double starpu_transfer_predict(unsigned src_node, unsigned dst_node, size_t size)
  2678. {
  2679. if (src_node == dst_node)
  2680. return 0;
  2681. double bandwidth = bandwidth_matrix[src_node][dst_node];
  2682. double latency = latency_matrix[src_node][dst_node];
  2683. struct _starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
  2684. #if 0
  2685. int busid = starpu_bus_get_id(src_node, dst_node);
  2686. int direct = starpu_bus_get_direct(busid);
  2687. #endif
  2688. float ngpus = topology->ndevices[STARPU_CUDA_WORKER]+topology->ndevices[STARPU_OPENCL_WORKER];
  2689. #ifdef STARPU_DEVEL
  2690. #warning FIXME: ngpus should not be used e.g. for slow disk transfers...
  2691. #endif
  2692. #if 0
  2693. /* Ideally we should take into account that some GPUs are directly
2694. * connected through a PCI switch, which has less contention than the
  2695. * Host bridge, but doing that seems to *decrease* performance... */
  2696. if (direct)
  2697. {
  2698. float neighbours = starpu_bus_get_ngpus(busid);
  2699. /* Count transfers of these GPUs, and count transfers between
  2700. * other GPUs and these GPUs */
  2701. ngpus = neighbours + (ngpus - neighbours) * neighbours / ngpus;
  2702. }
  2703. #endif
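/* Rough contention model: assume all GPUs may be transferring at the same
 * time in both directions, hence the 2*ngpus factor on the transfer time. */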
  2704. return latency + (size/bandwidth)*2*ngpus;
  2705. }
2706. /* Compute and save the bandwidth and latency entries for a disk node */
  2707. /* bandwidth in MB/s - latency in µs */
  2708. void _starpu_save_bandwidth_and_latency_disk(double bandwidth_write, double bandwidth_read, double latency_write, double latency_read, unsigned node, const char *name)
  2709. {
  2710. unsigned int i, j;
  2711. double slowness_disk_between_main_ram, slowness_main_ram_between_node;
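/* A disk transfer always goes through main RAM: the per-byte times
 * (slownesses) of the disk<->RAM and RAM<->node links therefore add up,
 * and so do the latencies. */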
  2712. int print_stats = starpu_get_env_number_default("STARPU_BUS_STATS", 0);
  2713. if (print_stats)
  2714. {
  2715. fprintf(stderr, "\n#---------------------\n");
  2716. fprintf(stderr, "Data transfer speed for %s (node %u):\n", name, node);
  2717. }
2718. /* save bandwidth */
  2719. for(i = 0; i < STARPU_MAXNODES; ++i)
  2720. {
  2721. for(j = 0; j < STARPU_MAXNODES; ++j)
  2722. {
  2723. if (i == j && j == node) /* source == destination == node */
  2724. {
  2725. bandwidth_matrix[i][j] = 0;
  2726. }
  2727. else if (i == node) /* source == disk */
  2728. {
2729. /* convert to slowness */
  2730. if(bandwidth_read != 0)
  2731. slowness_disk_between_main_ram = 1/bandwidth_read;
  2732. else
  2733. slowness_disk_between_main_ram = 0;
  2734. if(bandwidth_matrix[STARPU_MAIN_RAM][j] != 0)
  2735. slowness_main_ram_between_node = 1/bandwidth_matrix[STARPU_MAIN_RAM][j];
  2736. else
  2737. slowness_main_ram_between_node = 0;
  2738. bandwidth_matrix[i][j] = 1/(slowness_disk_between_main_ram+slowness_main_ram_between_node);
  2739. if (!isnan(bandwidth_matrix[i][j]) && print_stats)
  2740. fprintf(stderr,"%u -> %u: %.0f MB/s\n", i, j, bandwidth_matrix[i][j]);
  2741. }
  2742. else if (j == node) /* destination == disk */
  2743. {
2744. /* convert to slowness */
  2745. if(bandwidth_write != 0)
  2746. slowness_disk_between_main_ram = 1/bandwidth_write;
  2747. else
  2748. slowness_disk_between_main_ram = 0;
  2749. if(bandwidth_matrix[i][STARPU_MAIN_RAM] != 0)
  2750. slowness_main_ram_between_node = 1/bandwidth_matrix[i][STARPU_MAIN_RAM];
  2751. else
  2752. slowness_main_ram_between_node = 0;
  2753. bandwidth_matrix[i][j] = 1/(slowness_disk_between_main_ram+slowness_main_ram_between_node);
  2754. if (!isnan(bandwidth_matrix[i][j]) && print_stats)
  2755. fprintf(stderr,"%u -> %u: %.0f MB/s\n", i, j, bandwidth_matrix[i][j]);
  2756. }
  2757. else if (j > node || i > node) /* not affected by the node */
  2758. {
  2759. bandwidth_matrix[i][j] = NAN;
  2760. }
  2761. }
  2762. }
  2763. /* save latency */
  2764. for(i = 0; i < STARPU_MAXNODES; ++i)
  2765. {
  2766. for(j = 0; j < STARPU_MAXNODES; ++j)
  2767. {
  2768. if (i == j && j == node) /* source == destination == node */
  2769. {
  2770. latency_matrix[i][j] = 0;
  2771. }
  2772. else if (i == node) /* source == disk */
  2773. {
  2774. latency_matrix[i][j] = (latency_write+latency_matrix[STARPU_MAIN_RAM][j]);
  2775. if (!isnan(latency_matrix[i][j]) && print_stats)
  2776. fprintf(stderr,"%u -> %u: %.0f us\n", i, j, latency_matrix[i][j]);
  2777. }
  2778. else if (j == node) /* destination == disk */
  2779. {
  2780. latency_matrix[i][j] = (latency_read+latency_matrix[i][STARPU_MAIN_RAM]);
  2781. if (!isnan(latency_matrix[i][j]) && print_stats)
  2782. fprintf(stderr,"%u -> %u: %.0f us\n", i, j, latency_matrix[i][j]);
  2783. }
  2784. else if (j > node || i > node) /* not affected by the node */
  2785. {
  2786. latency_matrix[i][j] = NAN;
  2787. }
  2788. }
  2789. }
  2790. if (print_stats)
  2791. fprintf(stderr, "\n#---------------------\n");
  2792. }