  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2017 Université de Bordeaux
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 CNRS
  5. * Copyright (C) 2017 Inria
  6. * Copyright (C) 2013 Corentin Salingue
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #ifdef STARPU_USE_CUDA
  20. #ifndef _GNU_SOURCE
  21. #define _GNU_SOURCE 1
  22. #endif
  23. #include <sched.h>
  24. #endif
  25. #include <stdlib.h>
  26. #include <math.h>
  27. #include <starpu.h>
  28. #include <starpu_cuda.h>
  29. #include <starpu_opencl.h>
  30. #include <common/config.h>
  31. #ifdef HAVE_UNISTD_H
  32. #include <unistd.h>
  33. #endif
  34. #include <core/workers.h>
  35. #include <core/perfmodel/perfmodel.h>
  36. #include <core/simgrid.h>
  37. #include <core/topology.h>
  38. #include <common/utils.h>
  39. #include <drivers/mpi/driver_mpi_common.h>
  40. #ifdef STARPU_USE_OPENCL
  41. #include <starpu_opencl.h>
  42. #endif
  43. #ifdef STARPU_HAVE_WINDOWS
  44. #include <windows.h>
  45. #endif
  46. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  47. #include <hwloc/cuda.h>
  48. #endif
  49. #define SIZE (32*1024*1024*sizeof(char))
  50. #define NITER 32
  51. #ifndef STARPU_SIMGRID
  52. static void _starpu_bus_force_sampling(void);
  53. #endif
  54. /* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
  55. struct dev_timing
  56. {
  57. int numa_id;
  58. double timing_htod;
  59. double latency_htod;
  60. double timing_dtoh;
  61. double latency_dtoh;
  62. };
  63. /* TODO: measure latency */
  64. static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
  65. static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
  66. static unsigned was_benchmarked = 0;
  67. #ifndef STARPU_SIMGRID
  68. static unsigned ncpus = 0;
  69. #endif
  70. static unsigned nnumas = 0;
  71. static unsigned ncuda = 0;
  72. static unsigned nopencl = 0;
  73. static unsigned nmic = 0;
  74. static unsigned nmpi_ms = 0;
  75. /* Benchmarking the performance of the bus */
  76. static double numa_latency[STARPU_MAXNUMANODES][STARPU_MAXNUMANODES];
  77. static double numa_timing[STARPU_MAXNUMANODES][STARPU_MAXNUMANODES];
  78. #ifndef STARPU_SIMGRID
  79. static uint64_t cuda_size[STARPU_MAXCUDADEVS];
  80. #endif
  81. #ifdef STARPU_USE_CUDA
  82. /* preference order of cores (logical indexes) */
  83. static int cuda_affinity_matrix[STARPU_MAXCUDADEVS][STARPU_MAXNUMANODES];
  84. #ifndef STARPU_SIMGRID
  85. #ifdef HAVE_CUDA_MEMCPY_PEER
  86. static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
  87. static double cudadev_latency_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
  88. #endif
  89. #endif
  90. static struct dev_timing cudadev_timing_per_numa[STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES];
  91. static char cudadev_direct[STARPU_MAXNODES][STARPU_MAXNODES];
  92. #endif
  93. #ifndef STARPU_SIMGRID
  94. static uint64_t opencl_size[STARPU_MAXOPENCLDEVS];
  95. #endif
  96. #ifdef STARPU_USE_OPENCL
  97. /* preference order of cores (logical indexes) */
  98. static int opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][STARPU_MAXNUMANODES];
  99. static struct dev_timing opencldev_timing_per_numa[STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES];
  100. #endif
  101. #ifdef STARPU_USE_MIC
  102. static double mic_time_host_to_device[STARPU_MAXNODES] = {0.0};
  103. static double mic_time_device_to_host[STARPU_MAXNODES] = {0.0};
  104. #endif /* STARPU_USE_MIC */
  105. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  106. static double mpi_time_device_to_device[STARPU_MAXMPIDEVS][STARPU_MAXMPIDEVS] = {{0.0}};
  107. static double mpi_latency_device_to_device[STARPU_MAXMPIDEVS][STARPU_MAXMPIDEVS] = {{0.0}};
  108. #endif
  109. #ifdef STARPU_HAVE_HWLOC
  110. static hwloc_topology_t hwtopology;
  111. hwloc_topology_t _starpu_perfmodel_get_hwtopology()
  112. {
  113. return hwtopology;
  114. }
  115. #endif
  116. #if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && !defined(STARPU_SIMGRID)
  117. #ifdef STARPU_USE_CUDA
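/* Benchmark host<->device transfers between CUDA device 'dev' and NUMA node
 * 'numa': the calling thread is bound to 'cpu' (a CPU close to that NUMA
 * node), a device buffer and a host buffer (NUMA-bound through hwloc when
 * several NUMA nodes are available) are allocated, and NITER transfers of
 * 'size' bytes resp. 1 byte are timed in both directions to fill in the
 * timing_htod/timing_dtoh (µs per byte) and latency_htod/latency_dtoh (µs)
 * fields of dev_timing_per_cpu. */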
  118. static void measure_bandwidth_between_host_and_dev_on_numa_with_cuda(int dev, int numa, int cpu, struct dev_timing *dev_timing_per_cpu)
  119. {
  120. struct _starpu_machine_config *config = _starpu_get_machine_config();
  121. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  122. size_t size = SIZE;
  123. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  124. /* Initialize CUDA context on the device */
  125. /* We do not need to enable OpenGL interoperability at this point,
  126. * since we cleanly shutdown CUDA before returning. */
  127. cudaSetDevice(dev);
  128. /* hack to prevent third-party libs from rebinding threads */
  129. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  130. /* hack to force the initialization */
  131. cudaFree(0);
  132. /* hack to prevent third-party libs from rebinding threads */
  133. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  134. /* Get the maximum size which can be allocated on the device */
  135. struct cudaDeviceProp prop;
  136. cudaError_t cures;
  137. cures = cudaGetDeviceProperties(&prop, dev);
  138. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  139. cuda_size[dev] = prop.totalGlobalMem;
  140. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  141. /* Allocate a buffer on the device */
  142. unsigned char *d_buffer;
  143. cures = cudaMalloc((void **)&d_buffer, size);
  144. STARPU_ASSERT(cures == cudaSuccess);
  145. /* hack to prevent third-party libs from rebinding threads */
  146. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  147. /* Allocate a buffer on the host */
  148. unsigned char *h_buffer;
  149. #if defined(STARPU_HAVE_HWLOC)
  150. if (nnuma_nodes > 1)
  151. {
  152. /* NUMA mode activated */
  153. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NODE, numa);
  154. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, 0);
  155. }
  156. else
  157. #endif
  158. {
  159. /* we use STARPU_MAIN_RAM */
  160. _STARPU_MALLOC(h_buffer, size);
  161. cures = cudaHostRegister((void *)h_buffer, size, 0);
  162. }
  163. STARPU_ASSERT(cures == cudaSuccess);
  164. /* hack to prevent third-party libs from rebinding threads */
  165. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  166. /* Fill them */
  167. memset(h_buffer, 0, size);
  168. cudaMemset(d_buffer, 0, size);
  169. /* hack to prevent third-party libs from rebinding threads */
  170. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  171. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa;
  172. unsigned iter;
  173. double timing;
  174. double start;
  175. double end;
  176. /* Measure upload bandwidth */
  177. start = starpu_timing_now();
  178. for (iter = 0; iter < NITER; iter++)
  179. {
  180. cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
  181. cudaThreadSynchronize();
  182. }
  183. end = starpu_timing_now();
  184. timing = end - start;
  185. dev_timing_per_cpu[timing_numa_index].timing_htod = timing/NITER/size;
  186. /* Measure download bandwidth */
  187. start = starpu_timing_now();
  188. for (iter = 0; iter < NITER; iter++)
  189. {
  190. cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
  191. cudaThreadSynchronize();
  192. }
  193. end = starpu_timing_now();
  194. timing = end - start;
  195. dev_timing_per_cpu[timing_numa_index].timing_dtoh = timing/NITER/size;
  196. /* Measure upload latency */
  197. start = starpu_timing_now();
  198. for (iter = 0; iter < NITER; iter++)
  199. {
  200. cudaMemcpy(d_buffer, h_buffer, 1, cudaMemcpyHostToDevice);
  201. cudaThreadSynchronize();
  202. }
  203. end = starpu_timing_now();
  204. timing = end - start;
  205. dev_timing_per_cpu[timing_numa_index].latency_htod = timing/NITER;
  206. /* Measure download latency */
  207. start = starpu_timing_now();
  208. for (iter = 0; iter < NITER; iter++)
  209. {
  210. cudaMemcpy(h_buffer, d_buffer, 1, cudaMemcpyDeviceToHost);
  211. cudaThreadSynchronize();
  212. }
  213. end = starpu_timing_now();
  214. timing = end - start;
  215. dev_timing_per_cpu[timing_numa_index].latency_dtoh = timing/NITER;
  216. /* Free buffers */
  217. cudaHostUnregister(h_buffer);
  218. #if defined(STARPU_HAVE_HWLOC)
  219. if (nnuma_nodes > 1)
  220. {
  221. /* NUMA mode activated */
  222. hwloc_free(hwtopology, h_buffer, size);
  223. }
  224. else
  225. #endif
  226. {
  227. free(h_buffer);
  228. }
  229. cudaFree(d_buffer);
  230. cudaThreadExit();
  231. }
  232. #ifdef HAVE_CUDA_MEMCPY_PEER
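/* Benchmark CUDA device-to-device transfers from 'src' to 'dst' with
 * cudaMemcpyPeer: peer access is enabled when possible (unless
 * STARPU_ENABLE_CUDA_GPU_GPU_DIRECT is set to 0), and NITER copies of
 * 'size' bytes resp. 1 byte are timed to fill cudadev_timing_dtod
 * (µs per byte) and cudadev_latency_dtod (µs). */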
  233. static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
  234. {
  235. size_t size = SIZE;
  236. int can;
  237. /* Get the maximum size which can be allocated on the device */
  238. struct cudaDeviceProp prop;
  239. cudaError_t cures;
  240. cures = cudaGetDeviceProperties(&prop, src);
  241. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  242. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  243. cures = cudaGetDeviceProperties(&prop, dst);
  244. if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
  245. if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
  246. /* Initialize CUDA context on the source */
  247. /* We do not need to enable OpenGL interoperability at this point,
  248. * since we cleanly shutdown CUDA before returning. */
  249. cudaSetDevice(src);
  250. if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
  251. {
  252. cures = cudaDeviceCanAccessPeer(&can, src, dst);
  253. if (!cures && can)
  254. {
  255. cures = cudaDeviceEnablePeerAccess(dst, 0);
  256. if (!cures)
  257. {
  258. _STARPU_DISP("GPU-Direct %d -> %d\n", dst, src);
  259. cudadev_direct[src][dst] = 1;
  260. }
  261. }
  262. }
  263. /* Allocate a buffer on the device */
  264. unsigned char *s_buffer;
  265. cures = cudaMalloc((void **)&s_buffer, size);
  266. STARPU_ASSERT(cures == cudaSuccess);
  267. cudaMemset(s_buffer, 0, size);
  268. /* Initialize CUDA context on the destination */
  269. /* We do not need to enable OpenGL interoperability at this point,
  270. * since we cleanly shutdown CUDA before returning. */
  271. cudaSetDevice(dst);
  272. if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
  273. {
  274. cures = cudaDeviceCanAccessPeer(&can, dst, src);
  275. if (!cures && can)
  276. {
  277. cures = cudaDeviceEnablePeerAccess(src, 0);
  278. if (!cures)
  279. {
  280. _STARPU_DISP("GPU-Direct %d -> %d\n", src, dst);
  281. cudadev_direct[dst][src] = 1;
  282. }
  283. }
  284. }
  285. /* Allocate a buffer on the device */
  286. unsigned char *d_buffer;
  287. cures = cudaMalloc((void **)&d_buffer, size);
  288. STARPU_ASSERT(cures == cudaSuccess);
  289. cudaMemset(d_buffer, 0, size);
  290. unsigned iter;
  291. double timing;
  292. double start;
  293. double end;
  294. /* Measure device-to-device bandwidth */
  295. start = starpu_timing_now();
  296. for (iter = 0; iter < NITER; iter++)
  297. {
  298. cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
  299. cudaThreadSynchronize();
  300. }
  301. end = starpu_timing_now();
  302. timing = end - start;
  303. cudadev_timing_dtod[src][dst] = timing/NITER/size;
  304. /* Measure device-to-device latency */
  305. start = starpu_timing_now();
  306. for (iter = 0; iter < NITER; iter++)
  307. {
  308. cudaMemcpyPeer(d_buffer, dst, s_buffer, src, 1);
  309. cudaThreadSynchronize();
  310. }
  311. end = starpu_timing_now();
  312. timing = end - start;
  313. cudadev_latency_dtod[src][dst] = timing/NITER;
  314. /* Free buffers */
  315. cudaFree(d_buffer);
  316. cudaSetDevice(src);
  317. cudaFree(s_buffer);
  318. cudaThreadExit();
  319. }
  320. #endif
  321. #endif
  322. #ifdef STARPU_USE_OPENCL
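/* OpenCL counterpart of the CUDA benchmark above: times NITER blocking
 * clEnqueueWriteBuffer/clEnqueueReadBuffer calls of 'size' bytes resp.
 * 1 byte between a host buffer bound to NUMA node 'numa' and a buffer on
 * OpenCL device 'dev', from a thread bound to CPU 'cpu'. */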
  323. static void measure_bandwidth_between_host_and_dev_on_numa_with_opencl(int dev, int numa, int cpu, struct dev_timing *dev_timing_per_cpu)
  324. {
  325. cl_context context;
  326. cl_command_queue queue;
  327. cl_int err=0;
  328. size_t size = SIZE;
  329. int not_initialized;
  330. struct _starpu_machine_config *config = _starpu_get_machine_config();
  331. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  332. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  333. /* Is the context already initialised ? */
  334. starpu_opencl_get_context(dev, &context);
  335. not_initialized = (context == NULL);
  336. if (not_initialized == 1)
  337. _starpu_opencl_init_context(dev);
  338. /* Get context and queue */
  339. starpu_opencl_get_context(dev, &context);
  340. starpu_opencl_get_queue(dev, &queue);
  341. /* Get the maximum size which can be allocated on the device */
  342. cl_device_id device;
  343. cl_ulong maxMemAllocSize, totalGlobalMem;
  344. starpu_opencl_get_device(dev, &device);
  345. err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
  346. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  347. if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;
  348. err = clGetDeviceInfo(device, CL_DEVICE_GLOBAL_MEM_SIZE , sizeof(totalGlobalMem), &totalGlobalMem, NULL);
  349. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  350. opencl_size[dev] = totalGlobalMem;
  351. if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
  352. {
  353. /* Let's not use too much RAM when running OpenCL on a CPU: it
  354. * would make the OS swap like crazy. */
  355. size /= 2;
  356. }
  357. /* hack to prevent third-party libs from rebinding threads */
  358. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  359. /* Allocate a buffer on the device */
  360. cl_mem d_buffer;
  361. d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
  362. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  363. /* hack to prevent third-party libs from rebinding threads */
  364. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  365. /* Allocate a buffer on the host */
  366. unsigned char *h_buffer;
  367. #if defined(STARPU_HAVE_HWLOC)
  368. if (nnuma_nodes > 1)
  369. {
  370. /* NUMA mode activated */
  371. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NODE, numa);
  372. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, 0);
  373. }
  374. else
  375. #endif
  376. {
  377. /* we use STARPU_MAIN_RAM */
  378. _STARPU_MALLOC(h_buffer, size);
  379. }
  380. /* hack to prevent third-party libs from rebinding threads */
  381. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  382. /* Fill them */
  383. memset(h_buffer, 0, size);
  384. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  385. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  386. clFinish(queue);
  387. /* hack to prevent third-party libs from rebinding threads */
  388. _starpu_bind_thread_on_cpu(config, cpu, STARPU_NOWORKERID);
  389. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa;
  390. unsigned iter;
  391. double timing;
  392. double start;
  393. double end;
  394. /* Measure upload bandwidth */
  395. start = starpu_timing_now();
  396. for (iter = 0; iter < NITER; iter++)
  397. {
  398. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  399. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  400. clFinish(queue);
  401. }
  402. end = starpu_timing_now();
  403. timing = end - start;
  404. dev_timing_per_cpu[timing_numa_index].timing_htod = timing/NITER/size;
  405. /* Measure download bandwidth */
  406. start = starpu_timing_now();
  407. for (iter = 0; iter < NITER; iter++)
  408. {
  409. err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
  410. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  411. clFinish(queue);
  412. }
  413. end = starpu_timing_now();
  414. timing = end - start;
  415. dev_timing_per_cpu[timing_numa_index].timing_dtoh = timing/NITER/size;
  416. /* Measure upload latency */
  417. start = starpu_timing_now();
  418. for (iter = 0; iter < NITER; iter++)
  419. {
  420. err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, 1, h_buffer, 0, NULL, NULL);
  421. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  422. clFinish(queue);
  423. }
  424. end = starpu_timing_now();
  425. timing = end - start;
  426. dev_timing_per_cpu[timing_numa_index].latency_htod = timing/NITER;
  427. /* Measure download latency */
  428. start = starpu_timing_now();
  429. for (iter = 0; iter < NITER; iter++)
  430. {
  431. err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, 1, h_buffer, 0, NULL, NULL);
  432. if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
  433. clFinish(queue);
  434. }
  435. end = starpu_timing_now();
  436. timing = end - start;
  437. dev_timing_per_cpu[timing_numa_index].latency_dtoh = timing/NITER;
  438. /* Free buffers */
  439. err = clReleaseMemObject(d_buffer);
  440. if (STARPU_UNLIKELY(err != CL_SUCCESS))
  441. STARPU_OPENCL_REPORT_ERROR(err);
  442. #if defined(STARPU_HAVE_HWLOC)
  443. if (nnuma_nodes > 1)
  444. {
  445. /* NUMA mode activated */
  446. hwloc_free(hwtopology, h_buffer, size);
  447. }
  448. else
  449. #endif
  450. {
  451. free(h_buffer);
  452. }
  453. /* Deinitialize the OpenCL context on the device */
  454. if (not_initialized == 1)
  455. _starpu_opencl_deinit_context(dev);
  456. }
  457. #endif
  458. /* NB: we want to sort the bandwidth by DECREASING order */
  459. static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
  460. {
  461. const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
  462. const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;
  463. double left_dtoh = left->timing_dtoh;
  464. double left_htod = left->timing_htod;
  465. double right_dtoh = right->timing_dtoh;
  466. double right_htod = right->timing_htod;
  467. double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
  468. double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;
  469. /* qsort comparator: return <0 / 0 / >0 so entries are sorted by increasing slowness, i.e. decreasing bandwidth */
  470. return (timing_sum2_left > timing_sum2_right) - (timing_sum2_left < timing_sum2_right);
  471. }
  472. #ifdef STARPU_HAVE_HWLOC
  473. #if 0
  474. static int find_numa_node(hwloc_obj_t obj)
  475. {
  476. STARPU_ASSERT(obj);
  477. hwloc_obj_t current = obj;
  478. while (current->depth != HWLOC_OBJ_NODE)
  479. {
  480. current = current->parent;
  481. /* If we don't find a "node" obj before the root, this means
  482. * hwloc does not know whether there are numa nodes or not, so
  483. * we should not use a per-node sampling in that case. */
  484. STARPU_ASSERT(current);
  485. }
  486. STARPU_ASSERT(current->depth == HWLOC_OBJ_NODE);
  487. return current->logical_index;
  488. }
  489. #endif
  490. static int find_cpu_from_numa_node(hwloc_obj_t obj)
  491. {
  492. STARPU_ASSERT(obj);
  493. hwloc_obj_t current = obj;
  494. while (current->depth != HWLOC_OBJ_PU)
  495. {
  496. current = current->first_child;
  497. /* If we don't find a "PU" obj before reaching a leaf, this means
  498. * hwloc does not know whether there are CPUs or not. */
  499. STARPU_ASSERT(current);
  500. }
  501. STARPU_ASSERT(current->depth == HWLOC_OBJ_PU);
  502. return current->logical_index;
  503. }
  504. #endif
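/* Run the host<->device benchmark of device 'dev' against every NUMA node:
 * for each node we pick one CPU attached to it (through hwloc when
 * available) and dispatch to the CUDA or OpenCL routine according to
 * 'type'. */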
  505. static void measure_bandwidth_between_numa_nodes_and_dev(int dev, struct dev_timing *dev_timing_per_numanode, char *type)
  506. {
  507. /* We measure the bandwidth between each GPU and each NUMA node */
  508. struct _starpu_machine_config * config = _starpu_get_machine_config();
  509. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  510. unsigned numa_id;
  511. for (numa_id = 0; numa_id < nnuma_nodes; numa_id++)
  512. {
  513. /* Store results by starpu id */
  514. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa_id;
  515. /* Store STARPU_memnode for later */
  516. dev_timing_per_numanode[timing_numa_index].numa_id = numa_id;
  517. /* Choose one CPU connected to this NUMA node */
  518. unsigned cpu_id = 0;
  519. #ifdef STARPU_HAVE_HWLOC
  520. hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NODE, numa_id);
  521. cpu_id = find_cpu_from_numa_node(obj);
  522. #endif
  523. #ifdef STARPU_USE_CUDA
  524. if (strncmp(type, "CUDA", 4) == 0)
  525. measure_bandwidth_between_host_and_dev_on_numa_with_cuda(dev, numa_id, cpu_id, dev_timing_per_numanode);
  526. #endif
  527. #ifdef STARPU_USE_OPENCL
  528. if (strncmp(type, "OpenCL", 6) == 0)
  529. measure_bandwidth_between_host_and_dev_on_numa_with_opencl(dev, numa_id, cpu_id, dev_timing_per_numanode);
  530. #endif
  531. }
  532. }
  533. static void measure_bandwidth_between_host_and_dev(int dev, struct dev_timing *dev_timing_per_numa, char *type)
  534. {
  535. measure_bandwidth_between_numa_nodes_and_dev(dev, dev_timing_per_numa, type);
  536. #ifdef STARPU_VERBOSE
  537. struct _starpu_machine_config * config = _starpu_get_machine_config();
  538. const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
  539. unsigned numa_id;
  540. for (numa_id = 0; numa_id < nnuma_nodes; numa_id++)
  541. {
  542. const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa_id;
  543. double bandwidth_dtoh = dev_timing_per_numa[timing_numa_index].timing_dtoh;
  544. double bandwidth_htod = dev_timing_per_numa[timing_numa_index].timing_htod;
  545. double bandwidth_sum2 = bandwidth_dtoh*bandwidth_dtoh + bandwidth_htod*bandwidth_htod;
  546. _STARPU_DISP("(%10s) BANDWIDTH GPU %d NUMA %u - htod %f - dtoh %f - %f\n", type, dev, numa_id, bandwidth_htod, bandwidth_dtoh, sqrt(bandwidth_sum2));
  547. }
  548. #endif
  549. }
  550. #endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
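/* Measure memcpy() bandwidth and latency between two NUMA nodes, using
 * buffers bound to each node with hwloc. Without hwloc (or with a single
 * NUMA node) no real calibration is possible and arbitrary constants are
 * used instead. */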
  551. static void measure_bandwidth_latency_between_numa(int numa_src, int numa_dst)
  552. {
  553. #if defined(STARPU_HAVE_HWLOC)
  554. if (nnumas > 1)
  555. {
  556. /* NUMA mode activated */
  557. double start, end, timing;
  558. unsigned iter;
  559. unsigned char *h_buffer;
  560. hwloc_obj_t obj_src = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NODE, numa_src);
  561. h_buffer = hwloc_alloc_membind_nodeset(hwtopology, SIZE, obj_src->nodeset, HWLOC_MEMBIND_BIND, 0);
  562. unsigned char *d_buffer;
  563. hwloc_obj_t obj_dst = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NODE, numa_dst);
  564. d_buffer = hwloc_alloc_membind_nodeset(hwtopology, SIZE, obj_dst->nodeset, HWLOC_MEMBIND_BIND, 0);
  565. memset(h_buffer, 0, SIZE);
  566. start = starpu_timing_now();
  567. for (iter = 0; iter < NITER; iter++)
  568. {
  569. memcpy(d_buffer, h_buffer, SIZE);
  570. }
  571. end = starpu_timing_now();
  572. timing = end - start;
  573. numa_timing[numa_src][numa_dst] = timing/NITER/SIZE;
  574. start = starpu_timing_now();
  575. for (iter = 0; iter < NITER; iter++)
  576. {
  577. memcpy(d_buffer, h_buffer, 1);
  578. }
  579. end = starpu_timing_now();
  580. timing = end - start;
  581. numa_latency[numa_src][numa_dst] = timing/NITER;
  582. hwloc_free(hwtopology, h_buffer, SIZE);
  583. hwloc_free(hwtopology, d_buffer, SIZE);
  584. }
  585. else
  586. #endif
  587. {
  588. /* Cannot make a real calibration */
  589. numa_timing[numa_src][numa_dst] = 0.01;
  590. numa_latency[numa_src][numa_dst] = 0;
  591. }
  592. }
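/* Main calibration driver: saves the current thread binding, measures every
 * NUMA<->NUMA pair, then every CUDA device (and CUDA peer-to-peer pairs when
 * available), every OpenCL device, fills placeholder values for MIC, lets the
 * MPI master/slave layer measure its own links, and finally restores the
 * binding. Sets was_benchmarked on success. */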
  593. static void benchmark_all_gpu_devices(void)
  594. {
  595. #ifdef STARPU_SIMGRID
  596. _STARPU_DISP("Can not measure bus in simgrid mode, please run starpu_calibrate_bus in non-simgrid mode to make sure the bus performance model was calibrated\n");
  597. STARPU_ABORT();
  598. #else /* !SIMGRID */
  599. unsigned i, j;
  600. _STARPU_DEBUG("Benchmarking the speed of the bus\n");
  601. #ifdef STARPU_HAVE_HWLOC
  602. hwloc_topology_init(&hwtopology);
  603. hwloc_topology_load(hwtopology);
  604. #endif
  605. #ifdef STARPU_HAVE_HWLOC
  606. hwloc_bitmap_t former_cpuset = hwloc_bitmap_alloc();
  607. hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
  608. #elif __linux__
  609. /* Save the current cpu binding */
  610. cpu_set_t former_process_affinity;
  611. int ret;
  612. ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
  613. if (ret)
  614. {
  615. perror("sched_getaffinity");
  616. STARPU_ABORT();
  617. }
  618. #else
  619. #warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
  620. #endif
  621. struct _starpu_machine_config *config = _starpu_get_machine_config();
  622. ncpus = _starpu_topology_get_nhwcpu(config);
  623. nnumas = _starpu_topology_get_nnumanodes(config);
  624. for (i = 0; i < nnumas; i++)
  625. for (j = 0; j < nnumas; j++)
  626. if (i != j)
  627. {
  628. _STARPU_DISP("NUMA %d -> %d...\n", i, j);
  629. measure_bandwidth_latency_between_numa(i, j);
  630. }
  631. #ifdef STARPU_USE_CUDA
  632. ncuda = _starpu_get_cuda_device_count();
  633. for (i = 0; i < ncuda; i++)
  634. {
  635. _STARPU_DISP("CUDA %u...\n", i);
  636. /* measure bandwidth between Host and Device i */
  637. measure_bandwidth_between_host_and_dev(i, cudadev_timing_per_numa, "CUDA");
  638. }
  639. #ifdef HAVE_CUDA_MEMCPY_PEER
  640. for (i = 0; i < ncuda; i++)
  641. {
  642. unsigned j;
  643. for (j = 0; j < ncuda; j++)
  644. if (i != j)
  645. {
  646. _STARPU_DISP("CUDA %u -> %u...\n", i, j);
  647. /* measure bandwidth between device i and device j */
  648. measure_bandwidth_between_dev_and_dev_cuda(i, j);
  649. }
  650. }
  651. #endif
  652. #endif
  653. #ifdef STARPU_USE_OPENCL
  654. nopencl = _starpu_opencl_get_device_count();
  655. for (i = 0; i < nopencl; i++)
  656. {
  657. _STARPU_DISP("OpenCL %u...\n", i);
  658. /* measure bandwidth between Host and Device i */
  659. measure_bandwidth_between_host_and_dev(i, opencldev_timing_per_numa, "OpenCL");
  660. }
  661. #endif
  662. #ifdef STARPU_USE_MIC
  663. /* TODO: implement real calibration! For now we only put an arbitrary
  664. * value for each device at declaration time as a workaround, otherwise
  665. * the heft scheduler runs into problems */
  666. nmic = _starpu_mic_src_get_device_count();
  667. for (i = 0; i < STARPU_MAXNODES; i++)
  668. {
  669. mic_time_host_to_device[i] = 0.1;
  670. mic_time_device_to_host[i] = 0.1;
  671. }
  672. #endif /* STARPU_USE_MIC */
  673. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  674. _starpu_mpi_common_measure_bandwidth_latency(mpi_time_device_to_device, mpi_latency_device_to_device);
  675. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  676. #ifdef STARPU_HAVE_HWLOC
  677. hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
  678. hwloc_bitmap_free(former_cpuset);
  679. #elif __linux__
  680. /* Restore the former affinity */
  681. ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
  682. if (ret)
  683. {
  684. perror("sched_setaffinity");
  685. STARPU_ABORT();
  686. }
  687. #endif
  688. #ifdef STARPU_HAVE_HWLOC
  689. hwloc_topology_destroy(hwtopology);
  690. #endif
  691. _STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");
  692. was_benchmarked = 1;
  693. #endif /* !SIMGRID */
  694. }
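/* Bus sampling files are stored as <bus perfmodel dir><hostname>.<type>,
 * where <type> is "affinity", "latency" or "bandwidth". */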
  695. static void get_bus_path(const char *type, char *path, size_t maxlen)
  696. {
  697. char hostname[65];
  698. _starpu_gethostname(hostname, sizeof(hostname));
  699. snprintf(path, maxlen, "%s%s.%s", _starpu_get_perf_model_dir_bus(), hostname, type);
  700. }
  701. /*
  702. * Affinity
  703. */
  704. static void get_affinity_path(char *path, size_t maxlen)
  705. {
  706. get_bus_path("affinity", path, maxlen);
  707. }
  708. #ifndef STARPU_SIMGRID
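/* Parse the affinity file: one line per CUDA (then OpenCL) device, holding
 * the NUMA node numbers in preference order, loaded into
 * cuda_affinity_matrix / opencl_affinity_matrix. */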
  709. static void load_bus_affinity_file_content(void)
  710. {
  711. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  712. FILE *f;
  713. int locked;
  714. char path[256];
  715. get_affinity_path(path, sizeof(path));
  716. _STARPU_DEBUG("loading affinities from %s\n", path);
  717. f = fopen(path, "r");
  718. STARPU_ASSERT(f);
  719. locked = _starpu_frdlock(f) == 0;
  720. unsigned gpu;
  721. #ifdef STARPU_USE_CUDA
  722. ncuda = _starpu_get_cuda_device_count();
  723. for (gpu = 0; gpu < ncuda; gpu++)
  724. {
  725. int ret;
  726. unsigned dummy;
  727. _starpu_drop_comments(f);
  728. ret = fscanf(f, "%u\t", &dummy);
  729. STARPU_ASSERT(ret == 1);
  730. STARPU_ASSERT(dummy == gpu);
  731. unsigned numa;
  732. for (numa = 0; numa < nnumas; numa++)
  733. {
  734. ret = fscanf(f, "%d\t", &cuda_affinity_matrix[gpu][numa]);
  735. STARPU_ASSERT(ret == 1);
  736. }
  737. ret = fscanf(f, "\n");
  738. STARPU_ASSERT(ret == 0);
  739. }
  740. #endif /* STARPU_USE_CUDA */
  741. #ifdef STARPU_USE_OPENCL
  742. nopencl = _starpu_opencl_get_device_count();
  743. for (gpu = 0; gpu < nopencl; gpu++)
  744. {
  745. int ret;
  746. unsigned dummy;
  747. _starpu_drop_comments(f);
  748. ret = fscanf(f, "%u\t", &dummy);
  749. STARPU_ASSERT(ret == 1);
  750. STARPU_ASSERT(dummy == gpu);
  751. unsigned numa;
  752. for (numa = 0; numa < nnumas; numa++)
  753. {
  754. ret = fscanf(f, "%d\t", &opencl_affinity_matrix[gpu][numa]);
  755. STARPU_ASSERT(ret == 1);
  756. }
  757. ret = fscanf(f, "\n");
  758. STARPU_ASSERT(ret == 0);
  759. }
  760. #endif /* STARPU_USE_OPENCL */
  761. if (locked)
  762. _starpu_frdunlock(f);
  763. fclose(f);
  764. #endif /* STARPU_USE_CUDA || STARPU_USE_OPENCL */
  765. }
  766. #ifndef STARPU_SIMGRID
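/* Write the affinity file: for each GPU, NUMA nodes are sorted by increasing
 * combined transfer slowness (see compar_dev_timing), i.e. by decreasing
 * bandwidth, and their ids are written in that preference order. */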
  767. static void write_bus_affinity_file_content(void)
  768. {
  769. STARPU_ASSERT(was_benchmarked);
  770. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  771. FILE *f;
  772. char path[256];
  773. int locked;
  774. get_affinity_path(path, sizeof(path));
  775. _STARPU_DEBUG("writing affinities to %s\n", path);
  776. f = fopen(path, "w+");
  777. if (!f)
  778. {
  779. perror("fopen write_buf_affinity_file_content");
  780. _STARPU_DISP("path '%s'\n", path);
  781. fflush(stderr);
  782. STARPU_ABORT();
  783. }
  784. locked = _starpu_frdlock(f) == 0;
  785. unsigned numa;
  786. unsigned gpu;
  787. fprintf(f, "# GPU\t");
  788. for (numa = 0; numa < nnumas; numa++)
  789. fprintf(f, "NUMA%u\t", numa);
  790. fprintf(f, "\n");
  791. #ifdef STARPU_USE_CUDA
  792. {
  793. /* Use another array to sort by bandwidth */
  794. struct dev_timing cudadev_timing_per_numa_sorted[STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES];
  795. memcpy(cudadev_timing_per_numa_sorted, cudadev_timing_per_numa, STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES*sizeof(struct dev_timing));
  796. for (gpu = 0; gpu < ncuda; gpu++)
  797. {
  798. fprintf(f, "%u\t", gpu);
  799. qsort(&(cudadev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES]), nnumas, sizeof(struct dev_timing), compar_dev_timing);
  800. for (numa = 0; numa < nnumas; numa++)
  801. {
  802. fprintf(f, "%d\t", cudadev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES+numa].numa_id);
  803. }
  804. fprintf(f, "\n");
  805. }
  806. }
  807. #endif
  808. #ifdef STARPU_USE_OPENCL
  809. {
  810. /* Use another array to sort by bandwidth */
  811. struct dev_timing opencldev_timing_per_numa_sorted[STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES];
  812. memcpy(opencldev_timing_per_numa_sorted, opencldev_timing_per_numa, STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES*sizeof(struct dev_timing));
  813. for (gpu = 0; gpu < nopencl; gpu++)
  814. {
  815. fprintf(f, "%u\t", gpu);
  816. qsort(&(opencldev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES]), nnumas, sizeof(struct dev_timing), compar_dev_timing);
  817. for (numa = 0; numa < nnumas; numa++)
  818. {
  819. fprintf(f, "%d\t", opencldev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES+numa].numa_id);
  820. }
  821. fprintf(f, "\n");
  822. }
  823. }
  824. #endif
  825. if (locked)
  826. _starpu_frdunlock(f);
  827. fclose(f);
  828. #endif
  829. }
  830. #endif /* STARPU_SIMGRID */
  831. static void generate_bus_affinity_file(void)
  832. {
  833. if (!was_benchmarked)
  834. benchmark_all_gpu_devices();
  835. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  836. /* Slaves don't write files */
  837. if (!_starpu_mpi_common_is_src_node())
  838. return;
  839. #endif
  840. write_bus_affinity_file_content();
  841. }
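/* Check that an existing affinity file uses the current format (a "# GPU"
 * header followed by NUMA columns); returns 0 for files produced by older
 * StarPU versions so that they get regenerated. */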
  842. static int check_bus_affinity_file(void)
  843. {
  844. int ret = 1;
  845. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  846. FILE *f;
  847. int locked;
  848. unsigned dummy;
  849. char path[256];
  850. get_affinity_path(path, sizeof(path));
  851. _STARPU_DEBUG("loading affinities from %s\n", path);
  852. f = fopen(path, "r");
  853. STARPU_ASSERT(f);
  854. locked = _starpu_frdlock(f) == 0;
  855. ret = fscanf(f, "# GPU\t");
  856. STARPU_ASSERT(ret == 0);
  857. ret = fscanf(f, "NUMA%u\t", &dummy);
  858. if (locked)
  859. _starpu_frdunlock(f);
  860. fclose(f);
  861. #endif
  862. return ret == 1;
  863. }
  864. static void load_bus_affinity_file(void)
  865. {
  866. int exist, check = 1;
  867. char path[256];
  868. get_affinity_path(path, sizeof(path));
  869. /* access() returns 0 if the file exists */
  870. exist = access(path, F_OK);
  871. if (exist == 0)
  872. /* check is 0 if the file format is outdated */
  873. check = check_bus_affinity_file();
  874. if (check == 0)
  875. _STARPU_DISP("Affinity File is too old for this version of StarPU ! Rebuilding it...\n");
  876. if (check == 0 || exist != 0)
  877. {
  878. /* File does not exist yet */
  879. generate_bus_affinity_file();
  880. }
  881. load_bus_affinity_file_content();
  882. }
  883. #ifdef STARPU_USE_CUDA
  884. int *_starpu_get_cuda_affinity_vector(unsigned gpuid)
  885. {
  886. return cuda_affinity_matrix[gpuid];
  887. }
  888. #endif /* STARPU_USE_CUDA */
  889. #ifdef STARPU_USE_OPENCL
  890. int *_starpu_get_opencl_affinity_vector(unsigned gpuid)
  891. {
  892. return opencl_affinity_matrix[gpuid];
  893. }
  894. #endif /* STARPU_USE_OPENCL */
  895. void starpu_bus_print_affinity(FILE *f)
  896. {
  897. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  898. unsigned numa;
  899. unsigned gpu;
  900. #endif
  901. fprintf(f, "# GPU\tNUMA in preference order (logical index)\n");
  902. #ifdef STARPU_USE_CUDA
  903. fprintf(f, "# CUDA\n");
  904. for(gpu = 0 ; gpu<ncuda ; gpu++)
  905. {
  906. fprintf(f, "%u\t", gpu);
  907. for (numa = 0; numa < nnumas; numa++)
  908. {
  909. fprintf(f, "%d\t", cuda_affinity_matrix[gpu][numa]);
  910. }
  911. fprintf(f, "\n");
  912. }
  913. #endif
  914. #ifdef STARPU_USE_OPENCL
  915. fprintf(f, "# OpenCL\n");
  916. for(gpu = 0 ; gpu<nopencl ; gpu++)
  917. {
  918. fprintf(f, "%u\t", gpu);
  919. for (numa = 0; numa < nnumas; numa++)
  920. {
  921. fprintf(f, "%d\t", opencl_affinity_matrix[gpu][numa]);
  922. }
  923. fprintf(f, "\n");
  924. }
  925. #endif
  926. }
  927. #endif /* STARPU_SIMGRID */
  928. /*
  929. * Latency
  930. */
  931. static void get_latency_path(char *path, size_t maxlen)
  932. {
  933. get_bus_path("latency", path, maxlen);
  934. }
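/* Parse the latency file: a tab-separated STARPU_MAXNODES x STARPU_MAXNODES
 * matrix of latencies in µs. Missing entries are set to NAN; extra columns
 * are only accepted if they are NAN, so that a file generated with a
 * different maximum number of nodes is detected and regenerated. */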
  935. static int load_bus_latency_file_content(void)
  936. {
  937. int n;
  938. unsigned src, dst;
  939. FILE *f;
  940. double latency;
  941. int locked;
  942. char path[256];
  943. get_latency_path(path, sizeof(path));
  944. _STARPU_DEBUG("loading latencies from %s\n", path);
  945. f = fopen(path, "r");
  946. if (!f)
  947. {
  948. perror("fopen load_bus_latency_file_content");
  949. _STARPU_DISP("path '%s'\n", path);
  950. fflush(stderr);
  951. STARPU_ABORT();
  952. }
  953. locked = _starpu_frdlock(f) == 0;
  954. for (src = 0; src < STARPU_MAXNODES; src++)
  955. {
  956. _starpu_drop_comments(f);
  957. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  958. {
  959. n = _starpu_read_double(f, "%le", &latency);
  960. if (n != 1)
  961. {
  962. _STARPU_DISP("Error while reading latency file <%s>. Expected a number. Did you change the maximum number of GPUs at ./configure time?\n", path);
  963. fclose(f);
  964. return 0;
  965. }
  966. n = getc(f);
  967. if (n == '\n')
  968. break;
  969. if (n != '\t')
  970. {
  971. _STARPU_DISP("bogus character '%c' (%d) in latency file %s\n", n, n, path);
  972. fclose(f);
  973. return 0;
  974. }
  975. latency_matrix[src][dst] = latency;
  976. /* Look out for \t\n */
  977. n = getc(f);
  978. if (n == '\n')
  979. break;
  980. ungetc(n, f);
  981. n = '\t';
  982. }
  983. /* No more values, take NAN */
  984. for ( ; dst < STARPU_MAXNODES; dst++)
  985. latency_matrix[src][dst] = NAN;
  986. while (n == '\t')
  987. {
  988. /* Look out for \t\n */
  989. n = getc(f);
  990. if (n == '\n')
  991. break;
  992. ungetc(n, f);
  993. n = _starpu_read_double(f, "%le", &latency);
  994. if (n && !isnan(latency))
  995. {
  996. _STARPU_DISP("Too many nodes in latency file %s for this configuration (%d). Did you change the maximum number of GPUs at ./configure time?\n", path, STARPU_MAXNODES);
  997. fclose(f);
  998. return 0;
  999. }
  1000. n = getc(f);
  1001. }
  1002. if (n != '\n')
  1003. {
  1004. _STARPU_DISP("Bogus character '%c' (%d) in latency file %s\n", n, n, path);
  1005. fclose(f);
  1006. return 0;
  1007. }
  1008. /* Look out for EOF */
  1009. n = getc(f);
  1010. if (n == EOF)
  1011. break;
  1012. ungetc(n, f);
  1013. }
  1014. if (locked)
  1015. _starpu_frdunlock(f);
  1016. fclose(f);
  1017. /* No more values, take NAN */
  1018. for ( ; src < STARPU_MAXNODES; src++)
  1019. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1020. latency_matrix[src][dst] = NAN;
  1021. return 1;
  1022. }
  1023. #ifndef STARPU_SIMGRID
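/* Return the smallest latency measured between device 'src' of the given
 * type and any NUMA node, in the requested direction (htod or dtoh). Used
 * when estimating transfers that go through host memory. */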
  1024. static double search_bus_best_latency(int src, char * type, int htod)
  1025. {
  1026. /* Search the best latency for this node */
  1027. double best = 0.0;
  1028. double actual = 0.0;
  1029. unsigned check = 0;
  1030. unsigned numa;
  1031. for (numa = 0; numa < nnumas; numa++)
  1032. {
  1033. #ifdef STARPU_USE_CUDA
  1034. if (strncmp(type, "CUDA", 4) == 0)
  1035. {
  1036. if (htod)
  1037. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_htod;
  1038. else
  1039. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_dtoh;
  1040. }
  1041. #endif
  1042. #ifdef STARPU_USE_OPENCL
  1043. if (strncmp(type, "OpenCL", 6) == 0)
  1044. {
  1045. if (htod)
  1046. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_htod;
  1047. else
  1048. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_dtoh;
  1049. }
  1050. #endif
  1051. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1052. if (!check || actual < best)
  1053. {
  1054. best = actual;
  1055. check = 1;
  1056. }
  1057. #endif
  1058. }
  1059. return best;
  1060. }
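/* Write the latency matrix for all memory nodes. Nodes are numbered with
 * NUMA nodes first, then CUDA devices, OpenCL devices, MIC devices and MPI
 * master/slave devices; b_low/b_up delimit the index range of each class
 * while walking through it. GPU<->NUMA links use the measured latencies,
 * and other GPU links fall back on the best latency through host memory. */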
  1061. static void write_bus_latency_file_content(void)
  1062. {
  1063. unsigned src, dst, maxnode;
  1064. /* Boundaries to check if src or dst are inside the interval */
  1065. unsigned b_low, b_up;
  1066. FILE *f;
  1067. int locked;
  1068. STARPU_ASSERT(was_benchmarked);
  1069. char path[256];
  1070. get_latency_path(path, sizeof(path));
  1071. _STARPU_DEBUG("writing latencies to %s\n", path);
  1072. f = fopen(path, "w+");
  1073. if (!f)
  1074. {
  1075. perror("fopen write_bus_latency_file_content");
  1076. _STARPU_DISP("path '%s'\n", path);
  1077. fflush(stderr);
  1078. STARPU_ABORT();
  1079. }
  1080. locked = _starpu_fwrlock(f) == 0;
  1081. _starpu_fftruncate(f, 0);
  1082. fprintf(f, "# ");
  1083. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1084. fprintf(f, "to %u\t\t", dst);
  1085. fprintf(f, "\n");
  1086. maxnode = nnumas;
  1087. #ifdef STARPU_USE_CUDA
  1088. maxnode += ncuda;
  1089. #endif
  1090. #ifdef STARPU_USE_OPENCL
  1091. maxnode += nopencl;
  1092. #endif
  1093. #ifdef STARPU_USE_MIC
  1094. maxnode += nmic;
  1095. #endif
  1096. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1097. maxnode += nmpi_ms;
  1098. #endif
  1099. for (src = 0; src < STARPU_MAXNODES; src++)
  1100. {
  1101. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1102. {
  1103. /* µs */
  1104. double latency = 0.0;
  1105. if ((src >= maxnode) || (dst >= maxnode))
  1106. {
  1107. /* convention */
  1108. latency = NAN;
  1109. }
  1110. else if (src == dst)
  1111. {
  1112. latency = 0.0;
  1113. }
  1114. else
  1115. {
  1116. b_low = b_up = 0;
  1117. /* ---- Begin NUMA ---- */
  1118. b_up += nnumas;
  1119. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1120. latency += numa_latency[src-b_low][dst-b_low];
  1121. /* copy interval to check numa index later */
  1122. unsigned numa_low = b_low;
  1123. unsigned numa_up = b_up;
  1124. b_low += nnumas;
  1125. /* ---- End NUMA ---- */
  1126. #ifdef STARPU_USE_CUDA
  1127. b_up += ncuda;
  1128. #ifdef HAVE_CUDA_MEMCPY_PEER
  1129. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1130. latency += cudadev_latency_dtod[src-b_low][dst-b_low];
  1131. else
  1132. #endif
  1133. {
  1134. /* Check if it's CUDA <-> NUMA link */
  1135. if (src >=b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1136. latency += cudadev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].latency_dtoh;
  1137. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1138. latency += cudadev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].latency_htod;
  1139. /* To other devices, take the best latency */
  1140. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1141. latency += search_bus_best_latency(src-b_low, "CUDA", 0);
  1142. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1143. latency += search_bus_best_latency(dst-b_low, "CUDA", 1);
  1144. }
  1145. b_low += ncuda;
  1146. #endif
  1147. #ifdef STARPU_USE_OPENCL
  1148. b_up += nopencl;
  1149. /* Check if it's OpenCL <-> NUMA link */
  1150. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1151. latency += opencldev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].latency_dtoh;
  1152. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1153. latency += opencldev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].latency_htod;
  1154. /* To other devices, take the best latency */
  1155. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1156. latency += search_bus_best_latency(src-b_low, "OpenCL", 0);
  1157. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1158. latency += search_bus_best_latency(dst-b_low, "OpenCL", 1);
  1159. b_low += nopencl;
  1160. #endif
  1161. #ifdef STARPU_USE_MIC
  1162. b_up += nmic;
  1163. /* TODO Latency MIC */
  1164. b_low += nmic;
  1165. #endif
  1166. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1167. b_up += nmpi_ms;
  1168. /* Shift the MPI src and dst indices past the master node when needed,
  1169. * because we only care about slaves here */
  1170. int mpi_master = _starpu_mpi_common_get_src_node();
  1171. int mpi_src = src - b_low;
  1172. mpi_src = (mpi_master <= mpi_src) ? mpi_src+1 : mpi_src;
  1173. int mpi_dst = dst - b_low;
  1174. mpi_dst = (mpi_master <= mpi_dst) ? mpi_dst+1 : mpi_dst;
  1175. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1176. latency += mpi_latency_device_to_device[mpi_src][mpi_dst];
  1177. else
  1178. {
  1179. if (src >= b_low && src < b_up)
  1180. latency += mpi_latency_device_to_device[mpi_src][mpi_master];
  1181. if (dst >= b_low && dst < b_up)
  1182. latency += mpi_latency_device_to_device[mpi_master][mpi_dst];
  1183. }
  1184. b_low += nmpi_ms;
  1185. #endif
  1186. }
  1187. if (dst > 0)
  1188. fputc('\t', f);
  1189. _starpu_write_double(f, "%e", latency);
  1190. }
  1191. fprintf(f, "\n");
  1192. }
  1193. if (locked)
  1194. _starpu_fwrunlock(f);
  1195. fclose(f);
  1196. }
  1197. #endif
  1198. static void generate_bus_latency_file(void)
  1199. {
  1200. if (!was_benchmarked)
  1201. benchmark_all_gpu_devices();
  1202. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1203. /* Slaves don't write files */
  1204. if (!_starpu_mpi_common_is_src_node())
  1205. return;
  1206. #endif
  1207. #ifndef STARPU_SIMGRID
  1208. write_bus_latency_file_content();
  1209. #endif
  1210. }
  1211. static void load_bus_latency_file(void)
  1212. {
  1213. int res;
  1214. char path[256];
  1215. get_latency_path(path, sizeof(path));
  1216. res = access(path, F_OK);
  1217. if (res || !load_bus_latency_file_content())
  1218. {
  1219. /* File does not exist yet or is bogus */
  1220. generate_bus_latency_file();
  1221. }
  1222. }
  1223. /*
  1224. * Bandwidth
  1225. */
  1226. static void get_bandwidth_path(char *path, size_t maxlen)
  1227. {
  1228. get_bus_path("bandwidth", path, maxlen);
  1229. }
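/* The bandwidth file holds a STARPU_MAXNODES x STARPU_MAXNODES matrix of doubles,
 * one tab-separated row per source node, with '#' header lines skipped as comments;
 * entries missing at the end of a row are treated as NAN. */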
  1230. static int load_bus_bandwidth_file_content(void)
  1231. {
  1232. int n;
  1233. unsigned src, dst;
  1234. FILE *f;
  1235. double bandwidth;
  1236. int locked;
  1237. char path[256];
  1238. get_bandwidth_path(path, sizeof(path));
  1239. _STARPU_DEBUG("loading bandwidth from %s\n", path);
  1240. f = fopen(path, "r");
  1241. if (!f)
  1242. {
  1243. perror("fopen load_bus_bandwidth_file_content");
  1244. _STARPU_DISP("path '%s'\n", path);
  1245. fflush(stderr);
  1246. STARPU_ABORT();
  1247. }
  1248. locked = _starpu_frdlock(f) == 0;
  1249. for (src = 0; src < STARPU_MAXNODES; src++)
  1250. {
  1251. _starpu_drop_comments(f);
  1252. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1253. {
  1254. n = _starpu_read_double(f, "%le", &bandwidth);
  1255. if (n != 1)
  1256. {
  1257. _STARPU_DISP("Error while reading bandwidth file <%s>. Expected a number\n", path);
  1258. fclose(f);
  1259. return 0;
  1260. }
  1261. n = getc(f);
  1262. if (n == '\n')
  1263. break;
  1264. if (n != '\t')
  1265. {
  1266. _STARPU_DISP("bogus character '%c' (%d) in bandwidth file %s\n", n, n, path);
  1267. fclose(f);
  1268. return 0;
  1269. }
  1270. bandwidth_matrix[src][dst] = bandwidth;
  1271. /* Look out for \t\n */
  1272. n = getc(f);
  1273. if (n == '\n')
  1274. break;
  1275. ungetc(n, f);
  1276. n = '\t';
  1277. }
  1278. /* No more values, take NAN */
  1279. for ( ; dst < STARPU_MAXNODES; dst++)
  1280. bandwidth_matrix[src][dst] = NAN;
  1281. while (n == '\t')
  1282. {
  1283. /* Look out for \t\n */
  1284. n = getc(f);
  1285. if (n == '\n')
  1286. break;
  1287. ungetc(n, f);
  1288. n = _starpu_read_double(f, "%le", &bandwidth);
  1289. if (n && !isnan(bandwidth))
  1290. {
  1291. _STARPU_DISP("Too many nodes in bandwidth file %s for this configuration (%d)\n", path, STARPU_MAXNODES);
  1292. fclose(f);
  1293. return 0;
  1294. }
  1295. n = getc(f);
  1296. }
  1297. if (n != '\n')
  1298. {
  1299. _STARPU_DISP("Bogus character '%c' (%d) in bandwidth file %s\n", n, n, path);
  1300. fclose(f);
  1301. return 0;
  1302. }
  1303. /* Look out for EOF */
  1304. n = getc(f);
  1305. if (n == EOF)
  1306. break;
  1307. ungetc(n, f);
  1308. }
  1309. if (locked)
  1310. _starpu_frdunlock(f);
  1311. fclose(f);
  1312. /* No more values, take NAN */
  1313. for ( ; src < STARPU_MAXNODES; src++)
  1314. for (dst = 0; dst < STARPU_MAXNODES; dst++)
1315. bandwidth_matrix[src][dst] = NAN;
  1316. return 1;
  1317. }
  1318. #ifndef STARPU_SIMGRID
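/* Return the smallest (i.e. fastest) measured transfer time between the given CUDA
 * or OpenCL device and any NUMA node; htod selects host-to-device timings, otherwise
 * device-to-host timings are considered. */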
  1319. static double search_bus_best_timing(int src, char * type, int htod)
  1320. {
1321. /* Search the best timing for this node */
  1322. double best = 0.0;
  1323. double actual = 0.0;
  1324. unsigned check = 0;
  1325. unsigned numa;
  1326. for (numa = 0; numa < nnumas; numa++)
  1327. {
  1328. #ifdef STARPU_USE_CUDA
  1329. if (strncmp(type, "CUDA", 4) == 0)
  1330. {
  1331. if (htod)
  1332. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_htod;
  1333. else
  1334. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_dtoh;
  1335. }
  1336. #endif
  1337. #ifdef STARPU_USE_OPENCL
  1338. if (strncmp(type, "OpenCL", 6) == 0)
  1339. {
  1340. if (htod)
  1341. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_htod;
  1342. else
  1343. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_dtoh;
  1344. }
  1345. #endif
  1346. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1347. if (!check || actual < best)
  1348. {
  1349. best = actual;
  1350. check = 1;
  1351. }
  1352. #endif
  1353. }
  1354. return best;
  1355. }
  1356. static void write_bus_bandwidth_file_content(void)
  1357. {
  1358. unsigned src, dst, maxnode;
  1359. unsigned b_low, b_up;
  1360. FILE *f;
  1361. int locked;
  1362. STARPU_ASSERT(was_benchmarked);
  1363. char path[256];
  1364. get_bandwidth_path(path, sizeof(path));
  1365. _STARPU_DEBUG("writing bandwidth to %s\n", path);
  1366. f = fopen(path, "w+");
  1367. STARPU_ASSERT(f);
  1368. locked = _starpu_fwrlock(f) == 0;
  1369. _starpu_fftruncate(f, 0);
  1370. fprintf(f, "# ");
  1371. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1372. fprintf(f, "to %u\t\t", dst);
  1373. fprintf(f, "\n");
  1374. maxnode = nnumas;
  1375. #ifdef STARPU_USE_CUDA
  1376. maxnode += ncuda;
  1377. #endif
  1378. #ifdef STARPU_USE_OPENCL
  1379. maxnode += nopencl;
  1380. #endif
  1381. #ifdef STARPU_USE_MIC
  1382. maxnode += nmic;
  1383. #endif
  1384. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1385. maxnode += nmpi_ms;
  1386. #endif
  1387. for (src = 0; src < STARPU_MAXNODES; src++)
  1388. {
  1389. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1390. {
  1391. double bandwidth;
  1392. if ((src >= maxnode) || (dst >= maxnode))
  1393. {
  1394. bandwidth = NAN;
  1395. }
  1396. else if (src != dst)
  1397. {
  1398. double slowness = 0.0;
  1399. /* Total bandwidth is the harmonic mean of bandwidths */
  1400. b_low = b_up = 0;
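/* b_low/b_up delimit the global node-number range of each driver class as the NUMA,
 * CUDA, OpenCL, MIC and MPI ranges are walked in order, so that src-b_low and
 * dst-b_low give the per-driver device index. */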
  1401. /* Begin NUMA */
  1402. b_up += nnumas;
  1403. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1404. slowness += numa_timing[src-b_low][dst-b_low];
1405. /* remember the NUMA interval to identify NUMA nodes later */
  1406. unsigned numa_low = b_low;
  1407. unsigned numa_up = b_up;
  1408. b_low += nnumas;
  1409. /* End NUMA */
  1410. #ifdef STARPU_USE_CUDA
  1411. b_up += ncuda;
  1412. #ifdef HAVE_CUDA_MEMCPY_PEER
  1413. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
1414. /* Direct GPU-GPU transfer */
  1415. slowness += cudadev_timing_dtod[src-b_low][dst-b_low];
  1416. else
  1417. #endif
  1418. {
  1419. /* Check if it's CUDA <-> NUMA link */
  1420. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1421. slowness += cudadev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].timing_dtoh;
1422. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1423. slowness += cudadev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].timing_htod;
  1424. /* To other devices, take the best slowness */
  1425. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1426. slowness += search_bus_best_timing(src-b_low, "CUDA", 0);
1427. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1428. slowness += search_bus_best_timing(dst-b_low, "CUDA", 1);
  1429. }
  1430. b_low += ncuda;
  1431. #endif
  1432. #ifdef STARPU_USE_OPENCL
  1433. b_up += nopencl;
  1434. /* Check if it's OpenCL <-> NUMA link */
  1435. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1436. slowness += opencldev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].timing_dtoh;
1437. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1438. slowness += opencldev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].timing_htod;
  1439. /* To other devices, take the best slowness */
  1440. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1441. slowness += search_bus_best_timing(src-b_low, "OpenCL", 0);
1442. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1443. slowness += search_bus_best_timing(dst-b_low, "OpenCL", 1);
  1444. b_low += nopencl;
  1445. #endif
  1446. #ifdef STARPU_USE_MIC
  1447. b_up += nmic;
  1448. if (src >= b_low && src < b_up)
  1449. slowness += mic_time_device_to_host[src-b_low];
  1450. if (dst >= b_low && dst < b_up)
  1451. slowness += mic_time_host_to_device[dst-b_low];
  1452. b_low += nmic;
  1453. #endif
  1454. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1455. b_up += nmpi_ms;
1456. /* Shift the MPI src and dst indices past the master node when needed,
1457. * since only the slaves are accounted for here */
  1458. int mpi_master = _starpu_mpi_common_get_src_node();
  1459. int mpi_src = src - b_low;
  1460. mpi_src = (mpi_master <= mpi_src) ? mpi_src+1 : mpi_src;
  1461. int mpi_dst = dst - b_low;
  1462. mpi_dst = (mpi_master <= mpi_dst) ? mpi_dst+1 : mpi_dst;
  1463. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1464. slowness += mpi_time_device_to_device[mpi_src][mpi_dst];
  1465. else
  1466. {
  1467. if (src >= b_low && src < b_up)
  1468. slowness += mpi_time_device_to_device[mpi_src][mpi_master];
  1469. if (dst >= b_low && dst < b_up)
  1470. slowness += mpi_time_device_to_device[mpi_master][mpi_dst];
  1471. }
  1472. b_low += nmpi_ms;
  1473. #endif
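/* The accumulated slowness is the sum of the per-hop transfer times per byte along
 * the path, so the end-to-end bandwidth is its reciprocal. */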
  1474. bandwidth = 1.0/slowness;
  1475. }
  1476. else
  1477. {
  1478. /* convention */
  1479. bandwidth = 0.0;
  1480. }
  1481. if (dst)
  1482. fputc('\t', f);
  1483. _starpu_write_double(f, "%e", bandwidth);
  1484. }
  1485. fprintf(f, "\n");
  1486. }
  1487. if (locked)
  1488. _starpu_fwrunlock(f);
  1489. fclose(f);
  1490. }
  1491. #endif /* STARPU_SIMGRID */
  1492. void starpu_bus_print_filenames(FILE *output)
  1493. {
  1494. char bandwidth_path[256];
  1495. char affinity_path[256];
  1496. char latency_path[256];
  1497. get_bandwidth_path(bandwidth_path, sizeof(bandwidth_path));
  1498. get_affinity_path(affinity_path, sizeof(affinity_path));
  1499. get_latency_path(latency_path, sizeof(latency_path));
  1500. fprintf(output, "bandwidth: <%s>\n", bandwidth_path);
  1501. fprintf(output, " affinity: <%s>\n", affinity_path);
  1502. fprintf(output, " latency: <%s>\n", latency_path);
  1503. }
  1504. void starpu_bus_print_bandwidth(FILE *f)
  1505. {
  1506. unsigned src, dst, maxnode;
  1507. maxnode = nnumas;
  1508. #ifdef STARPU_USE_CUDA
  1509. maxnode += ncuda;
  1510. #endif
  1511. #ifdef STARPU_USE_OPENCL
  1512. maxnode += nopencl;
  1513. #endif
  1514. #ifdef STARPU_USE_MIC
  1515. maxnode += nmic;
  1516. #endif
  1517. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1518. maxnode += nmpi_ms;
  1519. #endif
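/* Print the bandwidth matrix (in MB/s), then the latency matrix (in µs), and finally,
 * when GPUs are present, the NUMA nodes of each GPU in preference order. */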
  1520. fprintf(f, "from/to\t");
  1521. for (dst = 0; dst < nnumas; dst++)
  1522. fprintf(f, "NUMA_%u\t", dst);
  1523. for (dst = 0; dst < ncuda; dst++)
  1524. fprintf(f, "CUDA_%u\t", dst);
  1525. for (dst = 0; dst < nopencl; dst++)
  1526. fprintf(f, "OpenCL%u\t", dst);
  1527. for (dst = 0; dst < nmic; dst++)
  1528. fprintf(f, "MIC_%u\t", dst);
  1529. for (dst = 0; dst < nmpi_ms; dst++)
  1530. fprintf(f, "MPI_MS%u\t", dst);
  1531. fprintf(f, "\n");
  1532. for (src = 0; src < maxnode; src++)
  1533. {
  1534. if (src < nnumas)
  1535. fprintf(f, "NUMA_%u\t", src);
  1536. else if (src < nnumas + ncuda)
  1537. fprintf(f, "CUDA_%u\t", src-nnumas);
  1538. else if (src < nnumas + ncuda + nopencl)
  1539. fprintf(f, "OpenCL%u\t", src-nnumas-ncuda);
  1540. else if (src < nnumas + ncuda + nopencl + nmic)
  1541. fprintf(f, "MIC_%u\t", src-nnumas-ncuda-nopencl);
  1542. else
  1543. fprintf(f, "MPI_MS%u\t", src-nnumas-ncuda-nopencl-nmic);
  1544. for (dst = 0; dst < maxnode; dst++)
  1545. fprintf(f, "%.0f\t", bandwidth_matrix[src][dst]);
  1546. fprintf(f, "\n");
  1547. }
  1548. fprintf(f, "\n");
  1549. for (src = 0; src < maxnode; src++)
  1550. {
  1551. if (src < nnumas)
  1552. fprintf(f, "NUMA_%u\t", src);
  1553. else if (src < nnumas + ncuda)
  1554. fprintf(f, "CUDA_%u\t", src-nnumas);
  1555. else if (src < nnumas + ncuda + nopencl)
  1556. fprintf(f, "OpenCL%u\t", src-nnumas-ncuda);
  1557. else if (src < nnumas + ncuda + nopencl + nmic)
  1558. fprintf(f, "MIC_%u\t", src-nnumas-ncuda-nopencl);
  1559. else
  1560. fprintf(f, "MPI_MS%u\t", src-nnumas-ncuda-nopencl-nmic);
  1561. for (dst = 0; dst < maxnode; dst++)
  1562. fprintf(f, "%.0f\t", latency_matrix[src][dst]);
  1563. fprintf(f, "\n");
  1564. }
  1565. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1566. if (ncuda != 0 || nopencl != 0)
  1567. fprintf(f, "\nGPU\tNUMA in preference order (logical index), host-to-device, device-to-host\n");
  1568. for (src = 0; src < ncuda + nopencl; src++)
  1569. {
  1570. struct dev_timing *timing;
  1571. struct _starpu_machine_config * config = _starpu_get_machine_config();
  1572. unsigned config_nnumas = _starpu_topology_get_nnumanodes(config);
  1573. unsigned numa;
  1574. #ifdef STARPU_USE_CUDA
  1575. if (src < ncuda)
  1576. {
  1577. fprintf(f, "CUDA_%u\t", src);
  1578. for (numa = 0; numa < config_nnumas; numa++)
  1579. {
  1580. timing = &cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa];
  1581. if (timing->timing_htod)
  1582. fprintf(f, "%2d %.0f %.0f\t", timing->numa_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  1583. else
  1584. fprintf(f, "%2d\t", cuda_affinity_matrix[src][numa]);
  1585. }
  1586. }
  1587. #ifdef STARPU_USE_OPENCL
  1588. else
  1589. #endif
  1590. #endif
  1591. #ifdef STARPU_USE_OPENCL
  1592. {
  1593. fprintf(f, "OpenCL%u\t", src-ncuda);
  1594. for (numa = 0; numa < config_nnumas; numa++)
  1595. {
  1596. timing = &opencldev_timing_per_numa[(src-ncuda)*STARPU_MAXNUMANODES+numa];
  1597. if (timing->timing_htod)
  1598. fprintf(f, "%2d %.0f %.0f\t", timing->numa_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  1599. else
  1600. fprintf(f, "%2d\t", opencl_affinity_matrix[src][numa]);
  1601. }
  1602. }
  1603. #endif
  1604. fprintf(f, "\n");
  1605. }
  1606. #endif
  1607. }
  1608. static void generate_bus_bandwidth_file(void)
  1609. {
  1610. if (!was_benchmarked)
  1611. benchmark_all_gpu_devices();
  1612. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1613. /* Slaves don't write files */
  1614. if (!_starpu_mpi_common_is_src_node())
  1615. return;
  1616. #endif
  1617. #ifndef STARPU_SIMGRID
  1618. write_bus_bandwidth_file_content();
  1619. #endif
  1620. }
  1621. static void load_bus_bandwidth_file(void)
  1622. {
  1623. int res;
  1624. char path[256];
  1625. get_bandwidth_path(path, sizeof(path));
  1626. res = access(path, F_OK);
  1627. if (res || !load_bus_bandwidth_file_content())
  1628. {
  1629. /* File does not exist yet or is bogus */
  1630. generate_bus_bandwidth_file();
  1631. }
  1632. }
  1633. #ifndef STARPU_SIMGRID
  1634. /*
  1635. * Config
  1636. */
  1637. static void get_config_path(char *path, size_t maxlen)
  1638. {
  1639. get_bus_path("config", path, maxlen);
  1640. }
  1641. #if defined(STARPU_USE_MPI_MASTER_SLAVE)
  1642. /* check if the master or one slave has to recalibrate */
  1643. static int mpi_check_recalibrate(int my_recalibrate)
  1644. {
  1645. int nb_mpi = _starpu_mpi_src_get_device_count() + 1;
  1646. int mpi_recalibrate[nb_mpi];
  1647. int i;
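/* Gather every node's flag (nb_mpi counts the slave devices plus the master); if any
 * of them requests a recalibration, all of them perform it. */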
  1648. MPI_Allgather(&my_recalibrate, 1, MPI_INT, mpi_recalibrate, 1, MPI_INT, MPI_COMM_WORLD);
  1649. for (i = 0; i < nb_mpi; i++)
  1650. {
  1651. if (mpi_recalibrate[i])
  1652. {
  1653. return 1;
  1654. }
  1655. }
  1656. return 0;
  1657. }
  1658. #endif
  1659. static void compare_value_and_recalibrate(char * msg, unsigned val_file, unsigned val_detected)
  1660. {
  1661. int recalibrate = 0;
  1662. if (val_file != val_detected)
  1663. recalibrate = 1;
  1664. #ifdef STARPU_USE_MPI_MASTER_SLAVE
1665. // Exchange flags with all nodes to know whether we have to recalibrate, since some node may not have the correct value in its config file
  1666. recalibrate = mpi_check_recalibrate(recalibrate);
  1667. #endif
  1668. if (recalibrate)
  1669. {
  1670. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1671. /* Only the master prints the message */
  1672. if (_starpu_mpi_common_is_src_node())
  1673. #endif
  1674. _STARPU_DISP("Current configuration does not match the bus performance model (%s: (stored) %d != (current) %d), recalibrating...\n", msg, val_file, val_detected);
  1675. _starpu_bus_force_sampling();
  1676. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1677. if (_starpu_mpi_common_is_src_node())
  1678. #endif
  1679. _STARPU_DISP("... done\n");
  1680. }
  1681. }
  1682. static void check_bus_config_file(void)
  1683. {
  1684. int res;
  1685. char path[256];
  1686. struct _starpu_machine_config *config = _starpu_get_machine_config();
  1687. int recalibrate = 0;
  1688. get_config_path(path, sizeof(path));
  1689. res = access(path, F_OK);
  1690. if (res || config->conf.bus_calibrate > 0)
  1691. recalibrate = 1;
  1692. #if defined(STARPU_USE_MPI_MASTER_SLAVE)
1693. // Exchange flags with all nodes to know whether we have to recalibrate, since some node may not have the config file at all
  1694. recalibrate = mpi_check_recalibrate(recalibrate);
  1695. #endif
  1696. if (recalibrate)
  1697. {
  1698. if (res)
  1699. _STARPU_DISP("No performance model for the bus, calibrating...\n");
  1700. _starpu_bus_force_sampling();
  1701. if (res)
  1702. _STARPU_DISP("... done\n");
  1703. }
  1704. else
  1705. {
  1706. FILE *f;
  1707. int ret;
  1708. unsigned read_cuda = -1, read_opencl = -1, read_mic = -1, read_mpi_ms = -1;
  1709. unsigned read_cpus = -1, read_numa = -1;
  1710. int locked;
  1711. // Loading configuration from file
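/* The config file stores one count per line (CPUs, NUMA nodes, CUDA, OpenCL, MIC and
 * MPI devices), each followed by a '#' comment; the MIC and MPI entries may be
 * missing, hence the ret == 0 fallbacks below. */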
  1712. f = fopen(path, "r");
  1713. STARPU_ASSERT(f);
  1714. locked = _starpu_frdlock(f) == 0;
  1715. _starpu_drop_comments(f);
  1716. ret = fscanf(f, "%u\t", &read_cpus);
  1717. STARPU_ASSERT(ret == 1);
  1718. _starpu_drop_comments(f);
  1719. ret = fscanf(f, "%u\t", &read_numa);
  1720. STARPU_ASSERT(ret == 1);
  1721. _starpu_drop_comments(f);
  1722. ret = fscanf(f, "%u\t", &read_cuda);
  1723. STARPU_ASSERT(ret == 1);
  1724. _starpu_drop_comments(f);
  1725. ret = fscanf(f, "%u\t", &read_opencl);
  1726. STARPU_ASSERT(ret == 1);
  1727. _starpu_drop_comments(f);
  1728. ret = fscanf(f, "%u\t", &read_mic);
  1729. if (ret == 0)
  1730. read_mic = 0;
  1731. _starpu_drop_comments(f);
  1732. ret = fscanf(f, "%u\t", &read_mpi_ms);
  1733. if (ret == 0)
  1734. read_mpi_ms = 0;
  1735. _starpu_drop_comments(f);
  1736. if (locked)
  1737. _starpu_frdunlock(f);
  1738. fclose(f);
  1739. // Loading current configuration
  1740. ncpus = _starpu_topology_get_nhwcpu(config);
  1741. nnumas = _starpu_topology_get_nnumanodes(config);
  1742. #ifdef STARPU_USE_CUDA
  1743. ncuda = _starpu_get_cuda_device_count();
  1744. #endif
  1745. #ifdef STARPU_USE_OPENCL
  1746. nopencl = _starpu_opencl_get_device_count();
  1747. #endif
  1748. #ifdef STARPU_USE_MIC
  1749. nmic = _starpu_mic_src_get_device_count();
  1750. #endif /* STARPU_USE_MIC */
  1751. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1752. nmpi_ms = _starpu_mpi_src_get_device_count();
  1753. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  1754. // Checking if both configurations match
  1755. compare_value_and_recalibrate("CPUS", read_cpus, ncpus);
  1756. compare_value_and_recalibrate("NUMA", read_numa, nnumas);
  1757. compare_value_and_recalibrate("CUDA", read_cuda, ncuda);
  1758. compare_value_and_recalibrate("OpenCL", read_opencl, nopencl);
  1759. compare_value_and_recalibrate("MIC", read_mic, nmic);
  1760. compare_value_and_recalibrate("MPI Master-Slave", read_mpi_ms, nmpi_ms);
  1761. }
  1762. }
  1763. static void write_bus_config_file_content(void)
  1764. {
  1765. FILE *f;
  1766. char path[256];
  1767. int locked;
  1768. STARPU_ASSERT(was_benchmarked);
  1769. get_config_path(path, sizeof(path));
  1770. _STARPU_DEBUG("writing config to %s\n", path);
  1771. f = fopen(path, "w+");
  1772. STARPU_ASSERT(f);
  1773. locked = _starpu_fwrlock(f) == 0;
  1774. _starpu_fftruncate(f, 0);
  1775. fprintf(f, "# Current configuration\n");
  1776. fprintf(f, "%u # Number of CPUs\n", ncpus);
  1777. fprintf(f, "%u # Number of NUMA nodes\n", nnumas);
  1778. fprintf(f, "%u # Number of CUDA devices\n", ncuda);
  1779. fprintf(f, "%u # Number of OpenCL devices\n", nopencl);
  1780. fprintf(f, "%u # Number of MIC devices\n", nmic);
  1781. fprintf(f, "%u # Number of MPI devices\n", nmpi_ms);
  1782. if (locked)
  1783. _starpu_fwrunlock(f);
  1784. fclose(f);
  1785. }
  1786. static void generate_bus_config_file(void)
  1787. {
  1788. if (!was_benchmarked)
  1789. benchmark_all_gpu_devices();
  1790. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1791. /* Slaves don't write files */
  1792. if (!_starpu_mpi_common_is_src_node())
  1793. return;
  1794. #endif
  1795. write_bus_config_file_content();
  1796. }
  1797. #endif /* !SIMGRID */
  1798. void _starpu_simgrid_get_platform_path(int version, char *path, size_t maxlen)
  1799. {
  1800. if (version == 3)
  1801. get_bus_path("platform.xml", path, maxlen);
  1802. else
  1803. get_bus_path("platform.v4.xml", path, maxlen);
  1804. }
  1805. #ifndef STARPU_SIMGRID
  1806. /*
  1807. * Compute the precise PCI tree bandwidth and link shares
  1808. *
  1809. * We only have measurements from one leaf to another. We assume that the
  1810. * available bandwidth is greater at lower levels, and thus measurements from
  1811. * increasingly far GPUs provide the PCI bridges bandwidths at each level.
  1812. *
  1813. * The bandwidth of a PCI bridge is thus computed as the maximum of the speed
  1814. * of the various transfers that we have achieved through it. We thus browse
  1815. * the PCI tree three times:
  1816. *
  1817. * - first through all CUDA-CUDA possible transfers to compute the maximum
  1818. * measured bandwidth on each PCI link and hub used for that.
  1819. * - then through the whole tree to emit links for each PCI link and hub.
  1820. * - then through all CUDA-CUDA possible transfers again to emit routes.
  1821. */
  1822. #if defined(STARPU_USE_CUDA) && defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && defined(HAVE_CUDA_MEMCPY_PEER)
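/* The three passes described above map onto the helpers below: the find_platform_*
 * functions record the maximum bandwidth seen on each hwloc object (first pass),
 * emit_topology_bandwidths() writes the corresponding <link> elements (second pass),
 * and the emit_platform_path_* and emit_pci_link_* functions emit the <link_ctn>
 * entries making up each route (third pass). */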
  1823. /* Records, for each PCI link and hub, the maximum bandwidth seen through it */
  1824. struct pci_userdata
  1825. {
  1826. /* Uplink max measurement */
  1827. double bw_up;
  1828. double bw_down;
  1829. /* Hub max measurement */
  1830. double bw;
  1831. };
  1832. /* Allocate a pci_userdata structure for the given object */
  1833. static void allocate_userdata(hwloc_obj_t obj)
  1834. {
  1835. struct pci_userdata *data;
  1836. if (obj->userdata)
  1837. return;
  1838. _STARPU_MALLOC(obj->userdata, sizeof(*data));
  1839. data = obj->userdata;
  1840. data->bw_up = 0.0;
  1841. data->bw_down = 0.0;
  1842. data->bw = 0.0;
  1843. }
  1844. /* Update the maximum bandwidth seen going to upstream */
  1845. static void update_bandwidth_up(hwloc_obj_t obj, double bandwidth)
  1846. {
  1847. struct pci_userdata *data;
  1848. if (obj->type != HWLOC_OBJ_BRIDGE && obj->type != HWLOC_OBJ_PCI_DEVICE)
  1849. return;
  1850. allocate_userdata(obj);
  1851. data = obj->userdata;
  1852. if (data->bw_up < bandwidth)
  1853. data->bw_up = bandwidth;
  1854. }
  1855. /* Update the maximum bandwidth seen going from upstream */
  1856. static void update_bandwidth_down(hwloc_obj_t obj, double bandwidth)
  1857. {
  1858. struct pci_userdata *data;
  1859. if (obj->type != HWLOC_OBJ_BRIDGE && obj->type != HWLOC_OBJ_PCI_DEVICE)
  1860. return;
  1861. allocate_userdata(obj);
  1862. data = obj->userdata;
  1863. if (data->bw_down < bandwidth)
  1864. data->bw_down = bandwidth;
  1865. }
  1866. /* Update the maximum bandwidth seen going through this Hub */
  1867. static void update_bandwidth_through(hwloc_obj_t obj, double bandwidth)
  1868. {
  1869. struct pci_userdata *data;
  1870. allocate_userdata(obj);
  1871. data = obj->userdata;
  1872. if (data->bw < bandwidth)
  1873. data->bw = bandwidth;
  1874. }
  1875. /* find_* functions perform the first step: computing maximum bandwidths */
1876. /* Our traffic had to go through the host: go back from the target up to the host,
1877. * updating the uplink downstream bandwidth along the way */
  1878. static void find_platform_backward_path(hwloc_obj_t obj, double bandwidth)
  1879. {
  1880. if (!obj)
  1881. /* Oops, we should have seen a host bridge. Well, too bad. */
  1882. return;
  1883. /* Update uplink bandwidth of PCI Hub */
  1884. update_bandwidth_down(obj, bandwidth);
  1885. /* Update internal bandwidth of PCI Hub */
  1886. update_bandwidth_through(obj, bandwidth);
  1887. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1888. /* Finished */
  1889. return;
  1890. /* Continue up */
  1891. find_platform_backward_path(obj->parent, bandwidth);
  1892. }
  1893. /* Same, but update uplink upstream bandwidth */
  1894. static void find_platform_forward_path(hwloc_obj_t obj, double bandwidth)
  1895. {
  1896. if (!obj)
  1897. /* Oops, we should have seen a host bridge. Well, too bad. */
  1898. return;
  1899. /* Update uplink bandwidth of PCI Hub */
  1900. update_bandwidth_up(obj, bandwidth);
  1901. /* Update internal bandwidth of PCI Hub */
  1902. update_bandwidth_through(obj, bandwidth);
  1903. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1904. /* Finished */
  1905. return;
  1906. /* Continue up */
  1907. find_platform_forward_path(obj->parent, bandwidth);
  1908. }
  1909. /* Find the path from obj1 through parent down to obj2 (without ever going up),
  1910. * and update the maximum bandwidth along the path */
  1911. static int find_platform_path_down(hwloc_obj_t parent, hwloc_obj_t obj1, hwloc_obj_t obj2, double bandwidth)
  1912. {
  1913. unsigned i;
  1914. /* Base case, path is empty */
  1915. if (parent == obj2)
  1916. return 1;
  1917. /* Try to go down from parent */
  1918. for (i = 0; i < parent->arity; i++)
  1919. if (parent->children[i] != obj1 && find_platform_path_down(parent->children[i], NULL, obj2, bandwidth))
  1920. {
  1921. /* Found it down there, update bandwidth of parent */
  1922. update_bandwidth_down(parent->children[i], bandwidth);
  1923. update_bandwidth_through(parent, bandwidth);
  1924. return 1;
  1925. }
  1926. return 0;
  1927. }
  1928. /* Find the path from obj1 to obj2, and update the maximum bandwidth along the
  1929. * path */
  1930. static int find_platform_path_up(hwloc_obj_t obj1, hwloc_obj_t obj2, double bandwidth)
  1931. {
  1932. int ret;
  1933. hwloc_obj_t parent = obj1->parent;
  1934. if (!parent)
  1935. {
  1936. /* Oops, we should have seen a host bridge. Act as if we had seen it. */
  1937. find_platform_backward_path(obj2, bandwidth);
  1938. return 1;
  1939. }
  1940. if (find_platform_path_down(parent, obj1, obj2, bandwidth))
  1941. /* obj2 was a mere (sub)child of our parent */
  1942. return 1;
  1943. /* obj2 is not a (sub)child of our parent, we have to go up through the parent */
  1944. if (parent->type == HWLOC_OBJ_BRIDGE && parent->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1945. {
  1946. /* We have to go up to the Host, so obj2 is not in the same PCI
1947. * tree: we are done for the obj1-to-Host part, and just find the path
  1948. * from obj2 to Host too.
  1949. */
  1950. find_platform_backward_path(obj2, bandwidth);
  1951. update_bandwidth_up(parent, bandwidth);
  1952. update_bandwidth_through(parent, bandwidth);
  1953. return 1;
  1954. }
  1955. /* Not at host yet, just go up */
  1956. ret = find_platform_path_up(parent, obj2, bandwidth);
  1957. update_bandwidth_up(parent, bandwidth);
  1958. update_bandwidth_through(parent, bandwidth);
  1959. return ret;
  1960. }
  1961. /* find the path between cuda i and cuda j, and update the maximum bandwidth along the path */
  1962. static int find_platform_cuda_path(hwloc_topology_t topology, unsigned i, unsigned j, double bandwidth)
  1963. {
  1964. hwloc_obj_t cudai, cudaj;
  1965. cudai = hwloc_cuda_get_device_osdev_by_index(topology, i);
  1966. cudaj = hwloc_cuda_get_device_osdev_by_index(topology, j);
  1967. if (!cudai || !cudaj)
  1968. return 0;
  1969. return find_platform_path_up(cudai, cudaj, bandwidth);
  1970. }
  1971. /* emit_topology_bandwidths performs the second step: emitting link names */
  1972. /* Emit the link name of the object */
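/* Bridges are named after their domain and secondary/subordinate bus range, e.g.
 * "PCI:0000:[02-04]", and devices after their domain:bus:dev.func address, e.g.
 * "PCI:0000:03:00.0". */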
  1973. static void emit_pci_hub(FILE *f, hwloc_obj_t obj)
  1974. {
  1975. STARPU_ASSERT(obj->type == HWLOC_OBJ_BRIDGE);
  1976. fprintf(f, "PCI:%04x:[%02x-%02x]", obj->attr->bridge.downstream.pci.domain, obj->attr->bridge.downstream.pci.secondary_bus, obj->attr->bridge.downstream.pci.subordinate_bus);
  1977. }
  1978. static void emit_pci_dev(FILE *f, struct hwloc_pcidev_attr_s *pcidev)
  1979. {
  1980. fprintf(f, "PCI:%04x:%02x:%02x.%1x", pcidev->domain, pcidev->bus, pcidev->dev, pcidev->func);
  1981. }
  1982. /* Emit the links of the object */
  1983. static void emit_topology_bandwidths(FILE *f, hwloc_obj_t obj, const char *Bps, const char *s)
  1984. {
  1985. unsigned i;
  1986. if (obj->userdata)
  1987. {
  1988. struct pci_userdata *data = obj->userdata;
  1989. if (obj->type == HWLOC_OBJ_BRIDGE)
  1990. {
  1991. /* Uplink */
  1992. fprintf(f, " <link id=\"");
  1993. emit_pci_hub(f, obj);
  1994. fprintf(f, " up\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_up, Bps, s);
  1995. fprintf(f, " <link id=\"");
  1996. emit_pci_hub(f, obj);
  1997. fprintf(f, " down\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_down, Bps, s);
  1998. /* PCI Switches are assumed to have infinite internal bandwidth */
  1999. if (!obj->name || !strstr(obj->name, "Switch"))
  2000. {
  2001. /* We assume that PCI Hubs have double bandwidth in
  2002. * order to support full duplex but not more */
  2003. fprintf(f, " <link id=\"");
  2004. emit_pci_hub(f, obj);
  2005. fprintf(f, " through\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw * 2, Bps, s);
  2006. }
  2007. }
  2008. else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
  2009. {
  2010. fprintf(f, " <link id=\"");
  2011. emit_pci_dev(f, &obj->attr->pcidev);
  2012. fprintf(f, " up\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_up, Bps, s);
  2013. fprintf(f, " <link id=\"");
  2014. emit_pci_dev(f, &obj->attr->pcidev);
  2015. fprintf(f, " down\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_down, Bps, s);
  2016. }
  2017. }
  2018. for (i = 0; i < obj->arity; i++)
  2019. emit_topology_bandwidths(f, obj->children[i], Bps, s);
  2020. }
  2021. /* emit_pci_link_* functions perform the third step: emitting the routes */
  2022. static void emit_pci_link(FILE *f, hwloc_obj_t obj, const char *suffix)
  2023. {
  2024. if (obj->type == HWLOC_OBJ_BRIDGE)
  2025. {
  2026. fprintf(f, " <link_ctn id=\"");
  2027. emit_pci_hub(f, obj);
  2028. fprintf(f, " %s\"/>\n", suffix);
  2029. }
  2030. else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
  2031. {
  2032. fprintf(f, " <link_ctn id=\"");
  2033. emit_pci_dev(f, &obj->attr->pcidev);
  2034. fprintf(f, " %s\"/>\n", suffix);
  2035. }
  2036. }
  2037. /* Go to upstream */
  2038. static void emit_pci_link_up(FILE *f, hwloc_obj_t obj)
  2039. {
  2040. emit_pci_link(f, obj, "up");
  2041. }
  2042. /* Go from upstream */
  2043. static void emit_pci_link_down(FILE *f, hwloc_obj_t obj)
  2044. {
  2045. emit_pci_link(f, obj, "down");
  2046. }
  2047. /* Go through PCI hub */
  2048. static void emit_pci_link_through(FILE *f, hwloc_obj_t obj)
  2049. {
2050. /* We don't care about traffic going through PCI switches */
  2051. if (obj->type == HWLOC_OBJ_BRIDGE)
  2052. {
  2053. if (!obj->name || !strstr(obj->name, "Switch"))
  2054. emit_pci_link(f, obj, "through");
  2055. else
  2056. {
  2057. fprintf(f, " <!-- Switch ");
  2058. emit_pci_hub(f, obj);
  2059. fprintf(f, " through -->\n");
  2060. }
  2061. }
  2062. }
2063. /* Our traffic has to go through the host: go back from the target up to the host,
2064. * using the downstream side of each uplink along the way */
  2065. static void emit_platform_backward_path(FILE *f, hwloc_obj_t obj)
  2066. {
  2067. if (!obj)
  2068. /* Oops, we should have seen a host bridge. Well, too bad. */
  2069. return;
  2070. /* Go through PCI Hub */
  2071. emit_pci_link_through(f, obj);
  2072. /* Go through uplink */
  2073. emit_pci_link_down(f, obj);
  2074. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2075. {
  2076. /* Finished, go through host */
  2077. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2078. return;
  2079. }
  2080. /* Continue up */
  2081. emit_platform_backward_path(f, obj->parent);
  2082. }
  2083. /* Same, but use upstream link */
  2084. static void emit_platform_forward_path(FILE *f, hwloc_obj_t obj)
  2085. {
  2086. if (!obj)
  2087. /* Oops, we should have seen a host bridge. Well, too bad. */
  2088. return;
  2089. /* Go through PCI Hub */
  2090. emit_pci_link_through(f, obj);
  2091. /* Go through uplink */
  2092. emit_pci_link_up(f, obj);
  2093. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2094. {
  2095. /* Finished, go through host */
  2096. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2097. return;
  2098. }
  2099. /* Continue up */
  2100. emit_platform_forward_path(f, obj->parent);
  2101. }
  2102. /* Find the path from obj1 through parent down to obj2 (without ever going up),
  2103. * and use the links along the path */
  2104. static int emit_platform_path_down(FILE *f, hwloc_obj_t parent, hwloc_obj_t obj1, hwloc_obj_t obj2)
  2105. {
  2106. unsigned i;
  2107. /* Base case, path is empty */
  2108. if (parent == obj2)
  2109. return 1;
  2110. /* Try to go down from parent */
  2111. for (i = 0; i < parent->arity; i++)
  2112. if (parent->children[i] != obj1 && emit_platform_path_down(f, parent->children[i], NULL, obj2))
  2113. {
  2114. /* Found it down there, path goes through this hub */
  2115. emit_pci_link_down(f, parent->children[i]);
  2116. emit_pci_link_through(f, parent);
  2117. return 1;
  2118. }
  2119. return 0;
  2120. }
  2121. /* Find the path from obj1 to obj2, and use the links along the path */
  2122. static int emit_platform_path_up(FILE *f, hwloc_obj_t obj1, hwloc_obj_t obj2)
  2123. {
  2124. int ret;
  2125. hwloc_obj_t parent = obj1->parent;
  2126. if (!parent)
  2127. {
  2128. /* Oops, we should have seen a host bridge. Act as if we had seen it. */
  2129. emit_platform_backward_path(f, obj2);
  2130. return 1;
  2131. }
  2132. if (emit_platform_path_down(f, parent, obj1, obj2))
  2133. /* obj2 was a mere (sub)child of our parent */
  2134. return 1;
  2135. /* obj2 is not a (sub)child of our parent, we have to go up through the parent */
  2136. if (parent->type == HWLOC_OBJ_BRIDGE && parent->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2137. {
  2138. /* We have to go up to the Host, so obj2 is not in the same PCI
2139. * tree: we are done for the obj1-to-Host part, and just find the path
  2140. * from obj2 to Host too.
  2141. */
  2142. emit_platform_backward_path(f, obj2);
  2143. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2144. emit_pci_link_up(f, parent);
  2145. emit_pci_link_through(f, parent);
  2146. return 1;
  2147. }
  2148. /* Not at host yet, just go up */
  2149. ret = emit_platform_path_up(f, parent, obj2);
  2150. emit_pci_link_up(f, parent);
  2151. emit_pci_link_through(f, parent);
  2152. return ret;
  2153. }
  2154. /* Clean our mess in the topology before destroying it */
  2155. static void clean_topology(hwloc_obj_t obj)
  2156. {
  2157. unsigned i;
  2158. if (obj->userdata)
  2159. free(obj->userdata);
  2160. for (i = 0; i < obj->arity; i++)
  2161. clean_topology(obj->children[i]);
  2162. }
  2163. #endif
  2164. static void write_bus_platform_file_content(int version)
  2165. {
  2166. FILE *f;
  2167. char path[256];
  2168. unsigned i;
  2169. const char *speed, *flops, *Bps, *s;
  2170. char dash;
  2171. int locked;
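/* SimGrid platform version 3 uses the "power" attribute, bare numbers and '_' in
 * property names, whereas version 4 uses "speed", explicit units (f, Bps, s) and '-'. */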
  2172. if (version == 3)
  2173. {
  2174. speed = "power";
  2175. flops = "";
  2176. Bps = "";
  2177. s = "";
  2178. dash = '_';
  2179. }
  2180. else
  2181. {
  2182. speed = "speed";
  2183. flops = "f";
  2184. Bps = "Bps";
  2185. s = "s";
  2186. dash = '-';
  2187. }
  2188. STARPU_ASSERT(was_benchmarked);
  2189. _starpu_simgrid_get_platform_path(version, path, sizeof(path));
  2190. _STARPU_DEBUG("writing platform to %s\n", path);
  2191. f = fopen(path, "w+");
  2192. if (!f)
  2193. {
  2194. perror("fopen write_bus_platform_file_content");
  2195. _STARPU_DISP("path '%s'\n", path);
  2196. fflush(stderr);
  2197. STARPU_ABORT();
  2198. }
  2199. locked = _starpu_fwrlock(f) == 0;
  2200. _starpu_fftruncate(f, 0);
  2201. fprintf(f,
  2202. "<?xml version='1.0'?>\n"
  2203. "<!DOCTYPE platform SYSTEM '%s'>\n"
  2204. " <platform version=\"%d\">\n"
  2205. " <config id=\"General\">\n"
  2206. " <prop id=\"network/TCP%cgamma\" value=\"-1\"></prop>\n"
  2207. " <prop id=\"network/latency%cfactor\" value=\"1\"></prop>\n"
  2208. " <prop id=\"network/bandwidth%cfactor\" value=\"1\"></prop>\n"
  2209. " </config>\n"
  2210. " <AS id=\"AS0\" routing=\"Full\">\n"
  2211. " <host id=\"MAIN\" %s=\"1%s\"/>\n",
  2212. version == 3
  2213. ? "http://simgrid.gforge.inria.fr/simgrid.dtd"
  2214. : "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd",
  2215. version, dash, dash, dash, speed, flops);
  2216. for (i = 0; i < ncpus; i++)
  2217. /* TODO: host memory for out-of-core simulation */
  2218. fprintf(f, " <host id=\"CPU%u\" %s=\"2000000000%s\"/>\n", i, speed, flops);
  2219. for (i = 0; i < ncuda; i++)
  2220. {
  2221. fprintf(f, " <host id=\"CUDA%u\" %s=\"2000000000%s\">\n", i, speed, flops);
  2222. fprintf(f, " <prop id=\"memsize\" value=\"%llu\"/>\n", (unsigned long long) cuda_size[i]);
  2223. #ifdef HAVE_CUDA_MEMCPY_PEER
  2224. fprintf(f, " <prop id=\"memcpy_peer\" value=\"1\"/>\n");
  2225. #endif
  2226. /* TODO: record cudadev_direct instead of assuming it's NUMA nodes */
  2227. fprintf(f, " </host>\n");
  2228. }
  2229. for (i = 0; i < nopencl; i++)
  2230. {
  2231. fprintf(f, " <host id=\"OpenCL%u\" %s=\"2000000000%s\">\n", i, speed, flops);
  2232. fprintf(f, " <prop id=\"memsize\" value=\"%llu\"/>\n", (unsigned long long) opencl_size[i]);
  2233. fprintf(f, " </host>\n");
  2234. }
  2235. fprintf(f, "\n <host id=\"RAM\" %s=\"1%s\"/>\n", speed, flops);
  2236. /*
  2237. * Compute maximum bandwidth, taken as host bandwidth
  2238. */
  2239. double max_bandwidth = 0;
  2240. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  2241. unsigned numa;
  2242. #endif
  2243. #ifdef STARPU_USE_CUDA
  2244. for (i = 0; i < ncuda; i++)
  2245. {
  2246. for (numa = 0; numa < nnumas; numa++)
  2247. {
  2248. double down_bw = 1.0 / cudadev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_dtoh;
  2249. double up_bw = 1.0 / cudadev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_htod;
  2250. if (max_bandwidth < down_bw)
  2251. max_bandwidth = down_bw;
  2252. if (max_bandwidth < up_bw)
  2253. max_bandwidth = up_bw;
  2254. }
  2255. }
  2256. #endif
  2257. #ifdef STARPU_USE_OPENCL
  2258. for (i = 0; i < nopencl; i++)
  2259. {
  2260. for (numa = 0; numa < nnumas; numa++)
  2261. {
  2262. double down_bw = 1.0 / opencldev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_dtoh;
  2263. double up_bw = 1.0 / opencldev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_htod;
  2264. if (max_bandwidth < down_bw)
  2265. max_bandwidth = down_bw;
  2266. if (max_bandwidth < up_bw)
  2267. max_bandwidth = up_bw;
  2268. }
  2269. }
  2270. #endif
  2271. fprintf(f, "\n <link id=\"Host\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n\n", max_bandwidth*1000000, Bps, s);
  2272. /*
  2273. * OpenCL links
  2274. */
  2275. #ifdef STARPU_USE_OPENCL
  2276. for (i = 0; i < nopencl; i++)
  2277. {
  2278. char i_name[16];
  2279. snprintf(i_name, sizeof(i_name), "OpenCL%u", i);
  2280. fprintf(f, " <link id=\"RAM-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2281. i_name,
  2282. 1000000 / search_bus_best_timing(i, "OpenCL", 1), Bps,
  2283. search_bus_best_latency(i, "OpenCL", 1)/1000000., s);
  2284. fprintf(f, " <link id=\"%s-RAM\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2285. i_name,
  2286. 1000000 / search_bus_best_timing(i, "OpenCL", 0), Bps,
  2287. search_bus_best_latency(i, "OpenCL", 0)/1000000., s);
  2288. }
  2289. fprintf(f, "\n");
  2290. #endif
  2291. /*
  2292. * CUDA links and routes
  2293. */
  2294. #ifdef STARPU_USE_CUDA
  2295. /* Write RAM/CUDA bandwidths and latencies */
  2296. for (i = 0; i < ncuda; i++)
  2297. {
  2298. char i_name[16];
  2299. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2300. fprintf(f, " <link id=\"RAM-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2301. i_name,
  2302. 1000000. / search_bus_best_timing(i, "CUDA", 1), Bps,
  2303. search_bus_best_latency(i, "CUDA", 1)/1000000., s);
  2304. fprintf(f, " <link id=\"%s-RAM\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2305. i_name,
  2306. 1000000. / search_bus_best_timing(i, "CUDA", 0), Bps,
  2307. search_bus_best_latency(i, "CUDA", 0)/1000000., s);
  2308. }
  2309. fprintf(f, "\n");
  2310. #ifdef HAVE_CUDA_MEMCPY_PEER
  2311. /* Write CUDA/CUDA bandwidths and latencies */
  2312. for (i = 0; i < ncuda; i++)
  2313. {
  2314. unsigned j;
  2315. char i_name[16];
  2316. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2317. for (j = 0; j < ncuda; j++)
  2318. {
  2319. char j_name[16];
  2320. if (j == i)
  2321. continue;
  2322. snprintf(j_name, sizeof(j_name), "CUDA%u", j);
  2323. fprintf(f, " <link id=\"%s-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2324. i_name, j_name,
  2325. 1000000. / cudadev_timing_dtod[i][j], Bps,
  2326. cudadev_latency_dtod[i][j]/1000000., s);
  2327. }
  2328. }
  2329. #endif
  2330. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && defined(HAVE_CUDA_MEMCPY_PEER)
  2331. /* If we have enough hwloc information, write PCI bandwidths and routes */
  2332. if (!starpu_get_env_number_default("STARPU_PCI_FLAT", 0))
  2333. {
  2334. hwloc_topology_t topology;
  2335. hwloc_topology_init(&topology);
  2336. _starpu_topology_filter(topology);
  2337. hwloc_topology_load(topology);
  2338. /* First find paths and record measured bandwidth along the path */
  2339. for (i = 0; i < ncuda; i++)
  2340. {
  2341. unsigned j;
  2342. for (j = 0; j < ncuda; j++)
  2343. if (i != j)
  2344. if (!find_platform_cuda_path(topology, i, j, 1000000. / cudadev_timing_dtod[i][j]))
  2345. {
  2346. clean_topology(hwloc_get_root_obj(topology));
  2347. hwloc_topology_destroy(topology);
  2348. goto flat_cuda;
  2349. }
  2350. /* Record RAM/CUDA bandwidths */
  2351. find_platform_forward_path(hwloc_cuda_get_device_osdev_by_index(topology, i), 1000000. / search_bus_best_timing(i, "CUDA", 0));
  2352. find_platform_backward_path(hwloc_cuda_get_device_osdev_by_index(topology, i), 1000000. / search_bus_best_timing(i, "CUDA", 1));
  2353. }
  2354. /* Ok, found path in all cases, can emit advanced platform routes */
  2355. fprintf(f, "\n");
  2356. emit_topology_bandwidths(f, hwloc_get_root_obj(topology), Bps, s);
  2357. fprintf(f, "\n");
  2358. for (i = 0; i < ncuda; i++)
  2359. {
  2360. unsigned j;
  2361. for (j = 0; j < ncuda; j++)
  2362. if (i != j)
  2363. {
  2364. fprintf(f, " <route src=\"CUDA%u\" dst=\"CUDA%u\" symmetrical=\"NO\">\n", i, j);
  2365. fprintf(f, " <link_ctn id=\"CUDA%u-CUDA%u\"/>\n", i, j);
  2366. emit_platform_path_up(f,
  2367. hwloc_cuda_get_device_osdev_by_index(topology, i),
  2368. hwloc_cuda_get_device_osdev_by_index(topology, j));
  2369. fprintf(f, " </route>\n");
  2370. }
  2371. fprintf(f, " <route src=\"CUDA%u\" dst=\"RAM\" symmetrical=\"NO\">\n", i);
  2372. fprintf(f, " <link_ctn id=\"CUDA%u-RAM\"/>\n", i);
  2373. emit_platform_forward_path(f, hwloc_cuda_get_device_osdev_by_index(topology, i));
  2374. fprintf(f, " </route>\n");
  2375. fprintf(f, " <route src=\"RAM\" dst=\"CUDA%u\" symmetrical=\"NO\">\n", i);
  2376. fprintf(f, " <link_ctn id=\"RAM-CUDA%u\"/>\n", i);
  2377. emit_platform_backward_path(f, hwloc_cuda_get_device_osdev_by_index(topology, i));
  2378. fprintf(f, " </route>\n");
  2379. }
  2380. clean_topology(hwloc_get_root_obj(topology));
  2381. hwloc_topology_destroy(topology);
  2382. }
  2383. else
  2384. {
  2385. flat_cuda:
  2386. #else
  2387. {
  2388. #endif
  2389. /* If we don't have enough hwloc information, write trivial routes always through host */
  2390. for (i = 0; i < ncuda; i++)
  2391. {
  2392. char i_name[16];
  2393. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2394. fprintf(f, " <route src=\"RAM\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"RAM-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2395. fprintf(f, " <route src=\"%s\" dst=\"RAM\" symmetrical=\"NO\"><link_ctn id=\"%s-RAM\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2396. }
  2397. #ifdef HAVE_CUDA_MEMCPY_PEER
  2398. for (i = 0; i < ncuda; i++)
  2399. {
  2400. unsigned j;
  2401. char i_name[16];
  2402. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2403. for (j = 0; j < ncuda; j++)
  2404. {
  2405. char j_name[16];
  2406. if (j == i)
  2407. continue;
  2408. snprintf(j_name, sizeof(j_name), "CUDA%u", j);
  2409. fprintf(f, " <route src=\"%s\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"%s-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, j_name, i_name, j_name);
  2410. }
  2411. }
  2412. #endif
2413. } /* HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && HAVE_CUDA_MEMCPY_PEER */
  2414. fprintf(f, "\n");
  2415. #endif /* STARPU_USE_CUDA */
  2416. /*
  2417. * OpenCL routes
  2418. */
  2419. #ifdef STARPU_USE_OPENCL
  2420. for (i = 0; i < nopencl; i++)
  2421. {
  2422. char i_name[16];
  2423. snprintf(i_name, sizeof(i_name), "OpenCL%u", i);
  2424. fprintf(f, " <route src=\"RAM\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"RAM-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2425. fprintf(f, " <route src=\"%s\" dst=\"RAM\" symmetrical=\"NO\"><link_ctn id=\"%s-RAM\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2426. }
  2427. #endif
  2428. fprintf(f,
  2429. " </AS>\n"
  2430. " </platform>\n"
  2431. );
  2432. if (locked)
  2433. _starpu_fwrunlock(f);
  2434. fclose(f);
  2435. }
  2436. static void generate_bus_platform_file(void)
  2437. {
  2438. if (!was_benchmarked)
  2439. benchmark_all_gpu_devices();
  2440. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2441. /* Slaves don't write files */
  2442. if (!_starpu_mpi_common_is_src_node())
  2443. return;
  2444. #endif
  2445. write_bus_platform_file_content(3);
  2446. write_bus_platform_file_content(4);
  2447. }
  2448. static void check_bus_platform_file(void)
  2449. {
  2450. int res;
  2451. char path[256];
  2452. _starpu_simgrid_get_platform_path(4, path, sizeof(path));
  2453. res = access(path, F_OK);
  2454. if (!res)
  2455. {
  2456. _starpu_simgrid_get_platform_path(3, path, sizeof(path));
  2457. res = access(path, F_OK);
  2458. }
  2459. if (res)
  2460. {
  2461. /* File does not exist yet */
  2462. generate_bus_platform_file();
  2463. }
  2464. }
  2465. /*
  2466. * Generic
  2467. */
  2468. static void _starpu_bus_force_sampling(void)
  2469. {
  2470. _STARPU_DEBUG("Force bus sampling ...\n");
  2471. _starpu_create_sampling_directory_if_needed();
  2472. generate_bus_affinity_file();
  2473. generate_bus_latency_file();
  2474. generate_bus_bandwidth_file();
  2475. generate_bus_config_file();
  2476. generate_bus_platform_file();
  2477. }
  2478. #endif /* !SIMGRID */
  2479. void _starpu_load_bus_performance_files(void)
  2480. {
  2481. _starpu_create_sampling_directory_if_needed();
  2482. struct _starpu_machine_config * config = _starpu_get_machine_config();
  2483. nnumas = _starpu_topology_get_nnumanodes(config);
  2484. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_SIMGRID)
  2485. ncuda = _starpu_get_cuda_device_count();
  2486. #endif
  2487. #if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SIMGRID)
  2488. nopencl = _starpu_opencl_get_device_count();
  2489. #endif
  2490. #if defined(STARPU_USE_MPI_MASTER_SLAVE) || defined(STARPU_USE_SIMGRID)
  2491. nmpi_ms = _starpu_mpi_src_get_device_count();
  2492. #endif
  2493. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_SIMGRID)
  2494. nmic = _starpu_mic_src_get_device_count();
  2495. #endif
  2496. #ifndef STARPU_SIMGRID
  2497. check_bus_config_file();
  2498. #endif
  2499. #ifdef STARPU_USE_MPI_MASTER_SLAVE
2500. /* make sure the master has written the perf files */
  2501. _starpu_mpi_common_barrier();
  2502. #endif
  2503. #ifndef STARPU_SIMGRID
  2504. load_bus_affinity_file();
  2505. #endif
  2506. load_bus_latency_file();
  2507. load_bus_bandwidth_file();
  2508. #ifndef STARPU_SIMGRID
  2509. check_bus_platform_file();
  2510. #endif
  2511. }
  2512. /* (in MB/s) */
  2513. double starpu_transfer_bandwidth(unsigned src_node, unsigned dst_node)
  2514. {
  2515. return bandwidth_matrix[src_node][dst_node];
  2516. }
  2517. /* (in µs) */
  2518. double starpu_transfer_latency(unsigned src_node, unsigned dst_node)
  2519. {
  2520. return latency_matrix[src_node][dst_node];
  2521. }
  2522. /* (in µs) */
  2523. double starpu_transfer_predict(unsigned src_node, unsigned dst_node, size_t size)
  2524. {
  2525. double bandwidth = bandwidth_matrix[src_node][dst_node];
  2526. double latency = latency_matrix[src_node][dst_node];
  2527. struct _starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
  2528. #if 0
  2529. int busid = starpu_bus_get_id(src_node, dst_node);
  2530. int direct = starpu_bus_get_direct(busid);
  2531. #endif
  2532. float ngpus = topology->ncudagpus+topology->nopenclgpus;
  2533. #if 0
  2534. /* Ideally we should take into account that some GPUs are directly
  2535. * connected through a PCI switch, which has less contention that the
  2536. * Host bridge, but doing that seems to *decrease* performance... */
  2537. if (direct)
  2538. {
  2539. float neighbours = starpu_bus_get_ngpus(busid);
  2540. /* Count transfers of these GPUs, and count transfers between
  2541. * other GPUs and these GPUs */
  2542. ngpus = neighbours + (ngpus - neighbours) * neighbours / ngpus;
  2543. }
  2544. #endif
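/* Predicted transfer time: the constant bus latency plus the size/bandwidth transfer
 * time, inflated by 2*ngpus as a coarse contention factor, presumably to account for
 * the other GPUs potentially using the bus in both directions at the same time. */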
  2545. return latency + (size/bandwidth)*2*ngpus;
  2546. }
2547. /* Compute and save the bandwidth and latency entries for a disk node */
  2548. /* bandwidth in MB/s - latency in µs */
  2549. void _starpu_save_bandwidth_and_latency_disk(double bandwidth_write, double bandwidth_read, double latency_write, double latency_read, unsigned node)
  2550. {
  2551. unsigned int i, j;
  2552. double slowness_disk_between_main_ram, slowness_main_ram_between_node;
  2553. int print_stats = starpu_get_env_number_default("STARPU_BUS_STATS", 0);
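/* Disk transfers go through main RAM, so the disk<->node bandwidth is obtained by
 * adding the disk<->RAM and RAM<->node slownesses (time per byte) and taking the
 * reciprocal, while the corresponding latencies simply add up. */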
  2554. if (print_stats)
  2555. {
  2556. fprintf(stderr, "\n#---------------------\n");
  2557. fprintf(stderr, "Data transfer speed for %u:\n", node);
  2558. }
2559. /* save bandwidth */
  2560. for(i = 0; i < STARPU_MAXNODES; ++i)
  2561. {
  2562. for(j = 0; j < STARPU_MAXNODES; ++j)
  2563. {
  2564. if (i == j && j == node) /* source == destination == node */
  2565. {
  2566. bandwidth_matrix[i][j] = 0;
  2567. }
  2568. else if (i == node) /* source == disk */
  2569. {
2570. /* convert to slowness */
  2571. if(bandwidth_read != 0)
  2572. slowness_disk_between_main_ram = 1/bandwidth_read;
  2573. else
  2574. slowness_disk_between_main_ram = 0;
  2575. if(bandwidth_matrix[STARPU_MAIN_RAM][j] != 0)
  2576. slowness_main_ram_between_node = 1/bandwidth_matrix[STARPU_MAIN_RAM][j];
  2577. else
  2578. slowness_main_ram_between_node = 0;
  2579. bandwidth_matrix[i][j] = 1/(slowness_disk_between_main_ram+slowness_main_ram_between_node);
  2580. if (!isnan(bandwidth_matrix[i][j]) && print_stats)
  2581. fprintf(stderr,"%u -> %u: %.0f MB/s\n", i, j, bandwidth_matrix[i][j]);
  2582. }
  2583. else if (j == node) /* destination == disk */
  2584. {
2585. /* convert to slowness */
  2586. if(bandwidth_write != 0)
  2587. slowness_disk_between_main_ram = 1/bandwidth_write;
  2588. else
  2589. slowness_disk_between_main_ram = 0;
  2590. if(bandwidth_matrix[i][STARPU_MAIN_RAM] != 0)
  2591. slowness_main_ram_between_node = 1/bandwidth_matrix[i][STARPU_MAIN_RAM];
  2592. else
  2593. slowness_main_ram_between_node = 0;
  2594. bandwidth_matrix[i][j] = 1/(slowness_disk_between_main_ram+slowness_main_ram_between_node);
  2595. if (!isnan(bandwidth_matrix[i][j]) && print_stats)
  2596. fprintf(stderr,"%u -> %u: %.0f MB/s\n", i, j, bandwidth_matrix[i][j]);
  2597. }
  2598. else if (j > node || i > node) /* not affected by the node */
  2599. {
  2600. bandwidth_matrix[i][j] = NAN;
  2601. }
  2602. }
  2603. }
  2604. /* save latency */
  2605. for(i = 0; i < STARPU_MAXNODES; ++i)
  2606. {
  2607. for(j = 0; j < STARPU_MAXNODES; ++j)
  2608. {
  2609. if (i == j && j == node) /* source == destination == node */
  2610. {
  2611. latency_matrix[i][j] = 0;
  2612. }
  2613. else if (i == node) /* source == disk */
  2614. {
  2615. latency_matrix[i][j] = (latency_write+latency_matrix[STARPU_MAIN_RAM][j]);
  2616. if (!isnan(latency_matrix[i][j]) && print_stats)
  2617. fprintf(stderr,"%u -> %u: %.0f µs\n", i, j, latency_matrix[i][j]);
  2618. }
  2619. else if (j == node) /* destination == disk */
  2620. {
  2621. latency_matrix[i][j] = (latency_read+latency_matrix[i][STARPU_MAIN_RAM]);
  2622. if (!isnan(latency_matrix[i][j]) && print_stats)
  2623. fprintf(stderr,"%u -> %u: %.0f µs\n", i, j, latency_matrix[i][j]);
  2624. }
  2625. else if (j > node || i > node) /* not affected by the node */
  2626. {
  2627. latency_matrix[i][j] = NAN;
  2628. }
  2629. }
  2630. }
  2631. if (print_stats)
  2632. fprintf(stderr, "\n#---------------------\n");
  2633. }