/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2014,2016,2017 Inria
 * Copyright (C) 2009-2019 Université de Bordeaux
 * Copyright (C) 2010-2017,2019 CNRS
 * Copyright (C) 2013 Corentin Salingue
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#ifdef STARPU_USE_CUDA
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#include <sched.h>
#endif

#include <stdlib.h>
#include <math.h>

#include <starpu.h>
#include <starpu_cuda.h>
#include <starpu_opencl.h>
#include <common/config.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <core/workers.h>
#include <core/perfmodel/perfmodel.h>
#include <core/simgrid.h>
#include <core/topology.h>
#include <common/utils.h>
#include <drivers/mpi/driver_mpi_common.h>

#ifdef STARPU_USE_OPENCL
#include <starpu_opencl.h>
#endif

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#ifndef HWLOC_API_VERSION
#define HWLOC_OBJ_PU HWLOC_OBJ_PROC
#endif
#if HWLOC_API_VERSION < 0x00010b00
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#endif
#endif

#if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
#include <hwloc/cuda.h>
#endif

#define SIZE (32*1024*1024*sizeof(char))
#define NITER 32
#define PATH_LENGTH 256

#ifndef STARPU_SIMGRID
static void _starpu_bus_force_sampling(void);
#endif

/* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
struct dev_timing
{
	int numa_id;
	double timing_htod;
	double latency_htod;
	double timing_dtoh;
	double latency_dtoh;
};

/* TODO: measure latency */
static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static unsigned was_benchmarked = 0;
#ifndef STARPU_SIMGRID
static unsigned ncpus = 0;
#endif
static unsigned nnumas = 0;
static unsigned ncuda = 0;
static unsigned nopencl = 0;
static unsigned nmic = 0;
static unsigned nmpi_ms = 0;

/* Benchmarking the performance of the bus */
static double numa_latency[STARPU_MAXNUMANODES][STARPU_MAXNUMANODES];
static double numa_timing[STARPU_MAXNUMANODES][STARPU_MAXNUMANODES];

#ifndef STARPU_SIMGRID
static uint64_t cuda_size[STARPU_MAXCUDADEVS];
#endif
#ifdef STARPU_USE_CUDA
/* preference order of cores (logical indexes) */
static unsigned cuda_affinity_matrix[STARPU_MAXCUDADEVS][STARPU_MAXNUMANODES];
#ifndef STARPU_SIMGRID
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
static double cudadev_latency_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
#endif
#endif
static struct dev_timing cudadev_timing_per_numa[STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES];
static char cudadev_direct[STARPU_MAXNODES][STARPU_MAXNODES];
#endif

#ifndef STARPU_SIMGRID
static uint64_t opencl_size[STARPU_MAXCUDADEVS];
#endif
#ifdef STARPU_USE_OPENCL
/* preference order of cores (logical indexes) */
static unsigned opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][STARPU_MAXNUMANODES];
static struct dev_timing opencldev_timing_per_numa[STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES];
#endif

#ifdef STARPU_USE_MIC
static double mic_time_host_to_device[STARPU_MAXNODES] = {0.0};
static double mic_time_device_to_host[STARPU_MAXNODES] = {0.0};
#endif /* STARPU_USE_MIC */

#ifdef STARPU_USE_MPI_MASTER_SLAVE
static double mpi_time_device_to_device[STARPU_MAXMPIDEVS][STARPU_MAXMPIDEVS] = {{0.0}};
static double mpi_latency_device_to_device[STARPU_MAXMPIDEVS][STARPU_MAXMPIDEVS] = {{0.0}};
#endif

#ifdef STARPU_HAVE_HWLOC
static hwloc_topology_t hwtopology;

hwloc_topology_t _starpu_perfmodel_get_hwtopology()
{
	return hwtopology;
}
#endif

#if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && !defined(STARPU_SIMGRID)

#ifdef STARPU_USE_CUDA
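/* Measure host<->device transfer time between CUDA device 'dev' and NUMA
 * node 'numa', with the benchmarking thread bound to CPU 'cpu': SIZE-byte
 * copies give the per-byte timings, 1-byte copies give the latencies. The
 * results are stored in dev_timing_per_cpu[dev*STARPU_MAXNUMANODES + numa]. */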
static void measure_bandwidth_between_host_and_dev_on_numa_with_cuda(int dev, int numa, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);
	size_t size = SIZE;

	/* Initialize CUDA context on the device */
	/* We do not need to enable OpenGL interoperability at this point,
	 * since we cleanly shut down CUDA before returning. */
	cudaSetDevice(dev);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	/* hack to force the initialization */
	cudaFree(0);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	/* Get the maximum size which can be allocated on the device */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, dev);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	cuda_size[dev] = prop.totalGlobalMem;
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cures = cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(cures == cudaSuccess);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
#if defined(STARPU_HAVE_HWLOC)
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
	if (nnuma_nodes > 1)
	{
		/* NUMA mode activated */
		hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa);
#if HWLOC_API_VERSION >= 0x00020000
		h_buffer = hwloc_alloc_membind(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
#else
		h_buffer = hwloc_alloc_membind_nodeset(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, 0);
#endif
	}
	else
#endif
	{
		/* we use STARPU_MAIN_RAM */
		_STARPU_MALLOC(h_buffer, size);
		cudaHostRegister((void *)h_buffer, size, 0);
	}
	STARPU_ASSERT(cures == cudaSuccess);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	/* Fill them */
	memset(h_buffer, 0, size);
	cudaMemset(d_buffer, 0, size);
	cudaThreadSynchronize();

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa;
	unsigned iter;
	double timing;
	double start;
	double end;

	/* Measure upload bandwidth */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
		cudaThreadSynchronize();
	}
	end = starpu_timing_now();
	timing = end - start;
	dev_timing_per_cpu[timing_numa_index].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
		cudaThreadSynchronize();
	}
	end = starpu_timing_now();
	timing = end - start;
	dev_timing_per_cpu[timing_numa_index].timing_dtoh = timing/NITER/size;

	/* Measure upload latency */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(d_buffer, h_buffer, 1, cudaMemcpyHostToDevice);
		cudaThreadSynchronize();
	}
	end = starpu_timing_now();
	timing = end - start;
	dev_timing_per_cpu[timing_numa_index].latency_htod = timing/NITER;

	/* Measure download latency */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(h_buffer, d_buffer, 1, cudaMemcpyDeviceToHost);
		cudaThreadSynchronize();
	}
	end = starpu_timing_now();
	timing = end - start;
	dev_timing_per_cpu[timing_numa_index].latency_dtoh = timing/NITER;

	/* Free buffers */
	cudaHostUnregister(h_buffer);
#if defined(STARPU_HAVE_HWLOC)
	if (nnuma_nodes > 1)
	{
		/* NUMA mode activated */
		hwloc_free(hwtopology, h_buffer, size);
	}
	else
#endif
	{
		free(h_buffer);
	}

	cudaFree(d_buffer);
	cudaThreadExit();
}

#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
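/* Measure device-to-device transfer time from CUDA device 'src' to CUDA
 * device 'dst' with cudaMemcpyPeer(), enabling peer access (GPU-Direct) when
 * the hardware allows it and STARPU_ENABLE_CUDA_GPU_GPU_DIRECT is not 0.
 * Results are stored in cudadev_timing_dtod and cudadev_latency_dtod. */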
static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
{
	size_t size = SIZE;
	int can;

	/* Get the maximum size which can be allocated on the device */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, src);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
	cures = cudaGetDeviceProperties(&prop, dst);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Initialize CUDA context on the source */
	/* We do not need to enable OpenGL interoperability at this point,
	 * since we cleanly shut down CUDA before returning. */
	cudaSetDevice(src);

	if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
	{
		cures = cudaDeviceCanAccessPeer(&can, src, dst);
		if (!cures && can)
		{
			cures = cudaDeviceEnablePeerAccess(dst, 0);
			if (!cures)
			{
				_STARPU_DISP("GPU-Direct %d -> %d\n", dst, src);
				cudadev_direct[src][dst] = 1;
			}
		}
	}

	/* Allocate a buffer on the device */
	unsigned char *s_buffer;
	cures = cudaMalloc((void **)&s_buffer, size);
	STARPU_ASSERT(cures == cudaSuccess);
	cudaMemset(s_buffer, 0, size);
	cudaThreadSynchronize();

	/* Initialize CUDA context on the destination */
	/* We do not need to enable OpenGL interoperability at this point,
	 * since we cleanly shut down CUDA before returning. */
	cudaSetDevice(dst);

	if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
	{
		cures = cudaDeviceCanAccessPeer(&can, dst, src);
		if (!cures && can)
		{
			cures = cudaDeviceEnablePeerAccess(src, 0);
			if (!cures)
			{
				_STARPU_DISP("GPU-Direct %d -> %d\n", src, dst);
				cudadev_direct[dst][src] = 1;
			}
		}
	}

	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cures = cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(cures == cudaSuccess);
	cudaMemset(d_buffer, 0, size);
	cudaThreadSynchronize();

	unsigned iter;
	double timing;
	double start;
	double end;

	/* Measure upload bandwidth */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
		cudaThreadSynchronize();
	}
	end = starpu_timing_now();
	timing = end - start;
	cudadev_timing_dtod[src][dst] = timing/NITER/size;

	/* Measure upload latency */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpyPeer(d_buffer, dst, s_buffer, src, 1);
		cudaThreadSynchronize();
	}
	end = starpu_timing_now();
	timing = end - start;
	cudadev_latency_dtod[src][dst] = timing/NITER;

	/* Free buffers */
	cudaFree(d_buffer);
	cudaSetDevice(src);
	cudaFree(s_buffer);
	cudaThreadExit();
}
#endif
#endif

#ifdef STARPU_USE_OPENCL
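/* OpenCL counterpart of the CUDA measurement above: time host<->device
 * transfers of SIZE bytes (bandwidth) and 1 byte (latency) between OpenCL
 * device 'dev' and NUMA node 'numa' through the device's command queue. */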
static void measure_bandwidth_between_host_and_dev_on_numa_with_opencl(int dev, int numa, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	cl_context context;
	cl_command_queue queue;
	cl_int err=0;
	size_t size = SIZE;
	int not_initialized;

	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	/* Is the context already initialised? */
	starpu_opencl_get_context(dev, &context);
	not_initialized = (context == NULL);
	if (not_initialized == 1)
		_starpu_opencl_init_context(dev);

	/* Get context and queue */
	starpu_opencl_get_context(dev, &context);
	starpu_opencl_get_queue(dev, &queue);

	/* Get the maximum size which can be allocated on the device */
	cl_device_id device;
	cl_ulong maxMemAllocSize, totalGlobalMem;
	starpu_opencl_get_device(dev, &device);
	err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
	if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
	if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;
	err = clGetDeviceInfo(device, CL_DEVICE_GLOBAL_MEM_SIZE , sizeof(totalGlobalMem), &totalGlobalMem, NULL);
	if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
	opencl_size[dev] = totalGlobalMem;

	if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
	{
		/* Let's not use too much RAM when running OpenCL on a CPU: it
		 * would make the OS swap like crazy. */
		size /= 2;
	}

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	/* Allocate a buffer on the device */
	cl_mem d_buffer;
	d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
	if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
#if defined(STARPU_HAVE_HWLOC)
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);
	if (nnuma_nodes > 1)
	{
		/* NUMA mode activated */
		hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa);
#if HWLOC_API_VERSION >= 0x00020000
		h_buffer = hwloc_alloc_membind(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
#else
		h_buffer = hwloc_alloc_membind_nodeset(hwtopology, size, obj->nodeset, HWLOC_MEMBIND_BIND, 0);
#endif
	}
	else
#endif
	{
		/* we use STARPU_MAIN_RAM */
		_STARPU_MALLOC(h_buffer, size);
	}

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	/* Fill them */
	memset(h_buffer, 0, size);
	err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
	if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
	clFinish(queue);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(cpu, STARPU_NOWORKERID, NULL);

	const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa;
	unsigned iter;
	double timing;
	double start;
	double end;

	/* Measure upload bandwidth */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
		clFinish(queue);
	}
	end = starpu_timing_now();
	timing = end - start;
	dev_timing_per_cpu[timing_numa_index].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
		clFinish(queue);
	}
	end = starpu_timing_now();
	timing = end - start;
	dev_timing_per_cpu[timing_numa_index].timing_dtoh = timing/NITER/size;

	/* Measure upload latency */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, 1, h_buffer, 0, NULL, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
		clFinish(queue);
	}
	end = starpu_timing_now();
	timing = end - start;
	dev_timing_per_cpu[timing_numa_index].latency_htod = timing/NITER;

	/* Measure download latency */
	start = starpu_timing_now();
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, 1, h_buffer, 0, NULL, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
		clFinish(queue);
	}
	end = starpu_timing_now();
	timing = end - start;
	dev_timing_per_cpu[timing_numa_index].latency_dtoh = timing/NITER;

	/* Free buffers */
	err = clReleaseMemObject(d_buffer);
	if (STARPU_UNLIKELY(err != CL_SUCCESS))
		STARPU_OPENCL_REPORT_ERROR(err);

#if defined(STARPU_HAVE_HWLOC)
	if (nnuma_nodes > 1)
	{
		/* NUMA mode activated */
		hwloc_free(hwtopology, h_buffer, size);
	}
	else
#endif
	{
		free(h_buffer);
	}

	/* Uninitialize the OpenCL context on the device */
	if (not_initialized == 1)
		_starpu_opencl_deinit_context(dev);
}
#endif

/* NB: we want to sort the bandwidth by DECREASING order */
static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
{
	const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
	const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;

	double left_dtoh = left->timing_dtoh;
	double left_htod = left->timing_htod;
	double right_dtoh = right->timing_dtoh;
	double right_htod = right->timing_htod;

	double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
	double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;

	/* it's for a decreasing sorting */
	return (timing_sum2_left > timing_sum2_right);
}

#ifdef STARPU_HAVE_HWLOC
static int find_cpu_from_numa_node(hwloc_obj_t obj)
{
	STARPU_ASSERT(obj);
	hwloc_obj_t current = obj;
	while (current->depth != HWLOC_OBJ_PU)
	{
		current = current->first_child;
		/* If we do not find a "PU" object before reaching a leaf,
		 * perhaps we are just not allowed to use it. */
		if (!current)
			return -1;
	}
	STARPU_ASSERT(current->depth == HWLOC_OBJ_PU);
	return current->logical_index;
}
#endif

static void measure_bandwidth_between_numa_nodes_and_dev(int dev, struct dev_timing *dev_timing_per_numanode, char *type)
{
	/* We measure the bandwidth between each GPU and each NUMA node */
	struct _starpu_machine_config * config = _starpu_get_machine_config();
	const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);

	unsigned numa_id;
	for (numa_id = 0; numa_id < nnuma_nodes; numa_id++)
	{
		/* Store results by starpu id */
		const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa_id;
		/* Store the StarPU memory node for later */
		dev_timing_per_numanode[timing_numa_index].numa_id = numa_id;

		/* Choose one CPU connected to this NUMA node */
		int cpu_id = 0;
#ifdef STARPU_HAVE_HWLOC
		hwloc_obj_t obj = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_id);
		if (obj)
		{
#if HWLOC_API_VERSION >= 0x00020000
			/* Since hwloc 2.0, NUMA node objects do not contain CPUs:
			 * they are attached to a group object which contains the CPUs. */
			obj = obj->parent;
#endif
			cpu_id = find_cpu_from_numa_node(obj);
		}
		else
			/* No such NUMA node, probably hwloc 1.x with no NUMA
			 * node, just take one CPU from the whole system */
			cpu_id = find_cpu_from_numa_node(hwloc_get_root_obj(hwtopology));
#endif
		if (cpu_id < 0)
			continue;

#ifdef STARPU_USE_CUDA
		if (strncmp(type, "CUDA", 4) == 0)
			measure_bandwidth_between_host_and_dev_on_numa_with_cuda(dev, numa_id, cpu_id, dev_timing_per_numanode);
#endif
#ifdef STARPU_USE_OPENCL
		if (strncmp(type, "OpenCL", 6) == 0)
			measure_bandwidth_between_host_and_dev_on_numa_with_opencl(dev, numa_id, cpu_id, dev_timing_per_numanode);
#endif
	}
}

static void measure_bandwidth_between_host_and_dev(int dev, struct dev_timing *dev_timing_per_numa, char *type)
{
	measure_bandwidth_between_numa_nodes_and_dev(dev, dev_timing_per_numa, type);

#ifdef STARPU_VERBOSE
	struct _starpu_machine_config * config = _starpu_get_machine_config();
	const unsigned nnuma_nodes = _starpu_topology_get_nnumanodes(config);

	unsigned numa_id;
	for (numa_id = 0; numa_id < nnuma_nodes; numa_id++)
	{
		const unsigned timing_numa_index = dev*STARPU_MAXNUMANODES + numa_id;
		double bandwidth_dtoh = dev_timing_per_numa[timing_numa_index].timing_dtoh;
		double bandwidth_htod = dev_timing_per_numa[timing_numa_index].timing_htod;
		double bandwidth_sum2 = bandwidth_dtoh*bandwidth_dtoh + bandwidth_htod*bandwidth_htod;
		_STARPU_DISP("(%10s) BANDWIDTH GPU %d NUMA %u - htod %f - dtoh %f - %f\n", type, dev, numa_id, bandwidth_htod, bandwidth_dtoh, sqrt(bandwidth_sum2));
	}
#endif
}
#endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
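
/* Measure memcpy() time between buffers bound to NUMA nodes numa_src and
 * numa_dst (SIZE bytes for bandwidth, 1 byte for latency). Without hwloc, or
 * with a single NUMA node, fall back to arbitrary constants. */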
static void measure_bandwidth_latency_between_numa(int numa_src, int numa_dst)
{
#if defined(STARPU_HAVE_HWLOC)
	if (nnumas > 1)
	{
		/* NUMA mode activated */
		double start, end, timing;
		unsigned iter;

		unsigned char *h_buffer;
		hwloc_obj_t obj_src = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_src);
#if HWLOC_API_VERSION >= 0x00020000
		h_buffer = hwloc_alloc_membind(hwtopology, SIZE, obj_src->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
#else
		h_buffer = hwloc_alloc_membind_nodeset(hwtopology, SIZE, obj_src->nodeset, HWLOC_MEMBIND_BIND, 0);
#endif

		unsigned char *d_buffer;
		hwloc_obj_t obj_dst = hwloc_get_obj_by_type(hwtopology, HWLOC_OBJ_NUMANODE, numa_dst);
#if HWLOC_API_VERSION >= 0x00020000
		d_buffer = hwloc_alloc_membind(hwtopology, SIZE, obj_dst->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
#else
		d_buffer = hwloc_alloc_membind_nodeset(hwtopology, SIZE, obj_dst->nodeset, HWLOC_MEMBIND_BIND, 0);
#endif

		memset(h_buffer, 0, SIZE);

		start = starpu_timing_now();
		for (iter = 0; iter < NITER; iter++)
		{
			memcpy(d_buffer, h_buffer, SIZE);
		}
		end = starpu_timing_now();
		timing = end - start;
		numa_timing[numa_src][numa_dst] = timing/NITER/SIZE;

		start = starpu_timing_now();
		for (iter = 0; iter < NITER; iter++)
		{
			memcpy(d_buffer, h_buffer, 1);
		}
		end = starpu_timing_now();
		timing = end - start;
		numa_latency[numa_src][numa_dst] = timing/NITER;

		hwloc_free(hwtopology, h_buffer, SIZE);
		hwloc_free(hwtopology, d_buffer, SIZE);
	}
	else
#endif
	{
		/* Cannot make a real calibration */
		numa_timing[numa_src][numa_dst] = 0.01;
		numa_latency[numa_src][numa_dst] = 0;
	}
}
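
/* Run the whole bus calibration: NUMA<->NUMA, host<->CUDA, CUDA<->CUDA,
 * host<->OpenCL, MIC and MPI master-slave measurements, saving and restoring
 * the CPU binding of the calling thread around the benchmarks. */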
static void benchmark_all_gpu_devices(void)
{
#ifdef STARPU_SIMGRID
	_STARPU_DISP("Cannot measure the bus in SimGrid mode, please run starpu_calibrate_bus in non-SimGrid mode to make sure the bus performance model was calibrated\n");
	STARPU_ABORT();
#else /* !SIMGRID */
	unsigned i, j;

	_STARPU_DEBUG("Benchmarking the speed of the bus\n");

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&hwtopology);
	_starpu_topology_filter(hwtopology);
	hwloc_topology_load(hwtopology);
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_bitmap_t former_cpuset = hwloc_bitmap_alloc();
	hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Save the current cpu binding */
	cpu_set_t former_process_affinity;
	int ret;
	ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_getaffinity");
		STARPU_ABORT();
	}
#else
#warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
#endif

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);
	nnumas = _starpu_topology_get_nnumanodes(config);

	for (i = 0; i < nnumas; i++)
		for (j = 0; j < nnumas; j++)
			if (i != j)
			{
				_STARPU_DISP("NUMA %u -> %u...\n", i, j);
				measure_bandwidth_latency_between_numa(i, j);
			}

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (i = 0; i < ncuda; i++)
	{
		_STARPU_DISP("CUDA %u...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, cudadev_timing_per_numa, "CUDA");
	}
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	for (i = 0; i < ncuda; i++)
	{
		for (j = 0; j < ncuda; j++)
			if (i != j)
			{
				_STARPU_DISP("CUDA %u -> %u...\n", i, j);
				/* measure bandwidth between Device i and Device j */
				measure_bandwidth_between_dev_and_dev_cuda(i, j);
			}
	}
#endif
#endif

#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (i = 0; i < nopencl; i++)
	{
		_STARPU_DISP("OpenCL %u...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, opencldev_timing_per_numa, "OpenCL");
	}
#endif

#ifdef STARPU_USE_MIC
	/* TODO: implement a real calibration! For now we only put an arbitrary
	 * value for each device at declaration time as a workaround, otherwise
	 * the heft scheduler runs into problems. */
	nmic = _starpu_mic_src_get_device_count();
	for (i = 0; i < STARPU_MAXNODES; i++)
	{
		mic_time_host_to_device[i] = 0.1;
		mic_time_device_to_host[i] = 0.1;
	}
#endif /* STARPU_USE_MIC */

#ifdef STARPU_USE_MPI_MASTER_SLAVE
	_starpu_mpi_common_measure_bandwidth_latency(mpi_time_device_to_device, mpi_latency_device_to_device);
#endif /* STARPU_USE_MPI_MASTER_SLAVE */

#ifdef STARPU_HAVE_HWLOC
	hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
	hwloc_bitmap_free(former_cpuset);
#elif __linux__
	/* Restore the former affinity */
	ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_setaffinity");
		STARPU_ABORT();
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(hwtopology);
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");
	was_benchmarked = 1;
#endif /* !SIMGRID */
}

static void get_bus_path(const char *type, char *path, size_t maxlen)
{
	char hostname[65];
	_starpu_gethostname(hostname, sizeof(hostname));
	snprintf(path, maxlen, "%s%s.%s", _starpu_get_perf_model_dir_bus(), hostname, type);
}

/*
 *	Affinity
 */

static void get_affinity_path(char *path, size_t maxlen)
{
	get_bus_path("affinity", path, maxlen);
}

#ifndef STARPU_SIMGRID
static void load_bus_affinity_file_content(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	int locked;

	char path[PATH_LENGTH];
	get_affinity_path(path, sizeof(path));

	_STARPU_DEBUG("loading affinities from %s\n", path);

	f = fopen(path, "r");
	STARPU_ASSERT(f);
	locked = _starpu_frdlock(f) == 0;

	unsigned gpu;
#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		int ret;
		unsigned dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%u\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned numa;
		for (numa = 0; numa < nnumas; numa++)
		{
			ret = fscanf(f, "%u\t", &cuda_affinity_matrix[gpu][numa]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_CUDA */
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		int ret;
		unsigned dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%u\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned numa;
		for (numa = 0; numa < nnumas; numa++)
		{
			ret = fscanf(f, "%u\t", &opencl_affinity_matrix[gpu][numa]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_OPENCL */
	if (locked)
		_starpu_frdunlock(f);
	fclose(f);
#endif /* STARPU_USE_CUDA || STARPU_USE_OPENCL */
}

#ifndef STARPU_SIMGRID
static void write_bus_affinity_file_content(void)
{
	STARPU_ASSERT(was_benchmarked);

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	char path[PATH_LENGTH];
	int locked;

	get_affinity_path(path, sizeof(path));

	_STARPU_DEBUG("writing affinities to %s\n", path);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_affinity_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	locked = _starpu_frdlock(f) == 0;
	unsigned numa;
	unsigned gpu;

	fprintf(f, "# GPU\t");
	for (numa = 0; numa < nnumas; numa++)
		fprintf(f, "NUMA%u\t", numa);
	fprintf(f, "\n");

#ifdef STARPU_USE_CUDA
	{
		/* Use another array to sort the timings */
		struct dev_timing cudadev_timing_per_numa_sorted[STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES];
		memcpy(cudadev_timing_per_numa_sorted, cudadev_timing_per_numa, STARPU_MAXCUDADEVS*STARPU_MAXNUMANODES*sizeof(struct dev_timing));

		for (gpu = 0; gpu < ncuda; gpu++)
		{
			fprintf(f, "%u\t", gpu);

			qsort(&(cudadev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES]), nnumas, sizeof(struct dev_timing), compar_dev_timing);

			for (numa = 0; numa < nnumas; numa++)
			{
				fprintf(f, "%d\t", cudadev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES+numa].numa_id);
			}
			fprintf(f, "\n");
		}
	}
#endif
#ifdef STARPU_USE_OPENCL
	{
		/* Use another array to sort the timings */
		struct dev_timing opencldev_timing_per_numa_sorted[STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES];
		memcpy(opencldev_timing_per_numa_sorted, opencldev_timing_per_numa, STARPU_MAXOPENCLDEVS*STARPU_MAXNUMANODES*sizeof(struct dev_timing));

		for (gpu = 0; gpu < nopencl; gpu++)
		{
			fprintf(f, "%u\t", gpu);

			qsort(&(opencldev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES]), nnumas, sizeof(struct dev_timing), compar_dev_timing);

			for (numa = 0; numa < nnumas; numa++)
			{
				fprintf(f, "%d\t", opencldev_timing_per_numa_sorted[gpu*STARPU_MAXNUMANODES+numa].numa_id);
			}
			fprintf(f, "\n");
		}
	}
#endif
	if (locked)
		_starpu_frdunlock(f);
	fclose(f);
#endif
}
#endif /* STARPU_SIMGRID */

static void generate_bus_affinity_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

#ifdef STARPU_USE_MPI_MASTER_SLAVE
	/* Slaves don't write files */
	if (!_starpu_mpi_common_is_src_node())
		return;
#endif
	write_bus_affinity_file_content();
}

static int check_bus_affinity_file(void)
{
	int ret = 1;
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	int locked;
	unsigned dummy;

	char path[PATH_LENGTH];
	get_affinity_path(path, sizeof(path));

	_STARPU_DEBUG("loading affinities from %s\n", path);

	f = fopen(path, "r");
	STARPU_ASSERT(f);
	locked = _starpu_frdlock(f) == 0;

	ret = fscanf(f, "# GPU\t");
	STARPU_ASSERT(ret == 0);
	ret = fscanf(f, "NUMA%u\t", &dummy);

	if (locked)
		_starpu_frdunlock(f);
	fclose(f);
#endif
	return ret == 1;
}

static void load_bus_affinity_file(void)
{
	int exist, check = 1;

	char path[PATH_LENGTH];
	get_affinity_path(path, sizeof(path));

	/* access() returns 0 if the file exists */
	exist = access(path, F_OK);
	if (exist == 0)
		/* check is 0 if the file is not in the expected format */
		check = check_bus_affinity_file();

	if (check == 0)
		_STARPU_DISP("Affinity file is too old for this version of StarPU! Rebuilding it...\n");

	if (check == 0 || exist != 0)
	{
		/* File does not exist yet, or is bogus */
		generate_bus_affinity_file();
	}

	load_bus_affinity_file_content();
}

#ifdef STARPU_USE_CUDA
unsigned *_starpu_get_cuda_affinity_vector(unsigned gpuid)
{
	return cuda_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_OPENCL
unsigned *_starpu_get_opencl_affinity_vector(unsigned gpuid)
{
	return opencl_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_OPENCL */

void starpu_bus_print_affinity(FILE *f)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	unsigned numa;
	unsigned gpu;
#endif

	fprintf(f, "# GPU\tNUMA in preference order (logical index)\n");

#ifdef STARPU_USE_CUDA
	fprintf(f, "# CUDA\n");
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		fprintf(f, "%u\t", gpu);
		for (numa = 0; numa < nnumas; numa++)
		{
			fprintf(f, "%u\t", cuda_affinity_matrix[gpu][numa]);
		}
		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	fprintf(f, "# OpenCL\n");
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		fprintf(f, "%u\t", gpu);
		for (numa = 0; numa < nnumas; numa++)
		{
			fprintf(f, "%u\t", opencl_affinity_matrix[gpu][numa]);
		}
		fprintf(f, "\n");
	}
#endif
}
#endif /* STARPU_SIMGRID */

/*
 *	Latency
 */

static void get_latency_path(char *path, size_t maxlen)
{
	get_bus_path("latency", path, maxlen);
}
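
/* Parse the per-host latency file back into latency_matrix. Returns 0 when
 * the file does not match this build (e.g. STARPU_MAXNODES changed), so that
 * the caller regenerates it, and 1 on success. */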
static int load_bus_latency_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;
	double latency;
	int locked;

	char path[PATH_LENGTH];
	get_latency_path(path, sizeof(path));

	_STARPU_DEBUG("loading latencies from %s\n", path);

	f = fopen(path, "r");
	if (!f)
	{
		perror("fopen load_bus_latency_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}
	locked = _starpu_frdlock(f) == 0;

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			n = _starpu_read_double(f, "%le", &latency);
			if (n != 1)
			{
				_STARPU_DISP("Error while reading latency file <%s>. Expected a number. Did you change the maximum number of GPUs at ./configure time?\n", path);
				fclose(f);
				return 0;
			}
			n = getc(f);
			if (n == '\n')
				break;
			if (n != '\t')
			{
				_STARPU_DISP("bogus character '%c' (%d) in latency file %s\n", n, n, path);
				fclose(f);
				return 0;
			}

			latency_matrix[src][dst] = latency;

			/* Look out for \t\n */
			n = getc(f);
			if (n == '\n')
				break;
			ungetc(n, f);
			n = '\t';
		}

		/* No more values, take NAN */
		for ( ; dst < STARPU_MAXNODES; dst++)
			latency_matrix[src][dst] = NAN;

		while (n == '\t')
		{
			/* Look out for \t\n */
			n = getc(f);
			if (n == '\n')
				break;
			ungetc(n, f);

			n = _starpu_read_double(f, "%le", &latency);
			if (n && !isnan(latency))
			{
				_STARPU_DISP("Too many nodes in latency file %s for this configuration (%d). Did you change the maximum number of GPUs at ./configure time?\n", path, STARPU_MAXNODES);
				fclose(f);
				return 0;
			}
			n = getc(f);
		}
		if (n != '\n')
		{
			_STARPU_DISP("Bogus character '%c' (%d) in latency file %s\n", n, n, path);
			fclose(f);
			return 0;
		}

		/* Look out for EOF */
		n = getc(f);
		if (n == EOF)
			break;
		ungetc(n, f);
	}
	if (locked)
		_starpu_frdunlock(f);
	fclose(f);

	/* No more values, take NAN */
	for ( ; src < STARPU_MAXNODES; src++)
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
			latency_matrix[src][dst] = NAN;

	return 1;
}

#if !defined(STARPU_SIMGRID) && (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL))
static double search_bus_best_latency(int src, char * type, int htod)
{
	/* Search the best latency for this node */
	double best = 0.0;
	double actual = 0.0;

	unsigned check = 0;
	unsigned numa;
	for (numa = 0; numa < nnumas; numa++)
	{
#ifdef STARPU_USE_CUDA
		if (strncmp(type, "CUDA", 4) == 0)
		{
			if (htod)
				actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_htod;
			else
				actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_dtoh;
		}
#endif
#ifdef STARPU_USE_OPENCL
		if (strncmp(type, "OpenCL", 6) == 0)
		{
			if (htod)
				actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_htod;
			else
				actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].latency_dtoh;
		}
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
		if (!check || actual < best)
		{
			best = actual;
			check = 1;
		}
#endif
	}
	return best;
}
#endif

#if !defined(STARPU_SIMGRID)
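/* Dump the STARPU_MAXNODES x STARPU_MAXNODES latency matrix (in µs) to the
 * per-host latency file, composing the measured NUMA, CUDA, OpenCL and MPI
 * latencies; entries beyond the detected nodes are written as NaN. */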
  1089. static void write_bus_latency_file_content(void)
  1090. {
  1091. unsigned src, dst, maxnode;
  1092. /* Boundaries to check if src or dst are inside the interval */
  1093. unsigned b_low, b_up;
  1094. FILE *f;
  1095. int locked;
  1096. STARPU_ASSERT(was_benchmarked);
  1097. char path[PATH_LENGTH];
  1098. get_latency_path(path, sizeof(path));
  1099. _STARPU_DEBUG("writing latencies to %s\n", path);
  1100. f = fopen(path, "w+");
  1101. if (!f)
  1102. {
  1103. perror("fopen write_bus_latency_file_content");
  1104. _STARPU_DISP("path '%s'\n", path);
  1105. fflush(stderr);
  1106. STARPU_ABORT();
  1107. }
  1108. locked = _starpu_fwrlock(f) == 0;
  1109. _starpu_fftruncate(f, 0);
  1110. fprintf(f, "# ");
  1111. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1112. fprintf(f, "to %u\t\t", dst);
  1113. fprintf(f, "\n");
  1114. maxnode = nnumas;
  1115. #ifdef STARPU_USE_CUDA
  1116. maxnode += ncuda;
  1117. #endif
  1118. #ifdef STARPU_USE_OPENCL
  1119. maxnode += nopencl;
  1120. #endif
  1121. #ifdef STARPU_USE_MIC
  1122. maxnode += nmic;
  1123. #endif
  1124. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1125. maxnode += nmpi_ms;
  1126. #endif
  1127. for (src = 0; src < STARPU_MAXNODES; src++)
  1128. {
  1129. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1130. {
  1131. /* µs */
  1132. double latency = 0.0;
  1133. if ((src >= maxnode) || (dst >= maxnode))
  1134. {
  1135. /* convention */
  1136. latency = NAN;
  1137. }
  1138. else if (src == dst)
  1139. {
  1140. latency = 0.0;
  1141. }
  1142. else
  1143. {
  1144. b_low = b_up = 0;
  1145. /* ---- Begin NUMA ---- */
  1146. b_up += nnumas;
  1147. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1148. latency += numa_latency[src-b_low][dst-b_low];
  1149. /* copy interval to check numa index later */
  1150. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1151. unsigned numa_low = b_low;
  1152. unsigned numa_up = b_up;
  1153. #endif
  1154. b_low += nnumas;
  1155. /* ---- End NUMA ---- */
  1156. #ifdef STARPU_USE_CUDA
  1157. b_up += ncuda;
  1158. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  1159. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1160. latency += cudadev_latency_dtod[src-b_low][dst-b_low];
  1161. else
  1162. #endif
  1163. {
  1164. /* Check if it's CUDA <-> NUMA link */
  1165. if (src >=b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1166. latency += cudadev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].latency_dtoh;
  1167. if (dst >= b_low && dst < b_up && src >= numa_low && dst < numa_up)
  1168. latency += cudadev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].latency_htod;
  1169. /* To other devices, take the best latency */
  1170. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1171. latency += search_bus_best_latency(src-b_low, "CUDA", 0);
  1172. if (dst >= b_low && dst < b_up && !(src >= numa_low && dst < numa_up))
  1173. latency += search_bus_best_latency(dst-b_low, "CUDA", 1);
  1174. }
  1175. b_low += ncuda;
  1176. #endif
  1177. #ifdef STARPU_USE_OPENCL
  1178. b_up += nopencl;
  1179. /* Check if it's OpenCL <-> NUMA link */
  1180. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1181. latency += opencldev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].latency_dtoh;
  1182. if (dst >= b_low && dst < b_up && src >= numa_low && dst < numa_up)
  1183. latency += opencldev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].latency_htod;
  1184. /* To other devices, take the best latency */
  1185. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1186. latency += search_bus_best_latency(src-b_low, "OpenCL", 0);
  1187. if (dst >= b_low && dst < b_up && !(src >= numa_low && dst < numa_up))
  1188. latency += search_bus_best_latency(dst-b_low, "OpenCL", 1);
  1189. b_low += nopencl;
  1190. #endif
  1191. #ifdef STARPU_USE_MIC
  1192. b_up += nmic;
  1193. /* TODO Latency MIC */
  1194. b_low += nmic;
  1195. #endif
  1196. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1197. b_up += nmpi_ms;
  1198. /* Modify MPI src and MPI dst if they contain the master node or not
  1199. * Because, we only take care about slaves */
  1200. int mpi_master = _starpu_mpi_common_get_src_node();
  1201. int mpi_src = src - b_low;
  1202. mpi_src = (mpi_master <= mpi_src) ? mpi_src+1 : mpi_src;
  1203. int mpi_dst = dst - b_low;
  1204. mpi_dst = (mpi_master <= mpi_dst) ? mpi_dst+1 : mpi_dst;
  1205. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1206. latency += mpi_latency_device_to_device[mpi_src][mpi_dst];
  1207. else
  1208. {
  1209. if (src >= b_low && src < b_up)
  1210. latency += mpi_latency_device_to_device[mpi_src][mpi_master];
  1211. if (dst >= b_low && dst < b_up)
  1212. latency += mpi_latency_device_to_device[mpi_master][mpi_dst];
  1213. }
  1214. b_low += nmpi_ms;
  1215. #endif
  1216. }
  1217. if (dst > 0)
  1218. fputc('\t', f);
  1219. _starpu_write_double(f, "%e", latency);
  1220. }
  1221. fprintf(f, "\n");
  1222. }
  1223. if (locked)
  1224. _starpu_fwrunlock(f);
  1225. fclose(f);
  1226. }
  1227. #endif
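/* Note on the b_low/b_up bookkeeping used above (and reused by
 * write_bus_bandwidth_file_content() below): global memory-node indices are
 * laid out as consecutive windows, NUMA nodes first, then CUDA, OpenCL, MIC
 * and MPI slave devices, and src - b_low (resp. dst - b_low) is the
 * device-local index inside the current window. For instance (hypothetical
 * configuration), with 2 NUMA nodes, 2 CUDA devices and 1 OpenCL device,
 * nodes 0-1 are NUMA, nodes 2-3 are CUDA (local indices 0-1) and node 4 is
 * OpenCL device 0. */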
  1228. static void generate_bus_latency_file(void)
  1229. {
  1230. if (!was_benchmarked)
  1231. benchmark_all_gpu_devices();
  1232. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1233. /* Slaves don't write files */
  1234. if (!_starpu_mpi_common_is_src_node())
  1235. return;
  1236. #endif
  1237. #ifndef STARPU_SIMGRID
  1238. write_bus_latency_file_content();
  1239. #endif
  1240. }
  1241. static void load_bus_latency_file(void)
  1242. {
  1243. int res;
  1244. char path[PATH_LENGTH];
  1245. get_latency_path(path, sizeof(path));
  1246. res = access(path, F_OK);
  1247. if (res || !load_bus_latency_file_content())
  1248. {
  1249. /* File does not exist yet or is bogus */
  1250. generate_bus_latency_file();
  1251. }
  1252. }
  1253. /*
  1254. * Bandwidth
  1255. */
  1256. static void get_bandwidth_path(char *path, size_t maxlen)
  1257. {
  1258. get_bus_path("bandwidth", path, maxlen);
  1259. }
  1260. static int load_bus_bandwidth_file_content(void)
  1261. {
  1262. int n;
  1263. unsigned src, dst;
  1264. FILE *f;
  1265. double bandwidth;
  1266. int locked;
  1267. char path[PATH_LENGTH];
  1268. get_bandwidth_path(path, sizeof(path));
  1269. _STARPU_DEBUG("loading bandwidth from %s\n", path);
  1270. f = fopen(path, "r");
  1271. if (!f)
  1272. {
  1273. perror("fopen load_bus_bandwidth_file_content");
  1274. _STARPU_DISP("path '%s'\n", path);
  1275. fflush(stderr);
  1276. STARPU_ABORT();
  1277. }
  1278. locked = _starpu_frdlock(f) == 0;
  1279. for (src = 0; src < STARPU_MAXNODES; src++)
  1280. {
  1281. _starpu_drop_comments(f);
  1282. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1283. {
  1284. n = _starpu_read_double(f, "%le", &bandwidth);
  1285. if (n != 1)
  1286. {
  1287. _STARPU_DISP("Error while reading bandwidth file <%s>. Expected a number\n", path);
  1288. fclose(f);
  1289. return 0;
  1290. }
  1291. n = getc(f);
  1292. if (n == '\n')
  1293. break;
  1294. if (n != '\t')
  1295. {
  1296. _STARPU_DISP("bogus character '%c' (%d) in bandwidth file %s\n", n, n, path);
  1297. fclose(f);
  1298. return 0;
  1299. }
  1300. bandwidth_matrix[src][dst] = bandwidth;
  1301. /* Look out for \t\n */
  1302. n = getc(f);
  1303. if (n == '\n')
  1304. break;
  1305. ungetc(n, f);
  1306. n = '\t';
  1307. }
  1308. /* No more values, take NAN */
  1309. for ( ; dst < STARPU_MAXNODES; dst++)
  1310. bandwidth_matrix[src][dst] = NAN;
  1311. while (n == '\t')
  1312. {
  1313. /* Look out for \t\n */
  1314. n = getc(f);
  1315. if (n == '\n')
  1316. break;
  1317. ungetc(n, f);
  1318. n = _starpu_read_double(f, "%le", &bandwidth);
  1319. if (n && !isnan(bandwidth))
  1320. {
  1321. _STARPU_DISP("Too many nodes in bandwidth file %s for this configuration (%d)\n", path, STARPU_MAXNODES);
  1322. fclose(f);
  1323. return 0;
  1324. }
  1325. n = getc(f);
  1326. }
  1327. if (n != '\n')
  1328. {
  1329. _STARPU_DISP("Bogus character '%c' (%d) in bandwidth file %s\n", n, n, path);
  1330. fclose(f);
  1331. return 0;
  1332. }
  1333. /* Look out for EOF */
  1334. n = getc(f);
  1335. if (n == EOF)
  1336. break;
  1337. ungetc(n, f);
  1338. }
  1339. if (locked)
  1340. _starpu_frdunlock(f);
  1341. fclose(f);
  1342. /* No more values, take NAN */
  1343. for ( ; src < STARPU_MAXNODES; src++)
  1344. for (dst = 0; dst < STARPU_MAXNODES; dst++)
1345. bandwidth_matrix[src][dst] = NAN;
  1346. return 1;
  1347. }
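/* For reference, the file parsed above is expected to match what
 * write_bus_bandwidth_file_content() below produces: a "# to 0  to 1 ..."
 * comment header, then one row of tab-separated %e values per source node,
 * 0 on the diagonal by convention and NAN for node slots beyond the
 * detected configuration. Abbreviated example with hypothetical figures:
 *
 *   # to 0		to 1		to 2	...
 *   0.000000e+00	8.123456e+03	nan	...
 *   7.987654e+03	0.000000e+00	nan	...
 */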
  1348. #if !defined(STARPU_SIMGRID) && (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL))
  1349. static double search_bus_best_timing(int src, char * type, int htod)
  1350. {
1351. /* Search the best (smallest) host<->device transfer timing between this device and any NUMA node */
  1352. double best = 0.0;
  1353. double actual = 0.0;
  1354. unsigned check = 0;
  1355. unsigned numa;
  1356. for (numa = 0; numa < nnumas; numa++)
  1357. {
  1358. #ifdef STARPU_USE_CUDA
  1359. if (strncmp(type, "CUDA", 4) == 0)
  1360. {
  1361. if (htod)
  1362. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_htod;
  1363. else
  1364. actual = cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_dtoh;
  1365. }
  1366. #endif
  1367. #ifdef STARPU_USE_OPENCL
  1368. if (strncmp(type, "OpenCL", 6) == 0)
  1369. {
  1370. if (htod)
  1371. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_htod;
  1372. else
  1373. actual = opencldev_timing_per_numa[src*STARPU_MAXNUMANODES+numa].timing_dtoh;
  1374. }
  1375. #endif
  1376. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1377. if (!check || actual < best)
  1378. {
  1379. best = actual;
  1380. check = 1;
  1381. }
  1382. #endif
  1383. }
  1384. return best;
  1385. }
  1386. #endif
  1387. #if !defined(STARPU_SIMGRID)
  1388. static void write_bus_bandwidth_file_content(void)
  1389. {
  1390. unsigned src, dst, maxnode;
  1391. unsigned b_low, b_up;
  1392. FILE *f;
  1393. int locked;
  1394. STARPU_ASSERT(was_benchmarked);
  1395. char path[PATH_LENGTH];
  1396. get_bandwidth_path(path, sizeof(path));
  1397. _STARPU_DEBUG("writing bandwidth to %s\n", path);
  1398. f = fopen(path, "w+");
  1399. STARPU_ASSERT(f);
  1400. locked = _starpu_fwrlock(f) == 0;
  1401. _starpu_fftruncate(f, 0);
  1402. fprintf(f, "# ");
  1403. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1404. fprintf(f, "to %u\t\t", dst);
  1405. fprintf(f, "\n");
  1406. maxnode = nnumas;
  1407. #ifdef STARPU_USE_CUDA
  1408. maxnode += ncuda;
  1409. #endif
  1410. #ifdef STARPU_USE_OPENCL
  1411. maxnode += nopencl;
  1412. #endif
  1413. #ifdef STARPU_USE_MIC
  1414. maxnode += nmic;
  1415. #endif
  1416. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1417. maxnode += nmpi_ms;
  1418. #endif
  1419. for (src = 0; src < STARPU_MAXNODES; src++)
  1420. {
  1421. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  1422. {
  1423. double bandwidth;
  1424. if ((src >= maxnode) || (dst >= maxnode))
  1425. {
  1426. bandwidth = NAN;
  1427. }
  1428. else if (src != dst)
  1429. {
  1430. double slowness = 0.0;
1431. /* Per-hop slownesses (transfer times) add up along the path; the end-to-end bandwidth is the reciprocal of that sum (see the illustrative sketch after this function) */
  1432. b_low = b_up = 0;
  1433. /* Begin NUMA */
  1434. b_up += nnumas;
  1435. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1436. slowness += numa_timing[src-b_low][dst-b_low];
  1437. /* copy interval to check numa index later */
  1438. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1439. unsigned numa_low = b_low;
  1440. unsigned numa_up = b_up;
  1441. #endif
  1442. b_low += nnumas;
  1443. /* End NUMA */
  1444. #ifdef STARPU_USE_CUDA
  1445. b_up += ncuda;
  1446. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  1447. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
1448. /* Direct GPU-GPU transfer */
  1449. slowness += cudadev_timing_dtod[src-b_low][dst-b_low];
  1450. else
  1451. #endif
  1452. {
  1453. /* Check if it's CUDA <-> NUMA link */
  1454. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1455. slowness += cudadev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].timing_dtoh;
1456. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1457. slowness += cudadev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].timing_htod;
  1458. /* To other devices, take the best slowness */
  1459. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1460. slowness += search_bus_best_timing(src-b_low, "CUDA", 0);
1461. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1462. slowness += search_bus_best_timing(dst-b_low, "CUDA", 1);
  1463. }
  1464. b_low += ncuda;
  1465. #endif
  1466. #ifdef STARPU_USE_OPENCL
  1467. b_up += nopencl;
  1468. /* Check if it's OpenCL <-> NUMA link */
  1469. if (src >= b_low && src < b_up && dst >= numa_low && dst < numa_up)
  1470. slowness += opencldev_timing_per_numa[(src-b_low)*STARPU_MAXNUMANODES+dst-numa_low].timing_dtoh;
1471. if (dst >= b_low && dst < b_up && src >= numa_low && src < numa_up)
  1472. slowness += opencldev_timing_per_numa[(dst-b_low)*STARPU_MAXNUMANODES+src-numa_low].timing_htod;
  1473. /* To other devices, take the best slowness */
  1474. if (src >= b_low && src < b_up && !(dst >= numa_low && dst < numa_up))
  1475. slowness += search_bus_best_timing(src-b_low, "OpenCL", 0);
1476. if (dst >= b_low && dst < b_up && !(src >= numa_low && src < numa_up))
  1477. slowness += search_bus_best_timing(dst-b_low, "OpenCL", 1);
  1478. b_low += nopencl;
  1479. #endif
  1480. #ifdef STARPU_USE_MIC
  1481. b_up += nmic;
  1482. if (src >= b_low && src < b_up)
  1483. slowness += mic_time_device_to_host[src-b_low];
  1484. if (dst >= b_low && dst < b_up)
  1485. slowness += mic_time_host_to_device[dst-b_low];
  1486. b_low += nmic;
  1487. #endif
  1488. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1489. b_up += nmpi_ms;
1490. /* Shift the raw MPI src and dst indices past the master's rank when needed,
1491. * since the device numbering here only counts the slaves */
  1492. int mpi_master = _starpu_mpi_common_get_src_node();
  1493. int mpi_src = src - b_low;
  1494. mpi_src = (mpi_master <= mpi_src) ? mpi_src+1 : mpi_src;
  1495. int mpi_dst = dst - b_low;
  1496. mpi_dst = (mpi_master <= mpi_dst) ? mpi_dst+1 : mpi_dst;
  1497. if (src >= b_low && src < b_up && dst >= b_low && dst < b_up)
  1498. slowness += mpi_time_device_to_device[mpi_src][mpi_dst];
  1499. else
  1500. {
  1501. if (src >= b_low && src < b_up)
  1502. slowness += mpi_time_device_to_device[mpi_src][mpi_master];
  1503. if (dst >= b_low && dst < b_up)
  1504. slowness += mpi_time_device_to_device[mpi_master][mpi_dst];
  1505. }
  1506. b_low += nmpi_ms;
  1507. #endif
  1508. bandwidth = 1.0/slowness;
  1509. }
  1510. else
  1511. {
  1512. /* convention */
  1513. bandwidth = 0.0;
  1514. }
  1515. if (dst)
  1516. fputc('\t', f);
  1517. _starpu_write_double(f, "%e", bandwidth);
  1518. }
  1519. fprintf(f, "\n");
  1520. }
  1521. if (locked)
  1522. _starpu_fwrunlock(f);
  1523. fclose(f);
  1524. }
  1525. #endif /* STARPU_SIMGRID */
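/* A minimal illustrative sketch (not called by the code above) of the
 * slowness composition performed in write_bus_bandwidth_file_content():
 * per-hop transfer times per unit of data (slownesses) add up along the
 * path, and the end-to-end bandwidth is the reciprocal of that sum. The
 * helper name is hypothetical and only serves as documentation: for hops of
 * 8 and 6 bandwidth units it returns 1/(1/8 + 1/6) = 24/7, i.e. about 3.43. */
static inline double illustrate_series_bandwidth(double bw_hop1, double bw_hop2)
{
	/* slowness = time needed per unit of transferred data */
	double slowness = 1.0/bw_hop1 + 1.0/bw_hop2;
	return 1.0/slowness;
}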
  1526. void starpu_bus_print_filenames(FILE *output)
  1527. {
  1528. char bandwidth_path[PATH_LENGTH];
  1529. char affinity_path[PATH_LENGTH];
  1530. char latency_path[PATH_LENGTH];
  1531. get_bandwidth_path(bandwidth_path, sizeof(bandwidth_path));
  1532. get_affinity_path(affinity_path, sizeof(affinity_path));
  1533. get_latency_path(latency_path, sizeof(latency_path));
  1534. fprintf(output, "bandwidth: <%s>\n", bandwidth_path);
  1535. fprintf(output, " affinity: <%s>\n", affinity_path);
  1536. fprintf(output, " latency: <%s>\n", latency_path);
  1537. }
  1538. void starpu_bus_print_bandwidth(FILE *f)
  1539. {
  1540. unsigned src, dst, maxnode = starpu_memory_nodes_get_count();
  1541. fprintf(f, "from/to\t");
  1542. for (dst = 0; dst < maxnode; dst++)
  1543. {
  1544. char name[128];
  1545. starpu_memory_node_get_name(dst, name, sizeof(name));
  1546. fprintf(f, "%s\t", name);
  1547. }
  1548. fprintf(f, "\n");
  1549. for (src = 0; src < maxnode; src++)
  1550. {
  1551. char name[128];
  1552. starpu_memory_node_get_name(src, name, sizeof(name));
  1553. fprintf(f, "%s\t", name);
  1554. for (dst = 0; dst < maxnode; dst++)
  1555. fprintf(f, "%.0f\t", bandwidth_matrix[src][dst]);
  1556. fprintf(f, "\n");
  1557. }
  1558. fprintf(f, "\n");
  1559. for (src = 0; src < maxnode; src++)
  1560. {
  1561. char name[128];
  1562. starpu_memory_node_get_name(src, name, sizeof(name));
  1563. fprintf(f, "%s\t", name);
  1564. for (dst = 0; dst < maxnode; dst++)
  1565. fprintf(f, "%.0f\t", latency_matrix[src][dst]);
  1566. fprintf(f, "\n");
  1567. }
  1568. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1569. if (ncuda != 0 || nopencl != 0)
  1570. fprintf(f, "\nGPU\tNUMA in preference order (logical index), host-to-device, device-to-host\n");
  1571. for (src = 0; src < ncuda + nopencl; src++)
  1572. {
  1573. struct dev_timing *timing;
  1574. struct _starpu_machine_config * config = _starpu_get_machine_config();
  1575. unsigned config_nnumas = _starpu_topology_get_nnumanodes(config);
  1576. unsigned numa;
  1577. #ifdef STARPU_USE_CUDA
  1578. if (src < ncuda)
  1579. {
  1580. fprintf(f, "CUDA_%u\t", src);
  1581. for (numa = 0; numa < config_nnumas; numa++)
  1582. {
  1583. timing = &cudadev_timing_per_numa[src*STARPU_MAXNUMANODES+numa];
  1584. if (timing->timing_htod)
  1585. fprintf(f, "%2d %.0f %.0f\t", timing->numa_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  1586. else
  1587. fprintf(f, "%2u\t", cuda_affinity_matrix[src][numa]);
  1588. }
  1589. }
  1590. #ifdef STARPU_USE_OPENCL
  1591. else
  1592. #endif
  1593. #endif
  1594. #ifdef STARPU_USE_OPENCL
  1595. {
  1596. fprintf(f, "OpenCL%u\t", src-ncuda);
  1597. for (numa = 0; numa < config_nnumas; numa++)
  1598. {
  1599. timing = &opencldev_timing_per_numa[(src-ncuda)*STARPU_MAXNUMANODES+numa];
  1600. if (timing->timing_htod)
  1601. fprintf(f, "%2d %.0f %.0f\t", timing->numa_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  1602. else
  1603. fprintf(f, "%2u\t", opencl_affinity_matrix[src][numa]);
  1604. }
  1605. }
  1606. #endif
  1607. fprintf(f, "\n");
  1608. }
  1609. #endif
  1610. }
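/* Minimal usage sketch (hypothetical application code): once StarPU is
 * initialized, the calibrated tables can be dumped with the two public
 * helpers above, e.g.
 *
 *	starpu_bus_print_filenames(stderr);
 *	starpu_bus_print_bandwidth(stderr);
 */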
  1611. static void generate_bus_bandwidth_file(void)
  1612. {
  1613. if (!was_benchmarked)
  1614. benchmark_all_gpu_devices();
  1615. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1616. /* Slaves don't write files */
  1617. if (!_starpu_mpi_common_is_src_node())
  1618. return;
  1619. #endif
  1620. #ifndef STARPU_SIMGRID
  1621. write_bus_bandwidth_file_content();
  1622. #endif
  1623. }
  1624. static void load_bus_bandwidth_file(void)
  1625. {
  1626. int res;
  1627. char path[PATH_LENGTH];
  1628. get_bandwidth_path(path, sizeof(path));
  1629. res = access(path, F_OK);
  1630. if (res || !load_bus_bandwidth_file_content())
  1631. {
  1632. /* File does not exist yet or is bogus */
  1633. generate_bus_bandwidth_file();
  1634. }
  1635. }
  1636. #ifndef STARPU_SIMGRID
  1637. /*
  1638. * Config
  1639. */
  1640. static void get_config_path(char *path, size_t maxlen)
  1641. {
  1642. get_bus_path("config", path, maxlen);
  1643. }
  1644. #if defined(STARPU_USE_MPI_MASTER_SLAVE)
  1645. /* check if the master or one slave has to recalibrate */
  1646. static int mpi_check_recalibrate(int my_recalibrate)
  1647. {
  1648. int nb_mpi = _starpu_mpi_src_get_device_count() + 1;
  1649. int mpi_recalibrate[nb_mpi];
  1650. int i;
  1651. MPI_Allgather(&my_recalibrate, 1, MPI_INT, mpi_recalibrate, 1, MPI_INT, MPI_COMM_WORLD);
  1652. for (i = 0; i < nb_mpi; i++)
  1653. {
  1654. if (mpi_recalibrate[i])
  1655. {
  1656. return 1;
  1657. }
  1658. }
  1659. return 0;
  1660. }
  1661. #endif
  1662. static void compare_value_and_recalibrate(char * msg, unsigned val_file, unsigned val_detected)
  1663. {
  1664. int recalibrate = 0;
  1665. if (val_file != val_detected)
  1666. recalibrate = 1;
  1667. #ifdef STARPU_USE_MPI_MASTER_SLAVE
1668. // Exchange with the other MPI processes so that everyone knows whether anyone has to recalibrate, e.g. because its detected value does not match the config file
  1669. recalibrate = mpi_check_recalibrate(recalibrate);
  1670. #endif
  1671. if (recalibrate)
  1672. {
  1673. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1674. /* Only the master prints the message */
  1675. if (_starpu_mpi_common_is_src_node())
  1676. #endif
  1677. _STARPU_DISP("Current configuration does not match the bus performance model (%s: (stored) %d != (current) %d), recalibrating...\n", msg, val_file, val_detected);
  1678. _starpu_bus_force_sampling();
  1679. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1680. if (_starpu_mpi_common_is_src_node())
  1681. #endif
  1682. _STARPU_DISP("... done\n");
  1683. }
  1684. }
  1685. static void check_bus_config_file(void)
  1686. {
  1687. int res;
  1688. char path[PATH_LENGTH];
  1689. struct _starpu_machine_config *config = _starpu_get_machine_config();
  1690. int recalibrate = 0;
  1691. get_config_path(path, sizeof(path));
  1692. res = access(path, F_OK);
  1693. if (res || config->conf.bus_calibrate > 0)
  1694. recalibrate = 1;
  1695. #if defined(STARPU_USE_MPI_MASTER_SLAVE)
1696. // Exchange with the other MPI processes so that everyone knows whether anyone has to recalibrate, e.g. because it does not have the config file
  1697. recalibrate = mpi_check_recalibrate(recalibrate);
  1698. #endif
  1699. if (recalibrate)
  1700. {
  1701. if (res)
  1702. _STARPU_DISP("No performance model for the bus, calibrating...\n");
  1703. _starpu_bus_force_sampling();
  1704. if (res)
  1705. _STARPU_DISP("... done\n");
  1706. }
  1707. else
  1708. {
  1709. FILE *f;
  1710. int ret;
  1711. unsigned read_cuda = -1, read_opencl = -1, read_mic = -1, read_mpi_ms = -1;
  1712. unsigned read_cpus = -1, read_numa = -1;
  1713. int locked;
  1714. // Loading configuration from file
  1715. f = fopen(path, "r");
  1716. STARPU_ASSERT(f);
  1717. locked = _starpu_frdlock(f) == 0;
  1718. _starpu_drop_comments(f);
  1719. ret = fscanf(f, "%u\t", &read_cpus);
  1720. STARPU_ASSERT(ret == 1);
  1721. _starpu_drop_comments(f);
  1722. ret = fscanf(f, "%u\t", &read_numa);
  1723. STARPU_ASSERT(ret == 1);
  1724. _starpu_drop_comments(f);
  1725. ret = fscanf(f, "%u\t", &read_cuda);
  1726. STARPU_ASSERT(ret == 1);
  1727. _starpu_drop_comments(f);
  1728. ret = fscanf(f, "%u\t", &read_opencl);
  1729. STARPU_ASSERT(ret == 1);
  1730. _starpu_drop_comments(f);
  1731. ret = fscanf(f, "%u\t", &read_mic);
  1732. if (ret == 0)
  1733. read_mic = 0;
  1734. _starpu_drop_comments(f);
  1735. ret = fscanf(f, "%u\t", &read_mpi_ms);
  1736. if (ret == 0)
  1737. read_mpi_ms = 0;
  1738. _starpu_drop_comments(f);
  1739. if (locked)
  1740. _starpu_frdunlock(f);
  1741. fclose(f);
  1742. // Loading current configuration
  1743. ncpus = _starpu_topology_get_nhwcpu(config);
  1744. nnumas = _starpu_topology_get_nnumanodes(config);
  1745. #ifdef STARPU_USE_CUDA
  1746. ncuda = _starpu_get_cuda_device_count();
  1747. #endif
  1748. #ifdef STARPU_USE_OPENCL
  1749. nopencl = _starpu_opencl_get_device_count();
  1750. #endif
  1751. #ifdef STARPU_USE_MIC
  1752. nmic = _starpu_mic_src_get_device_count();
  1753. #endif /* STARPU_USE_MIC */
  1754. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1755. nmpi_ms = _starpu_mpi_src_get_device_count();
  1756. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  1757. // Checking if both configurations match
  1758. compare_value_and_recalibrate("CPUS", read_cpus, ncpus);
  1759. compare_value_and_recalibrate("NUMA", read_numa, nnumas);
  1760. compare_value_and_recalibrate("CUDA", read_cuda, ncuda);
  1761. compare_value_and_recalibrate("OpenCL", read_opencl, nopencl);
  1762. compare_value_and_recalibrate("MIC", read_mic, nmic);
  1763. compare_value_and_recalibrate("MPI Master-Slave", read_mpi_ms, nmpi_ms);
  1764. }
  1765. }
  1766. static void write_bus_config_file_content(void)
  1767. {
  1768. FILE *f;
  1769. char path[PATH_LENGTH];
  1770. int locked;
  1771. STARPU_ASSERT(was_benchmarked);
  1772. get_config_path(path, sizeof(path));
  1773. _STARPU_DEBUG("writing config to %s\n", path);
  1774. f = fopen(path, "w+");
  1775. STARPU_ASSERT(f);
  1776. locked = _starpu_fwrlock(f) == 0;
  1777. _starpu_fftruncate(f, 0);
  1778. fprintf(f, "# Current configuration\n");
  1779. fprintf(f, "%u # Number of CPUs\n", ncpus);
  1780. fprintf(f, "%u # Number of NUMA nodes\n", nnumas);
  1781. fprintf(f, "%u # Number of CUDA devices\n", ncuda);
  1782. fprintf(f, "%u # Number of OpenCL devices\n", nopencl);
  1783. fprintf(f, "%u # Number of MIC devices\n", nmic);
  1784. fprintf(f, "%u # Number of MPI devices\n", nmpi_ms);
  1785. if (locked)
  1786. _starpu_fwrunlock(f);
  1787. fclose(f);
  1788. }
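/* The resulting config file is a sequence of commented counters, read back
 * in the same order by check_bus_config_file(). Example contents for a
 * hypothetical machine:
 *
 *   # Current configuration
 *   16 # Number of CPUs
 *   2 # Number of NUMA nodes
 *   2 # Number of CUDA devices
 *   0 # Number of OpenCL devices
 *   0 # Number of MIC devices
 *   0 # Number of MPI devices
 */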
  1789. static void generate_bus_config_file(void)
  1790. {
  1791. if (!was_benchmarked)
  1792. benchmark_all_gpu_devices();
  1793. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1794. /* Slaves don't write files */
  1795. if (!_starpu_mpi_common_is_src_node())
  1796. return;
  1797. #endif
  1798. write_bus_config_file_content();
  1799. }
  1800. #endif /* !SIMGRID */
  1801. void _starpu_simgrid_get_platform_path(int version, char *path, size_t maxlen)
  1802. {
  1803. if (version == 3)
  1804. get_bus_path("platform.xml", path, maxlen);
  1805. else
  1806. get_bus_path("platform.v4.xml", path, maxlen);
  1807. }
  1808. #ifndef STARPU_SIMGRID
  1809. /*
  1810. * Compute the precise PCI tree bandwidth and link shares
  1811. *
  1812. * We only have measurements from one leaf to another. We assume that the
  1813. * available bandwidth is greater at lower levels, and thus measurements from
  1814. * increasingly far GPUs provide the PCI bridges bandwidths at each level.
  1815. *
  1816. * The bandwidth of a PCI bridge is thus computed as the maximum of the speed
  1817. * of the various transfers that we have achieved through it. We thus browse
  1818. * the PCI tree three times:
  1819. *
  1820. * - first through all CUDA-CUDA possible transfers to compute the maximum
  1821. * measured bandwidth on each PCI link and hub used for that.
  1822. * - then through the whole tree to emit links for each PCI link and hub.
  1823. * - then through all CUDA-CUDA possible transfers again to emit routes.
  1824. */
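/* Illustration on a hypothetical topology:
 *
 *                 Host
 *                /    \
 *           Bridge0   Bridge1
 *            /   \         \
 *         GPU0   GPU1      GPU2
 *
 * a GPU0<->GPU1 measurement only raises the maxima recorded on Bridge0 and
 * on the GPU0/GPU1 links, while a GPU0<->GPU2 measurement also raises those
 * of Bridge1 and of the host uplinks, so every link and hub ends up
 * annotated with the fastest transfer ever observed through it. */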
  1825. #if defined(STARPU_USE_CUDA) && defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
  1826. /* Records, for each PCI link and hub, the maximum bandwidth seen through it */
  1827. struct pci_userdata
  1828. {
  1829. /* Uplink max measurement */
  1830. double bw_up;
  1831. double bw_down;
  1832. /* Hub max measurement */
  1833. double bw;
  1834. };
  1835. /* Allocate a pci_userdata structure for the given object */
  1836. static void allocate_userdata(hwloc_obj_t obj)
  1837. {
  1838. struct pci_userdata *data;
  1839. if (obj->userdata)
  1840. return;
  1841. _STARPU_MALLOC(obj->userdata, sizeof(*data));
  1842. data = obj->userdata;
  1843. data->bw_up = 0.0;
  1844. data->bw_down = 0.0;
  1845. data->bw = 0.0;
  1846. }
  1847. /* Update the maximum bandwidth seen going to upstream */
  1848. static void update_bandwidth_up(hwloc_obj_t obj, double bandwidth)
  1849. {
  1850. struct pci_userdata *data;
  1851. if (obj->type != HWLOC_OBJ_BRIDGE && obj->type != HWLOC_OBJ_PCI_DEVICE)
  1852. return;
  1853. allocate_userdata(obj);
  1854. data = obj->userdata;
  1855. if (data->bw_up < bandwidth)
  1856. data->bw_up = bandwidth;
  1857. }
  1858. /* Update the maximum bandwidth seen going from upstream */
  1859. static void update_bandwidth_down(hwloc_obj_t obj, double bandwidth)
  1860. {
  1861. struct pci_userdata *data;
  1862. if (obj->type != HWLOC_OBJ_BRIDGE && obj->type != HWLOC_OBJ_PCI_DEVICE)
  1863. return;
  1864. allocate_userdata(obj);
  1865. data = obj->userdata;
  1866. if (data->bw_down < bandwidth)
  1867. data->bw_down = bandwidth;
  1868. }
  1869. /* Update the maximum bandwidth seen going through this Hub */
  1870. static void update_bandwidth_through(hwloc_obj_t obj, double bandwidth)
  1871. {
  1872. struct pci_userdata *data;
  1873. allocate_userdata(obj);
  1874. data = obj->userdata;
  1875. if (data->bw < bandwidth)
  1876. data->bw = bandwidth;
  1877. }
  1878. /* find_* functions perform the first step: computing maximum bandwidths */
1879. /* Our traffic has to go through the host: go back from the target up to the host,
1880. * updating the uplink downstream bandwidth along the way */
  1881. static void find_platform_backward_path(hwloc_obj_t obj, double bandwidth)
  1882. {
  1883. if (!obj)
  1884. /* Oops, we should have seen a host bridge. Well, too bad. */
  1885. return;
  1886. /* Update uplink bandwidth of PCI Hub */
  1887. update_bandwidth_down(obj, bandwidth);
  1888. /* Update internal bandwidth of PCI Hub */
  1889. update_bandwidth_through(obj, bandwidth);
  1890. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1891. /* Finished */
  1892. return;
  1893. /* Continue up */
  1894. find_platform_backward_path(obj->parent, bandwidth);
  1895. }
  1896. /* Same, but update uplink upstream bandwidth */
  1897. static void find_platform_forward_path(hwloc_obj_t obj, double bandwidth)
  1898. {
  1899. if (!obj)
  1900. /* Oops, we should have seen a host bridge. Well, too bad. */
  1901. return;
  1902. /* Update uplink bandwidth of PCI Hub */
  1903. update_bandwidth_up(obj, bandwidth);
  1904. /* Update internal bandwidth of PCI Hub */
  1905. update_bandwidth_through(obj, bandwidth);
  1906. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1907. /* Finished */
  1908. return;
  1909. /* Continue up */
  1910. find_platform_forward_path(obj->parent, bandwidth);
  1911. }
  1912. /* Find the path from obj1 through parent down to obj2 (without ever going up),
  1913. * and update the maximum bandwidth along the path */
  1914. static int find_platform_path_down(hwloc_obj_t parent, hwloc_obj_t obj1, hwloc_obj_t obj2, double bandwidth)
  1915. {
  1916. unsigned i;
  1917. /* Base case, path is empty */
  1918. if (parent == obj2)
  1919. return 1;
  1920. /* Try to go down from parent */
  1921. for (i = 0; i < parent->arity; i++)
  1922. if (parent->children[i] != obj1 && find_platform_path_down(parent->children[i], NULL, obj2, bandwidth))
  1923. {
  1924. /* Found it down there, update bandwidth of parent */
  1925. update_bandwidth_down(parent->children[i], bandwidth);
  1926. update_bandwidth_through(parent, bandwidth);
  1927. return 1;
  1928. }
  1929. return 0;
  1930. }
  1931. /* Find the path from obj1 to obj2, and update the maximum bandwidth along the
  1932. * path */
  1933. static int find_platform_path_up(hwloc_obj_t obj1, hwloc_obj_t obj2, double bandwidth)
  1934. {
  1935. int ret;
  1936. hwloc_obj_t parent = obj1->parent;
  1937. if (!parent)
  1938. {
  1939. /* Oops, we should have seen a host bridge. Act as if we had seen it. */
  1940. find_platform_backward_path(obj2, bandwidth);
  1941. return 1;
  1942. }
  1943. if (find_platform_path_down(parent, obj1, obj2, bandwidth))
  1944. /* obj2 was a mere (sub)child of our parent */
  1945. return 1;
  1946. /* obj2 is not a (sub)child of our parent, we have to go up through the parent */
  1947. if (parent->type == HWLOC_OBJ_BRIDGE && parent->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  1948. {
  1949. /* We have to go up to the Host, so obj2 is not in the same PCI
1950. * tree, so we're done for obj1 up to the Host, and just find the path
  1951. * from obj2 to Host too.
  1952. */
  1953. find_platform_backward_path(obj2, bandwidth);
  1954. update_bandwidth_up(parent, bandwidth);
  1955. update_bandwidth_through(parent, bandwidth);
  1956. return 1;
  1957. }
  1958. /* Not at host yet, just go up */
  1959. ret = find_platform_path_up(parent, obj2, bandwidth);
  1960. update_bandwidth_up(parent, bandwidth);
  1961. update_bandwidth_through(parent, bandwidth);
  1962. return ret;
  1963. }
  1964. /* find the path between cuda i and cuda j, and update the maximum bandwidth along the path */
  1965. static int find_platform_cuda_path(hwloc_topology_t topology, unsigned i, unsigned j, double bandwidth)
  1966. {
  1967. hwloc_obj_t cudai, cudaj;
  1968. cudai = hwloc_cuda_get_device_osdev_by_index(topology, i);
  1969. cudaj = hwloc_cuda_get_device_osdev_by_index(topology, j);
  1970. if (!cudai || !cudaj)
  1971. return 0;
  1972. return find_platform_path_up(cudai, cudaj, bandwidth);
  1973. }
  1974. /* emit_topology_bandwidths performs the second step: emitting link names */
  1975. /* Emit the link name of the object */
  1976. static void emit_pci_hub(FILE *f, hwloc_obj_t obj)
  1977. {
  1978. STARPU_ASSERT(obj->type == HWLOC_OBJ_BRIDGE);
  1979. fprintf(f, "PCI:%04x:[%02x-%02x]", obj->attr->bridge.downstream.pci.domain, obj->attr->bridge.downstream.pci.secondary_bus, obj->attr->bridge.downstream.pci.subordinate_bus);
  1980. }
  1981. static void emit_pci_dev(FILE *f, struct hwloc_pcidev_attr_s *pcidev)
  1982. {
  1983. fprintf(f, "PCI:%04x:%02x:%02x.%1x", pcidev->domain, pcidev->bus, pcidev->dev, pcidev->func);
  1984. }
  1985. /* Emit the links of the object */
  1986. static void emit_topology_bandwidths(FILE *f, hwloc_obj_t obj, const char *Bps, const char *s)
  1987. {
  1988. unsigned i;
  1989. if (obj->userdata)
  1990. {
  1991. struct pci_userdata *data = obj->userdata;
  1992. if (obj->type == HWLOC_OBJ_BRIDGE)
  1993. {
  1994. /* Uplink */
  1995. fprintf(f, " <link id=\"");
  1996. emit_pci_hub(f, obj);
  1997. fprintf(f, " up\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_up, Bps, s);
  1998. fprintf(f, " <link id=\"");
  1999. emit_pci_hub(f, obj);
  2000. fprintf(f, " down\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_down, Bps, s);
  2001. /* PCI Switches are assumed to have infinite internal bandwidth */
  2002. if (!obj->name || !strstr(obj->name, "Switch"))
  2003. {
  2004. /* We assume that PCI Hubs have double bandwidth in
  2005. * order to support full duplex but not more */
  2006. fprintf(f, " <link id=\"");
  2007. emit_pci_hub(f, obj);
  2008. fprintf(f, " through\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw * 2, Bps, s);
  2009. }
  2010. }
  2011. else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
  2012. {
  2013. fprintf(f, " <link id=\"");
  2014. emit_pci_dev(f, &obj->attr->pcidev);
  2015. fprintf(f, " up\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_up, Bps, s);
  2016. fprintf(f, " <link id=\"");
  2017. emit_pci_dev(f, &obj->attr->pcidev);
  2018. fprintf(f, " down\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n", data->bw_down, Bps, s);
  2019. }
  2020. }
  2021. for (i = 0; i < obj->arity; i++)
  2022. emit_topology_bandwidths(f, obj->children[i], Bps, s);
  2023. }
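/* For a bridge annotated with measured maxima, the function above emits link
 * definitions of the following shape (IDs and figures hypothetical, SimGrid
 * v4 units shown):
 *
 *   <link id="PCI:0000:[02-04] up" bandwidth="10000000000.000000Bps" latency="0.000000s"/>
 *   <link id="PCI:0000:[02-04] down" bandwidth="11000000000.000000Bps" latency="0.000000s"/>
 *   <link id="PCI:0000:[02-04] through" bandwidth="22000000000.000000Bps" latency="0.000000s"/>
 */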
  2024. /* emit_pci_link_* functions perform the third step: emitting the routes */
  2025. static void emit_pci_link(FILE *f, hwloc_obj_t obj, const char *suffix)
  2026. {
  2027. if (obj->type == HWLOC_OBJ_BRIDGE)
  2028. {
  2029. fprintf(f, " <link_ctn id=\"");
  2030. emit_pci_hub(f, obj);
  2031. fprintf(f, " %s\"/>\n", suffix);
  2032. }
  2033. else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
  2034. {
  2035. fprintf(f, " <link_ctn id=\"");
  2036. emit_pci_dev(f, &obj->attr->pcidev);
  2037. fprintf(f, " %s\"/>\n", suffix);
  2038. }
  2039. }
  2040. /* Go to upstream */
  2041. static void emit_pci_link_up(FILE *f, hwloc_obj_t obj)
  2042. {
  2043. emit_pci_link(f, obj, "up");
  2044. }
  2045. /* Go from upstream */
  2046. static void emit_pci_link_down(FILE *f, hwloc_obj_t obj)
  2047. {
  2048. emit_pci_link(f, obj, "down");
  2049. }
  2050. /* Go through PCI hub */
  2051. static void emit_pci_link_through(FILE *f, hwloc_obj_t obj)
  2052. {
2053. /* We don't care about traffic going through PCI switches */
  2054. if (obj->type == HWLOC_OBJ_BRIDGE)
  2055. {
  2056. if (!obj->name || !strstr(obj->name, "Switch"))
  2057. emit_pci_link(f, obj, "through");
  2058. else
  2059. {
  2060. fprintf(f, " <!-- Switch ");
  2061. emit_pci_hub(f, obj);
  2062. fprintf(f, " through -->\n");
  2063. }
  2064. }
  2065. }
2066. /* Our traffic has to go through the host: go back from the target up to the host,
2067. * using the uplink in the downstream direction along the way */
  2068. static void emit_platform_backward_path(FILE *f, hwloc_obj_t obj)
  2069. {
  2070. if (!obj)
  2071. /* Oops, we should have seen a host bridge. Well, too bad. */
  2072. return;
  2073. /* Go through PCI Hub */
  2074. emit_pci_link_through(f, obj);
  2075. /* Go through uplink */
  2076. emit_pci_link_down(f, obj);
  2077. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2078. {
  2079. /* Finished, go through host */
  2080. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2081. return;
  2082. }
  2083. /* Continue up */
  2084. emit_platform_backward_path(f, obj->parent);
  2085. }
  2086. /* Same, but use upstream link */
  2087. static void emit_platform_forward_path(FILE *f, hwloc_obj_t obj)
  2088. {
  2089. if (!obj)
  2090. /* Oops, we should have seen a host bridge. Well, too bad. */
  2091. return;
  2092. /* Go through PCI Hub */
  2093. emit_pci_link_through(f, obj);
  2094. /* Go through uplink */
  2095. emit_pci_link_up(f, obj);
  2096. if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2097. {
  2098. /* Finished, go through host */
  2099. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2100. return;
  2101. }
  2102. /* Continue up */
  2103. emit_platform_forward_path(f, obj->parent);
  2104. }
  2105. /* Find the path from obj1 through parent down to obj2 (without ever going up),
  2106. * and use the links along the path */
  2107. static int emit_platform_path_down(FILE *f, hwloc_obj_t parent, hwloc_obj_t obj1, hwloc_obj_t obj2)
  2108. {
  2109. unsigned i;
  2110. /* Base case, path is empty */
  2111. if (parent == obj2)
  2112. return 1;
  2113. /* Try to go down from parent */
  2114. for (i = 0; i < parent->arity; i++)
  2115. if (parent->children[i] != obj1 && emit_platform_path_down(f, parent->children[i], NULL, obj2))
  2116. {
  2117. /* Found it down there, path goes through this hub */
  2118. emit_pci_link_down(f, parent->children[i]);
  2119. emit_pci_link_through(f, parent);
  2120. return 1;
  2121. }
  2122. return 0;
  2123. }
  2124. /* Find the path from obj1 to obj2, and use the links along the path */
  2125. static int emit_platform_path_up(FILE *f, hwloc_obj_t obj1, hwloc_obj_t obj2)
  2126. {
  2127. int ret;
  2128. hwloc_obj_t parent = obj1->parent;
  2129. if (!parent)
  2130. {
  2131. /* Oops, we should have seen a host bridge. Act as if we had seen it. */
  2132. emit_platform_backward_path(f, obj2);
  2133. return 1;
  2134. }
  2135. if (emit_platform_path_down(f, parent, obj1, obj2))
  2136. /* obj2 was a mere (sub)child of our parent */
  2137. return 1;
  2138. /* obj2 is not a (sub)child of our parent, we have to go up through the parent */
  2139. if (parent->type == HWLOC_OBJ_BRIDGE && parent->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST)
  2140. {
  2141. /* We have to go up to the Host, so obj2 is not in the same PCI
2142. * tree, so we're done for obj1 up to the Host, and just find the path
  2143. * from obj2 to Host too.
  2144. */
  2145. emit_platform_backward_path(f, obj2);
  2146. fprintf(f, " <link_ctn id=\"Host\"/>\n");
  2147. emit_pci_link_up(f, parent);
  2148. emit_pci_link_through(f, parent);
  2149. return 1;
  2150. }
  2151. /* Not at host yet, just go up */
  2152. ret = emit_platform_path_up(f, parent, obj2);
  2153. emit_pci_link_up(f, parent);
  2154. emit_pci_link_through(f, parent);
  2155. return ret;
  2156. }
  2157. /* Clean our mess in the topology before destroying it */
  2158. static void clean_topology(hwloc_obj_t obj)
  2159. {
  2160. unsigned i;
  2161. if (obj->userdata)
  2162. free(obj->userdata);
  2163. for (i = 0; i < obj->arity; i++)
  2164. clean_topology(obj->children[i]);
  2165. }
  2166. #endif
  2167. static void write_bus_platform_file_content(int version)
  2168. {
  2169. FILE *f;
  2170. char path[PATH_LENGTH];
  2171. unsigned i;
  2172. const char *speed, *flops, *Bps, *s;
  2173. char dash;
  2174. int locked;
  2175. if (version == 3)
  2176. {
  2177. speed = "power";
  2178. flops = "";
  2179. Bps = "";
  2180. s = "";
  2181. dash = '_';
  2182. }
  2183. else
  2184. {
  2185. speed = "speed";
  2186. flops = "f";
  2187. Bps = "Bps";
  2188. s = "s";
  2189. dash = '-';
  2190. }
  2191. STARPU_ASSERT(was_benchmarked);
  2192. _starpu_simgrid_get_platform_path(version, path, sizeof(path));
  2193. _STARPU_DEBUG("writing platform to %s\n", path);
  2194. f = fopen(path, "w+");
  2195. if (!f)
  2196. {
  2197. perror("fopen write_bus_platform_file_content");
  2198. _STARPU_DISP("path '%s'\n", path);
  2199. fflush(stderr);
  2200. STARPU_ABORT();
  2201. }
  2202. locked = _starpu_fwrlock(f) == 0;
  2203. _starpu_fftruncate(f, 0);
  2204. fprintf(f,
  2205. "<?xml version='1.0'?>\n"
  2206. "<!DOCTYPE platform SYSTEM '%s'>\n"
  2207. " <platform version=\"%d\">\n"
  2208. " <config id=\"General\">\n"
  2209. " <prop id=\"network/TCP%cgamma\" value=\"-1\"></prop>\n"
  2210. " <prop id=\"network/latency%cfactor\" value=\"1\"></prop>\n"
  2211. " <prop id=\"network/bandwidth%cfactor\" value=\"1\"></prop>\n"
  2212. " </config>\n"
  2213. " <AS id=\"AS0\" routing=\"Full\">\n"
  2214. " <host id=\"MAIN\" %s=\"1%s\"/>\n",
  2215. version == 3
  2216. ? "http://simgrid.gforge.inria.fr/simgrid.dtd"
  2217. : "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd",
  2218. version, dash, dash, dash, speed, flops);
  2219. for (i = 0; i < ncpus; i++)
  2220. /* TODO: host memory for out-of-core simulation */
  2221. fprintf(f, " <host id=\"CPU%u\" %s=\"2000000000%s\"/>\n", i, speed, flops);
  2222. for (i = 0; i < ncuda; i++)
  2223. {
  2224. fprintf(f, " <host id=\"CUDA%u\" %s=\"2000000000%s\">\n", i, speed, flops);
  2225. fprintf(f, " <prop id=\"memsize\" value=\"%llu\"/>\n", (unsigned long long) cuda_size[i]);
  2226. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2227. fprintf(f, " <prop id=\"memcpy_peer\" value=\"1\"/>\n");
  2228. #endif
  2229. /* TODO: record cudadev_direct instead of assuming it's NUMA nodes */
  2230. fprintf(f, " </host>\n");
  2231. }
  2232. for (i = 0; i < nopencl; i++)
  2233. {
  2234. fprintf(f, " <host id=\"OpenCL%u\" %s=\"2000000000%s\">\n", i, speed, flops);
  2235. fprintf(f, " <prop id=\"memsize\" value=\"%llu\"/>\n", (unsigned long long) opencl_size[i]);
  2236. fprintf(f, " </host>\n");
  2237. }
  2238. fprintf(f, "\n <host id=\"RAM\" %s=\"1%s\"/>\n", speed, flops);
  2239. /*
  2240. * Compute maximum bandwidth, taken as host bandwidth
  2241. */
  2242. double max_bandwidth = 0;
  2243. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  2244. unsigned numa;
  2245. #endif
  2246. #ifdef STARPU_USE_CUDA
  2247. for (i = 0; i < ncuda; i++)
  2248. {
  2249. for (numa = 0; numa < nnumas; numa++)
  2250. {
  2251. double down_bw = 1.0 / cudadev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_dtoh;
  2252. double up_bw = 1.0 / cudadev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_htod;
  2253. if (max_bandwidth < down_bw)
  2254. max_bandwidth = down_bw;
  2255. if (max_bandwidth < up_bw)
  2256. max_bandwidth = up_bw;
  2257. }
  2258. }
  2259. #endif
  2260. #ifdef STARPU_USE_OPENCL
  2261. for (i = 0; i < nopencl; i++)
  2262. {
  2263. for (numa = 0; numa < nnumas; numa++)
  2264. {
  2265. double down_bw = 1.0 / opencldev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_dtoh;
  2266. double up_bw = 1.0 / opencldev_timing_per_numa[i*STARPU_MAXNUMANODES+numa].timing_htod;
  2267. if (max_bandwidth < down_bw)
  2268. max_bandwidth = down_bw;
  2269. if (max_bandwidth < up_bw)
  2270. max_bandwidth = up_bw;
  2271. }
  2272. }
  2273. #endif
  2274. fprintf(f, "\n <link id=\"Host\" bandwidth=\"%f%s\" latency=\"0.000000%s\"/>\n\n", max_bandwidth*1000000, Bps, s);
  2275. /*
  2276. * OpenCL links
  2277. */
  2278. #ifdef STARPU_USE_OPENCL
  2279. for (i = 0; i < nopencl; i++)
  2280. {
  2281. char i_name[16];
  2282. snprintf(i_name, sizeof(i_name), "OpenCL%u", i);
  2283. fprintf(f, " <link id=\"RAM-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2284. i_name,
  2285. 1000000 / search_bus_best_timing(i, "OpenCL", 1), Bps,
  2286. search_bus_best_latency(i, "OpenCL", 1)/1000000., s);
  2287. fprintf(f, " <link id=\"%s-RAM\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2288. i_name,
  2289. 1000000 / search_bus_best_timing(i, "OpenCL", 0), Bps,
  2290. search_bus_best_latency(i, "OpenCL", 0)/1000000., s);
  2291. }
  2292. fprintf(f, "\n");
  2293. #endif
  2294. /*
  2295. * CUDA links and routes
  2296. */
  2297. #ifdef STARPU_USE_CUDA
  2298. /* Write RAM/CUDA bandwidths and latencies */
  2299. for (i = 0; i < ncuda; i++)
  2300. {
  2301. char i_name[16];
  2302. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2303. fprintf(f, " <link id=\"RAM-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2304. i_name,
  2305. 1000000. / search_bus_best_timing(i, "CUDA", 1), Bps,
  2306. search_bus_best_latency(i, "CUDA", 1)/1000000., s);
  2307. fprintf(f, " <link id=\"%s-RAM\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2308. i_name,
  2309. 1000000. / search_bus_best_timing(i, "CUDA", 0), Bps,
  2310. search_bus_best_latency(i, "CUDA", 0)/1000000., s);
  2311. }
  2312. fprintf(f, "\n");
  2313. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2314. /* Write CUDA/CUDA bandwidths and latencies */
  2315. for (i = 0; i < ncuda; i++)
  2316. {
  2317. unsigned j;
  2318. char i_name[16];
  2319. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2320. for (j = 0; j < ncuda; j++)
  2321. {
  2322. char j_name[16];
  2323. if (j == i)
  2324. continue;
  2325. snprintf(j_name, sizeof(j_name), "CUDA%u", j);
  2326. fprintf(f, " <link id=\"%s-%s\" bandwidth=\"%f%s\" latency=\"%f%s\"/>\n",
  2327. i_name, j_name,
  2328. 1000000. / cudadev_timing_dtod[i][j], Bps,
  2329. cudadev_latency_dtod[i][j]/1000000., s);
  2330. }
  2331. }
  2332. #endif
  2333. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX && defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
  2334. /* If we have enough hwloc information, write PCI bandwidths and routes */
  2335. if (!starpu_get_env_number_default("STARPU_PCI_FLAT", 0))
  2336. {
  2337. hwloc_topology_t topology;
  2338. hwloc_topology_init(&topology);
  2339. _starpu_topology_filter(topology);
  2340. hwloc_topology_load(topology);
  2341. /* First find paths and record measured bandwidth along the path */
  2342. for (i = 0; i < ncuda; i++)
  2343. {
  2344. unsigned j;
  2345. for (j = 0; j < ncuda; j++)
  2346. if (i != j)
  2347. if (!find_platform_cuda_path(topology, i, j, 1000000. / cudadev_timing_dtod[i][j]))
  2348. {
  2349. clean_topology(hwloc_get_root_obj(topology));
  2350. hwloc_topology_destroy(topology);
  2351. goto flat_cuda;
  2352. }
  2353. /* Record RAM/CUDA bandwidths */
  2354. find_platform_forward_path(hwloc_cuda_get_device_osdev_by_index(topology, i), 1000000. / search_bus_best_timing(i, "CUDA", 0));
  2355. find_platform_backward_path(hwloc_cuda_get_device_osdev_by_index(topology, i), 1000000. / search_bus_best_timing(i, "CUDA", 1));
  2356. }
  2357. /* Ok, found path in all cases, can emit advanced platform routes */
  2358. fprintf(f, "\n");
  2359. emit_topology_bandwidths(f, hwloc_get_root_obj(topology), Bps, s);
  2360. fprintf(f, "\n");
  2361. for (i = 0; i < ncuda; i++)
  2362. {
  2363. unsigned j;
  2364. for (j = 0; j < ncuda; j++)
  2365. if (i != j)
  2366. {
  2367. fprintf(f, " <route src=\"CUDA%u\" dst=\"CUDA%u\" symmetrical=\"NO\">\n", i, j);
  2368. fprintf(f, " <link_ctn id=\"CUDA%u-CUDA%u\"/>\n", i, j);
  2369. emit_platform_path_up(f,
  2370. hwloc_cuda_get_device_osdev_by_index(topology, i),
  2371. hwloc_cuda_get_device_osdev_by_index(topology, j));
  2372. fprintf(f, " </route>\n");
  2373. }
  2374. fprintf(f, " <route src=\"CUDA%u\" dst=\"RAM\" symmetrical=\"NO\">\n", i);
  2375. fprintf(f, " <link_ctn id=\"CUDA%u-RAM\"/>\n", i);
  2376. emit_platform_forward_path(f, hwloc_cuda_get_device_osdev_by_index(topology, i));
  2377. fprintf(f, " </route>\n");
  2378. fprintf(f, " <route src=\"RAM\" dst=\"CUDA%u\" symmetrical=\"NO\">\n", i);
  2379. fprintf(f, " <link_ctn id=\"RAM-CUDA%u\"/>\n", i);
  2380. emit_platform_backward_path(f, hwloc_cuda_get_device_osdev_by_index(topology, i));
  2381. fprintf(f, " </route>\n");
  2382. }
  2383. clean_topology(hwloc_get_root_obj(topology));
  2384. hwloc_topology_destroy(topology);
  2385. }
  2386. else
  2387. {
  2388. flat_cuda:
  2389. #else
  2390. {
  2391. #endif
  2392. /* If we don't have enough hwloc information, write trivial routes always through host */
  2393. for (i = 0; i < ncuda; i++)
  2394. {
  2395. char i_name[16];
  2396. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2397. fprintf(f, " <route src=\"RAM\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"RAM-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2398. fprintf(f, " <route src=\"%s\" dst=\"RAM\" symmetrical=\"NO\"><link_ctn id=\"%s-RAM\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2399. }
  2400. #ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
  2401. for (i = 0; i < ncuda; i++)
  2402. {
  2403. unsigned j;
  2404. char i_name[16];
  2405. snprintf(i_name, sizeof(i_name), "CUDA%u", i);
  2406. for (j = 0; j < ncuda; j++)
  2407. {
  2408. char j_name[16];
  2409. if (j == i)
  2410. continue;
  2411. snprintf(j_name, sizeof(j_name), "CUDA%u", j);
  2412. fprintf(f, " <route src=\"%s\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"%s-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, j_name, i_name, j_name);
  2413. }
  2414. }
  2415. #endif
2416. } /* end of the flat (host-routed) CUDA route emission */
  2417. fprintf(f, "\n");
  2418. #endif /* STARPU_USE_CUDA */
  2419. /*
  2420. * OpenCL routes
  2421. */
  2422. #ifdef STARPU_USE_OPENCL
  2423. for (i = 0; i < nopencl; i++)
  2424. {
  2425. char i_name[16];
  2426. snprintf(i_name, sizeof(i_name), "OpenCL%u", i);
  2427. fprintf(f, " <route src=\"RAM\" dst=\"%s\" symmetrical=\"NO\"><link_ctn id=\"RAM-%s\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2428. fprintf(f, " <route src=\"%s\" dst=\"RAM\" symmetrical=\"NO\"><link_ctn id=\"%s-RAM\"/><link_ctn id=\"Host\"/></route>\n", i_name, i_name);
  2429. }
  2430. #endif
  2431. fprintf(f,
  2432. " </AS>\n"
  2433. " </platform>\n"
  2434. );
  2435. if (locked)
  2436. _starpu_fwrunlock(f);
  2437. fclose(f);
  2438. }
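/* The generated platform file has roughly the following shape (heavily
 * abbreviated, hypothetical values, SimGrid v4 flavour):
 *
 *   <?xml version='1.0'?>
 *   <!DOCTYPE platform SYSTEM 'http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd'>
 *   <platform version="4">
 *     <AS id="AS0" routing="Full">
 *       <host id="MAIN" speed="1f"/>
 *       <host id="CPU0" speed="2000000000f"/>
 *       <host id="CUDA0" speed="2000000000f"> ... </host>
 *       <host id="RAM" speed="1f"/>
 *       <link id="Host" bandwidth="..." latency="0.000000s"/>
 *       <link id="RAM-CUDA0" bandwidth="..." latency="..."/>
 *       <route src="RAM" dst="CUDA0" symmetrical="NO"> ... </route>
 *     </AS>
 *   </platform>
 */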
  2439. static void generate_bus_platform_file(void)
  2440. {
  2441. if (!was_benchmarked)
  2442. benchmark_all_gpu_devices();
  2443. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2444. /* Slaves don't write files */
  2445. if (!_starpu_mpi_common_is_src_node())
  2446. return;
  2447. #endif
  2448. write_bus_platform_file_content(3);
  2449. write_bus_platform_file_content(4);
  2450. }
  2451. static void check_bus_platform_file(void)
  2452. {
  2453. int res;
  2454. char path[PATH_LENGTH];
  2455. _starpu_simgrid_get_platform_path(4, path, sizeof(path));
  2456. res = access(path, F_OK);
  2457. if (!res)
  2458. {
  2459. _starpu_simgrid_get_platform_path(3, path, sizeof(path));
  2460. res = access(path, F_OK);
  2461. }
  2462. if (res)
  2463. {
  2464. /* File does not exist yet */
  2465. generate_bus_platform_file();
  2466. }
  2467. }
  2468. /*
  2469. * Generic
  2470. */
  2471. static void _starpu_bus_force_sampling(void)
  2472. {
  2473. _STARPU_DEBUG("Force bus sampling ...\n");
  2474. _starpu_create_sampling_directory_if_needed();
  2475. generate_bus_affinity_file();
  2476. generate_bus_latency_file();
  2477. generate_bus_bandwidth_file();
  2478. generate_bus_config_file();
  2479. generate_bus_platform_file();
  2480. }
  2481. #endif /* !SIMGRID */
  2482. void _starpu_load_bus_performance_files(void)
  2483. {
  2484. _starpu_create_sampling_directory_if_needed();
  2485. struct _starpu_machine_config * config = _starpu_get_machine_config();
  2486. nnumas = _starpu_topology_get_nnumanodes(config);
  2487. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_SIMGRID)
  2488. ncuda = _starpu_get_cuda_device_count();
  2489. #endif
  2490. #if defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SIMGRID)
  2491. nopencl = _starpu_opencl_get_device_count();
  2492. #endif
  2493. #if defined(STARPU_USE_MPI_MASTER_SLAVE) || defined(STARPU_USE_SIMGRID)
  2494. nmpi_ms = _starpu_mpi_src_get_device_count();
  2495. #endif
  2496. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_SIMGRID)
  2497. nmic = _starpu_mic_src_get_device_count();
  2498. #endif
  2499. #ifndef STARPU_SIMGRID
  2500. check_bus_config_file();
  2501. #endif
  2502. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2503. /* be sure that master wrote the perf files */
  2504. _starpu_mpi_common_barrier();
  2505. #endif
  2506. #ifndef STARPU_SIMGRID
  2507. load_bus_affinity_file();
  2508. #endif
  2509. load_bus_latency_file();
  2510. load_bus_bandwidth_file();
  2511. #ifndef STARPU_SIMGRID
  2512. check_bus_platform_file();
  2513. #endif
  2514. }
  2515. /* (in MB/s) */
  2516. double starpu_transfer_bandwidth(unsigned src_node, unsigned dst_node)
  2517. {
  2518. return bandwidth_matrix[src_node][dst_node];
  2519. }
  2520. /* (in µs) */
  2521. double starpu_transfer_latency(unsigned src_node, unsigned dst_node)
  2522. {
  2523. return latency_matrix[src_node][dst_node];
  2524. }
  2525. /* (in µs) */
  2526. double starpu_transfer_predict(unsigned src_node, unsigned dst_node, size_t size)
  2527. {
  2528. if (src_node == dst_node)
  2529. return 0;
  2530. double bandwidth = bandwidth_matrix[src_node][dst_node];
  2531. double latency = latency_matrix[src_node][dst_node];
  2532. struct _starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
  2533. #if 0
  2534. int busid = starpu_bus_get_id(src_node, dst_node);
  2535. int direct = starpu_bus_get_direct(busid);
  2536. #endif
  2537. float ngpus = topology->ncudagpus+topology->nopenclgpus;
  2538. #ifdef STARPU_DEVEL
  2539. #warning FIXME: ngpus should not be used e.g. for slow disk transfers...
  2540. #endif
  2541. #if 0
  2542. /* Ideally we should take into account that some GPUs are directly
2543. * connected through a PCI switch, which has less contention than the
  2544. * Host bridge, but doing that seems to *decrease* performance... */
  2545. if (direct)
  2546. {
  2547. float neighbours = starpu_bus_get_ngpus(busid);
  2548. /* Count transfers of these GPUs, and count transfers between
  2549. * other GPUs and these GPUs */
  2550. ngpus = neighbours + (ngpus - neighbours) * neighbours / ngpus;
  2551. }
  2552. #endif
  2553. return latency + (size/bandwidth)*2*ngpus;
  2554. }
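/* Worked example (hypothetical figures): with bandwidth_matrix[src][dst] =
 * 8000 MB/s, latency_matrix[src][dst] = 10 µs and ngpus = 2, a 16 MB
 * transfer (size = 16000000 bytes) is predicted to take
 * 10 + (16000000 / 8000) * 2 * 2 = 8010 µs; dividing a size in bytes by a
 * bandwidth in MB/s directly yields microseconds. */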
2555. /* Compute and save the bandwidth and latency for a disk node */
  2556. /* bandwidth in MB/s - latency in µs */
  2557. void _starpu_save_bandwidth_and_latency_disk(double bandwidth_write, double bandwidth_read, double latency_write, double latency_read, unsigned node, const char *name)
  2558. {
  2559. unsigned int i, j;
  2560. double slowness_disk_between_main_ram, slowness_main_ram_between_node;
  2561. int print_stats = starpu_get_env_number_default("STARPU_BUS_STATS", 0);
  2562. if (print_stats)
  2563. {
  2564. fprintf(stderr, "\n#---------------------\n");
  2565. fprintf(stderr, "Data transfer speed for %s (node %u):\n", name, node);
  2566. }
2567. /* save bandwidth */
  2568. for(i = 0; i < STARPU_MAXNODES; ++i)
  2569. {
  2570. for(j = 0; j < STARPU_MAXNODES; ++j)
  2571. {
  2572. if (i == j && j == node) /* source == destination == node */
  2573. {
  2574. bandwidth_matrix[i][j] = 0;
  2575. }
  2576. else if (i == node) /* source == disk */
  2577. {
2578. /* convert to slowness */
  2579. if(bandwidth_read != 0)
  2580. slowness_disk_between_main_ram = 1/bandwidth_read;
  2581. else
  2582. slowness_disk_between_main_ram = 0;
  2583. if(bandwidth_matrix[STARPU_MAIN_RAM][j] != 0)
  2584. slowness_main_ram_between_node = 1/bandwidth_matrix[STARPU_MAIN_RAM][j];
  2585. else
  2586. slowness_main_ram_between_node = 0;
  2587. bandwidth_matrix[i][j] = 1/(slowness_disk_between_main_ram+slowness_main_ram_between_node);
  2588. if (!isnan(bandwidth_matrix[i][j]) && print_stats)
  2589. fprintf(stderr,"%u -> %u: %.0f MB/s\n", i, j, bandwidth_matrix[i][j]);
  2590. }
  2591. else if (j == node) /* destination == disk */
  2592. {
2593. /* convert to slowness */
  2594. if(bandwidth_write != 0)
  2595. slowness_disk_between_main_ram = 1/bandwidth_write;
  2596. else
  2597. slowness_disk_between_main_ram = 0;
  2598. if(bandwidth_matrix[i][STARPU_MAIN_RAM] != 0)
  2599. slowness_main_ram_between_node = 1/bandwidth_matrix[i][STARPU_MAIN_RAM];
  2600. else
  2601. slowness_main_ram_between_node = 0;
  2602. bandwidth_matrix[i][j] = 1/(slowness_disk_between_main_ram+slowness_main_ram_between_node);
  2603. if (!isnan(bandwidth_matrix[i][j]) && print_stats)
  2604. fprintf(stderr,"%u -> %u: %.0f MB/s\n", i, j, bandwidth_matrix[i][j]);
  2605. }
  2606. else if (j > node || i > node) /* not affected by the node */
  2607. {
  2608. bandwidth_matrix[i][j] = NAN;
  2609. }
  2610. }
  2611. }
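/* Worked example for the composition above (hypothetical figures): with a
 * disk read bandwidth of 500 MB/s and a main-RAM-to-destination bandwidth of
 * 8000 MB/s, the combined bandwidth is 1/(1/500 + 1/8000), i.e. about
 * 470 MB/s; the latencies of the two hops, handled below, simply add up. */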
  2612. /* save latency */
  2613. for(i = 0; i < STARPU_MAXNODES; ++i)
  2614. {
  2615. for(j = 0; j < STARPU_MAXNODES; ++j)
  2616. {
  2617. if (i == j && j == node) /* source == destination == node */
  2618. {
  2619. latency_matrix[i][j] = 0;
  2620. }
  2621. else if (i == node) /* source == disk */
  2622. {
  2623. latency_matrix[i][j] = (latency_write+latency_matrix[STARPU_MAIN_RAM][j]);
  2624. if (!isnan(latency_matrix[i][j]) && print_stats)
  2625. fprintf(stderr,"%u -> %u: %.0f us\n", i, j, latency_matrix[i][j]);
  2626. }
  2627. else if (j == node) /* destination == disk */
  2628. {
  2629. latency_matrix[i][j] = (latency_read+latency_matrix[i][STARPU_MAIN_RAM]);
  2630. if (!isnan(latency_matrix[i][j]) && print_stats)
  2631. fprintf(stderr,"%u -> %u: %.0f us\n", i, j, latency_matrix[i][j]);
  2632. }
  2633. else if (j > node || i > node) /* not affected by the node */
  2634. {
  2635. latency_matrix[i][j] = NAN;
  2636. }
  2637. }
  2638. }
  2639. if (print_stats)
  2640. fprintf(stderr, "\n#---------------------\n");
  2641. }