topology.c

  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. * Copyright (C) 2013 Thibaut Lambert
  5. * Copyright (C) 2016 Uppsala University
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <stdlib.h>
  19. #include <stdio.h>
  20. #include <common/config.h>
  21. #ifdef HAVE_UNISTD_H
  22. #include <unistd.h>
  23. #endif
  24. #include <core/workers.h>
  25. #include <core/debug.h>
  26. #include <core/topology.h>
  27. #include <drivers/cuda/driver_cuda.h>
  28. #include <drivers/cpu/driver_cpu.h>
  29. #include <drivers/mic/driver_mic_source.h>
  30. #include <drivers/mpi/driver_mpi_source.h>
  31. #include <drivers/mpi/driver_mpi_common.h>
  32. #include <drivers/mp_common/source_common.h>
  33. #include <drivers/opencl/driver_opencl.h>
  34. #include <drivers/opencl/driver_opencl_utils.h>
  35. #include <profiling/profiling.h>
  36. #include <datawizard/datastats.h>
  37. #include <datawizard/memory_nodes.h>
  38. #include <datawizard/memory_manager.h>
  39. #include <common/uthash.h>
  40. #ifdef STARPU_HAVE_HWLOC
  41. #include <hwloc.h>
  42. #ifndef HWLOC_API_VERSION
  43. #define HWLOC_OBJ_PU HWLOC_OBJ_PROC
  44. #endif
  45. #if HWLOC_API_VERSION < 0x00010b00
  46. #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
  47. #endif
  48. #endif
  49. #ifdef STARPU_HAVE_WINDOWS
  50. #include <windows.h>
  51. #endif
  52. #ifdef STARPU_SIMGRID
  53. #include <core/simgrid.h>
  54. #endif
  55. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  56. #include <hwloc/cuda.h>
  57. #endif
  58. #if defined(STARPU_HAVE_HWLOC) && defined(STARPU_USE_OPENCL)
  59. #include <hwloc/opencl.h>
  60. #endif
  61. static unsigned topology_is_initialized = 0;
  62. static int nobind;
  63. static int numa_enabled = -1;
  64. /* For checking whether two workers share the same PU, indexed by PU number */
  65. static int cpu_worker[STARPU_MAXCPUS];
  66. static char * cpu_name[STARPU_MAXCPUS];
  67. static unsigned nb_numa_nodes = 0;
  68. static int numa_memory_nodes_to_hwloclogid[STARPU_MAXNUMANODES]; /* indexed by StarPU NUMA node, to convert to the hwloc logical id */
  69. static int numa_memory_nodes_to_physicalid[STARPU_MAXNUMANODES]; /* indexed by StarPU NUMA node, to convert to the physical (OS) id */
  70. static unsigned numa_bus_id[STARPU_MAXNUMANODES*STARPU_MAXNUMANODES];
  71. static int _starpu_get_logical_numa_node_worker(unsigned workerid);
  72. #define STARPU_NUMA_UNINITIALIZED (-2)
  73. #define STARPU_NUMA_MAIN_RAM (-1)
  74. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  75. struct handle_entry
  76. {
  77. UT_hash_handle hh;
  78. unsigned gpuid;
  79. };
  80. # if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  81. /* Entry in the `devices_using_cuda' hash table. */
  82. static struct handle_entry *devices_using_cuda;
  83. # endif
  84. static unsigned may_bind_automatically[STARPU_NARCH] = { 0 };
  85. #endif // STARPU_USE_CUDA || STARPU_USE_OPENCL || STARPU_SIMGRID || STARPU_USE_MPI_MASTER_SLAVE
  86. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  87. static struct _starpu_worker_set cuda_worker_set[STARPU_MAXCUDADEVS];
  88. #endif
  89. #ifdef STARPU_USE_MIC
  90. static struct _starpu_worker_set mic_worker_set[STARPU_MAXMICDEVS];
  91. #endif
  92. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  93. struct _starpu_worker_set mpi_worker_set[STARPU_MAXMPIDEVS];
  94. #endif
  95. int starpu_memory_nodes_get_numa_count(void)
  96. {
  97. return nb_numa_nodes;
  98. }
  99. #if defined(STARPU_HAVE_HWLOC)
  100. static hwloc_obj_t numa_get_obj(hwloc_obj_t obj)
  101. {
  102. #if HWLOC_API_VERSION >= 0x00020000
  103. while (obj->memory_first_child == NULL)
  104. {
  105. obj = obj->parent;
  106. if (!obj)
  107. return NULL;
  108. }
  109. return obj->memory_first_child;
  110. #else
  111. while (obj->type != HWLOC_OBJ_NUMANODE)
  112. {
  113. obj = obj->parent;
  114. /* If we don't find a "node" obj before the root, this means
  115. * hwloc does not know whether there are numa nodes or not, so
  116. * we should not use a per-node sampling in that case. */
  117. if (!obj)
  118. return NULL;
  119. }
  120. return obj;
  121. #endif
  122. }
  123. static int numa_get_logical_id(hwloc_obj_t obj)
  124. {
  125. STARPU_ASSERT(obj);
  126. obj = numa_get_obj(obj);
  127. if (!obj)
  128. return 0;
  129. return obj->logical_index;
  130. }
  131. static int numa_get_physical_id(hwloc_obj_t obj)
  132. {
  133. STARPU_ASSERT(obj);
  134. obj = numa_get_obj(obj);
  135. if (!obj)
  136. return 0;
  137. return obj->os_index;
  138. }
  139. #endif
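/* Note: hwloc exposes two indexes for a NUMA node: the logical index (a
 * contiguous numbering computed by hwloc) and the OS/physical index (whatever
 * identifier the operating system uses). numa_get_logical_id() returns the
 * former and numa_get_physical_id() the latter; both fall back to 0 when no
 * NUMA object is found above the given object. */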
  140. /* This returns the exact NUMA node next to a worker */
  141. static int _starpu_get_logical_numa_node_worker(unsigned workerid)
  142. {
  143. #if defined(STARPU_HAVE_HWLOC)
  144. STARPU_ASSERT(numa_enabled != -1);
  145. if (numa_enabled)
  146. {
  147. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  148. struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config() ;
  149. struct _starpu_machine_topology *topology = &config->topology ;
  150. hwloc_obj_t obj;
  151. switch(worker->arch)
  152. {
  153. case STARPU_CPU_WORKER:
  154. obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, worker->bindid) ;
  155. break;
  156. default:
  157. STARPU_ABORT();
  158. }
  159. return numa_get_logical_id(obj);
  160. }
  161. else
  162. #endif
  163. {
  164. (void) workerid; /* unused */
  165. return STARPU_NUMA_MAIN_RAM;
  166. }
  167. }
  168. /* This returns the exact NUMA node next to a worker */
  169. static int _starpu_get_physical_numa_node_worker(unsigned workerid)
  170. {
  171. #if defined(STARPU_HAVE_HWLOC)
  172. STARPU_ASSERT(numa_enabled != -1);
  173. if (numa_enabled)
  174. {
  175. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  176. struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config() ;
  177. struct _starpu_machine_topology *topology = &config->topology ;
  178. hwloc_obj_t obj;
  179. switch(worker->arch)
  180. {
  181. case STARPU_CPU_WORKER:
  182. obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, worker->bindid) ;
  183. break;
  184. default:
  185. STARPU_ABORT();
  186. }
  187. return numa_get_physical_id(obj);
  188. }
  189. else
  190. #endif
  191. {
  192. (void) workerid; /* unused */
  193. return STARPU_NUMA_MAIN_RAM;
  194. }
  195. }
  196. /* This returns the CPU NUMA memory node close to a worker */
  197. static int _starpu_get_logical_close_numa_node_worker(unsigned workerid)
  198. {
  199. #if defined(STARPU_HAVE_HWLOC)
  200. STARPU_ASSERT(numa_enabled != -1);
  201. if (numa_enabled)
  202. {
  203. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  204. struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config() ;
  205. struct _starpu_machine_topology *topology = &config->topology ;
  206. hwloc_obj_t obj;
  207. switch(worker->arch)
  208. {
  209. default:
  210. obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, worker->bindid) ;
  211. break;
  212. #ifndef STARPU_SIMGRID
  213. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  214. case STARPU_CUDA_WORKER:
  215. obj = hwloc_cuda_get_device_osdev_by_index(topology->hwtopology, worker->devid);
  216. if (!obj)
  217. obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, worker->bindid) ;
  218. break;
  219. #endif
  220. #endif
  221. }
  222. return numa_get_logical_id(obj);
  223. }
  224. else
  225. #endif
  226. {
  227. (void) workerid; /* unused */
  228. return STARPU_NUMA_MAIN_RAM;
  229. }
  230. }
  231. //TODO: change this into an array lookup
  232. int starpu_memory_nodes_numa_hwloclogid_to_id(int logid)
  233. {
  234. unsigned n;
  235. for (n = 0; n < nb_numa_nodes; n++)
  236. if (numa_memory_nodes_to_hwloclogid[n] == logid)
  237. return n;
  238. return -1;
  239. }
  240. int starpu_memory_nodes_numa_id_to_hwloclogid(unsigned id)
  241. {
  242. STARPU_ASSERT(id < STARPU_MAXNUMANODES);
  243. return numa_memory_nodes_to_hwloclogid[id];
  244. }
  245. int starpu_memory_nodes_numa_devid_to_id(unsigned id)
  246. {
  247. STARPU_ASSERT(id < STARPU_MAXNUMANODES);
  248. return numa_memory_nodes_to_physicalid[id];
  249. }
  250. //TODO: change this into an array lookup
  251. int starpu_memory_nodes_numa_id_to_devid(int osid)
  252. {
  253. unsigned n;
  254. for (n = 0; n < nb_numa_nodes; n++)
  255. if (numa_memory_nodes_to_physicalid[n] == osid)
  256. return n;
  257. return -1;
  258. }
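/* Illustrative sketch (not compiled): the two lookup directions above are
 * inverses of each other for NUMA nodes that were actually registered, so a
 * registered StarPU NUMA id can be round-tripped through its hwloc logical
 * id. The helper name below is only for the example. */
#if 0
static void example_numa_id_round_trip(unsigned starpu_numa_id)
{
	int logid = starpu_memory_nodes_numa_id_to_hwloclogid(starpu_numa_id);
	/* Skip the sentinel values used for uninitialized entries and main RAM */
	if (logid != STARPU_NUMA_MAIN_RAM && logid != STARPU_NUMA_UNINITIALIZED)
		STARPU_ASSERT(starpu_memory_nodes_numa_hwloclogid_to_id(logid) == (int) starpu_numa_id);
}
#endif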
  259. // TODO: cache the values instead of looking in hwloc each time
  260. /* Avoid using this one, prefer _starpu_task_data_get_node_on_worker */
  261. int _starpu_task_data_get_node_on_node(struct starpu_task *task, unsigned index, unsigned local_node)
  262. {
  263. int node = STARPU_SPECIFIC_NODE_LOCAL;
  264. if (task->cl->specific_nodes)
  265. node = STARPU_CODELET_GET_NODE(task->cl, index);
  266. switch (node)
  267. {
  268. case STARPU_SPECIFIC_NODE_LOCAL:
  269. // TODO: rather find MCDRAM
  270. node = local_node;
  271. break;
  272. case STARPU_SPECIFIC_NODE_CPU:
  273. switch (starpu_node_get_kind(local_node))
  274. {
  275. case STARPU_CPU_RAM:
  276. node = local_node;
  277. break;
  278. default:
  279. // TODO: rather take close NUMA node
  280. node = STARPU_MAIN_RAM;
  281. break;
  282. }
  283. break;
  284. case STARPU_SPECIFIC_NODE_SLOW:
  285. // TODO: rather leave in DDR
  286. node = local_node;
  287. break;
  288. case STARPU_SPECIFIC_NODE_LOCAL_OR_CPU:
  289. if (task->handles[index]->per_node[local_node].state != STARPU_INVALID)
  290. {
  291. /* It is here already, rather access it from here */
  292. node = local_node;
  293. }
  294. else
  295. {
  296. /* It is not here already, do not bother moving it */
  297. node = STARPU_MAIN_RAM;
  298. }
  299. break;
  300. }
  301. return node;
  302. }
  303. int _starpu_task_data_get_node_on_worker(struct starpu_task *task, unsigned index, unsigned worker)
  304. {
  305. unsigned local_node = starpu_worker_get_memory_node(worker);
  306. int node = STARPU_SPECIFIC_NODE_LOCAL;
  307. if (task->cl->specific_nodes)
  308. node = STARPU_CODELET_GET_NODE(task->cl, index);
  309. switch (node)
  310. {
  311. case STARPU_SPECIFIC_NODE_LOCAL:
  312. // TODO: rather find MCDRAM
  313. node = local_node;
  314. break;
  315. case STARPU_SPECIFIC_NODE_CPU:
  316. node = starpu_memory_nodes_numa_hwloclogid_to_id(_starpu_get_logical_close_numa_node_worker(worker));
  317. if (node == -1)
  318. node = STARPU_MAIN_RAM;
  319. break;
  320. case STARPU_SPECIFIC_NODE_SLOW:
  321. // TODO: rather leave in DDR
  322. node = local_node;
  323. break;
  324. case STARPU_SPECIFIC_NODE_LOCAL_OR_CPU:
  325. if (task->handles[index]->per_node[local_node].state != STARPU_INVALID)
  326. {
  327. /* It is here already, rather access it from here */
  328. node = local_node;
  329. }
  330. else
  331. {
  332. /* It is not here already, do not bother moving it */
  333. node = STARPU_MAIN_RAM;
  334. }
  335. break;
  336. }
  337. return node;
  338. }
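/* Illustrative sketch (not compiled): a codelet using the specific_nodes
 * support resolved above, keeping its second buffer in CPU RAM (a close NUMA
 * node) while the first one follows the worker's local memory node.
 * example_cl and example_cuda_kernel are placeholders, not existing symbols. */
#if 0
static struct starpu_codelet example_cl =
{
	.cuda_funcs = { example_cuda_kernel },
	.nbuffers = 2,
	.modes = { STARPU_R, STARPU_RW },
	.specific_nodes = 1,
	.nodes = { STARPU_SPECIFIC_NODE_LOCAL, STARPU_SPECIFIC_NODE_CPU },
};
#endif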
  339. struct _starpu_worker *_starpu_get_worker_from_driver(struct starpu_driver *d)
  340. {
  341. unsigned nworkers = starpu_worker_get_count();
  342. unsigned workerid;
  343. for (workerid = 0; workerid < nworkers; workerid++)
  344. {
  345. if (starpu_worker_get_type(workerid) == d->type)
  346. {
  347. struct _starpu_worker *worker;
  348. worker = _starpu_get_worker_struct(workerid);
  349. switch (d->type)
  350. {
  351. #ifdef STARPU_USE_CPU
  352. case STARPU_CPU_WORKER:
  353. if (worker->devid == d->id.cpu_id)
  354. return worker;
  355. break;
  356. #endif
  357. #ifdef STARPU_USE_OPENCL
  358. case STARPU_OPENCL_WORKER:
  359. {
  360. cl_device_id device;
  361. starpu_opencl_get_device(worker->devid, &device);
  362. if (device == d->id.opencl_id)
  363. return worker;
  364. break;
  365. }
  366. #endif
  367. #ifdef STARPU_USE_CUDA
  368. case STARPU_CUDA_WORKER:
  369. {
  370. if (worker->devid == d->id.cuda_id)
  371. return worker;
  372. break;
  373. }
  374. #endif
  375. default:
  376. (void) worker;
  377. _STARPU_DEBUG("Invalid device type\n");
  378. return NULL;
  379. }
  380. }
  381. }
  382. return NULL;
  383. }
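/* Illustrative sketch (not compiled): looking up the worker structure that
 * matches a CPU driver descriptor. The helper name and the devid value 0 are
 * arbitrary, chosen only for the example. */
#if 0
static struct _starpu_worker *example_find_cpu_worker(void)
{
	struct starpu_driver d;
	d.type = STARPU_CPU_WORKER;
	d.id.cpu_id = 0;
	/* Returns NULL if no CPU worker drives device 0 */
	return _starpu_get_worker_from_driver(&d);
}
#endif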
  384. /*
  385. * Discover the topology of the machine
  386. */
  387. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  388. static void _starpu_initialize_workers_deviceid(int *explicit_workers_gpuid,
  389. int *current, int *workers_gpuid,
  390. const char *varname, unsigned nhwgpus,
  391. enum starpu_worker_archtype type)
  392. {
  393. char *strval;
  394. unsigned i;
  395. *current = 0;
  396. /* conf->workers_gpuid indicates the successive GPU identifiers that
  397. * should be used to bind the workers. It should be either filled
  398. * according to the user's explicit parameters (from starpu_conf) or
  399. * according to the STARPU_WORKERS_CUDAID env. variable. Otherwise, a
  400. * round-robin policy is used to distribute the workers over the
  401. * cores. */
  402. /* what do we use, explicit value, env. variable, or round-robin ? */
  403. strval = starpu_getenv(varname);
  404. if (strval)
  405. {
  406. /* STARPU_WORKERS_CUDAID certainly contains fewer entries than
  407. * STARPU_NMAXWORKERS, so we reuse its entries in a round
  408. * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
  409. * 2". */
  410. unsigned wrap = 0;
  411. unsigned number_of_entries = 0;
  412. char *endptr;
  413. /* we use the content of the STARPU_WORKERS_CUDAID
  414. * env. variable */
  415. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  416. {
  417. if (!wrap)
  418. {
  419. long int val;
  420. val = strtol(strval, &endptr, 10);
  421. if (endptr != strval)
  422. {
  423. workers_gpuid[i] = (unsigned)val;
  424. strval = endptr;
  425. }
  426. else
  427. {
  428. /* there must be at least one entry */
  429. STARPU_ASSERT(i != 0);
  430. number_of_entries = i;
  431. /* there are no more values in the
  432. * string */
  433. wrap = 1;
  434. workers_gpuid[i] = workers_gpuid[0];
  435. }
  436. }
  437. else
  438. {
  439. workers_gpuid[i] =
  440. workers_gpuid[i % number_of_entries];
  441. }
  442. }
  443. }
  444. else if (explicit_workers_gpuid)
  445. {
  446. /* we use the explicit value from the user */
  447. memcpy(workers_gpuid,
  448. explicit_workers_gpuid,
  449. STARPU_NMAXWORKERS*sizeof(unsigned));
  450. }
  451. else
  452. {
  453. /* by default, we take a round robin policy */
  454. if (nhwgpus > 0)
  455. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  456. workers_gpuid[i] = (unsigned)(i % nhwgpus);
  457. /* StarPU can use sampling techniques to bind threads
  458. * correctly */
  459. may_bind_automatically[type] = 1;
  460. }
  461. }
  462. #endif
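/* Example of the wrapping above: with STARPU_WORKERS_CUDAID="1 2", the
 * workers_gpuid array is filled as 1 2 1 2 1 2 ..., i.e. the two listed
 * devices are reused round-robin for all STARPU_NMAXWORKERS entries. */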
  463. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  464. static void _starpu_initialize_workers_cuda_gpuid(struct _starpu_machine_config *config)
  465. {
  466. struct _starpu_machine_topology *topology = &config->topology;
  467. struct starpu_conf *uconf = &config->conf;
  468. _starpu_initialize_workers_deviceid(uconf->use_explicit_workers_cuda_gpuid == 0
  469. ? NULL
  470. : (int *)uconf->workers_cuda_gpuid,
  471. &(config->current_cuda_gpuid),
  472. (int *)topology->workers_cuda_gpuid,
  473. "STARPU_WORKERS_CUDAID",
  474. topology->nhwcudagpus,
  475. STARPU_CUDA_WORKER);
  476. }
  477. static inline int _starpu_get_next_cuda_gpuid(struct _starpu_machine_config *config)
  478. {
  479. unsigned i = ((config->current_cuda_gpuid++) % config->topology.ncudagpus);
  480. return (int)config->topology.workers_cuda_gpuid[i];
  481. }
  482. #endif
  483. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  484. static void _starpu_initialize_workers_opencl_gpuid(struct _starpu_machine_config*config)
  485. {
  486. struct _starpu_machine_topology *topology = &config->topology;
  487. struct starpu_conf *uconf = &config->conf;
  488. _starpu_initialize_workers_deviceid(uconf->use_explicit_workers_opencl_gpuid == 0
  489. ? NULL
  490. : (int *)uconf->workers_opencl_gpuid,
  491. &(config->current_opencl_gpuid),
  492. (int *)topology->workers_opencl_gpuid,
  493. "STARPU_WORKERS_OPENCLID",
  494. topology->nhwopenclgpus,
  495. STARPU_OPENCL_WORKER);
  496. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  497. // Detect devices which are already used with CUDA
  498. {
  499. unsigned tmp[STARPU_NMAXWORKERS];
  500. unsigned nb=0;
  501. int i;
  502. for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
  503. {
  504. struct handle_entry *entry;
  505. int devid = config->topology.workers_opencl_gpuid[i];
  506. HASH_FIND_INT(devices_using_cuda, &devid, entry);
  507. if (entry == NULL)
  508. {
  509. tmp[nb] = topology->workers_opencl_gpuid[i];
  510. nb++;
  511. }
  512. }
  513. for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
  514. tmp[i] = -1;
  515. memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
  516. }
  517. #endif /* STARPU_USE_CUDA */
  518. {
  519. // Detect identical devices
  520. struct handle_entry *devices_already_used = NULL;
  521. unsigned tmp[STARPU_NMAXWORKERS];
  522. unsigned nb=0;
  523. int i;
  524. for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
  525. {
  526. int devid = topology->workers_opencl_gpuid[i];
  527. struct handle_entry *entry;
  528. HASH_FIND_INT(devices_already_used, &devid, entry);
  529. if (entry == NULL)
  530. {
  531. struct handle_entry *entry2;
  532. _STARPU_MALLOC(entry2, sizeof(*entry2));
  533. entry2->gpuid = devid;
  534. HASH_ADD_INT(devices_already_used, gpuid,
  535. entry2);
  536. tmp[nb] = devid;
  537. nb ++;
  538. }
  539. }
  540. struct handle_entry *entry=NULL, *tempo=NULL;
  541. HASH_ITER(hh, devices_already_used, entry, tempo)
  542. {
  543. HASH_DEL(devices_already_used, entry);
  544. free(entry);
  545. }
  546. for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
  547. tmp[i] = -1;
  548. memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
  549. }
  550. }
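/* Summary of the filtering above: the OpenCL device list is first purged of
 * devices already driven through CUDA (when CUDA support is built), then of
 * duplicate entries; the remaining slots are set to -1. */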
  551. static inline int _starpu_get_next_opencl_gpuid(struct _starpu_machine_config *config)
  552. {
  553. unsigned i = ((config->current_opencl_gpuid++) % config->topology.nopenclgpus);
  554. return (int)config->topology.workers_opencl_gpuid[i];
  555. }
  556. #endif
  557. #if 0
  558. #if defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID)
  559. static void _starpu_initialize_workers_mic_deviceid(struct _starpu_machine_config *config)
  560. {
  561. struct _starpu_machine_topology *topology = &config->topology;
  562. struct starpu_conf *uconf = &config->conf;
  563. _starpu_initialize_workers_deviceid(uconf->use_explicit_workers_mic_deviceid == 0
  564. ? NULL
  565. : (int *)config->user_conf->workers_mic_deviceid,
  566. &(config->current_mic_deviceid),
  567. (int *)topology->workers_mic_deviceid,
  568. "STARPU_WORKERS_MICID",
  569. topology->nhwmiccores,
  570. STARPU_MIC_WORKER);
  571. }
  572. #endif
  573. #endif
  574. #if 0
  575. #ifdef STARPU_USE_MIC
  576. static inline int _starpu_get_next_mic_deviceid(struct _starpu_machine_config *config)
  577. {
  578. unsigned i = ((config->current_mic_deviceid++) % config->topology.nmicdevices);
  579. return (int)config->topology.workers_mic_deviceid[i];
  580. }
  581. #endif
  582. #endif
  583. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  584. static inline int _starpu_get_next_mpi_deviceid(struct _starpu_machine_config *config)
  585. {
  586. unsigned i = ((config->current_mpi_deviceid++) % config->topology.nmpidevices);
  587. return (int)config->topology.workers_mpi_ms_deviceid[i];
  588. }
  589. static void _starpu_init_mpi_topology(struct _starpu_machine_config *config, long mpi_idx)
  590. {
  591. /* Discover the topology of the mpi node identified by MPI_IDX. That
  592. * means, make this StarPU instance aware of the number of cores available
  593. * on this MPI device. Update the `nhwmpicores' topology field
  594. * accordingly. */
  595. struct _starpu_machine_topology *topology = &config->topology;
  596. int nbcores;
  597. _starpu_src_common_sink_nbcores(_starpu_mpi_ms_nodes[mpi_idx], &nbcores);
  598. topology->nhwmpicores[mpi_idx] = nbcores;
  599. }
  600. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  601. #ifdef STARPU_USE_MIC
  602. static void _starpu_init_mic_topology(struct _starpu_machine_config *config, long mic_idx)
  603. {
  604. /* Discover the topology of the mic node identified by MIC_IDX. That
  605. * means, make this StarPU instance aware of the number of cores available
  606. * on this MIC device. Update the `nhwmiccores' topology field
  607. * accordingly. */
  608. struct _starpu_machine_topology *topology = &config->topology;
  609. int nbcores;
  610. _starpu_src_common_sink_nbcores(_starpu_mic_nodes[mic_idx], &nbcores);
  611. topology->nhwmiccores[mic_idx] = nbcores;
  612. }
  613. static int _starpu_init_mic_node(struct _starpu_machine_config *config, int mic_idx,
  614. COIENGINE *coi_handle, COIPROCESS *coi_process)
  615. {
  616. /* Initialize the MIC node of index MIC_IDX. */
  617. struct starpu_conf *user_conf = &config->conf;
  618. char ***argv = _starpu_get_argv();
  619. const char *suffixes[] = {"-mic", "_mic", NULL};
  620. /* Environment variables to send to the Sink; they inform it what kind
  621. * of node it is (architecture and type), as it has no way to discover
  622. * this by itself */
  623. char mic_idx_env[32];
  624. snprintf(mic_idx_env, sizeof(mic_idx_env), "_STARPU_MIC_DEVID=%d", mic_idx);
  625. /* XXX: this is currently necessary so that the remote process does not
  626. * segfault. */
  627. char nb_mic_env[32];
  628. snprintf(nb_mic_env, sizeof(nb_mic_env), "_STARPU_MIC_NB=%d", 2);
  629. const char *mic_sink_env[] = {"STARPU_SINK=STARPU_MIC", mic_idx_env, nb_mic_env, NULL};
  630. char mic_sink_program_path[1024];
  631. /* Let's get the helper program to run on the MIC device */
  632. int mic_file_found = _starpu_src_common_locate_file(mic_sink_program_path,
  633. sizeof(mic_sink_program_path),
  634. starpu_getenv("STARPU_MIC_SINK_PROGRAM_NAME"),
  635. starpu_getenv("STARPU_MIC_SINK_PROGRAM_PATH"),
  636. user_conf->mic_sink_program_path,
  637. (argv ? (*argv)[0] : NULL),
  638. suffixes);
  639. if (0 != mic_file_found)
  640. {
  641. _STARPU_MSG("No MIC program specified, use the environment\n"
  642. "variable STARPU_MIC_SINK_PROGRAM_NAME, the environment\n"
  643. "variable STARPU_MIC_SINK_PROGRAM_PATH, or the field\n"
  644. "'starpu_conf.mic_sink_program_path' to define it.\n");
  645. return -1;
  646. }
  647. COIRESULT res;
  648. /* Let's get the handle which let us manage the remote MIC device */
  649. res = COIEngineGetHandle(COI_ISA_MIC, mic_idx, coi_handle);
  650. if (STARPU_UNLIKELY(res != COI_SUCCESS))
  651. STARPU_MIC_SRC_REPORT_COI_ERROR(res);
  652. /* We launch the helper on the MIC device, which will wait for us
  653. * to give it work to do.
  654. * As we will communicate further with the device through SCIF we
  655. * don't need to keep the process pointer */
  656. res = COIProcessCreateFromFile(*coi_handle, mic_sink_program_path, 0, NULL, 0,
  657. mic_sink_env, 1, NULL, 0, NULL,
  658. coi_process);
  659. if (STARPU_UNLIKELY(res != COI_SUCCESS))
  660. STARPU_MIC_SRC_REPORT_COI_ERROR(res);
  661. /* Let's create the node structure, we'll communicate with the peer
  662. * through scif thanks to it */
  663. _starpu_mic_nodes[mic_idx] =
  664. _starpu_mp_common_node_create(STARPU_NODE_MIC_SOURCE, mic_idx);
  665. return 0;
  666. }
  667. #endif
  668. #ifndef STARPU_SIMGRID
  669. #ifdef STARPU_HAVE_HWLOC
  670. static void _starpu_allocate_topology_userdata(hwloc_obj_t obj)
  671. {
  672. unsigned i;
  673. _STARPU_CALLOC(obj->userdata, 1, sizeof(struct _starpu_hwloc_userdata));
  674. for (i = 0; i < obj->arity; i++)
  675. _starpu_allocate_topology_userdata(obj->children[i]);
  676. #if HWLOC_API_VERSION >= 0x00020000
  677. hwloc_obj_t child;
  678. for (child = obj->io_first_child; child; child = child->next_sibling)
  679. _starpu_allocate_topology_userdata(child);
  680. #endif
  681. }
  682. static void _starpu_deallocate_topology_userdata(hwloc_obj_t obj)
  683. {
  684. unsigned i;
  685. struct _starpu_hwloc_userdata *data = obj->userdata;
  686. STARPU_ASSERT(!data->worker_list || data->worker_list == (void*)-1);
  687. free(data);
  688. for (i = 0; i < obj->arity; i++)
  689. _starpu_deallocate_topology_userdata(obj->children[i]);
  690. #if HWLOC_API_VERSION >= 0x00020000
  691. hwloc_obj_t child;
  692. for (child = obj->io_first_child; child; child = child->next_sibling)
  693. _starpu_deallocate_topology_userdata(child);
  694. #endif
  695. }
  696. #endif
  697. #endif
  698. static void _starpu_init_topology(struct _starpu_machine_config *config)
  699. {
  700. /* Discover the topology, meaning finding all the available PUs for
  701. the compiled drivers. These drivers MUST have been initialized
  702. before calling this function. The discovered topology is filled in
  703. CONFIG. */
  704. struct _starpu_machine_topology *topology = &config->topology;
  705. if (topology_is_initialized)
  706. return;
  707. nobind = starpu_get_env_number("STARPU_WORKERS_NOBIND");
  708. topology->nhwcpus = 0;
  709. topology->nhwpus = 0;
  710. #ifndef STARPU_SIMGRID
  711. #ifdef STARPU_HAVE_HWLOC
  712. hwloc_topology_init(&topology->hwtopology);
  713. char *hwloc_input = starpu_getenv("STARPU_HWLOC_INPUT");
  714. if (hwloc_input && hwloc_input[0])
  715. {
  716. int err = hwloc_topology_set_xml(topology->hwtopology, hwloc_input);
  717. if (err < 0) _STARPU_DISP("Could not load hwloc input %s\n", hwloc_input);
  718. }
  719. _starpu_topology_filter(topology->hwtopology);
  720. hwloc_topology_load(topology->hwtopology);
  721. if (starpu_get_env_number_default("STARPU_WORKERS_GETBIND", 0))
  722. {
  723. /* Respect the existing binding */
  724. hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
  725. int ret = hwloc_get_cpubind(topology->hwtopology, cpuset, HWLOC_CPUBIND_THREAD);
  726. if (ret)
  727. _STARPU_DISP("Warning: could not get current CPU binding: %s\n", strerror(errno));
  728. else
  729. {
  730. ret = hwloc_topology_restrict(topology->hwtopology, cpuset, 0);
  731. if (ret)
  732. _STARPU_DISP("Warning: could not restrict hwloc to cpuset: %s\n", strerror(errno));
  733. }
  734. hwloc_bitmap_free(cpuset);
  735. }
  736. _starpu_allocate_topology_userdata(hwloc_get_root_obj(topology->hwtopology));
  737. #endif
  738. #endif
  739. #ifdef STARPU_SIMGRID
  740. config->topology.nhwcpus = config->topology.nhwpus = _starpu_simgrid_get_nbhosts("CPU");
  741. #elif defined(STARPU_HAVE_HWLOC)
  742. /* Discover the CPUs relying on the hwloc interface and fills CONFIG
  743. * accordingly. */
  744. config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_CORE);
  745. config->pu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_PU);
  746. /* Would be very odd */
  747. STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);
  748. if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
  749. {
  750. /* unknown, using logical processors as fallback */
  751. _STARPU_DISP("Warning: The OS did not report CPU cores. Assuming there is only one hardware thread per core.\n");
  752. config->cpu_depth = hwloc_get_type_depth(topology->hwtopology,
  753. HWLOC_OBJ_PU);
  754. }
  755. topology->nhwcpus = hwloc_get_nbobjs_by_depth(topology->hwtopology, config->cpu_depth);
  756. topology->nhwpus = hwloc_get_nbobjs_by_depth(topology->hwtopology, config->pu_depth);
  757. #elif defined(HAVE_SYSCONF)
  758. /* Discover the CPUs relying on the sysconf(3) function and fills
  759. * CONFIG accordingly. */
  760. config->topology.nhwcpus = config->topology.nhwpus = sysconf(_SC_NPROCESSORS_ONLN);
  761. #elif defined(_WIN32)
  762. /* Discover the CPUs on Cygwin and MinGW systems. */
  763. SYSTEM_INFO sysinfo;
  764. GetSystemInfo(&sysinfo);
  765. config->topology.nhwcpus = config->topology.nhwpus = sysinfo.dwNumberOfProcessors;
  766. #else
  767. #warning no way to know number of cores, assuming 1
  768. config->topology.nhwcpus = config->topology.nhwpus = 1;
  769. #endif
  770. if (config->conf.ncuda != 0)
  771. _starpu_cuda_discover_devices(config);
  772. if (config->conf.nopencl != 0)
  773. _starpu_opencl_discover_devices(config);
  774. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  775. config->topology.nhwmpi = _starpu_mpi_src_get_device_count();
  776. #endif
  777. topology_is_initialized = 1;
  778. }
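/* Usage note: the STARPU_HWLOC_INPUT handling above makes it possible to
 * replay a topology captured on another machine, e.g. (assuming a standard
 * hwloc installation providing lstopo):
 *   lstopo machine.xml
 *   STARPU_HWLOC_INPUT=machine.xml ./my_starpu_program
 * which helps debugging binding decisions off the target machine;
 * my_starpu_program is a placeholder here. */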
  779. /*
  780. * Bind workers on the different processors
  781. */
  782. static void _starpu_initialize_workers_bindid(struct _starpu_machine_config *config)
  783. {
  784. char *strval;
  785. unsigned i;
  786. struct _starpu_machine_topology *topology = &config->topology;
  787. config->current_bindid = 0;
  788. /* conf->workers_bindid indicates the successive logical PU identifiers that
  789. * should be used to bind the workers. It should be either filled
  790. * according to the user's explicit parameters (from starpu_conf) or
  791. * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
  792. * round-robin policy is used to distribute the workers over the
  793. * cores. */
  794. /* what do we use, explicit value, env. variable, or round-robin ? */
  795. strval = starpu_getenv("STARPU_WORKERS_CPUID");
  796. if (strval)
  797. {
  798. /* STARPU_WORKERS_CPUID certainly contains fewer entries than
  799. * STARPU_NMAXWORKERS, so we reuse its entries in a round
  800. * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
  801. * 2". */
  802. unsigned wrap = 0;
  803. unsigned number_of_entries = 0;
  804. char *endptr;
  805. /* we use the content of the STARPU_WORKERS_CPUID
  806. * env. variable */
  807. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  808. {
  809. if (!wrap)
  810. {
  811. long int val;
  812. val = strtol(strval, &endptr, 10);
  813. if (endptr != strval)
  814. {
  815. topology->workers_bindid[i] = (unsigned)(val % topology->nhwpus);
  816. strval = endptr;
  817. if (*strval == '-')
  818. {
  819. /* range of values */
  820. long int endval;
  821. strval++;
  822. if (*strval && *strval != ' ' && *strval != ',')
  823. {
  824. endval = strtol(strval, &endptr, 10);
  825. strval = endptr;
  826. }
  827. else
  828. {
  829. endval = topology->nhwpus-1;
  830. if (*strval)
  831. strval++;
  832. }
  833. for (val++; val <= endval && i < STARPU_NMAXWORKERS-1; val++)
  834. {
  835. i++;
  836. topology->workers_bindid[i] = (unsigned)(val % topology->nhwpus);
  837. }
  838. }
  839. if (*strval == ',')
  840. strval++;
  841. }
  842. else
  843. {
  844. /* there must be at least one entry */
  845. STARPU_ASSERT(i != 0);
  846. number_of_entries = i;
  847. /* there are no more values in the
  848. * string */
  849. wrap = 1;
  850. topology->workers_bindid[i] =
  851. topology->workers_bindid[0];
  852. }
  853. }
  854. else
  855. {
  856. topology->workers_bindid[i] =
  857. topology->workers_bindid[i % number_of_entries];
  858. }
  859. }
  860. }
  861. else if (config->conf.use_explicit_workers_bindid)
  862. {
  863. /* we use the explicit value from the user */
  864. memcpy(topology->workers_bindid,
  865. config->conf.workers_bindid,
  866. STARPU_NMAXWORKERS*sizeof(unsigned));
  867. }
  868. else
  869. {
  870. int nth_per_core = starpu_get_env_number_default("STARPU_NTHREADS_PER_CORE", 1);
  871. int k;
  872. int nbindids=0;
  873. int nhyperthreads = topology->nhwpus / topology->nhwcpus;
  874. STARPU_ASSERT_MSG(nth_per_core > 0 && nth_per_core <= nhyperthreads , "Incorrect number of hyperthreads");
  875. i = 0; /* PU number currently assigned */
  876. k = 0; /* Number of threads already put on the current core */
  877. while(nbindids < STARPU_NMAXWORKERS)
  878. {
  879. if (k >= nth_per_core)
  880. {
  881. /* We have already put enough workers on this
  882. * core, skip remaining PUs from this core, and
  883. * proceed with next core */
  884. i += nhyperthreads-nth_per_core;
  885. k = 0;
  886. continue;
  887. }
  888. /* Add a worker to this core, by using this logical PU */
  889. topology->workers_bindid[nbindids++] = (unsigned)(i % topology->nhwpus);
  890. k++;
  891. i++;
  892. }
  893. }
  894. for (i = 0; i < STARPU_MAXCPUS;i++)
  895. cpu_worker[i] = STARPU_NOWORKERID;
  896. /* no binding yet */
  897. memset(&config->currently_bound, 0, sizeof(config->currently_bound));
  898. memset(&config->currently_shared, 0, sizeof(config->currently_shared));
  899. }
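/* Example of the parsing above: STARPU_WORKERS_CPUID="0 2-4, 7" binds the
 * first workers to logical PUs 0, 2, 3, 4 and 7, then wraps around on that
 * list for further workers; every value is taken modulo the number of PUs. */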
  900. static void _starpu_deinitialize_workers_bindid(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED)
  901. {
  902. unsigned i;
  903. for (i = 0; i < STARPU_MAXCPUS;i++)
  904. {
  905. if (cpu_name[i])
  906. {
  907. free(cpu_name[i]);
  908. cpu_name[i] = NULL;
  909. }
  910. }
  911. }
  912. /* This function gets the identifier of the next core on which to bind a
  913. * worker. In case a list of preferred cores was specified (logical indexes),
  914. * we look for an available core among the list if possible, otherwise a
  915. * round-robin policy is used. */
  916. static inline unsigned _starpu_get_next_bindid(struct _starpu_machine_config *config, unsigned flags,
  917. unsigned *preferred_binding, unsigned npreferred)
  918. {
  919. struct _starpu_machine_topology *topology = &config->topology;
  920. unsigned current_preferred;
  921. unsigned nhyperthreads = topology->nhwpus / topology->nhwcpus;
  922. unsigned ncores = topology->nhwpus / nhyperthreads;
  923. unsigned i;
  924. if (npreferred)
  925. {
  926. STARPU_ASSERT_MSG(preferred_binding, "Passing NULL pointer for parameter preferred_binding with a non-0 value of parameter npreferred");
  927. }
  928. /* loop over the preference list */
  929. for (current_preferred = 0;
  930. current_preferred < npreferred;
  931. current_preferred++)
  932. {
  933. /* can we bind the worker on the preferred core ? */
  934. unsigned requested_core = preferred_binding[current_preferred];
  935. unsigned requested_bindid = requested_core * nhyperthreads;
  936. /* Look at the remaining cores to be bound to */
  937. for (i = 0; i < ncores; i++)
  938. {
  939. if (topology->workers_bindid[i] == requested_bindid &&
  940. (!config->currently_bound[i] ||
  941. (config->currently_shared[i] && !(flags & STARPU_THREAD_ACTIVE)))
  942. )
  943. {
  944. /* the cpu is available, or shareable with us, we use it ! */
  945. config->currently_bound[i] = 1;
  946. if (!(flags & STARPU_THREAD_ACTIVE))
  947. config->currently_shared[i] = 1;
  948. return requested_bindid;
  949. }
  950. }
  951. }
  952. if (!(flags & STARPU_THREAD_ACTIVE))
  953. {
  954. /* Try to find a shareable PU */
  955. for (i = 0; i < ncores; i++)
  956. if (config->currently_shared[i])
  957. return topology->workers_bindid[i];
  958. }
  959. /* Try to find an available PU from last used PU */
  960. for (i = config->current_bindid; i < ncores; i++)
  961. if (!config->currently_bound[i])
  962. /* Found a cpu ready for use, use it! */
  963. break;
  964. if (i == ncores)
  965. {
  966. /* Finished binding on all cpus, restart from start in
  967. * case the user really wants overloading */
  968. memset(&config->currently_bound, 0, sizeof(config->currently_bound));
  969. i = 0;
  970. }
  971. STARPU_ASSERT(i < ncores);
  972. unsigned bindid = topology->workers_bindid[i];
  973. config->currently_bound[i] = 1;
  974. if (!(flags & STARPU_THREAD_ACTIVE))
  975. config->currently_shared[i] = 1;
  976. config->current_bindid = i;
  977. return bindid;
  978. }
  979. unsigned starpu_get_next_bindid(unsigned flags, unsigned *preferred, unsigned npreferred)
  980. {
  981. return _starpu_get_next_bindid(_starpu_get_machine_config(), flags, preferred, npreferred);
  982. }
  983. unsigned _starpu_topology_get_nhwcpu(struct _starpu_machine_config *config)
  984. {
  985. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  986. if (config->conf.nopencl != 0)
  987. _starpu_opencl_init();
  988. #endif
  989. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  990. if (config->conf.ncuda != 0)
  991. _starpu_init_cuda();
  992. #endif
  993. _starpu_init_topology(config);
  994. return config->topology.nhwcpus;
  995. }
  996. unsigned _starpu_topology_get_nhwpu(struct _starpu_machine_config *config)
  997. {
  998. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  999. if (config->conf.nopencl != 0)
  1000. _starpu_opencl_init();
  1001. #endif
  1002. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1003. if (config->conf.ncuda != 0)
  1004. _starpu_init_cuda();
  1005. #endif
  1006. _starpu_init_topology(config);
  1007. return config->topology.nhwpus;
  1008. }
  1009. unsigned _starpu_topology_get_nnumanodes(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED)
  1010. {
  1011. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  1012. if (config->conf.nopencl != 0)
  1013. _starpu_opencl_init();
  1014. #endif
  1015. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1016. if (config->conf.ncuda != 0)
  1017. _starpu_init_cuda();
  1018. #endif
  1019. _starpu_init_topology(config);
  1020. int res;
  1021. #if defined(STARPU_HAVE_HWLOC)
  1022. if (numa_enabled == -1)
  1023. numa_enabled = starpu_get_env_number_default("STARPU_USE_NUMA", 0);
  1024. if (numa_enabled)
  1025. {
  1026. struct _starpu_machine_topology *topology = &config->topology ;
  1027. int nnumanodes = hwloc_get_nbobjs_by_type(topology->hwtopology, HWLOC_OBJ_NUMANODE) ;
  1028. res = nnumanodes > 0 ? nnumanodes : 1 ;
  1029. }
  1030. else
  1031. #endif
  1032. {
  1033. res = 1;
  1034. }
  1035. STARPU_ASSERT_MSG(res <= STARPU_MAXNUMANODES, "Number of NUMA nodes discovered %d is higher than maximum accepted %d ! Use configure option --enable-maxnumanodes=xxx to increase the maximum value of supported NUMA nodes.\n", res, STARPU_MAXNUMANODES);
  1036. return res;
  1037. }
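/* Note: NUMA awareness is opt-in. Unless STARPU_USE_NUMA=1 is set in the
 * environment (checked above), a single main memory node is assumed. */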
  1038. #ifdef STARPU_HAVE_HWLOC
  1039. void _starpu_topology_filter(hwloc_topology_t topology)
  1040. {
  1041. #if HWLOC_API_VERSION >= 0x20000
  1042. hwloc_topology_set_io_types_filter(topology, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
  1043. hwloc_topology_set_flags(topology, HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM);
  1044. #else
  1045. hwloc_topology_set_flags(topology, HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM | HWLOC_TOPOLOGY_FLAG_IO_DEVICES | HWLOC_TOPOLOGY_FLAG_IO_BRIDGES);
  1046. #endif
  1047. #ifdef HAVE_HWLOC_TOPOLOGY_SET_COMPONENTS
  1048. # ifndef STARPU_USE_CUDA
  1049. hwloc_topology_set_components(topology, HWLOC_TOPOLOGY_COMPONENTS_FLAG_BLACKLIST, "cuda");
  1050. hwloc_topology_set_components(topology, HWLOC_TOPOLOGY_COMPONENTS_FLAG_BLACKLIST, "nvml");
  1051. # endif
  1052. # ifndef STARPU_USE_OPENCL
  1053. hwloc_topology_set_components(topology, HWLOC_TOPOLOGY_COMPONENTS_FLAG_BLACKLIST, "opencl");
  1054. # endif
  1055. #endif
  1056. }
  1057. #endif
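/* The filter above enables I/O object discovery (restricted to the important
 * objects on hwloc >= 2.0) and, when the corresponding drivers are not
 * compiled in, blacklists the hwloc "cuda", "nvml" and "opencl" discovery
 * components to avoid useless probing. */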
  1058. #ifdef STARPU_USE_MIC
  1059. static void _starpu_init_mic_config(struct _starpu_machine_config *config,
  1060. struct starpu_conf *user_conf,
  1061. unsigned mic_idx)
  1062. {
  1063. // Configure the MIC device of index MIC_IDX.
  1064. struct _starpu_machine_topology *topology = &config->topology;
  1065. topology->nhwmiccores[mic_idx] = 0;
  1066. _starpu_init_mic_topology(config, mic_idx);
  1067. int nmiccores;
  1068. nmiccores = starpu_get_env_number("STARPU_NMICTHREADS");
  1069. STARPU_ASSERT_MSG(nmiccores >= -1, "nmiccores cannot be negative, except for -1 (it is %d)", nmiccores);
  1070. if (nmiccores == -1)
  1071. {
  1072. /* Nothing was specified, so let's use the number of
  1073. * detected mic cores. */
  1074. nmiccores = topology->nhwmiccores[mic_idx];
  1075. }
  1076. else
  1077. {
  1078. if ((unsigned) nmiccores > topology->nhwmiccores[mic_idx])
  1079. {
  1080. /* The user requires more MIC cores than there are available */
  1081. _STARPU_MSG("# Warning: %d MIC cores requested. Only %u available.\n", nmiccores, topology->nhwmiccores[mic_idx]);
  1082. nmiccores = topology->nhwmiccores[mic_idx];
  1083. }
  1084. }
  1085. topology->nmiccores[mic_idx] = nmiccores;
  1086. STARPU_ASSERT_MSG(topology->nmiccores[mic_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
  1087. "topology->nmiccores[mic_idx(%u)] (%u) + topology->nworkers (%u) <= STARPU_NMAXWORKERS (%d)",
  1088. mic_idx, topology->nmiccores[mic_idx], topology->nworkers, STARPU_NMAXWORKERS);
  1089. /* _starpu_initialize_workers_mic_deviceid (config); */
  1090. mic_worker_set[mic_idx].workers = &config->workers[topology->nworkers];
  1091. mic_worker_set[mic_idx].nworkers = topology->nmiccores[mic_idx];
  1092. unsigned miccore_id;
  1093. for (miccore_id = 0; miccore_id < topology->nmiccores[mic_idx]; miccore_id++)
  1094. {
  1095. int worker_idx = topology->nworkers + miccore_id;
  1096. config->workers[worker_idx].set = &mic_worker_set[mic_idx];
  1097. config->workers[worker_idx].arch = STARPU_MIC_WORKER;
  1098. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1099. config->workers[worker_idx].perf_arch.ndevices = 1;
  1100. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_MIC_WORKER;
  1101. config->workers[worker_idx].perf_arch.devices[0].devid = mic_idx;
  1102. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1103. config->workers[worker_idx].devid = mic_idx;
  1104. config->workers[worker_idx].subworkerid = miccore_id;
  1105. config->workers[worker_idx].worker_mask = STARPU_MIC;
  1106. config->worker_mask |= STARPU_MIC;
  1107. }
  1108. _starpu_mic_nodes[mic_idx]->baseworkerid = topology->nworkers;
  1109. topology->nworkers += topology->nmiccores[mic_idx];
  1110. }
  1111. static COIENGINE mic_handles[STARPU_MAXMICDEVS];
  1112. COIPROCESS _starpu_mic_process[STARPU_MAXMICDEVS];
  1113. #endif
  1114. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1115. static void _starpu_init_mpi_config(struct _starpu_machine_config *config,
  1116. struct starpu_conf *user_conf,
  1117. unsigned mpi_idx)
  1118. {
  1119. struct _starpu_machine_topology *topology = &config->topology;
  1120. topology->nhwmpicores[mpi_idx] = 0;
  1121. _starpu_init_mpi_topology(config, mpi_idx);
  1122. int nmpicores;
  1123. nmpicores = starpu_get_env_number("STARPU_NMPIMSTHREADS");
  1124. if (nmpicores == -1)
  1125. {
  1126. /* Nothing was specified, so let's use the number of
  1127. * detected mpi cores. */
  1128. nmpicores = topology->nhwmpicores[mpi_idx];
  1129. }
  1130. else
  1131. {
  1132. if ((unsigned) nmpicores > topology->nhwmpicores[mpi_idx])
  1133. {
  1134. /* The user requires more MPI cores than there are available */
  1135. _STARPU_MSG("# Warning: %d MPI cores requested. Only %u available.\n",
  1136. nmpicores, topology->nhwmpicores[mpi_idx]);
  1137. nmpicores = topology->nhwmpicores[mpi_idx];
  1138. }
  1139. }
  1140. topology->nmpicores[mpi_idx] = nmpicores;
  1141. STARPU_ASSERT_MSG(topology->nmpicores[mpi_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
  1142. "topology->nmpicores[mpi_idx(%u)] (%u) + topology->nworkers (%u) <= STARPU_NMAXWORKERS (%d)",
  1143. mpi_idx, topology->nmpicores[mpi_idx], topology->nworkers, STARPU_NMAXWORKERS);
  1144. mpi_worker_set[mpi_idx].workers = &config->workers[topology->nworkers];
  1145. mpi_worker_set[mpi_idx].nworkers = topology->nmpicores[mpi_idx];
  1146. unsigned mpicore_id;
  1147. for (mpicore_id = 0; mpicore_id < topology->nmpicores[mpi_idx]; mpicore_id++)
  1148. {
  1149. int worker_idx = topology->nworkers + mpicore_id;
  1150. config->workers[worker_idx].set = &mpi_worker_set[mpi_idx];
  1151. config->workers[worker_idx].arch = STARPU_MPI_MS_WORKER;
  1152. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1153. config->workers[worker_idx].perf_arch.ndevices = 1;
  1154. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_MPI_MS_WORKER;
  1155. config->workers[worker_idx].perf_arch.devices[0].devid = mpi_idx;
  1156. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1157. config->workers[worker_idx].devid = mpi_idx;
  1158. config->workers[worker_idx].subworkerid = mpicore_id;
  1159. config->workers[worker_idx].worker_mask = STARPU_MPI_MS;
  1160. config->worker_mask |= STARPU_MPI_MS;
  1161. }
  1162. _starpu_mpi_ms_nodes[mpi_idx]->baseworkerid = topology->nworkers;
  1163. topology->nworkers += topology->nmpicores[mpi_idx];
  1164. }
  1165. #endif
  1166. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  1167. static void _starpu_init_mp_config(struct _starpu_machine_config *config,
  1168. struct starpu_conf *user_conf, int no_mp_config)
  1169. {
  1170. /* Discover and configure the mp topology. That means:
  1171. * - discover the number of mp nodes;
  1172. * - initialize each discovered node;
  1173. * - discover the local topology (number of PUs/devices) of each node;
  1174. * - configure the workers accordingly.
  1175. */
  1176. #ifdef STARPU_USE_MIC
  1177. if (!no_mp_config)
  1178. {
  1179. struct _starpu_machine_topology *topology = &config->topology;
  1180. /* Discover and initialize the number of MIC nodes through the mp
  1181. * infrastructure. */
  1182. unsigned nhwmicdevices = _starpu_mic_src_get_device_count();
  1183. int reqmicdevices = starpu_get_env_number("STARPU_NMIC");
  1184. if (reqmicdevices == -1 && user_conf)
  1185. reqmicdevices = user_conf->nmic;
  1186. if (reqmicdevices == -1)
1187. /* Nothing was specified, so let's use the number of
1188. * detected MIC devices. */
  1189. reqmicdevices = nhwmicdevices;
1190. STARPU_ASSERT_MSG(reqmicdevices >= -1, "nmic cannot be negative, except -1 (it is %d)", reqmicdevices);
  1191. if (reqmicdevices != -1)
  1192. {
  1193. if ((unsigned) reqmicdevices > nhwmicdevices)
  1194. {
1195. /* The user requested more MIC devices than are available */
  1196. _STARPU_MSG("# Warning: %d MIC devices requested. Only %u available.\n", reqmicdevices, nhwmicdevices);
  1197. reqmicdevices = nhwmicdevices;
  1198. }
  1199. }
  1200. topology->nmicdevices = 0;
  1201. unsigned i;
  1202. for (i = 0; i < (unsigned) reqmicdevices; i++)
  1203. if (0 == _starpu_init_mic_node(config, i, &mic_handles[i], &_starpu_mic_process[i]))
  1204. topology->nmicdevices++;
  1205. for (i = 0; i < topology->nmicdevices; i++)
  1206. _starpu_init_mic_config(config, user_conf, i);
  1207. }
  1208. #endif
  1209. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1210. {
  1211. struct _starpu_machine_topology *topology = &config->topology;
  1212. /* Discover and initialize the number of MPI nodes through the mp
  1213. * infrastructure. */
  1214. unsigned nhwmpidevices = _starpu_mpi_src_get_device_count();
  1215. int reqmpidevices = starpu_get_env_number("STARPU_NMPI_MS");
  1216. if (reqmpidevices == -1 && user_conf)
  1217. reqmpidevices = user_conf->nmpi_ms;
  1218. if (reqmpidevices == -1)
1219. /* Nothing was specified, so let's use the number of
1220. * detected MPI devices. */
  1221. reqmpidevices = nhwmpidevices;
  1222. if (reqmpidevices != -1)
  1223. {
  1224. if ((unsigned) reqmpidevices > nhwmpidevices)
  1225. {
1226. /* The user requested more MPI devices than are available */
  1227. _STARPU_MSG("# Warning: %d MPI Master-Slave devices requested. Only %u available.\n",
  1228. reqmpidevices, nhwmpidevices);
  1229. reqmpidevices = nhwmpidevices;
  1230. }
  1231. }
  1232. topology->nmpidevices = reqmpidevices;
1233. /* if the user does not want to use MPI slaves, we close the slave processes */
  1234. if (no_mp_config && topology->nmpidevices == 0)
  1235. {
  1236. _starpu_mpi_common_mp_deinit();
  1237. exit(0);
  1238. }
  1239. if (!no_mp_config)
  1240. {
  1241. unsigned i;
  1242. for (i = 0; i < topology->nmpidevices; i++)
  1243. _starpu_mpi_ms_nodes[i] = _starpu_mp_common_node_create(STARPU_NODE_MPI_SOURCE, i);
  1244. for (i = 0; i < topology->nmpidevices; i++)
  1245. _starpu_init_mpi_config(config, user_conf, i);
  1246. }
  1247. }
  1248. #endif
  1249. }
  1250. #endif
  1251. #ifdef STARPU_USE_MIC
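/* Shut one MIC node down: ask the slave to exit, destroy the COI process
 * and release the corresponding mp node structure. */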
  1252. static void _starpu_deinit_mic_node(unsigned mic_idx)
  1253. {
  1254. _starpu_mp_common_send_command(_starpu_mic_nodes[mic_idx], STARPU_MP_COMMAND_EXIT, NULL, 0);
  1255. COIProcessDestroy(_starpu_mic_process[mic_idx], -1, 0, NULL, NULL);
  1256. _starpu_mp_common_node_destroy(_starpu_mic_nodes[mic_idx]);
  1257. }
  1258. #endif
  1259. #ifdef STARPU_USE_MPI_MASTER_SLAVE
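/* Shut one MPI slave down: ask it to exit and release the corresponding
 * mp node structure. */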
  1260. static void _starpu_deinit_mpi_node(int devid)
  1261. {
  1262. _starpu_mp_common_send_command(_starpu_mpi_ms_nodes[devid], STARPU_MP_COMMAND_EXIT, NULL, 0);
  1263. _starpu_mp_common_node_destroy(_starpu_mpi_ms_nodes[devid]);
  1264. }
  1265. #endif
  1266. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
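/* Tear down all the MIC and MPI master-slave nodes that were brought up
 * by _starpu_init_mp_config(). */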
  1267. static void _starpu_deinit_mp_config(struct _starpu_machine_config *config)
  1268. {
  1269. struct _starpu_machine_topology *topology = &config->topology;
  1270. unsigned i;
  1271. #ifdef STARPU_USE_MIC
  1272. for (i = 0; i < topology->nmicdevices; i++)
  1273. _starpu_deinit_mic_node(i);
  1274. _starpu_mic_clear_kernels();
  1275. #endif
  1276. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1277. for (i = 0; i < topology->nmpidevices; i++)
  1278. _starpu_deinit_mpi_node(i);
  1279. #endif
  1280. }
  1281. #endif
  1282. #ifdef STARPU_HAVE_HWLOC
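/* Recursively accumulate, in the userdata of each hwloc object, the number
 * of GPUs located below that object. */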
  1283. static unsigned _starpu_topology_count_ngpus(hwloc_obj_t obj)
  1284. {
  1285. struct _starpu_hwloc_userdata *data = obj->userdata;
  1286. unsigned n = data->ngpus;
  1287. unsigned i;
  1288. for (i = 0; i < obj->arity; i++)
  1289. n += _starpu_topology_count_ngpus(obj->children[i]);
  1290. data->ngpus = n;
  1291. //#ifdef STARPU_VERBOSE
  1292. // {
  1293. // char name[64];
  1294. // hwloc_obj_type_snprintf(name, sizeof(name), obj, 0);
  1295. // _STARPU_DEBUG("hwloc obj %s has %u GPUs below\n", name, n);
  1296. // }
  1297. //#endif
  1298. return n;
  1299. }
  1300. #endif
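/* Discover the available devices (CUDA, OpenCL, MIC, MPI master-slave, CPU
 * cores) and fill config->workers and config->topology accordingly.
 * Returns -ENODEV when no worker at all could be created. */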
  1301. static int _starpu_init_machine_config(struct _starpu_machine_config *config, int no_mp_config STARPU_ATTRIBUTE_UNUSED)
  1302. {
  1303. int i;
  1304. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  1305. {
  1306. config->workers[i].workerid = i;
  1307. config->workers[i].set = NULL;
  1308. }
  1309. struct _starpu_machine_topology *topology = &config->topology;
  1310. topology->nworkers = 0;
  1311. topology->ncombinedworkers = 0;
  1312. topology->nsched_ctxs = 0;
  1313. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  1314. if (config->conf.nopencl != 0)
  1315. _starpu_opencl_init();
  1316. #endif
  1317. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1318. if (config->conf.ncuda != 0)
  1319. _starpu_init_cuda();
  1320. #endif
  1321. _starpu_init_topology(config);
  1322. _starpu_initialize_workers_bindid(config);
  1323. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1324. for (i = 0; i < (int) (sizeof(cuda_worker_set)/sizeof(cuda_worker_set[0])); i++)
  1325. cuda_worker_set[i].workers = NULL;
  1326. #endif
  1327. #ifdef STARPU_USE_MIC
  1328. for (i = 0; i < (int) (sizeof(mic_worker_set)/sizeof(mic_worker_set[0])); i++)
  1329. mic_worker_set[i].workers = NULL;
  1330. #endif
  1331. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1332. for (i = 0; i < (int) (sizeof(mpi_worker_set)/sizeof(mpi_worker_set[0])); i++)
  1333. mpi_worker_set[i].workers = NULL;
  1334. #endif
  1335. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1336. int ncuda = config->conf.ncuda;
  1337. int nworker_per_cuda = starpu_get_env_number_default("STARPU_NWORKER_PER_CUDA", 1);
  1338. STARPU_ASSERT_MSG(nworker_per_cuda > 0, "STARPU_NWORKER_PER_CUDA has to be > 0");
  1339. STARPU_ASSERT_MSG(nworker_per_cuda < STARPU_NMAXWORKERS, "STARPU_NWORKER_PER_CUDA (%d) cannot be higher than STARPU_NMAXWORKERS (%d)\n", nworker_per_cuda, STARPU_NMAXWORKERS);
  1340. #ifndef STARPU_NON_BLOCKING_DRIVERS
  1341. if (nworker_per_cuda > 1)
  1342. {
  1343. _STARPU_DISP("Warning: reducing STARPU_NWORKER_PER_CUDA to 1 because blocking drivers are enabled\n");
  1344. nworker_per_cuda = 1;
  1345. }
  1346. #endif
  1347. if (ncuda != 0)
  1348. {
  1349. /* The user did not disable CUDA. We need to initialize CUDA
  1350. * early to count the number of devices */
  1351. _starpu_init_cuda();
  1352. int nb_devices = _starpu_get_cuda_device_count();
1353. STARPU_ASSERT_MSG(ncuda >= -1, "ncuda cannot be negative, except -1 (it is %d)", ncuda);
  1354. if (ncuda == -1)
  1355. {
  1356. /* Nothing was specified, so let's choose ! */
  1357. ncuda = nb_devices;
  1358. }
  1359. else
  1360. {
  1361. if (ncuda > nb_devices)
  1362. {
1363. /* The user requested more CUDA devices than
1364. * are available */
  1365. _STARPU_DISP("Warning: %d CUDA devices requested. Only %d available.\n", ncuda, nb_devices);
  1366. ncuda = nb_devices;
  1367. }
  1368. }
  1369. }
  1370. /* Now we know how many CUDA devices will be used */
  1371. topology->ncudagpus = ncuda;
  1372. topology->nworkerpercuda = nworker_per_cuda;
  1373. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  1374. _starpu_initialize_workers_cuda_gpuid(config);
  1375. /* allow having one worker per stream */
  1376. topology->cuda_th_per_stream = starpu_get_env_number_default("STARPU_CUDA_THREAD_PER_WORKER", -1);
  1377. topology->cuda_th_per_dev = starpu_get_env_number_default("STARPU_CUDA_THREAD_PER_DEV", -1);
  1378. STARPU_ASSERT_MSG(!(topology->cuda_th_per_stream == 1 && topology->cuda_th_per_dev != -1), "It does not make sense to set both STARPU_CUDA_THREAD_PER_WORKER to 1 and to set STARPU_CUDA_THREAD_PER_DEV, please choose either per worker or per device or none");
  1379. /* per device by default */
  1380. if (topology->cuda_th_per_dev == -1)
  1381. {
  1382. if (topology->cuda_th_per_stream == 1)
  1383. topology->cuda_th_per_dev = 0;
  1384. else
  1385. topology->cuda_th_per_dev = 1;
  1386. }
  1387. /* Not per stream by default */
  1388. if (topology->cuda_th_per_stream == -1)
  1389. {
  1390. topology->cuda_th_per_stream = 0;
  1391. }
  1392. if (!topology->cuda_th_per_dev)
  1393. {
  1394. cuda_worker_set[0].workers = &config->workers[topology->nworkers];
  1395. cuda_worker_set[0].nworkers = topology->ncudagpus * nworker_per_cuda;
  1396. }
  1397. unsigned cudagpu;
  1398. for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
  1399. {
  1400. int devid = _starpu_get_next_cuda_gpuid(config);
  1401. int worker_idx0 = topology->nworkers + cudagpu * nworker_per_cuda;
  1402. struct _starpu_worker_set *worker_set;
  1403. if (topology->cuda_th_per_dev)
  1404. {
  1405. worker_set = &cuda_worker_set[devid];
  1406. worker_set->workers = &config->workers[worker_idx0];
  1407. worker_set->nworkers = nworker_per_cuda;
  1408. }
  1409. else
  1410. {
  1411. /* Same worker set for all devices */
  1412. worker_set = &cuda_worker_set[0];
  1413. }
  1414. for (i = 0; i < nworker_per_cuda; i++)
  1415. {
  1416. int worker_idx = worker_idx0 + i;
  1417. if(topology->cuda_th_per_stream)
  1418. {
  1419. /* Just one worker in the set */
  1420. _STARPU_CALLOC(config->workers[worker_idx].set, 1, sizeof(struct _starpu_worker_set));
  1421. config->workers[worker_idx].set->workers = &config->workers[worker_idx];
  1422. config->workers[worker_idx].set->nworkers = 1;
  1423. }
  1424. else
  1425. config->workers[worker_idx].set = worker_set;
  1426. config->workers[worker_idx].arch = STARPU_CUDA_WORKER;
  1427. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1428. config->workers[worker_idx].perf_arch.ndevices = 1;
  1429. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_CUDA_WORKER;
  1430. config->workers[worker_idx].perf_arch.devices[0].devid = devid;
  1431. // TODO: fix perfmodels etc.
  1432. //config->workers[worker_idx].perf_arch.ncore = nworker_per_cuda - 1;
  1433. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1434. config->workers[worker_idx].devid = devid;
  1435. config->workers[worker_idx].subworkerid = i;
  1436. config->workers[worker_idx].worker_mask = STARPU_CUDA;
  1437. config->worker_mask |= STARPU_CUDA;
  1438. struct handle_entry *entry;
  1439. HASH_FIND_INT(devices_using_cuda, &devid, entry);
  1440. if (!entry)
  1441. {
  1442. _STARPU_MALLOC(entry, sizeof(*entry));
  1443. entry->gpuid = devid;
  1444. HASH_ADD_INT(devices_using_cuda, gpuid, entry);
  1445. }
  1446. }
  1447. #ifndef STARPU_SIMGRID
  1448. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  1449. {
  1450. hwloc_obj_t obj = hwloc_cuda_get_device_osdev_by_index(topology->hwtopology, devid);
  1451. if (obj)
  1452. {
  1453. struct _starpu_hwloc_userdata *data = obj->userdata;
  1454. data->ngpus++;
  1455. }
  1456. else
  1457. {
  1458. _STARPU_DEBUG("Warning: could not find location of CUDA%u, do you have the hwloc CUDA plugin installed?\n", devid);
  1459. }
  1460. }
  1461. #endif
  1462. #endif
  1463. }
  1464. topology->nworkers += topology->ncudagpus * nworker_per_cuda;
  1465. #endif
  1466. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  1467. int nopencl = config->conf.nopencl;
  1468. if (nopencl != 0)
  1469. {
  1470. /* The user did not disable OPENCL. We need to initialize
  1471. * OpenCL early to count the number of devices */
  1472. _starpu_opencl_init();
  1473. int nb_devices;
  1474. nb_devices = _starpu_opencl_get_device_count();
1475. STARPU_ASSERT_MSG(nopencl >= -1, "nopencl cannot be negative, except -1 (it is %d)", nopencl);
  1476. if (nopencl == -1)
  1477. {
  1478. /* Nothing was specified, so let's choose ! */
  1479. nopencl = nb_devices;
  1480. if (nopencl > STARPU_MAXOPENCLDEVS)
  1481. {
1482. _STARPU_DISP("Warning: %d OpenCL devices available. Only %d enabled. Use configure option --enable-maxopencldev=xxx to update the maximum value of supported OpenCL devices.\n", nb_devices, STARPU_MAXOPENCLDEVS);
  1483. nopencl = STARPU_MAXOPENCLDEVS;
  1484. }
  1485. }
  1486. else
  1487. {
  1488. /* Let's make sure this value is OK. */
  1489. if (nopencl > nb_devices)
  1490. {
1491. /* The user requested more OpenCL devices than
1492. * are available */
  1493. _STARPU_DISP("Warning: %d OpenCL devices requested. Only %d available.\n", nopencl, nb_devices);
  1494. nopencl = nb_devices;
  1495. }
  1496. /* Let's make sure this value is OK. */
  1497. if (nopencl > STARPU_MAXOPENCLDEVS)
  1498. {
  1499. _STARPU_DISP("Warning: %d OpenCL devices requested. Only %d enabled. Use configure option --enable-maxopencldev=xxx to update the maximum value of supported OpenCL devices.\n", nopencl, STARPU_MAXOPENCLDEVS);
  1500. nopencl = STARPU_MAXOPENCLDEVS;
  1501. }
  1502. }
  1503. }
  1504. topology->nopenclgpus = nopencl;
  1505. STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
  1506. _starpu_initialize_workers_opencl_gpuid(config);
  1507. unsigned openclgpu;
  1508. for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
  1509. {
  1510. int worker_idx = topology->nworkers + openclgpu;
  1511. int devid = _starpu_get_next_opencl_gpuid(config);
  1512. if (devid == -1)
  1513. {
1514. // There are no more devices left
  1515. topology->nopenclgpus = openclgpu;
  1516. break;
  1517. }
  1518. config->workers[worker_idx].arch = STARPU_OPENCL_WORKER;
  1519. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1520. config->workers[worker_idx].perf_arch.ndevices = 1;
  1521. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_OPENCL_WORKER;
  1522. config->workers[worker_idx].perf_arch.devices[0].devid = devid;
  1523. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1524. config->workers[worker_idx].subworkerid = 0;
  1525. config->workers[worker_idx].devid = devid;
  1526. config->workers[worker_idx].worker_mask = STARPU_OPENCL;
  1527. config->worker_mask |= STARPU_OPENCL;
  1528. }
  1529. topology->nworkers += topology->nopenclgpus;
  1530. #endif
  1531. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  1532. _starpu_init_mp_config(config, &config->conf, no_mp_config);
  1533. #endif
1534. /* We put the CPU section after the accelerators: if an
1535. * accelerator was found, we devote one CPU core to driving it */
  1536. #if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
  1537. int ncpu = config->conf.ncpus;
  1538. if (ncpu != 0)
  1539. {
1540. STARPU_ASSERT_MSG(ncpu >= -1, "ncpus cannot be negative, except -1 (it is %d)", ncpu);
  1541. if (ncpu == -1)
  1542. {
  1543. unsigned mic_busy_cpus = 0;
  1544. int j = 0;
  1545. for (j = 0; j < STARPU_MAXMICDEVS; j++)
  1546. mic_busy_cpus += (topology->nmiccores[j] ? 1 : 0);
  1547. unsigned mpi_ms_busy_cpus = 0;
  1548. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1549. #ifdef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
  1550. for (j = 0; j < STARPU_MAXMPIDEVS; j++)
  1551. mpi_ms_busy_cpus += (topology->nmpicores[j] ? 1 : 0);
  1552. #else
  1553. mpi_ms_busy_cpus = 1; /* we launch one thread to control all slaves */
  1554. #endif
  1555. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  1556. unsigned cuda_busy_cpus = 0;
  1557. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1558. cuda_busy_cpus =
  1559. topology->cuda_th_per_dev == 0 && topology->cuda_th_per_stream == 0 ? (topology->ncudagpus ? 1 : 0) :
  1560. topology->cuda_th_per_stream ? (nworker_per_cuda * topology->ncudagpus) : topology->ncudagpus;
  1561. #endif
  1562. unsigned already_busy_cpus = mpi_ms_busy_cpus + mic_busy_cpus
  1563. + cuda_busy_cpus
  1564. + topology->nopenclgpus;
  1565. long avail_cpus = (long) topology->nhwcpus - (long) already_busy_cpus;
  1566. if (avail_cpus < 0)
  1567. avail_cpus = 0;
  1568. int nth_per_core = starpu_get_env_number_default("STARPU_NTHREADS_PER_CORE", 1);
  1569. avail_cpus *= nth_per_core;
  1570. ncpu = avail_cpus;
  1571. }
  1572. if (ncpu > STARPU_MAXCPUS)
  1573. {
  1574. _STARPU_DISP("Warning: %d CPU cores requested. Only %d enabled. Use configure option --enable-maxcpus=xxx to update the maximum value of supported CPU devices.\n", ncpu, STARPU_MAXCPUS);
  1575. ncpu = STARPU_MAXCPUS;
  1576. }
  1577. if (config->conf.reserve_ncpus > 0)
  1578. {
  1579. if (ncpu < config->conf.reserve_ncpus)
  1580. {
1581. _STARPU_DISP("Warning: %d CPU cores were requested to be reserved, but only %d were available.\n", config->conf.reserve_ncpus, ncpu);
  1582. ncpu = 0;
  1583. }
  1584. else
  1585. {
  1586. ncpu -= config->conf.reserve_ncpus;
  1587. }
  1588. }
  1589. }
  1590. topology->ncpus = ncpu;
  1591. STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
  1592. unsigned cpu;
  1593. unsigned homogeneous = starpu_get_env_number_default("STARPU_PERF_MODEL_HOMOGENEOUS_CPU", 1);
  1594. for (cpu = 0; cpu < topology->ncpus; cpu++)
  1595. {
  1596. int worker_idx = topology->nworkers + cpu;
  1597. config->workers[worker_idx].arch = STARPU_CPU_WORKER;
  1598. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1599. config->workers[worker_idx].perf_arch.ndevices = 1;
  1600. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_CPU_WORKER;
  1601. config->workers[worker_idx].perf_arch.devices[0].devid = homogeneous ? 0 : cpu;
  1602. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1603. config->workers[worker_idx].subworkerid = 0;
  1604. config->workers[worker_idx].devid = cpu;
  1605. config->workers[worker_idx].worker_mask = STARPU_CPU;
  1606. config->worker_mask |= STARPU_CPU;
  1607. }
  1608. topology->nworkers += topology->ncpus;
  1609. #endif
  1610. if (topology->nworkers == 0)
  1611. {
  1612. _STARPU_DEBUG("No worker found, aborting ...\n");
  1613. return -ENODEV;
  1614. }
  1615. return 0;
  1616. }
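/* Release everything that was allocated while building the machine
 * configuration: per-worker perfmodel device arrays, hwloc cpusets and
 * per-bindid worker lists, combined workers, the hwloc topology and the
 * hash of CUDA devices in use. */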
  1617. void _starpu_destroy_machine_config(struct _starpu_machine_config *config)
  1618. {
  1619. _starpu_close_debug_logfile();
  1620. unsigned worker;
  1621. for (worker = 0; worker < config->topology.nworkers; worker++)
  1622. {
  1623. struct _starpu_worker *workerarg = &config->workers[worker];
  1624. int bindid = workerarg->bindid;
  1625. free(workerarg->perf_arch.devices);
  1626. #ifdef STARPU_HAVE_HWLOC
  1627. hwloc_bitmap_free(workerarg->hwloc_cpu_set);
  1628. if (bindid != -1)
  1629. {
  1630. hwloc_obj_t worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
  1631. config->pu_depth,
  1632. bindid);
  1633. struct _starpu_hwloc_userdata *data = worker_obj->userdata;
  1634. if (data->worker_list)
  1635. {
  1636. _starpu_worker_list_delete(data->worker_list);
  1637. data->worker_list = NULL;
  1638. }
  1639. }
  1640. #endif
  1641. if (bindid != -1)
  1642. {
  1643. free(config->bindid_workers[bindid].workerids);
  1644. config->bindid_workers[bindid].workerids = NULL;
  1645. }
  1646. }
  1647. free(config->bindid_workers);
  1648. config->bindid_workers = NULL;
  1649. config->nbindid = 0;
  1650. unsigned combined_worker_id;
  1651. for(combined_worker_id=0 ; combined_worker_id < config->topology.ncombinedworkers ; combined_worker_id++)
  1652. {
  1653. struct _starpu_combined_worker *combined_worker = &config->combined_workers[combined_worker_id];
  1654. #ifdef STARPU_HAVE_HWLOC
  1655. hwloc_bitmap_free(combined_worker->hwloc_cpu_set);
  1656. #endif
  1657. free(combined_worker->perf_arch.devices);
  1658. }
  1659. #ifdef STARPU_HAVE_HWLOC
  1660. _starpu_deallocate_topology_userdata(hwloc_get_root_obj(config->topology.hwtopology));
  1661. hwloc_topology_destroy(config->topology.hwtopology);
  1662. #endif
  1663. topology_is_initialized = 0;
  1664. #ifdef STARPU_USE_CUDA
  1665. struct handle_entry *entry=NULL, *tmp=NULL;
  1666. HASH_ITER(hh, devices_using_cuda, entry, tmp)
  1667. {
  1668. HASH_DEL(devices_using_cuda, entry);
  1669. free(entry);
  1670. }
  1671. devices_using_cuda = NULL;
  1672. #endif
  1673. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1674. int i;
  1675. for (i=0; i<STARPU_NARCH; i++)
  1676. may_bind_automatically[i] = 0;
  1677. #endif
  1678. }
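/* Bind the calling thread on PU cpuid. workerid is either a worker id or
 * STARPU_ACTIVETHREAD / STARPU_NONACTIVETHREAD for non-worker threads; when
 * the PU is already taken by somebody else, a warning is printed and -1 is
 * returned. */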
  1679. int _starpu_bind_thread_on_cpu(int cpuid STARPU_ATTRIBUTE_UNUSED, int workerid STARPU_ATTRIBUTE_UNUSED, const char *name STARPU_ATTRIBUTE_UNUSED)
  1680. {
  1681. int ret = 0;
  1682. #ifdef STARPU_SIMGRID
  1683. return ret;
  1684. #else
  1685. if (nobind > 0)
  1686. return ret;
  1687. if (cpuid < 0)
  1688. return ret;
  1689. #ifdef STARPU_HAVE_HWLOC
  1690. const struct hwloc_topology_support *support;
  1691. struct _starpu_machine_config *config = _starpu_get_machine_config();
  1692. #ifdef STARPU_USE_OPENCL
  1693. if (config->conf.nopencl != 0)
  1694. _starpu_opencl_init();
  1695. #endif
  1696. #ifdef STARPU_USE_CUDA
  1697. if (config->conf.ncuda != 0)
  1698. _starpu_init_cuda();
  1699. #endif
  1700. _starpu_init_topology(config);
  1701. if (workerid != STARPU_NOWORKERID && cpuid < STARPU_MAXCPUS)
  1702. {
  1703. /* TODO: mutex... */
  1704. int previous = cpu_worker[cpuid];
  1705. /* We would like the PU to be available, or we are perhaps fine to share it */
  1706. if ( !( previous == STARPU_NOWORKERID ||
  1707. (previous == STARPU_NONACTIVETHREAD && workerid == STARPU_NONACTIVETHREAD) ||
  1708. (previous >= 0 && previous == workerid) ||
  1709. (name && cpu_name[cpuid] && !strcmp(name, cpu_name[cpuid])) ) )
  1710. {
  1711. char hostname[65];
  1712. gethostname(hostname, sizeof(hostname));
  1713. if (previous == STARPU_ACTIVETHREAD)
  1714. _STARPU_DISP("[%s] Warning: active thread %s was already bound to PU %d\n", hostname, cpu_name[cpuid], cpuid);
  1715. else if (previous == STARPU_NONACTIVETHREAD)
  1716. _STARPU_DISP("[%s] Warning: non-active thread %s was already bound to PU %d\n", hostname, cpu_name[cpuid], cpuid);
  1717. else
  1718. _STARPU_DISP("[%s] Warning: worker %d was already bound to PU %d\n", hostname, previous, cpuid);
  1719. if (workerid == STARPU_ACTIVETHREAD)
  1720. _STARPU_DISP("and we were told to also bind active thread %s to it.\n", name);
1721. else if (workerid == STARPU_NONACTIVETHREAD)
  1722. _STARPU_DISP("and we were told to also bind non-active thread %s to it.\n", name);
  1723. else
  1724. _STARPU_DISP("and we were told to also bind worker %d to it.\n", workerid);
  1725. _STARPU_DISP("This will strongly degrade performance.\n");
  1726. if (workerid >= 0)
  1727. /* This shouldn't happen for workers */
  1728. _STARPU_DISP("[%s] Maybe check starpu_machine_display's output to determine what wrong binding happened. Hwloc reported %d cores and %d threads, perhaps there is misdetection between hwloc, the kernel and the BIOS, or an administrative allocation issue from e.g. the job scheduler?\n", hostname, config->topology.nhwcpus, config->topology.nhwpus);
  1729. ret = -1;
  1730. }
  1731. else
  1732. {
  1733. cpu_worker[cpuid] = workerid;
  1734. if (name)
  1735. cpu_name[cpuid] = strdup(name);
  1736. }
  1737. }
  1738. support = hwloc_topology_get_support(config->topology.hwtopology);
  1739. if (support->cpubind->set_thisthread_cpubind)
  1740. {
  1741. hwloc_obj_t obj = hwloc_get_obj_by_depth(config->topology.hwtopology, config->pu_depth, cpuid);
  1742. hwloc_bitmap_t set = obj->cpuset;
  1743. int res;
  1744. hwloc_bitmap_singlify(set);
  1745. res = hwloc_set_cpubind(config->topology.hwtopology, set, HWLOC_CPUBIND_THREAD);
  1746. if (res)
  1747. {
  1748. perror("hwloc_set_cpubind");
  1749. STARPU_ABORT();
  1750. }
  1751. }
  1752. #elif defined(HAVE_PTHREAD_SETAFFINITY_NP) && defined(__linux__)
  1753. int res;
  1754. /* fix the thread on the correct cpu */
  1755. cpu_set_t aff_mask;
  1756. CPU_ZERO(&aff_mask);
  1757. CPU_SET(cpuid, &aff_mask);
  1758. starpu_pthread_t self = starpu_pthread_self();
  1759. res = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
  1760. if (res)
  1761. {
  1762. const char *msg = strerror(res);
  1763. _STARPU_MSG("pthread_setaffinity_np: %s\n", msg);
  1764. STARPU_ABORT();
  1765. }
  1766. #elif defined(_WIN32)
  1767. DWORD mask = 1 << cpuid;
  1768. if (!SetThreadAffinityMask(GetCurrentThread(), mask))
  1769. {
1770. _STARPU_ERROR("SetThreadAffinityMask(%lx) failed\n", mask);
  1771. }
  1772. #else
  1773. #warning no CPU binding support
  1774. #endif
  1775. #endif
  1776. return ret;
  1777. }
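/* Public helper to pin an application thread. A minimal usage sketch,
 * assuming the application picked PU 3 for its own communication thread
 * (the cpuid and the name are purely illustrative):
 *
 *   starpu_bind_thread_on(3, STARPU_THREAD_ACTIVE, "app_comm_thread");
 *
 * The name is mandatory: it is used both for starpu_pthread_setname() and
 * for reporting conflicting bindings. */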
  1778. int
  1779. starpu_bind_thread_on(int cpuid, unsigned flags, const char *name)
  1780. {
  1781. int workerid;
  1782. STARPU_ASSERT_MSG(name, "starpu_bind_thread_on must be provided with a name");
  1783. starpu_pthread_setname(name);
  1784. if (flags & STARPU_THREAD_ACTIVE)
  1785. workerid = STARPU_ACTIVETHREAD;
  1786. else
  1787. workerid = STARPU_NONACTIVETHREAD;
  1788. return _starpu_bind_thread_on_cpu(cpuid, workerid, name);
  1789. }
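/* Bind the calling thread on the whole set of PUs used by a combined
 * (parallel) worker. */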
  1790. void _starpu_bind_thread_on_cpus(struct _starpu_combined_worker *combined_worker STARPU_ATTRIBUTE_UNUSED)
  1791. {
  1792. #ifdef STARPU_SIMGRID
  1793. return;
  1794. #endif
  1795. #ifdef STARPU_HAVE_HWLOC
  1796. const struct hwloc_topology_support *support;
  1797. struct _starpu_machine_config *config = _starpu_get_machine_config();
1798. #ifdef STARPU_USE_OPENCL
  1799. if (config->conf.nopencl != 0)
  1800. _starpu_opencl_init();
  1801. #endif
  1802. #ifdef STARPU_USE_CUDA
  1803. if (config->conf.ncuda != 0)
  1804. _starpu_init_cuda();
  1805. #endif
  1806. _starpu_init_topology(config);
  1807. support = hwloc_topology_get_support(config->topology.hwtopology);
  1808. if (support->cpubind->set_thisthread_cpubind)
  1809. {
  1810. hwloc_bitmap_t set = combined_worker->hwloc_cpu_set;
  1811. int ret;
  1812. ret = hwloc_set_cpubind(config->topology.hwtopology, set, HWLOC_CPUBIND_THREAD);
  1813. if (ret)
  1814. {
  1815. perror("binding thread");
  1816. STARPU_ABORT();
  1817. }
  1818. }
  1819. #else
  1820. #ifdef __GLIBC__
  1821. sched_setaffinity(0,sizeof(combined_worker->cpu_set),&combined_worker->cpu_set);
  1822. #else
  1823. # warning no parallel worker CPU binding support
  1824. #endif
  1825. #endif
  1826. }
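/* Reserve one PU (bindid) for each CPU worker; the other worker types get
 * their binding later, in _starpu_init_workers_binding_and_memory(). */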
  1827. static void _starpu_init_binding_cpu(struct _starpu_machine_config *config)
  1828. {
  1829. unsigned worker;
  1830. for (worker = 0; worker < config->topology.nworkers; worker++)
  1831. {
  1832. struct _starpu_worker *workerarg = &config->workers[worker];
  1833. switch (workerarg->arch)
  1834. {
  1835. case STARPU_CPU_WORKER:
  1836. {
  1837. /* Dedicate a cpu core to that worker */
  1838. workerarg->bindid = _starpu_get_next_bindid(config, STARPU_THREAD_ACTIVE, NULL, 0);
  1839. break;
  1840. }
  1841. default:
  1842. /* Do nothing */
  1843. break;
  1844. }
  1845. }
  1846. }
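/* Return the amount of memory usable from NUMA node nodeid, taking the
 * STARPU_LIMIT_CPU_NUMA_<node>_MEM, STARPU_LIMIT_CPU_NUMA_MEM and
 * STARPU_LIMIT_CPU_MEM environment variables into account. */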
  1847. static size_t _starpu_cpu_get_global_mem_size(int nodeid, struct _starpu_machine_config *config)
  1848. {
  1849. size_t global_mem;
  1850. starpu_ssize_t limit = -1;
  1851. #if defined(STARPU_HAVE_HWLOC)
  1852. struct _starpu_machine_topology *topology = &config->topology;
  1853. STARPU_ASSERT(numa_enabled != -1);
  1854. if (numa_enabled)
  1855. {
  1856. int depth_node = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_NUMANODE);
  1857. if (depth_node == HWLOC_TYPE_DEPTH_UNKNOWN)
  1858. {
  1859. #if HWLOC_API_VERSION >= 0x00020000
  1860. global_mem = hwloc_get_root_obj(topology->hwtopology)->total_memory;
  1861. #else
  1862. global_mem = hwloc_get_root_obj(topology->hwtopology)->memory.total_memory;
  1863. #endif
  1864. }
  1865. else
  1866. {
  1867. char name[32];
  1868. hwloc_obj_t obj = hwloc_get_obj_by_depth(topology->hwtopology, depth_node, nodeid);
  1869. #if HWLOC_API_VERSION >= 0x00020000
  1870. global_mem = obj->attr->numanode.local_memory;
  1871. #else
  1872. global_mem = obj->memory.local_memory;
  1873. #endif
  1874. snprintf(name, sizeof(name), "STARPU_LIMIT_CPU_NUMA_%d_MEM", obj->os_index);
  1875. limit = starpu_get_env_number(name);
  1876. }
  1877. }
  1878. else
  1879. {
  1880. /* Do not limit ourself to a single NUMA node */
  1881. #if HWLOC_API_VERSION >= 0x00020000
  1882. global_mem = hwloc_get_root_obj(topology->hwtopology)->total_memory;
  1883. #else
  1884. global_mem = hwloc_get_root_obj(topology->hwtopology)->memory.total_memory;
  1885. #endif
  1886. }
  1887. #else /* STARPU_HAVE_HWLOC */
  1888. #ifdef STARPU_DEVEL
  1889. # warning TODO: use sysinfo when available to get global size
  1890. #endif
  1891. global_mem = 0;
  1892. #endif
  1893. if (limit == -1)
  1894. limit = starpu_get_env_number("STARPU_LIMIT_CPU_NUMA_MEM");
  1895. if (limit == -1)
  1896. {
  1897. limit = starpu_get_env_number("STARPU_LIMIT_CPU_MEM");
  1898. if (limit != -1 && numa_enabled)
  1899. {
1900. _STARPU_DISP("NUMA is enabled and STARPU_LIMIT_CPU_MEM is set to %ldMB. Assuming that it should be distributed over the %d NUMA node(s). You probably want to use STARPU_LIMIT_CPU_NUMA_MEM instead.\n", (long) limit, _starpu_topology_get_nnumanodes(config));
  1901. limit /= _starpu_topology_get_nnumanodes(config);
  1902. }
  1903. }
  1904. if (limit < 0)
  1905. // No limit is defined, we return the global memory size
  1906. return global_mem;
  1907. else if (global_mem && (size_t)limit * 1024*1024 > global_mem)
  1908. {
  1909. if (numa_enabled)
1910. _STARPU_DISP("The requested limit %ldMB for NUMA node %d is higher than the available memory %luMB, using the latter\n", (long) limit, nodeid, (unsigned long) global_mem / (1024*1024));
1911. else
1912. _STARPU_DISP("The requested limit %ldMB is higher than the available memory %luMB, using the latter\n", (long) limit, (unsigned long) global_mem / (1024*1024));
  1913. return global_mem;
  1914. }
  1915. else
  1916. // We limit the memory
  1917. return limit*1024*1024;
  1918. }
  1919. //TODO : Check SIMGRID
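/* Register one StarPU memory node per NUMA node: first the NUMA nodes used
 * by CPU workers, then those attached to CUDA/OpenCL devices, and finally,
 * as a fallback, every NUMA node reported by the topology. */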
  1920. static void _starpu_init_numa_node(struct _starpu_machine_config *config)
  1921. {
  1922. nb_numa_nodes = 0;
  1923. unsigned i;
  1924. for (i = 0; i < STARPU_MAXNUMANODES; i++)
  1925. {
  1926. numa_memory_nodes_to_hwloclogid[i] = STARPU_NUMA_UNINITIALIZED;
  1927. numa_memory_nodes_to_physicalid[i] = STARPU_NUMA_UNINITIALIZED;
  1928. }
  1929. #ifdef STARPU_SIMGRID
  1930. char name[16];
  1931. starpu_sg_host_t host;
  1932. #endif
  1933. numa_enabled = starpu_get_env_number_default("STARPU_USE_NUMA", 0);
  1934. /* NUMA mode activated */
  1935. if (numa_enabled)
  1936. {
  1937. /* Take all NUMA nodes used by CPU workers */
  1938. unsigned worker;
  1939. for (worker = 0; worker < config->topology.nworkers; worker++)
  1940. {
  1941. struct _starpu_worker *workerarg = &config->workers[worker];
  1942. if (workerarg->arch == STARPU_CPU_WORKER)
  1943. {
  1944. int numa_logical_id = _starpu_get_logical_numa_node_worker(worker);
  1945. /* Convert logical id to StarPU id to check if this NUMA node is already saved or not */
  1946. int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(numa_logical_id);
  1947. /* This shouldn't happen */
  1948. if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
  1949. {
  1950. _STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
  1951. STARPU_ABORT();
  1952. }
  1953. if (numa_starpu_id == -1)
  1954. {
  1955. int devid = numa_logical_id == STARPU_NUMA_MAIN_RAM ? 0 : numa_logical_id;
  1956. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, devid, &_starpu_driver_cpu_node_ops);
  1957. _starpu_memory_manager_set_global_memory_size(memnode, _starpu_cpu_get_global_mem_size(devid, config));
  1958. STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
  1959. numa_memory_nodes_to_hwloclogid[memnode] = numa_logical_id;
  1960. int numa_physical_id = _starpu_get_physical_numa_node_worker(worker);
  1961. numa_memory_nodes_to_physicalid[memnode] = numa_physical_id;
  1962. nb_numa_nodes++;
  1963. #ifdef STARPU_SIMGRID
  1964. snprintf(name, sizeof(name), "RAM%d", memnode);
  1965. host = _starpu_simgrid_get_host_by_name(name);
  1966. STARPU_ASSERT(host);
  1967. _starpu_simgrid_memory_node_set_host(memnode, host);
  1968. #endif
  1969. }
  1970. }
  1971. }
  1972. /* If we found NUMA nodes from CPU workers, it's good */
  1973. if (nb_numa_nodes != 0)
  1974. return;
  1975. _STARPU_DISP("No NUMA nodes found when checking CPU workers...\n");
  1976. #if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && defined(STARPU_HAVE_HWLOC)
  1977. _STARPU_DISP("Take NUMA nodes attached to CUDA and OpenCL devices...\n");
  1978. #endif
  1979. #if defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_HWLOC)
  1980. for (i = 0; i < config->topology.ncudagpus; i++)
  1981. {
  1982. hwloc_obj_t obj = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, i);
  1983. if (obj)
  1984. obj = numa_get_obj(obj);
  1985. /* Hwloc cannot recognize some devices */
  1986. if (!obj)
  1987. continue;
  1988. int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(obj->logical_index);
  1989. /* This shouldn't happen */
  1990. if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
  1991. {
  1992. _STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
  1993. STARPU_ABORT();
  1994. }
  1995. if (numa_starpu_id == -1)
  1996. {
  1997. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, obj->logical_index, &_starpu_driver_cpu_node_ops);
  1998. _starpu_memory_manager_set_global_memory_size(memnode, _starpu_cpu_get_global_mem_size(obj->logical_index, config));
  1999. STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
  2000. numa_memory_nodes_to_hwloclogid[memnode] = obj->logical_index;
  2001. numa_memory_nodes_to_physicalid[memnode] = obj->os_index;
  2002. nb_numa_nodes++;
  2003. #ifdef STARPU_SIMGRID
  2004. snprintf(name, sizeof(name), "RAM%d", memnode);
  2005. host = _starpu_simgrid_get_host_by_name(name);
  2006. STARPU_ASSERT(host);
  2007. _starpu_simgrid_memory_node_set_host(memnode, host);
  2008. #endif
  2009. }
  2010. }
  2011. #endif
  2012. #if defined(STARPU_USE_OPENCL) && defined(STARPU_HAVE_HWLOC)
  2013. if (config->topology.nopenclgpus > 0)
  2014. {
  2015. cl_int err;
  2016. cl_platform_id platform_id[_STARPU_OPENCL_PLATFORM_MAX];
  2017. cl_uint nb_platforms;
  2018. unsigned platform;
  2019. unsigned nb_opencl_devices = 0, num = 0;
  2020. err = clGetPlatformIDs(_STARPU_OPENCL_PLATFORM_MAX, platform_id, &nb_platforms);
  2021. if (STARPU_UNLIKELY(err != CL_SUCCESS))
  2022. nb_platforms=0;
  2023. cl_device_type device_type = CL_DEVICE_TYPE_GPU|CL_DEVICE_TYPE_ACCELERATOR;
  2024. if (starpu_get_env_number("STARPU_OPENCL_ON_CPUS") > 0)
  2025. device_type |= CL_DEVICE_TYPE_CPU;
  2026. if (starpu_get_env_number("STARPU_OPENCL_ONLY_ON_CPUS") > 0)
  2027. device_type = CL_DEVICE_TYPE_CPU;
  2028. for (platform = 0; platform < nb_platforms ; platform++)
  2029. {
  2030. err = clGetDeviceIDs(platform_id[platform], device_type, 0, NULL, &num);
  2031. if (err != CL_SUCCESS)
  2032. num = 0;
  2033. nb_opencl_devices += num;
  2034. for (i = 0; i < num; i++)
  2035. {
  2036. hwloc_obj_t obj = hwloc_opencl_get_device_osdev_by_index(config->topology.hwtopology, platform, i);
  2037. if (obj)
  2038. obj = numa_get_obj(obj);
  2039. /* Hwloc cannot recognize some devices */
  2040. if (!obj)
  2041. continue;
  2042. int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(obj->logical_index);
  2043. /* This shouldn't happen */
  2044. if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
  2045. {
  2046. _STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
  2047. STARPU_ABORT();
  2048. }
  2049. if (numa_starpu_id == -1)
  2050. {
  2051. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, obj->logical_index, &_starpu_driver_cpu_node_ops);
  2052. _starpu_memory_manager_set_global_memory_size(memnode, _starpu_cpu_get_global_mem_size(obj->logical_index, config));
  2053. STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
  2054. numa_memory_nodes_to_hwloclogid[memnode] = obj->logical_index;
  2055. numa_memory_nodes_to_physicalid[memnode] = obj->os_index;
  2056. nb_numa_nodes++;
  2057. #ifdef STARPU_SIMGRID
  2058. snprintf(name, sizeof(name), "RAM%d", memnode);
  2059. host = _starpu_simgrid_get_host_by_name(name);
  2060. STARPU_ASSERT(host);
  2061. _starpu_simgrid_memory_node_set_host(memnode, host);
  2062. #endif
  2063. }
  2064. }
  2065. }
  2066. }
  2067. #endif
  2068. }
  2069. #if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && defined(STARPU_HAVE_HWLOC)
2070. // If we found NUMA nodes attached to GPU devices, it's good
  2071. if (nb_numa_nodes != 0)
  2072. return;
2073. /* If we did not find any NUMA node attached to GPUs either, we take all of them */
  2074. if (numa_enabled)
  2075. _STARPU_DISP("No NUMA nodes found when checking GPUs devices...\n");
  2076. #endif
  2077. if (numa_enabled)
  2078. _STARPU_DISP("Finally, take all NUMA nodes available... \n");
  2079. unsigned nnuma = _starpu_topology_get_nnumanodes(config);
  2080. if (nnuma > STARPU_MAXNUMANODES)
  2081. {
  2082. _STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
  2083. nnuma = STARPU_MAXNUMANODES;
  2084. }
  2085. unsigned numa;
  2086. for (numa = 0; numa < nnuma; numa++)
  2087. {
  2088. unsigned numa_logical_id;
  2089. unsigned numa_physical_id;
  2090. #if defined(STARPU_HAVE_HWLOC)
  2091. hwloc_obj_t obj = hwloc_get_obj_by_type(config->topology.hwtopology, HWLOC_OBJ_NUMANODE, numa);
  2092. if (obj)
  2093. {
  2094. numa_logical_id = obj->logical_index;
  2095. numa_physical_id = obj->os_index;
  2096. }
  2097. else
  2098. #endif
  2099. {
  2100. numa_logical_id = 0;
  2101. numa_physical_id = 0;
  2102. }
  2103. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, numa_logical_id, &_starpu_driver_cpu_node_ops);
  2104. _starpu_memory_manager_set_global_memory_size(memnode, _starpu_cpu_get_global_mem_size(numa_logical_id, config));
  2105. numa_memory_nodes_to_hwloclogid[memnode] = numa_logical_id;
  2106. numa_memory_nodes_to_physicalid[memnode] = numa_physical_id;
  2107. nb_numa_nodes++;
  2108. if (numa == 0)
  2109. STARPU_ASSERT_MSG(memnode == STARPU_MAIN_RAM, "Wrong Memory Node : %d (expected %d) \n", memnode, STARPU_MAIN_RAM);
  2110. STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available) \n", memnode, STARPU_MAXNUMANODES);
  2111. #ifdef STARPU_SIMGRID
  2112. if (nnuma > 1)
  2113. {
  2114. snprintf(name, sizeof(name), "RAM%d", memnode);
  2115. host = _starpu_simgrid_get_host_by_name(name);
  2116. }
  2117. else
  2118. {
2119. /* In this case, there is only one NUMA node */
  2120. host = _starpu_simgrid_get_host_by_name("RAM");
  2121. }
  2122. STARPU_ASSERT(host);
  2123. _starpu_simgrid_memory_node_set_host(memnode, host);
  2124. #endif
  2125. }
  2126. STARPU_ASSERT_MSG(nb_numa_nodes > 0, "No NUMA node found... We need at least one memory node !\n");
  2127. }
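/* Register a bus between every ordered pair of distinct NUMA memory nodes. */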
  2128. static void _starpu_init_numa_bus()
  2129. {
  2130. unsigned i, j;
  2131. for (i = 0; i < nb_numa_nodes; i++)
  2132. for (j = 0; j < nb_numa_nodes; j++)
  2133. if (i != j)
  2134. numa_bus_id[i*nb_numa_nodes+j] = _starpu_register_bus(i, j);
  2135. }
  2136. #if defined(STARPU_HAVE_HWLOC) && !defined(STARPU_SIMGRID)
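/* Depth-first search below root for PUs hosting a worker, and let those
 * workers drive memory node 'node'; return whether at least one was found. */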
  2137. static int _starpu_find_pu_driving_numa_from(hwloc_obj_t root, unsigned node)
  2138. {
  2139. unsigned i;
  2140. int found = 0;
  2141. if (!root->arity)
  2142. {
  2143. if (root->type == HWLOC_OBJ_PU)
  2144. {
  2145. struct _starpu_hwloc_userdata *userdata = root->userdata;
  2146. if (userdata->pu_worker)
  2147. {
  2148. /* Cool, found a worker! */
  2149. _STARPU_DEBUG("found PU %d to drive memory node %d\n", userdata->pu_worker->bindid, node);
  2150. _starpu_worker_drives_memory_node(userdata->pu_worker, node);
  2151. found = 1;
  2152. }
  2153. }
  2154. }
  2155. for (i = 0; i < root->arity; i++)
  2156. {
  2157. if (_starpu_find_pu_driving_numa_from(root->children[i], node))
  2158. found = 1;
  2159. }
  2160. return found;
  2161. }
  2162. /* Look upward to find a level containing the given NUMA node and workers to drive it */
  2163. static int _starpu_find_pu_driving_numa_up(hwloc_obj_t root, unsigned node)
  2164. {
  2165. if (_starpu_find_pu_driving_numa_from(root, node))
  2166. /* Ok, we already managed to find drivers */
  2167. return 1;
  2168. if (!root->parent)
  2169. /* And no parent!? nobody can drive this... */
  2170. return 0;
  2171. /* Try from parent */
  2172. return _starpu_find_pu_driving_numa_up(root->parent, node);
  2173. }
  2174. #endif
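/* For each worker, register or reuse the memory node backing its device,
 * choose its CPU binding, register the buses between that node and the NUMA
 * nodes, and record which workers are able to drive which memory nodes. */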
  2175. static void _starpu_init_workers_binding_and_memory(struct _starpu_machine_config *config, int no_mp_config STARPU_ATTRIBUTE_UNUSED)
  2176. {
2177. /* We will store the bus ids of all the different (src, dst)
2178. * combinations in a matrix which we initialize here. */
  2179. _starpu_initialize_busid_matrix();
  2180. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  2181. unsigned cuda_init[STARPU_MAXCUDADEVS] = { };
  2182. unsigned cuda_memory_nodes[STARPU_MAXCUDADEVS];
  2183. unsigned cuda_bindid[STARPU_MAXCUDADEVS];
  2184. int cuda_globalbindid = -1;
  2185. #endif
  2186. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  2187. unsigned opencl_init[STARPU_MAXOPENCLDEVS] = { };
  2188. unsigned opencl_memory_nodes[STARPU_MAXOPENCLDEVS];
  2189. unsigned opencl_bindid[STARPU_MAXOPENCLDEVS];
  2190. #endif
  2191. #ifdef STARPU_USE_MIC
  2192. unsigned mic_init[STARPU_MAXMICDEVS] = { };
  2193. unsigned mic_memory_nodes[STARPU_MAXMICDEVS];
  2194. unsigned mic_bindid[STARPU_MAXMICDEVS];
  2195. #endif
  2196. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2197. unsigned mpi_init[STARPU_MAXMPIDEVS] = { };
  2198. unsigned mpi_memory_nodes[STARPU_MAXMPIDEVS];
  2199. unsigned mpi_bindid[STARPU_MAXMPIDEVS];
  2200. #endif
  2201. unsigned bindid;
  2202. for (bindid = 0; bindid < config->nbindid; bindid++)
  2203. {
  2204. free(config->bindid_workers[bindid].workerids);
  2205. config->bindid_workers[bindid].workerids = NULL;
  2206. config->bindid_workers[bindid].nworkers = 0;
  2207. }
  2208. /* Init CPU binding before NUMA nodes, because we use it to discover NUMA nodes */
  2209. _starpu_init_binding_cpu(config);
  2210. /* Initialize NUMA nodes */
  2211. _starpu_init_numa_node(config);
  2212. _starpu_init_numa_bus();
  2213. unsigned worker;
  2214. for (worker = 0; worker < config->topology.nworkers; worker++)
  2215. {
  2216. unsigned memory_node = -1;
  2217. struct _starpu_worker *workerarg = &config->workers[worker];
  2218. unsigned devid STARPU_ATTRIBUTE_UNUSED = workerarg->devid;
  2219. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  2220. /* Perhaps the worker has some "favourite" bindings */
  2221. unsigned *preferred_binding = NULL;
  2222. unsigned npreferred = 0;
  2223. #endif
  2224. /* select the memory node that contains worker's memory */
  2225. switch (workerarg->arch)
  2226. {
  2227. case STARPU_CPU_WORKER:
  2228. {
  2229. int numa_logical_id = _starpu_get_logical_numa_node_worker(worker);
  2230. int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(numa_logical_id);
  2231. if (numa_starpu_id < 0 || numa_starpu_id >= STARPU_MAXNUMANODES)
  2232. numa_starpu_id = STARPU_MAIN_RAM;
  2233. #if defined(STARPU_HAVE_HWLOC) && !defined(STARPU_SIMGRID)
  2234. hwloc_obj_t pu_obj = hwloc_get_obj_by_type(config->topology.hwtopology, HWLOC_OBJ_PU, workerarg->bindid);
  2235. struct _starpu_hwloc_userdata *userdata = pu_obj->userdata;
  2236. userdata->pu_worker = workerarg;
  2237. #endif
  2238. workerarg->numa_memory_node = memory_node = numa_starpu_id;
  2239. _starpu_memory_node_add_nworkers(memory_node);
  2240. _starpu_worker_drives_memory_node(workerarg, numa_starpu_id);
  2241. break;
  2242. }
  2243. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  2244. case STARPU_CUDA_WORKER:
  2245. {
  2246. unsigned numa;
  2247. #ifndef STARPU_SIMGRID
  2248. if (may_bind_automatically[STARPU_CUDA_WORKER])
  2249. {
  2250. /* StarPU is allowed to bind threads automatically */
  2251. preferred_binding = _starpu_get_cuda_affinity_vector(devid);
  2252. npreferred = config->topology.nhwpus;
  2253. }
  2254. #endif /* SIMGRID */
  2255. if (cuda_init[devid])
  2256. {
  2257. memory_node = cuda_memory_nodes[devid];
  2258. if (config->topology.cuda_th_per_stream == 0)
  2259. workerarg->bindid = cuda_bindid[devid];
  2260. else
  2261. workerarg->bindid = _starpu_get_next_bindid(config, STARPU_THREAD_ACTIVE, preferred_binding, npreferred);
  2262. }
  2263. else
  2264. {
  2265. cuda_init[devid] = 1;
  2266. if (config->topology.cuda_th_per_dev == 0 && config->topology.cuda_th_per_stream == 0)
  2267. {
  2268. if (cuda_globalbindid == -1)
  2269. cuda_globalbindid = _starpu_get_next_bindid(config, STARPU_THREAD_ACTIVE, preferred_binding, npreferred);
  2270. workerarg->bindid = cuda_bindid[devid] = cuda_globalbindid;
  2271. }
  2272. else
  2273. workerarg->bindid = cuda_bindid[devid] = _starpu_get_next_bindid(config, STARPU_THREAD_ACTIVE, preferred_binding, npreferred);
  2274. memory_node = cuda_memory_nodes[devid] = _starpu_memory_node_register(STARPU_CUDA_RAM, devid, &_starpu_driver_cuda_node_ops);
  2275. for (numa = 0; numa < nb_numa_nodes; numa++)
  2276. {
  2277. _starpu_cuda_bus_ids[numa][devid+STARPU_MAXNUMANODES] = _starpu_register_bus(numa, memory_node);
  2278. _starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][numa] = _starpu_register_bus(memory_node, numa);
  2279. }
  2280. #ifdef STARPU_SIMGRID
  2281. const char* cuda_memcpy_peer;
  2282. char name[16];
  2283. snprintf(name, sizeof(name), "CUDA%u", devid);
  2284. starpu_sg_host_t host = _starpu_simgrid_get_host_by_name(name);
  2285. STARPU_ASSERT(host);
  2286. _starpu_simgrid_memory_node_set_host(memory_node, host);
  2287. # ifdef STARPU_HAVE_SIMGRID_ACTOR_H
  2288. cuda_memcpy_peer = sg_host_get_property_value(host, "memcpy_peer");
  2289. # else
  2290. cuda_memcpy_peer = MSG_host_get_property_value(host, "memcpy_peer");
  2291. # endif
  2292. #endif /* SIMGRID */
  2293. if (
  2294. #ifdef STARPU_SIMGRID
  2295. cuda_memcpy_peer && atoll(cuda_memcpy_peer)
  2296. #elif defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
  2297. 1
  2298. #else /* MEMCPY_PEER */
  2299. 0
  2300. #endif /* MEMCPY_PEER */
  2301. )
  2302. {
  2303. unsigned worker2;
  2304. for (worker2 = 0; worker2 < worker; worker2++)
  2305. {
  2306. struct _starpu_worker *workerarg2 = &config->workers[worker2];
  2307. int devid2 = workerarg2->devid;
  2308. if (workerarg2->arch == STARPU_CUDA_WORKER)
  2309. {
  2310. unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
  2311. _starpu_cuda_bus_ids[devid2+STARPU_MAXNUMANODES][devid+STARPU_MAXNUMANODES] = _starpu_register_bus(memory_node2, memory_node);
  2312. _starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][devid2+STARPU_MAXNUMANODES] = _starpu_register_bus(memory_node, memory_node2);
  2313. #ifndef STARPU_SIMGRID
  2314. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  2315. {
  2316. hwloc_obj_t obj, obj2, ancestor;
  2317. obj = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, devid);
  2318. obj2 = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, devid2);
  2319. ancestor = hwloc_get_common_ancestor_obj(config->topology.hwtopology, obj, obj2);
  2320. if (ancestor)
  2321. {
  2322. struct _starpu_hwloc_userdata *data = ancestor->userdata;
  2323. #ifdef STARPU_VERBOSE
  2324. {
  2325. char name[64];
  2326. hwloc_obj_type_snprintf(name, sizeof(name), ancestor, 0);
  2327. _STARPU_DEBUG("CUDA%u and CUDA%u are linked through %s, along %u GPUs\n", devid, devid2, name, data->ngpus);
  2328. }
  2329. #endif
  2330. starpu_bus_set_ngpus(_starpu_cuda_bus_ids[devid2+STARPU_MAXNUMANODES][devid+STARPU_MAXNUMANODES], data->ngpus);
  2331. starpu_bus_set_ngpus(_starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][devid2+STARPU_MAXNUMANODES], data->ngpus);
  2332. }
  2333. }
  2334. #endif
  2335. #endif
  2336. }
  2337. }
  2338. }
  2339. }
  2340. _starpu_memory_node_add_nworkers(memory_node);
  2341. //This worker can manage transfers on NUMA nodes
  2342. for (numa = 0; numa < nb_numa_nodes; numa++)
  2343. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
  2344. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
  2345. break;
  2346. }
  2347. #endif
  2348. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  2349. case STARPU_OPENCL_WORKER:
  2350. {
  2351. unsigned numa;
  2352. #ifndef STARPU_SIMGRID
  2353. if (may_bind_automatically[STARPU_OPENCL_WORKER])
  2354. {
  2355. /* StarPU is allowed to bind threads automatically */
  2356. preferred_binding = _starpu_get_opencl_affinity_vector(devid);
  2357. npreferred = config->topology.nhwpus;
  2358. }
  2359. #endif /* SIMGRID */
  2360. if (opencl_init[devid])
  2361. {
  2362. memory_node = opencl_memory_nodes[devid];
  2363. #ifndef STARPU_SIMGRID
  2364. workerarg->bindid = opencl_bindid[devid];
  2365. #endif /* SIMGRID */
  2366. }
  2367. else
  2368. {
  2369. opencl_init[devid] = 1;
  2370. workerarg->bindid = opencl_bindid[devid] = _starpu_get_next_bindid(config, STARPU_THREAD_ACTIVE, preferred_binding, npreferred);
  2371. memory_node = opencl_memory_nodes[devid] = _starpu_memory_node_register(STARPU_OPENCL_RAM, devid, &_starpu_driver_opencl_node_ops);
  2372. for (numa = 0; numa < nb_numa_nodes; numa++)
  2373. {
  2374. _starpu_register_bus(numa, memory_node);
  2375. _starpu_register_bus(memory_node, numa);
  2376. }
  2377. #ifdef STARPU_SIMGRID
  2378. char name[16];
  2379. snprintf(name, sizeof(name), "OpenCL%u", devid);
  2380. starpu_sg_host_t host = _starpu_simgrid_get_host_by_name(name);
  2381. STARPU_ASSERT(host);
  2382. _starpu_simgrid_memory_node_set_host(memory_node, host);
  2383. #endif /* SIMGRID */
  2384. }
  2385. _starpu_memory_node_add_nworkers(memory_node);
  2386. //This worker can manage transfers on NUMA nodes
  2387. for (numa = 0; numa < nb_numa_nodes; numa++)
  2388. _starpu_worker_drives_memory_node(workerarg, numa);
  2389. _starpu_worker_drives_memory_node(workerarg, memory_node);
  2390. break;
  2391. }
  2392. #endif
  2393. #ifdef STARPU_USE_MIC
  2394. case STARPU_MIC_WORKER:
  2395. {
  2396. unsigned numa;
  2397. if (mic_init[devid])
  2398. {
  2399. memory_node = mic_memory_nodes[devid];
  2400. }
  2401. else
  2402. {
  2403. mic_init[devid] = 1;
  2404. /* TODO */
  2405. //if (may_bind_automatically)
  2406. //{
  2407. // /* StarPU is allowed to bind threads automatically */
  2408. // preferred_binding = _starpu_get_mic_affinity_vector(devid);
  2409. // npreferred = config->topology.nhwpus;
  2410. //}
  2411. mic_bindid[devid] = _starpu_get_next_bindid(config, STARPU_THREAD_ACTIVE, preferred_binding, npreferred);
  2412. memory_node = mic_memory_nodes[devid] = _starpu_memory_node_register(STARPU_MIC_RAM, devid, &_starpu_driver_mic_node_ops);
  2413. for (numa = 0; numa < nb_numa_nodes; numa++)
  2414. {
  2415. _starpu_register_bus(numa, memory_node);
  2416. _starpu_register_bus(memory_node, numa);
  2417. }
  2418. }
  2419. workerarg->bindid = mic_bindid[devid];
  2420. _starpu_memory_node_add_nworkers(memory_node);
  2421. //This worker can manage transfers on NUMA nodes
  2422. for (numa = 0; numa < nb_numa_nodes; numa++)
  2423. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
  2424. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
  2425. break;
  2426. }
  2427. #endif /* STARPU_USE_MIC */
  2428. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2429. case STARPU_MPI_MS_WORKER:
  2430. {
  2431. unsigned numa;
  2432. if (mpi_init[devid])
  2433. {
  2434. memory_node = mpi_memory_nodes[devid];
  2435. }
  2436. else
  2437. {
  2438. mpi_init[devid] = 1;
  2439. mpi_bindid[devid] = _starpu_get_next_bindid(config, STARPU_THREAD_ACTIVE, preferred_binding, npreferred);
  2440. memory_node = mpi_memory_nodes[devid] = _starpu_memory_node_register(STARPU_MPI_MS_RAM, devid, &_starpu_driver_mpi_node_ops);
  2441. for (numa = 0; numa < nb_numa_nodes; numa++)
  2442. {
  2443. _starpu_register_bus(numa, memory_node);
  2444. _starpu_register_bus(memory_node, numa);
  2445. }
  2446. }
  2447. //This worker can manage transfers on NUMA nodes
  2448. for (numa = 0; numa < nb_numa_nodes; numa++)
  2449. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
  2450. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
  2451. #ifndef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
2452. /* The MPI driver thread can manage all the slave memories when the MPI multiple-thread mode is disabled */
  2453. unsigned findworker;
  2454. for (findworker = 0; findworker < worker; findworker++)
  2455. {
  2456. struct _starpu_worker *findworkerarg = &config->workers[findworker];
  2457. if (findworkerarg->arch == STARPU_MPI_MS_WORKER)
  2458. {
  2459. _starpu_worker_drives_memory_node(workerarg, findworkerarg->memory_node);
  2460. _starpu_worker_drives_memory_node(findworkerarg, memory_node);
  2461. }
  2462. }
  2463. #endif
  2464. workerarg->bindid = mpi_bindid[devid];
  2465. _starpu_memory_node_add_nworkers(memory_node);
  2466. break;
  2467. }
  2468. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
			default:
				STARPU_ABORT();
		}

		workerarg->memory_node = memory_node;

		_STARPU_DEBUG("worker %u type %d devid %u bound to cpu %d, STARPU memory node %u\n", worker, workerarg->arch, devid, workerarg->bindid, memory_node);

#ifdef __GLIBC__
		if (workerarg->bindid != -1)
		{
			/* Save the initial cpuset */
			CPU_ZERO(&workerarg->cpu_set);
			CPU_SET(workerarg->bindid, &workerarg->cpu_set);
		}
#endif /* __GLIBC__ */

#ifdef STARPU_HAVE_HWLOC
		if (workerarg->bindid == -1)
		{
			workerarg->hwloc_cpu_set = hwloc_bitmap_alloc();
			workerarg->hwloc_obj = NULL;
		}
		else
		{
			/* Put the worker descriptor in the userdata field of the
			 * hwloc object describing the CPU */
			hwloc_obj_t worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
									config->pu_depth,
									workerarg->bindid);
			struct _starpu_hwloc_userdata *data = worker_obj->userdata;
			if (data->worker_list == NULL)
				data->worker_list = _starpu_worker_list_new();
			_starpu_worker_list_push_front(data->worker_list, workerarg);

			/* Clear the cpu set and set the cpu */
			workerarg->hwloc_cpu_set = hwloc_bitmap_dup(worker_obj->cpuset);

			workerarg->hwloc_obj = worker_obj;
		}
#endif
		if (workerarg->bindid != -1)
		{
			bindid = workerarg->bindid;
			unsigned old_nbindid = config->nbindid;
			if (bindid >= old_nbindid)
			{
				/* More room needed */
				if (!old_nbindid)
					config->nbindid = STARPU_NMAXWORKERS;
				else
					config->nbindid = 2 * old_nbindid;
				if (bindid >= config->nbindid)
				{
					config->nbindid = bindid + 1;
				}
				_STARPU_REALLOC(config->bindid_workers, config->nbindid * sizeof(config->bindid_workers[0]));
				memset(&config->bindid_workers[old_nbindid], 0, (config->nbindid - old_nbindid) * sizeof(config->bindid_workers[0]));
			}
			/* Add slot for this worker */
			/* Don't care about amortizing the cost, there are usually very few workers sharing the same bindid */
			config->bindid_workers[bindid].nworkers++;
			_STARPU_REALLOC(config->bindid_workers[bindid].workerids, config->bindid_workers[bindid].nworkers * sizeof(config->bindid_workers[bindid].workerids[0]));
			config->bindid_workers[bindid].workerids[config->bindid_workers[bindid].nworkers - 1] = worker;
		}
	}
#if defined(STARPU_HAVE_HWLOC) && !defined(STARPU_SIMGRID)
	/* If some NUMA nodes don't have drivers, attribute some */
	unsigned node, nnodes = starpu_memory_nodes_get_count();
	for (node = 0; node < nnodes; node++)
	{
		if (starpu_node_get_kind(node) != STARPU_CPU_RAM)
			/* Only RAM nodes can be processed by any CPU */
			continue;

		for (worker = 0; worker < config->topology.nworkers; worker++)
		{
			if (_starpu_worker_drives_memory[worker][node])
				break;
		}
		if (worker < config->topology.nworkers)
			/* Already somebody driving it */
			continue;

		/* Nobody driving this node! Attribute some */
		_STARPU_DEBUG("nobody drives memory node %u\n", node);
		hwloc_obj_t numa_node_obj = hwloc_get_obj_by_type(config->topology.hwtopology, HWLOC_OBJ_NUMANODE, starpu_memory_nodes_numa_id_to_hwloclogid(node));
		int ret = _starpu_find_pu_driving_numa_up(numa_node_obj, node);
		STARPU_ASSERT_MSG(ret, "oops, didn't find any worker to drive memory node %u!?", node);
	}
#endif
#ifdef STARPU_SIMGRID
	_starpu_simgrid_count_ngpus();
#else
#ifdef STARPU_HAVE_HWLOC
	_starpu_topology_count_ngpus(hwloc_get_root_obj(config->topology.hwtopology));
#endif
#endif
}
int _starpu_build_topology(struct _starpu_machine_config *config, int no_mp_config)
{
	int ret;
	unsigned i;

	ret = _starpu_init_machine_config(config, no_mp_config);
	if (ret)
		return ret;

	/* for the data management library */
	_starpu_memory_nodes_init();
	_starpu_datastats_init();

	_starpu_init_workers_binding_and_memory(config, no_mp_config);

	_starpu_mem_chunk_init_last();

	config->cpus_nodeid = -1;
	config->cuda_nodeid = -1;
	config->opencl_nodeid = -1;
	config->mic_nodeid = -1;
	config->mpi_nodeid = -1;

	for (i = 0; i < starpu_worker_get_count(); i++)
	{
		switch (starpu_worker_get_type(i))
		{
			case STARPU_CPU_WORKER:
				if (config->cpus_nodeid == -1)
					config->cpus_nodeid = starpu_worker_get_memory_node(i);
				else if (config->cpus_nodeid != (int) starpu_worker_get_memory_node(i))
					config->cpus_nodeid = -2;
				break;
			case STARPU_CUDA_WORKER:
				if (config->cuda_nodeid == -1)
					config->cuda_nodeid = starpu_worker_get_memory_node(i);
				else if (config->cuda_nodeid != (int) starpu_worker_get_memory_node(i))
					config->cuda_nodeid = -2;
				break;
			case STARPU_OPENCL_WORKER:
				if (config->opencl_nodeid == -1)
					config->opencl_nodeid = starpu_worker_get_memory_node(i);
				else if (config->opencl_nodeid != (int) starpu_worker_get_memory_node(i))
					config->opencl_nodeid = -2;
				break;
			case STARPU_MIC_WORKER:
				if (config->mic_nodeid == -1)
					config->mic_nodeid = starpu_worker_get_memory_node(i);
				else if (config->mic_nodeid != (int) starpu_worker_get_memory_node(i))
					config->mic_nodeid = -2;
				break;
			case STARPU_MPI_MS_WORKER:
				if (config->mpi_nodeid == -1)
					config->mpi_nodeid = starpu_worker_get_memory_node(i);
				else if (config->mpi_nodeid != (int) starpu_worker_get_memory_node(i))
					config->mpi_nodeid = -2;
				break;
			case STARPU_ANY_WORKER:
				STARPU_ASSERT(0);
		}
	}

	return 0;
}
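
/* Illustrative sketch (not part of the original file): how the consolidated
 * per-architecture node ids computed above are meant to be read.  A value of
 * -1 means no worker of that type was enumerated, -2 means workers of that
 * type ended up on several memory nodes, and any other value is the single
 * memory node they all share.  The helper name below is hypothetical and the
 * block is deliberately not compiled. */
#if 0
static int example_single_cuda_memory_node(struct _starpu_machine_config *config)
{
	if (config->cuda_nodeid >= 0)
		/* All CUDA workers were attached to this one memory node */
		return config->cuda_nodeid;
	/* Either no CUDA worker (-1) or CUDA workers spread over several nodes (-2) */
	return -1;
}
#endif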
void _starpu_destroy_topology(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED)
{
#if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
	_starpu_deinit_mp_config(config);
#endif

	/* cleanup StarPU internal data structures */
	_starpu_memory_nodes_deinit();

	_starpu_destroy_machine_config(config);

	_starpu_deinitialize_workers_bindid(config);
}
void starpu_topology_print(FILE *output)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	struct _starpu_machine_topology *topology = &config->topology;

	unsigned pu;
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	unsigned ncombinedworkers = topology->ncombinedworkers;
	unsigned nthreads_per_core = topology->nhwpus / topology->nhwcpus;

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_t topo = topology->hwtopology;
	hwloc_obj_t pu_obj;
	hwloc_obj_t last_numa_obj = NULL, numa_obj;
	hwloc_obj_t last_package_obj = NULL, package_obj;
#endif

	for (pu = 0; pu < topology->nhwpus; pu++)
	{
#ifdef STARPU_HAVE_HWLOC
		pu_obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, pu);
		numa_obj = numa_get_obj(pu_obj);
		if (numa_obj != last_numa_obj)
		{
			fprintf(output, "numa %u", numa_obj->logical_index);
			last_numa_obj = numa_obj;
		}
		fprintf(output, "\t");
		package_obj = hwloc_get_ancestor_obj_by_type(topo, HWLOC_OBJ_SOCKET, pu_obj);
		if (package_obj != last_package_obj)
		{
			fprintf(output, "pack %u", package_obj->logical_index);
			last_package_obj = package_obj;
		}
		fprintf(output, "\t");
#endif
		if ((pu % nthreads_per_core) == 0)
			fprintf(output, "core %u", pu / nthreads_per_core);
		fprintf(output, "\tPU %u\t", pu);
		for (worker = 0;
		     worker < nworkers + ncombinedworkers;
		     worker++)
		{
			if (worker < nworkers)
			{
				struct _starpu_worker *workerarg = &config->workers[worker];
				if (workerarg->bindid == (int) pu)
				{
					char name[256];
					starpu_worker_get_name(worker, name, sizeof(name));
					fprintf(output, "%s\t", name);
				}
			}
			else
			{
				int worker_size, i;
				int *combined_workerid;
				starpu_combined_worker_get_description(worker, &worker_size, &combined_workerid);
				for (i = 0; i < worker_size; i++)
				{
					if (topology->workers_bindid[combined_workerid[i]] == pu)
						fprintf(output, "comb %u\t", worker - nworkers);
				}
			}
		}
		fprintf(output, "\n");
	}
}
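
/* Usage sketch (not part of the original file): an application would
 * typically dump the detected topology right after initialization, e.g.:
 *
 *	#include <starpu.h>
 *
 *	int main(void)
 *	{
 *		if (starpu_init(NULL) != 0)
 *			return 1;
 *		starpu_topology_print(stdout);
 *		starpu_shutdown();
 *		return 0;
 *	}
 *
 * starpu_init(), starpu_shutdown() and starpu_topology_print() are public
 * StarPU entry points; error handling here is deliberately minimal. */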