topology.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2017 Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 CNRS
 * Copyright (C) 2011, 2016, 2017 INRIA
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <stdlib.h>
#include <stdio.h>
#include <common/config.h>
#include <core/workers.h>
#include <core/debug.h>
#include <core/topology.h>
#include <drivers/cuda/driver_cuda.h>
#include <drivers/mic/driver_mic_source.h>
#include <drivers/scc/driver_scc_source.h>
#include <drivers/mpi/driver_mpi_source.h>
#include <drivers/mpi/driver_mpi_common.h>
#include <drivers/mp_common/source_common.h>
#include <drivers/opencl/driver_opencl.h>
#include <drivers/opencl/driver_opencl_utils.h>
#include <profiling/profiling.h>
#include <datawizard/datastats.h>
#include <datawizard/memory_nodes.h>
#include <common/uthash.h>

#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#ifndef HWLOC_API_VERSION
#define HWLOC_OBJ_PU HWLOC_OBJ_PROC
#endif
#endif

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#endif

#if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
#include <hwloc/cuda.h>
#endif

#if defined(STARPU_USE_OPENCL)
#include <hwloc/opencl.h>
#endif
static unsigned topology_is_initialized = 0;
static int nobind;

/* For checking whether two workers share the same PU, indexed by PU number */
static int cpu_worker[STARPU_MAXCPUS];

static unsigned nb_numa_nodes = 0;
static int numa_memory_nodes_to_hwloclogid[STARPU_MAXNUMANODES]; /* indexed by StarPU NUMA node id, to convert it to a hwloc logical id */
static int numa_memory_nodes_to_physicalid[STARPU_MAXNUMANODES]; /* indexed by StarPU NUMA node id, to convert it to a physical (OS) id */
static unsigned numa_bus_id[STARPU_MAXNUMANODES*STARPU_MAXNUMANODES];

static int _starpu_get_logical_numa_node_worker(unsigned workerid);

#define STARPU_NUMA_UNINITIALIZED (-2)
#define STARPU_NUMA_MAIN_RAM (-1)

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
struct handle_entry
{
	UT_hash_handle hh;
	unsigned gpuid;
};

# if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
/* Entry in the `devices_using_cuda' hash table. */
static struct handle_entry *devices_using_cuda;
# endif

static unsigned may_bind_automatically[STARPU_NARCH] = { 0 };

#endif // defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
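/* Per-device worker sets: the workers driving a same accelerator device are
 * grouped in a _starpu_worker_set so that they can be managed together. */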
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
static struct _starpu_worker_set cuda_worker_set[STARPU_MAXCUDADEVS];
#endif
#ifdef STARPU_USE_MIC
static struct _starpu_worker_set mic_worker_set[STARPU_MAXMICDEVS];
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
struct _starpu_worker_set mpi_worker_set[STARPU_MAXMPIDEVS];
#endif
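/* Number of NUMA memory nodes currently known to StarPU. */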
int starpu_memory_nodes_get_numa_count(void)
{
	return nb_numa_nodes;
}

#if defined(STARPU_HAVE_HWLOC)
static int numa_get_logical_id(hwloc_obj_t obj)
{
	STARPU_ASSERT(obj);
	while (obj->type != HWLOC_OBJ_NODE)
	{
		obj = obj->parent;
		/* If we don't find a "node" obj before the root, this means
		 * hwloc does not know whether there are numa nodes or not, so
		 * we should not use a per-node sampling in that case. */
		if (!obj)
			return STARPU_NUMA_MAIN_RAM;
	}
	return obj->logical_index;
}

static int numa_get_physical_id(hwloc_obj_t obj)
{
	STARPU_ASSERT(obj);
	while (obj->type != HWLOC_OBJ_NODE)
	{
		obj = obj->parent;
		/* If we don't find a "node" obj before the root, this means
		 * hwloc does not know whether there are numa nodes or not, so
		 * we should not use a per-node sampling in that case. */
		if (!obj)
			return STARPU_NUMA_MAIN_RAM;
	}
	return obj->os_index;
}
#endif
static int _starpu_get_logical_numa_node_worker(unsigned workerid)
{
#if defined(STARPU_HAVE_HWLOC)
	char *state;
	if ((state = starpu_getenv("STARPU_USE_NUMA")) && atoi(state))
	{
		struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
		struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();
		struct _starpu_machine_topology *topology = &config->topology;

		hwloc_obj_t obj;
		switch(worker->arch)
		{
			case STARPU_CPU_WORKER:
				obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, worker->bindid);
				break;
			default:
				STARPU_ABORT();
		}

		return numa_get_logical_id(obj);
	}
	else
#endif
	{
		(void) workerid; /* unused */
		return STARPU_NUMA_MAIN_RAM;
	}
}

static int _starpu_get_physical_numa_node_worker(unsigned workerid)
{
#if defined(STARPU_HAVE_HWLOC)
	char *state;
	if ((state = starpu_getenv("STARPU_USE_NUMA")) && atoi(state))
	{
		struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
		struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config();
		struct _starpu_machine_topology *topology = &config->topology;

		hwloc_obj_t obj;
		switch(worker->arch)
		{
			case STARPU_CPU_WORKER:
				obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, worker->bindid);
				break;
			default:
				STARPU_ABORT();
		}

		return numa_get_physical_id(obj);
	}
	else
#endif
	{
		(void) workerid; /* unused */
		return STARPU_NUMA_MAIN_RAM;
	}
}
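/* Return the logical index of the NUMA node containing the given PU, or -1
 * when only one NUMA node is in use. */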
static int _starpu_numa_get_logical_id_from_pu(int pu)
{
#if defined(STARPU_HAVE_HWLOC)
	if (nb_numa_nodes > 1)
	{
		struct _starpu_machine_config *config = _starpu_get_machine_config();
		struct _starpu_machine_topology *topology = &config->topology;

		hwloc_obj_t obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, pu);
		return numa_get_logical_id(obj);
	}
	else
#endif
	{
		return -1;
	}
}
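/* Find the _starpu_worker structure that matches the driver descriptor D
 * (same worker type and same device id); return NULL if none matches. */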
struct _starpu_worker *_starpu_get_worker_from_driver(struct starpu_driver *d)
{
	unsigned nworkers = starpu_worker_get_count();
	unsigned workerid;
	for (workerid = 0; workerid < nworkers; workerid++)
	{
		if (starpu_worker_get_type(workerid) == d->type)
		{
			struct _starpu_worker *worker;
			worker = _starpu_get_worker_struct(workerid);
			switch (d->type)
			{
#ifdef STARPU_USE_CPU
				case STARPU_CPU_WORKER:
					if (worker->devid == d->id.cpu_id)
						return worker;
					break;
#endif
#ifdef STARPU_USE_OPENCL
				case STARPU_OPENCL_WORKER:
				{
					cl_device_id device;
					starpu_opencl_get_device(worker->devid, &device);
					if (device == d->id.opencl_id)
						return worker;
					break;
				}
#endif
#ifdef STARPU_USE_CUDA
				case STARPU_CUDA_WORKER:
				{
					if (worker->devid == d->id.cuda_id)
						return worker;
					break;
				}
#endif
				default:
					(void) worker;
					_STARPU_DEBUG("Invalid device type\n");
					return NULL;
			}
		}
	}

	return NULL;
}
/*
 * Discover the topology of the machine
 */

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
static void
_starpu_initialize_workers_deviceid (int *explicit_workers_gpuid,
				     int *current, int *workers_gpuid,
				     const char *varname, unsigned nhwgpus,
				     enum starpu_worker_archtype type)
{
	char *strval;
	unsigned i;

	*current = 0;

	/* conf->workers_gpuid indicates the successive GPU identifiers that
	 * should be used to bind the workers. It should be either filled
	 * according to the user's explicit parameters (from starpu_conf) or
	 * according to the STARPU_WORKERS_CUDAID env. variable. Otherwise, a
	 * round-robin policy is used to distribute the workers over the
	 * cores. */

	/* what do we use, explicit value, env. variable, or round-robin ? */
	if ((strval = starpu_getenv(varname)))
	{
		/* STARPU_WORKERS_CUDAID certainly contains fewer entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round
		 * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
		 * 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;

		char *endptr;
		/* we use the content of the STARPU_WORKERS_CUDAID
		 * env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap)
			{
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					workers_gpuid[i] = (unsigned)val;
					strval = endptr;
				}
				else
				{
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;

					/* there are no more values in the
					 * string */
					wrap = 1;

					workers_gpuid[i] = workers_gpuid[0];
				}
			}
			else
			{
				workers_gpuid[i] =
					workers_gpuid[i % number_of_entries];
			}
		}
	}
	else if (explicit_workers_gpuid)
	{
		/* we use the explicit value from the user */
		memcpy(workers_gpuid,
		       explicit_workers_gpuid,
		       STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else
	{
		/* by default, we take a round robin policy */
		if (nhwgpus > 0)
			for (i = 0; i < STARPU_NMAXWORKERS; i++)
				workers_gpuid[i] = (unsigned)(i % nhwgpus);

		/* StarPU can use sampling techniques to bind threads
		 * correctly */
		may_bind_automatically[type] = 1;
	}
}
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
static void
_starpu_initialize_workers_cuda_gpuid (struct _starpu_machine_config *config)
{
	struct _starpu_machine_topology *topology = &config->topology;
	struct starpu_conf *uconf = &config->conf;

	_starpu_initialize_workers_deviceid (
		uconf->use_explicit_workers_cuda_gpuid == 0
		? NULL
		: (int *)uconf->workers_cuda_gpuid,
		&(config->current_cuda_gpuid),
		(int *)topology->workers_cuda_gpuid,
		"STARPU_WORKERS_CUDAID",
		topology->nhwcudagpus,
		STARPU_CUDA_WORKER);
}
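/* Pick the device id for the next CUDA worker, following the
 * workers_cuda_gpuid mapping in a round-robin fashion. */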
static inline int
_starpu_get_next_cuda_gpuid (struct _starpu_machine_config *config)
{
	unsigned i =
		((config->current_cuda_gpuid++) % config->topology.ncudagpus);

	return (int)config->topology.workers_cuda_gpuid[i];
}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
static void
_starpu_initialize_workers_opencl_gpuid (struct _starpu_machine_config *config)
{
	struct _starpu_machine_topology *topology = &config->topology;
	struct starpu_conf *uconf = &config->conf;

	_starpu_initialize_workers_deviceid(
		uconf->use_explicit_workers_opencl_gpuid == 0
		? NULL
		: (int *)uconf->workers_opencl_gpuid,
		&(config->current_opencl_gpuid),
		(int *)topology->workers_opencl_gpuid,
		"STARPU_WORKERS_OPENCLID",
		topology->nhwopenclgpus,
		STARPU_OPENCL_WORKER);

#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	// Detect devices which are already used with CUDA
	{
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;
		for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
		{
			struct handle_entry *entry;
			int devid = config->topology.workers_opencl_gpuid[i];

			HASH_FIND_INT(devices_using_cuda, &devid, entry);
			if (entry == NULL)
			{
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb++;
			}
		}
		for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
			tmp[i] = -1;
		memcpy (topology->workers_opencl_gpuid, tmp,
			sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
#endif /* STARPU_USE_CUDA */
	{
		// Detect identical devices
		struct handle_entry *devices_already_used = NULL;
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;

		for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
		{
			int devid = topology->workers_opencl_gpuid[i];
			struct handle_entry *entry;
			HASH_FIND_INT(devices_already_used, &devid, entry);
			if (entry == NULL)
			{
				struct handle_entry *entry2;
				_STARPU_MALLOC(entry2, sizeof(*entry2));
				entry2->gpuid = devid;
				HASH_ADD_INT(devices_already_used, gpuid,
					     entry2);
				tmp[nb] = devid;
				nb++;
			}
		}
		struct handle_entry *entry, *tempo;
		HASH_ITER(hh, devices_already_used, entry, tempo)
		{
			HASH_DEL(devices_already_used, entry);
			free(entry);
		}
		for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
			tmp[i] = -1;
		memcpy (topology->workers_opencl_gpuid, tmp,
			sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
}
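/* Pick the device id for the next OpenCL worker, following the
 * workers_opencl_gpuid mapping in a round-robin fashion. */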
static inline int
_starpu_get_next_opencl_gpuid (struct _starpu_machine_config *config)
{
	unsigned i =
		((config->current_opencl_gpuid++) % config->topology.nopenclgpus);

	return (int)config->topology.workers_opencl_gpuid[i];
}
#endif

#if 0
#if defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID)
static void _starpu_initialize_workers_mic_deviceid(struct _starpu_machine_config *config)
{
	struct _starpu_machine_topology *topology = &config->topology;
	struct starpu_conf *uconf = &config->conf;

	_starpu_initialize_workers_deviceid(
		uconf->use_explicit_workers_mic_deviceid == 0
		? NULL
		: (int *)config->user_conf->workers_mic_deviceid,
		&(config->current_mic_deviceid),
		(int *)topology->workers_mic_deviceid,
		"STARPU_WORKERS_MICID",
		topology->nhwmiccores,
		STARPU_MIC_WORKER);
}
#endif
#endif

#ifdef STARPU_USE_SCC
static void _starpu_initialize_workers_scc_deviceid(struct _starpu_machine_config *config)
{
	struct _starpu_machine_topology *topology = &config->topology;
	struct starpu_conf *uconf = &config->conf;

	_starpu_initialize_workers_deviceid(
		uconf->use_explicit_workers_scc_deviceid == 0
		? NULL
		: (int *) uconf->workers_scc_deviceid,
		&(config->current_scc_deviceid),
		(int *)topology->workers_scc_deviceid,
		"STARPU_WORKERS_SCCID",
		topology->nhwscc,
		STARPU_SCC_WORKER);
}
#endif /* STARPU_USE_SCC */

#if 0
#ifdef STARPU_USE_MIC
static inline int _starpu_get_next_mic_deviceid(struct _starpu_machine_config *config)
{
	unsigned i = ((config->current_mic_deviceid++) % config->topology.nmicdevices);

	return (int)config->topology.workers_mic_deviceid[i];
}
#endif
#endif

#ifdef STARPU_USE_SCC
static inline int _starpu_get_next_scc_deviceid(struct _starpu_machine_config *config)
{
	unsigned i = ((config->current_scc_deviceid++) % config->topology.nsccdevices);

	return (int)config->topology.workers_scc_deviceid[i];
}
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
static inline int _starpu_get_next_mpi_deviceid(struct _starpu_machine_config *config)
{
	unsigned i = ((config->current_mpi_deviceid++) % config->topology.nmpidevices);

	return (int)config->topology.workers_mpi_ms_deviceid[i];
}

static void
_starpu_init_mpi_topology (struct _starpu_machine_config *config, long mpi_idx)
{
	/* Discover the topology of the mpi node identified by MPI_IDX. That
	 * means, make this StarPU instance aware of the number of cores available
	 * on this MPI device. Update the `nhwmpicores' topology field
	 * accordingly. */

	struct _starpu_machine_topology *topology = &config->topology;

	int nbcores;
	_starpu_src_common_sink_nbcores (mpi_ms_nodes[mpi_idx], &nbcores);
	topology->nhwmpicores[mpi_idx] = nbcores;
}
#endif /* STARPU_USE_MPI_MASTER_SLAVE */

#ifdef STARPU_USE_MIC
static void
_starpu_init_mic_topology (struct _starpu_machine_config *config, long mic_idx)
{
	/* Discover the topology of the mic node identified by MIC_IDX. That
	 * means, make this StarPU instance aware of the number of cores available
	 * on this MIC device. Update the `nhwmiccores' topology field
	 * accordingly. */

	struct _starpu_machine_topology *topology = &config->topology;

	int nbcores;
	_starpu_src_common_sink_nbcores (mic_nodes[mic_idx], &nbcores);
	topology->nhwmiccores[mic_idx] = nbcores;
}

static int
_starpu_init_mic_node (struct _starpu_machine_config *config, int mic_idx,
		       COIENGINE *coi_handle, COIPROCESS *coi_process)
{
	/* Initialize the MIC node of index MIC_IDX. */

	struct starpu_conf *user_conf = &config->conf;

	char ***argv = _starpu_get_argv();
	const char *suffixes[] = {"-mic", "_mic", NULL};

	/* Environment variables to send to the sink; they tell it what kind
	 * of node it is (architecture and type), as it has no way to discover
	 * this by itself */
	char mic_idx_env[32];
	sprintf(mic_idx_env, "_STARPU_MIC_DEVID=%d", mic_idx);

	/* XXX: this is currently necessary so that the remote process does not
	 * segfault. */
	char nb_mic_env[32];
	sprintf(nb_mic_env, "_STARPU_MIC_NB=%d", 2);

	const char *mic_sink_env[] = {"STARPU_SINK=STARPU_MIC", mic_idx_env, nb_mic_env, NULL};

	char mic_sink_program_path[1024];
	/* Let's get the helper program to run on the MIC device */
	int mic_file_found =
		_starpu_src_common_locate_file (mic_sink_program_path,
						starpu_getenv("STARPU_MIC_SINK_PROGRAM_NAME"),
						starpu_getenv("STARPU_MIC_SINK_PROGRAM_PATH"),
						user_conf->mic_sink_program_path,
						(argv ? (*argv)[0] : NULL),
						suffixes);

	if (0 != mic_file_found)
	{
		_STARPU_MSG("No MIC program specified, use the environment\n"
			    "variable STARPU_MIC_SINK_PROGRAM_NAME or STARPU_MIC_SINK_PROGRAM_PATH,\n"
			    "or the field 'starpu_conf.mic_sink_program_path'\n"
			    "to define it.\n");

		return -1;
	}

	COIRESULT res;
	/* Let's get the handle which lets us manage the remote MIC device */
	res = COIEngineGetHandle(COI_ISA_MIC, mic_idx, coi_handle);
	if (STARPU_UNLIKELY(res != COI_SUCCESS))
		STARPU_MIC_SRC_REPORT_COI_ERROR(res);

	/* We launch the helper on the MIC device, which will wait for us
	 * to give it work to do.
	 * As we will communicate further with the device through SCIF we
	 * don't need to keep the process pointer */
	res = COIProcessCreateFromFile(*coi_handle, mic_sink_program_path, 0, NULL, 0,
				       mic_sink_env, 1, NULL, 0, NULL,
				       coi_process);
	if (STARPU_UNLIKELY(res != COI_SUCCESS))
		STARPU_MIC_SRC_REPORT_COI_ERROR(res);

	/* Let's create the node structure; we'll communicate with the peer
	 * through SCIF thanks to it */
	mic_nodes[mic_idx] =
		_starpu_mp_common_node_create(STARPU_NODE_MIC_SOURCE, mic_idx);

	return 0;
}
#endif

#ifndef STARPU_SIMGRID
#ifdef STARPU_HAVE_HWLOC
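/* Recursively attach (and later release) a _starpu_hwloc_userdata structure
 * to every object of the hwloc topology tree. */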
static void
_starpu_allocate_topology_userdata(hwloc_obj_t obj)
{
	unsigned i;

	_STARPU_CALLOC(obj->userdata, 1, sizeof(struct _starpu_hwloc_userdata));
	for (i = 0; i < obj->arity; i++)
		_starpu_allocate_topology_userdata(obj->children[i]);
}

static void
_starpu_deallocate_topology_userdata(hwloc_obj_t obj)
{
	unsigned i;
	struct _starpu_hwloc_userdata *data = obj->userdata;

	STARPU_ASSERT(!data->worker_list || data->worker_list == (void*)-1);
	free(data);
	for (i = 0; i < obj->arity; i++)
		_starpu_deallocate_topology_userdata(obj->children[i]);
}
#endif
#endif
static void
_starpu_init_topology (struct _starpu_machine_config *config)
{
	/* Discover the topology, meaning finding all the available PUs for
	   the compiled drivers. These drivers MUST have been initialized
	   before calling this function. The discovered topology is filled in
	   CONFIG. */

	struct _starpu_machine_topology *topology = &config->topology;

	if (topology_is_initialized)
		return;

	nobind = starpu_get_env_number("STARPU_WORKERS_NOBIND");

	topology->nhwcpus = 0;
	topology->nhwpus = 0;

#ifndef STARPU_SIMGRID
#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&topology->hwtopology);
	_starpu_topology_filter(topology->hwtopology);
	hwloc_topology_load(topology->hwtopology);
	_starpu_allocate_topology_userdata(hwloc_get_root_obj(topology->hwtopology));
#endif
#endif

#ifdef STARPU_SIMGRID
	config->topology.nhwcpus = config->topology.nhwpus = _starpu_simgrid_get_nbhosts("CPU");
#elif defined(STARPU_HAVE_HWLOC)
	/* Discover the CPUs relying on the hwloc interface and fill CONFIG
	 * accordingly. */

	config->cpu_depth = hwloc_get_type_depth (topology->hwtopology,
						  HWLOC_OBJ_CORE);
	config->pu_depth = hwloc_get_type_depth (topology->hwtopology,
						 HWLOC_OBJ_PU);

	/* Would be very odd */
	STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);

	if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
	{
		/* unknown, using logical processors as fallback */
		_STARPU_DISP("Warning: The OS did not report CPU cores. Assuming there is only one hardware thread per core.\n");
		config->cpu_depth = hwloc_get_type_depth(topology->hwtopology,
							 HWLOC_OBJ_PU);
	}

	topology->nhwcpus = hwloc_get_nbobjs_by_depth (topology->hwtopology,
						       config->cpu_depth);
	topology->nhwpus = hwloc_get_nbobjs_by_depth (topology->hwtopology,
						      config->pu_depth);
#elif defined(HAVE_SYSCONF)
	/* Discover the CPUs relying on the sysconf(3) function and fill
	 * CONFIG accordingly. */
	config->topology.nhwcpus = config->topology.nhwpus = sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(_WIN32)
	/* Discover the CPUs on Cygwin and MinGW systems. */
	SYSTEM_INFO sysinfo;
	GetSystemInfo(&sysinfo);
	config->topology.nhwcpus = config->topology.nhwpus = sysinfo.dwNumberOfProcessors;
#else
#warning no way to know number of cores, assuming 1
	config->topology.nhwcpus = config->topology.nhwpus = 1;
#endif

	_starpu_cuda_discover_devices(config);
	_starpu_opencl_discover_devices(config);
#ifdef STARPU_USE_SCC
	config->topology.nhwscc = _starpu_scc_src_get_device_count();
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
	config->topology.nhwmpi = _starpu_mpi_src_get_device_count();
#endif

	topology_is_initialized = 1;
}
/*
 * Bind workers on the different processors
 */
static void
_starpu_initialize_workers_bindid (struct _starpu_machine_config *config)
{
	char *strval;
	unsigned i;

	struct _starpu_machine_topology *topology = &config->topology;

	config->current_bindid = 0;

	/* conf->workers_bindid indicates the successive logical PU identifiers that
	 * should be used to bind the workers. It should be either filled
	 * according to the user's explicit parameters (from starpu_conf) or
	 * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
	 * round-robin policy is used to distribute the workers over the
	 * cores. */

	/* what do we use, explicit value, env. variable, or round-robin ? */
	if ((strval = starpu_getenv("STARPU_WORKERS_CPUID")))
	{
		/* STARPU_WORKERS_CPUID certainly contains fewer entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round
		 * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
		 * 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;

		char *endptr;
		/* we use the content of the STARPU_WORKERS_CPUID
		 * env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap)
			{
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					topology->workers_bindid[i] =
						(unsigned)(val % topology->nhwpus);
					strval = endptr;
					if (*strval == '-')
					{
						/* range of values */
						long int endval;
						strval++;
						if (*strval && *strval != ' ' && *strval != ',')
						{
							endval = strtol(strval, &endptr, 10);
							strval = endptr;
						}
						else
						{
							endval = topology->nhwpus-1;
							if (*strval)
								strval++;
						}
						for (val++; val <= endval && i < STARPU_NMAXWORKERS-1; val++)
						{
							i++;
							topology->workers_bindid[i] =
								(unsigned)(val % topology->nhwpus);
						}
					}
					if (*strval == ',')
						strval++;
				}
				else
				{
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;

					/* there are no more values in the
					 * string */
					wrap = 1;

					topology->workers_bindid[i] =
						topology->workers_bindid[0];
				}
			}
			else
			{
				topology->workers_bindid[i] =
					topology->workers_bindid[i % number_of_entries];
			}
		}
	}
	else if (config->conf.use_explicit_workers_bindid)
	{
		/* we use the explicit value from the user */
		memcpy(topology->workers_bindid,
		       config->conf.workers_bindid,
		       STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else
	{
		int nth_per_core = starpu_get_env_number_default("STARPU_NTHREADS_PER_CORE", 1);
		int k;
		int nbindids=0;
		int nhyperthreads = topology->nhwpus / topology->nhwcpus;
		STARPU_ASSERT_MSG(nth_per_core > 0 && nth_per_core <= nhyperthreads, "Incorrect number of hyperthreads");

		i = 0; /* PU number currently assigned */
		k = 0; /* Number of threads already put on the current core */
		while(nbindids < STARPU_NMAXWORKERS)
		{
			if (k >= nth_per_core)
			{
				/* We have already put enough workers on this
				 * core, skip remaining PUs from this core, and
				 * proceed with the next core */
				i += nhyperthreads-nth_per_core;
				k = 0;
				continue;
			}

			/* Add a worker to this core, by using this logical PU */
			topology->workers_bindid[nbindids++] =
				(unsigned)(i % topology->nhwpus);
			k++;
			i++;
		}
	}

	for (i = 0; i < STARPU_MAXCPUS; i++)
		cpu_worker[i] = STARPU_NOWORKERID;

	/* no binding yet */
	memset(&config->currently_bound, 0, sizeof(config->currently_bound));
}
/* This function gets the identifier of the next core on which to bind a
 * worker. In case a list of preferred cores was specified (logical indexes),
 * we look for an available core in that list if possible, otherwise a
 * round-robin policy is used. */
static inline int
_starpu_get_next_bindid (struct _starpu_machine_config *config,
			 int *preferred_binding, int npreferred)
{
	struct _starpu_machine_topology *topology = &config->topology;

	int current_preferred;
	int nhyperthreads = topology->nhwpus / topology->nhwcpus;
	unsigned i;

	if (npreferred)
	{
		STARPU_ASSERT_MSG(preferred_binding, "Passing NULL pointer for parameter preferred_binding with a non-0 value of parameter npreferred");
	}

	/* loop over the preference list */
	for (current_preferred = 0;
	     current_preferred < npreferred;
	     current_preferred++)
	{
		/* Try to get this core */
		unsigned requested_core = preferred_binding[current_preferred];
		unsigned requested_bindid = requested_core * nhyperthreads;

		/* can we bind the worker on the preferred core ? */
		unsigned ind;
		/* Look at the remaining cores to be bound to */
		for (ind = 0;
		     ind < topology->nhwpus / nhyperthreads;
		     ind++)
		{
			if (topology->workers_bindid[ind] == requested_bindid && !config->currently_bound[ind])
			{
				/* the cpu is available, we use it ! */
				config->currently_bound[ind] = 1;
				return requested_bindid;
			}
		}
	}

	for (i = config->current_bindid; i < topology->nhwpus / nhyperthreads; i++)
		if (!config->currently_bound[i])
			/* Found a cpu ready for use, use it! */
			break;

	STARPU_ASSERT(i < topology->nhwpus / nhyperthreads);
	int bindid = topology->workers_bindid[i];
	config->currently_bound[i] = 1;
	i++;
	if (i == topology->nhwpus / nhyperthreads)
	{
		/* Finished binding on all cpus, restart from the start in
		 * case the user really wants overloading */
		memset(&config->currently_bound, 0, sizeof(config->currently_bound));
		i = 0;
	}

	config->current_bindid = i;

	return bindid;
}
unsigned
_starpu_topology_get_nhwcpu (struct _starpu_machine_config *config)
{
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	_starpu_opencl_init();
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);

	return config->topology.nhwcpus;
}

unsigned
_starpu_topology_get_nhwpu (struct _starpu_machine_config *config)
{
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	_starpu_opencl_init();
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);

	return config->topology.nhwpus;
}

unsigned _starpu_topology_get_nnumanodes(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED)
{
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	_starpu_opencl_init();
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);

	int res;
#if defined(STARPU_HAVE_HWLOC)
	char *state;
	if ((state = starpu_getenv("STARPU_USE_NUMA")) && atoi(state))
	{
		struct _starpu_machine_topology *topology = &config->topology;
		int nnumanodes = hwloc_get_nbobjs_by_type(topology->hwtopology, HWLOC_OBJ_NODE);
		res = nnumanodes > 0 ? nnumanodes : 1;
	}
	else
#endif
	{
		res = 1;
	}

	STARPU_ASSERT_MSG(res <= STARPU_MAXNUMANODES, "The number of discovered NUMA nodes is higher than the accepted maximum! Use the configure option --enable-maxnumanodes=xxx to increase the maximum number of supported NUMA nodes.\n");
	return res;
}
//TODO: change this into an array lookup
int starpu_memory_nodes_numa_hwloclogid_to_id(int logid)
{
	unsigned n;
	for (n = 0; n < nb_numa_nodes; n++)
		if (numa_memory_nodes_to_hwloclogid[n] == logid)
			return n;
	return -1;
}

int starpu_memory_nodes_numa_id_to_hwloclogid(unsigned id)
{
	STARPU_ASSERT(id < STARPU_MAXNUMANODES);
	return numa_memory_nodes_to_hwloclogid[id];
}

int starpu_memory_nodes_numa_devid_to_id(unsigned id)
{
	STARPU_ASSERT(id < STARPU_MAXNUMANODES);
	return numa_memory_nodes_to_physicalid[id];
}

//TODO: change this into an array lookup
int starpu_memory_nodes_numa_id_to_devid(int osid)
{
	unsigned n;
	for (n = 0; n < nb_numa_nodes; n++)
		if (numa_memory_nodes_to_physicalid[n] == osid)
			return n;
	return -1;
}

#ifdef STARPU_HAVE_HWLOC
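/* Ask hwloc to keep I/O devices (GPUs, NICs, ...) in the discovered topology;
 * they are filtered out by default. */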
void _starpu_topology_filter(hwloc_topology_t topology)
{
#if HWLOC_API_VERSION >= 0x20000
	hwloc_topology_set_io_types_filter(topology, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
#else
	hwloc_topology_set_flags(topology, HWLOC_TOPOLOGY_FLAG_IO_DEVICES | HWLOC_TOPOLOGY_FLAG_IO_BRIDGES);
#endif
}
#endif
#ifdef STARPU_USE_MIC
static void
_starpu_init_mic_config (struct _starpu_machine_config *config,
			 struct starpu_conf *user_conf,
			 unsigned mic_idx)
{
	// Configure the MIC device of index MIC_IDX.

	struct _starpu_machine_topology *topology = &config->topology;

	topology->nhwmiccores[mic_idx] = 0;

	_starpu_init_mic_topology (config, mic_idx);

	int nmiccores;
	nmiccores = starpu_get_env_number("STARPU_NMICTHREADS");
	if (nmiccores == -1)
	{
		/* Nothing was specified, so let's use the number of
		 * detected MIC cores. */
		nmiccores = topology->nhwmiccores[mic_idx];
	}
	else
	{
		if ((unsigned) nmiccores > topology->nhwmiccores[mic_idx])
		{
			/* The user requires more MIC cores than there are available */
			_STARPU_MSG("# Warning: %d MIC cores requested. Only %u available.\n", nmiccores, topology->nhwmiccores[mic_idx]);
			nmiccores = topology->nhwmiccores[mic_idx];
		}
	}

	topology->nmiccores[mic_idx] = nmiccores;
	STARPU_ASSERT_MSG(topology->nmiccores[mic_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
			  "topology->nmiccores[mic_idx(%u)] (%u) + topology->nworkers (%u) <= STARPU_NMAXWORKERS (%d)",
			  mic_idx, topology->nmiccores[mic_idx], topology->nworkers, STARPU_NMAXWORKERS);

	/* _starpu_initialize_workers_mic_deviceid (config); */

	mic_worker_set[mic_idx].workers = &config->workers[topology->nworkers];
	mic_worker_set[mic_idx].nworkers = topology->nmiccores[mic_idx];

	unsigned miccore_id;
	for (miccore_id = 0; miccore_id < topology->nmiccores[mic_idx]; miccore_id++)
	{
		int worker_idx = topology->nworkers + miccore_id;
		config->workers[worker_idx].set = &mic_worker_set[mic_idx];
		config->workers[worker_idx].arch = STARPU_MIC_WORKER;
		_STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
		config->workers[worker_idx].perf_arch.ndevices = 1;
		config->workers[worker_idx].perf_arch.devices[0].type = STARPU_MIC_WORKER;
		config->workers[worker_idx].perf_arch.devices[0].devid = mic_idx;
		config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
		config->workers[worker_idx].devid = mic_idx;
		config->workers[worker_idx].subworkerid = miccore_id;
		config->workers[worker_idx].worker_mask = STARPU_MIC;
		config->worker_mask |= STARPU_MIC;
	}

	topology->nworkers += topology->nmiccores[mic_idx];
}

static COIENGINE mic_handles[STARPU_MAXMICDEVS];
COIPROCESS _starpu_mic_process[STARPU_MAXMICDEVS];
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
static void
_starpu_init_mpi_config (struct _starpu_machine_config *config,
			 struct starpu_conf *user_conf,
			 unsigned mpi_idx)
{
	struct _starpu_machine_topology *topology = &config->topology;

	topology->nhwmpicores[mpi_idx] = 0;

	_starpu_init_mpi_topology (config, mpi_idx);

	int nmpicores;
	nmpicores = starpu_get_env_number("STARPU_NMPIMSTHREADS");
	if (nmpicores == -1)
	{
		/* Nothing was specified, so let's use the number of
		 * detected MPI cores. */
		nmpicores = topology->nhwmpicores[mpi_idx];
	}
	else
	{
		if ((unsigned) nmpicores > topology->nhwmpicores[mpi_idx])
		{
			/* The user requires more MPI cores than there are available */
			_STARPU_MSG("# Warning: %d MPI cores requested. Only %u available.\n",
				    nmpicores, topology->nhwmpicores[mpi_idx]);
			nmpicores = topology->nhwmpicores[mpi_idx];
		}
	}

	topology->nmpicores[mpi_idx] = nmpicores;
	STARPU_ASSERT_MSG(topology->nmpicores[mpi_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
			  "topology->nmpicores[mpi_idx(%u)] (%u) + topology->nworkers (%u) <= STARPU_NMAXWORKERS (%d)",
			  mpi_idx, topology->nmpicores[mpi_idx], topology->nworkers, STARPU_NMAXWORKERS);

	mpi_worker_set[mpi_idx].workers = &config->workers[topology->nworkers];
	mpi_worker_set[mpi_idx].nworkers = topology->nmpicores[mpi_idx];

	unsigned mpicore_id;
	for (mpicore_id = 0; mpicore_id < topology->nmpicores[mpi_idx]; mpicore_id++)
	{
		int worker_idx = topology->nworkers + mpicore_id;
		config->workers[worker_idx].set = &mpi_worker_set[mpi_idx];
		config->workers[worker_idx].arch = STARPU_MPI_MS_WORKER;
		_STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
		config->workers[worker_idx].perf_arch.ndevices = 1;
		config->workers[worker_idx].perf_arch.devices[0].type = STARPU_MPI_MS_WORKER;
		config->workers[worker_idx].perf_arch.devices[0].devid = mpi_idx;
		config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
		config->workers[worker_idx].devid = mpi_idx;
		config->workers[worker_idx].subworkerid = mpicore_id;
		config->workers[worker_idx].worker_mask = STARPU_MPI_MS;
		config->worker_mask |= STARPU_MPI_MS;
	}

	mpi_ms_nodes[mpi_idx]->baseworkerid = topology->nworkers;
	topology->nworkers += topology->nmpicores[mpi_idx];
}
#endif
#if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
static void
_starpu_init_mp_config (struct _starpu_machine_config *config,
			struct starpu_conf *user_conf, int no_mp_config)
{
	/* Discover and configure the mp topology. That means:
	 * - discover the number of mp nodes;
	 * - initialize each discovered node;
	 * - discover the local topology (number of PUs/devices) of each node;
	 * - configure the workers accordingly.
	 */

#ifdef STARPU_USE_MIC
	if (!no_mp_config)
	{
		struct _starpu_machine_topology *topology = &config->topology;

		/* Discover and initialize the number of MIC nodes through the mp
		 * infrastructure. */
		unsigned nhwmicdevices = _starpu_mic_src_get_device_count();

		int reqmicdevices = starpu_get_env_number("STARPU_NMIC");
		if (reqmicdevices == -1 && user_conf)
			reqmicdevices = user_conf->nmic;
		if (reqmicdevices == -1)
			/* Nothing was specified, so let's use the number of
			 * detected MIC devices. */
			reqmicdevices = nhwmicdevices;

		if (reqmicdevices != -1)
		{
			if ((unsigned) reqmicdevices > nhwmicdevices)
			{
				/* The user requires more MIC devices than there are available */
				_STARPU_MSG("# Warning: %d MIC devices requested. Only %u available.\n", reqmicdevices, nhwmicdevices);
				reqmicdevices = nhwmicdevices;
			}
		}

		topology->nmicdevices = 0;
		unsigned i;
		for (i = 0; i < (unsigned) reqmicdevices; i++)
			if (0 == _starpu_init_mic_node (config, i, &mic_handles[i], &_starpu_mic_process[i]))
				topology->nmicdevices++;

		for (i = 0; i < topology->nmicdevices; i++)
			_starpu_init_mic_config (config, user_conf, i);
	}
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
	{
		struct _starpu_machine_topology *topology = &config->topology;

		/* Discover and initialize the number of MPI nodes through the mp
		 * infrastructure. */
		unsigned nhwmpidevices = _starpu_mpi_src_get_device_count();

		int reqmpidevices = starpu_get_env_number("STARPU_NMPI_MS");
		if (reqmpidevices == -1 && user_conf)
			reqmpidevices = user_conf->nmpi_ms;
		if (reqmpidevices == -1)
			/* Nothing was specified, so let's use the number of
			 * detected MPI devices. */
			reqmpidevices = nhwmpidevices;

		if (reqmpidevices != -1)
		{
			if ((unsigned) reqmpidevices > nhwmpidevices)
			{
				/* The user requires more MPI devices than there are available */
				_STARPU_MSG("# Warning: %d MPI Master-Slave devices requested. Only %u available.\n",
					    reqmpidevices, nhwmpidevices);
				reqmpidevices = nhwmpidevices;
			}
		}

		topology->nmpidevices = reqmpidevices;

		/* if the user doesn't want to use MPI slaves, we close the slave processes */
		if (no_mp_config && topology->nmpidevices == 0)
		{
			_starpu_mpi_common_mp_deinit();
			exit(0);
		}

		if (!no_mp_config)
		{
			unsigned i;
			for (i = 0; i < topology->nmpidevices; i++)
				mpi_ms_nodes[i] = _starpu_mp_common_node_create(STARPU_NODE_MPI_SOURCE, i);

			for (i = 0; i < topology->nmpidevices; i++)
				_starpu_init_mpi_config (config, user_conf, i);
		}
	}
#endif
}
#endif
#ifdef STARPU_USE_MIC
static void
_starpu_deinit_mic_node (unsigned mic_idx)
{
	_starpu_mp_common_send_command(mic_nodes[mic_idx], STARPU_MP_COMMAND_EXIT, NULL, 0);

	COIProcessDestroy(_starpu_mic_process[mic_idx], -1, 0, NULL, NULL);

	_starpu_mp_common_node_destroy(mic_nodes[mic_idx]);
}
#endif

#ifdef STARPU_USE_MPI_MASTER_SLAVE
static void _starpu_deinit_mpi_node(int devid)
{
	_starpu_mp_common_send_command(mpi_ms_nodes[devid], STARPU_MP_COMMAND_EXIT, NULL, 0);

	_starpu_mp_common_node_destroy(mpi_ms_nodes[devid]);
}
#endif

#if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
static void
_starpu_deinit_mp_config (struct _starpu_machine_config *config)
{
	struct _starpu_machine_topology *topology = &config->topology;
	unsigned i;

#ifdef STARPU_USE_MIC
	for (i = 0; i < topology->nmicdevices; i++)
		_starpu_deinit_mic_node (i);
	_starpu_mic_clear_kernels();
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
	for (i = 0; i < topology->nmpidevices; i++)
		_starpu_deinit_mpi_node (i);
#endif
}
#endif

#ifdef STARPU_HAVE_HWLOC
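/* Recursively accumulate in each hwloc object's userdata the number of GPUs
 * found below that object. */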
static unsigned
_starpu_topology_count_ngpus(hwloc_obj_t obj)
{
	struct _starpu_hwloc_userdata *data = obj->userdata;
	unsigned n = data->ngpus;
	unsigned i;

	for (i = 0; i < obj->arity; i++)
		n += _starpu_topology_count_ngpus(obj->children[i]);

	data->ngpus = n;
#ifdef STARPU_VERBOSE
	{
		char name[64];
		hwloc_obj_type_snprintf(name, sizeof(name), obj, 0);
		_STARPU_DEBUG("hwloc obj %s has %u GPUs below\n", name, n);
	}
#endif
	return n;
}
#endif
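/* Build the machine configuration: initialize the drivers, discover the
 * hardware topology and populate config->workers accordingly. */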
  1159. static int
  1160. _starpu_init_machine_config(struct _starpu_machine_config *config, int no_mp_config STARPU_ATTRIBUTE_UNUSED)
  1161. {
  1162. int i;
  1163. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  1164. {
  1165. config->workers[i].workerid = i;
  1166. config->workers[i].set = NULL;
  1167. }
  1168. struct _starpu_machine_topology *topology = &config->topology;
  1169. topology->nworkers = 0;
  1170. topology->ncombinedworkers = 0;
  1171. topology->nsched_ctxs = 0;
  1172. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  1173. _starpu_opencl_init();
  1174. #endif
  1175. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1176. _starpu_init_cuda();
  1177. #endif
  1178. _starpu_init_topology(config);
  1179. _starpu_initialize_workers_bindid(config);
  1180. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1181. for (i = 0; i < (int) (sizeof(cuda_worker_set)/sizeof(cuda_worker_set[0])); i++)
  1182. cuda_worker_set[i].workers = NULL;
  1183. #endif
  1184. #ifdef STARPU_USE_MIC
  1185. for (i = 0; i < (int) (sizeof(mic_worker_set)/sizeof(mic_worker_set[0])); i++)
  1186. mic_worker_set[i].workers = NULL;
  1187. #endif
  1188. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1189. for (i = 0; i < (int) (sizeof(mpi_worker_set)/sizeof(mpi_worker_set[0])); i++)
  1190. mpi_worker_set[i].workers = NULL;
  1191. #endif
  1192. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1193. int ncuda = config->conf.ncuda;
  1194. int nworker_per_cuda = starpu_get_env_number_default("STARPU_NWORKER_PER_CUDA", 1);
  1195. STARPU_ASSERT_MSG(nworker_per_cuda > 0, "STARPU_NWORKER_PER_CUDA has to be > 0");
  1196. STARPU_ASSERT_MSG(nworker_per_cuda < STARPU_NMAXWORKERS, "STARPU_NWORKER_PER_CUDA (%d) cannot be higher than STARPU_NMAXWORKERS (%d)\n", nworker_per_cuda, STARPU_NMAXWORKERS);
  1197. #ifndef STARPU_NON_BLOCKING_DRIVERS
  1198. if (nworker_per_cuda > 1)
  1199. {
  1200. _STARPU_DISP("Warning: reducing STARPU_NWORKER_PER_CUDA to 1 because blocking drivers are enabled\n");
  1201. nworker_per_cuda = 1;
  1202. }
  1203. #endif
  1204. if (ncuda != 0)
  1205. {
  1206. /* The user did not disable CUDA. We need to initialize CUDA
  1207. * early to count the number of devices */
  1208. _starpu_init_cuda();
  1209. int nb_devices = _starpu_get_cuda_device_count();
  1210. if (ncuda == -1)
  1211. {
  1212. /* Nothing was specified, so let's choose ! */
  1213. ncuda = nb_devices;
  1214. }
  1215. else
  1216. {
  1217. if (ncuda > nb_devices)
  1218. {
  1219. /* The user requires more CUDA devices than
  1220. * there is available */
  1221. _STARPU_DISP("Warning: %d CUDA devices requested. Only %d available.\n", ncuda, nb_devices);
  1222. ncuda = nb_devices;
  1223. }
  1224. }
  1225. }
  1226. /* Now we know how many CUDA devices will be used */
  1227. topology->ncudagpus = ncuda;
  1228. topology->nworkerpercuda = nworker_per_cuda;
  1229. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  1230. _starpu_initialize_workers_cuda_gpuid(config);
  1231. /* allow having one worker per stream */
  1232. topology->cuda_th_per_stream = starpu_get_env_number_default("STARPU_CUDA_THREAD_PER_WORKER", -1);
  1233. topology->cuda_th_per_dev = starpu_get_env_number_default("STARPU_CUDA_THREAD_PER_DEV", -1);
  1234. /* per device by default */
  1235. if (topology->cuda_th_per_dev == -1)
  1236. {
  1237. if (topology->cuda_th_per_stream == 1)
  1238. topology->cuda_th_per_dev = 0;
  1239. else
  1240. topology->cuda_th_per_dev = 1;
  1241. }
  1242. /* Not per stream by default */
  1243. if (topology->cuda_th_per_stream == -1)
  1244. {
  1245. topology->cuda_th_per_stream = 0;
  1246. }
  1247. STARPU_ASSERT_MSG(topology->cuda_th_per_dev != 1 || topology->cuda_th_per_stream != 1, "It does not make sense to set both STARPU_CUDA_THREAD_PER_WORKER and STARPU_CUDA_THREAD_PER_DEV to 1, please choose either per worker or per device or none");
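	/* Summary of the driver-thread layout implied by the settings above:
	 *   cuda_th_per_stream == 1  ->  one driver thread per CUDA worker,
	 *   cuda_th_per_dev == 1     ->  one driver thread per CUDA device,
	 *   both equal to 0          ->  a single driver thread for all devices
	 * (this is how the worker sets are built just below). */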
  1248. if (!topology->cuda_th_per_dev)
  1249. {
  1250. cuda_worker_set[0].workers = &config->workers[topology->nworkers];
  1251. cuda_worker_set[0].nworkers = topology->ncudagpus * nworker_per_cuda;
  1252. }
  1253. unsigned cudagpu;
  1254. for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
  1255. {
  1256. int devid = _starpu_get_next_cuda_gpuid(config);
  1257. int worker_idx0 = topology->nworkers + cudagpu * nworker_per_cuda;
  1258. struct _starpu_worker_set *worker_set;
  1259. if (topology->cuda_th_per_dev)
  1260. {
  1261. worker_set = &cuda_worker_set[devid];
  1262. worker_set->workers = &config->workers[worker_idx0];
  1263. worker_set->nworkers = nworker_per_cuda;
  1264. }
  1265. else
  1266. {
  1267. /* Same worker set for all devices */
  1268. worker_set = &cuda_worker_set[0];
  1269. }
  1270. for (i = 0; i < nworker_per_cuda; i++)
  1271. {
  1272. int worker_idx = worker_idx0 + i;
  1273. if(topology->cuda_th_per_stream)
  1274. {
  1275. /* Just one worker in the set */
  1276. _STARPU_CALLOC(config->workers[worker_idx].set, 1, sizeof(struct _starpu_worker_set));
  1277. config->workers[worker_idx].set->workers = &config->workers[worker_idx];
  1278. config->workers[worker_idx].set->nworkers = 1;
  1279. }
  1280. else
  1281. config->workers[worker_idx].set = worker_set;
  1282. config->workers[worker_idx].arch = STARPU_CUDA_WORKER;
  1283. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1284. config->workers[worker_idx].perf_arch.ndevices = 1;
  1285. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_CUDA_WORKER;
  1286. config->workers[worker_idx].perf_arch.devices[0].devid = devid;
  1287. // TODO: fix perfmodels etc.
  1288. //config->workers[worker_idx].perf_arch.ncore = nworker_per_cuda - 1;
  1289. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1290. config->workers[worker_idx].devid = devid;
  1291. config->workers[worker_idx].subworkerid = i;
  1292. config->workers[worker_idx].worker_mask = STARPU_CUDA;
  1293. config->worker_mask |= STARPU_CUDA;
  1294. struct handle_entry *entry;
  1295. HASH_FIND_INT(devices_using_cuda, &devid, entry);
  1296. if (!entry)
  1297. {
  1298. _STARPU_MALLOC(entry, sizeof(*entry));
  1299. entry->gpuid = devid;
  1300. HASH_ADD_INT(devices_using_cuda, gpuid, entry);
  1301. }
  1302. }
  1303. #ifndef STARPU_SIMGRID
  1304. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  1305. {
  1306. hwloc_obj_t obj = hwloc_cuda_get_device_osdev_by_index(topology->hwtopology, devid);
  1307. if (obj)
  1308. {
  1309. struct _starpu_hwloc_userdata *data = obj->userdata;
  1310. data->ngpus++;
  1311. }
  1312. else
  1313. {
			_STARPU_DISP("Warning: could not find location of CUDA%d, do you have the hwloc CUDA plugin installed?\n", devid);
  1315. }
  1316. }
  1317. #endif
  1318. #endif
  1319. }
  1320. topology->nworkers += topology->ncudagpus * nworker_per_cuda;
  1321. #endif
  1322. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  1323. int nopencl = config->conf.nopencl;
  1324. if (nopencl != 0)
  1325. {
  1326. /* The user did not disable OPENCL. We need to initialize
  1327. * OpenCL early to count the number of devices */
  1328. _starpu_opencl_init();
  1329. int nb_devices;
  1330. nb_devices = _starpu_opencl_get_device_count();
  1331. if (nopencl == -1)
  1332. {
  1333. /* Nothing was specified, so let's choose ! */
  1334. nopencl = nb_devices;
  1335. if (nopencl > STARPU_MAXOPENCLDEVS)
  1336. {
				_STARPU_DISP("Warning: %d OpenCL devices available. Only %d enabled. Use configure option --enable-maxopencldev=xxx to update the maximum value of supported OpenCL devices.\n", nb_devices, STARPU_MAXOPENCLDEVS);
  1338. nopencl = STARPU_MAXOPENCLDEVS;
  1339. }
  1340. }
  1341. else
  1342. {
  1343. /* Let's make sure this value is OK. */
  1344. if (nopencl > nb_devices)
  1345. {
  1346. /* The user requires more OpenCL devices than
  1347. * there is available */
  1348. _STARPU_DISP("Warning: %d OpenCL devices requested. Only %d available.\n", nopencl, nb_devices);
  1349. nopencl = nb_devices;
  1350. }
  1351. /* Let's make sure this value is OK. */
  1352. if (nopencl > STARPU_MAXOPENCLDEVS)
  1353. {
  1354. _STARPU_DISP("Warning: %d OpenCL devices requested. Only %d enabled. Use configure option --enable-maxopencldev=xxx to update the maximum value of supported OpenCL devices.\n", nopencl, STARPU_MAXOPENCLDEVS);
  1355. nopencl = STARPU_MAXOPENCLDEVS;
  1356. }
  1357. }
  1358. }
  1359. topology->nopenclgpus = nopencl;
  1360. STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
  1361. _starpu_initialize_workers_opencl_gpuid(config);
  1362. unsigned openclgpu;
  1363. for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
  1364. {
  1365. int worker_idx = topology->nworkers + openclgpu;
  1366. int devid = _starpu_get_next_opencl_gpuid(config);
  1367. if (devid == -1)
  1368. {
			// There are no devices left
  1370. topology->nopenclgpus = openclgpu;
  1371. break;
  1372. }
  1373. config->workers[worker_idx].arch = STARPU_OPENCL_WORKER;
  1374. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1375. config->workers[worker_idx].perf_arch.ndevices = 1;
  1376. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_OPENCL_WORKER;
  1377. config->workers[worker_idx].perf_arch.devices[0].devid = devid;
  1378. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1379. config->workers[worker_idx].subworkerid = 0;
  1380. config->workers[worker_idx].devid = devid;
  1381. config->workers[worker_idx].worker_mask = STARPU_OPENCL;
  1382. config->worker_mask |= STARPU_OPENCL;
  1383. }
  1384. topology->nworkers += topology->nopenclgpus;
  1385. #endif
  1386. #ifdef STARPU_USE_SCC
  1387. int nscc = config->conf.nscc;
  1388. unsigned nb_scc_nodes = _starpu_scc_src_get_device_count();
  1389. if (nscc != 0)
  1390. {
  1391. /* The user did not disable SCC. We need to count
  1392. * the number of devices */
  1393. int nb_devices = nb_scc_nodes;
  1394. if (nscc == -1)
  1395. {
  1396. /* Nothing was specified, so let's choose ! */
  1397. nscc = nb_devices;
  1398. if (nscc > STARPU_MAXSCCDEVS)
  1399. {
  1400. _STARPU_DISP("Warning: %d SCC devices available. Only %d enabled. Use configuration option --enable-maxsccdev=xxx to update the maximum value of supported SCC devices.\n", nb_devices, STARPU_MAXSCCDEVS);
  1401. nscc = STARPU_MAXSCCDEVS;
  1402. }
  1403. }
  1404. else
  1405. {
  1406. /* Let's make sure this value is OK. */
  1407. if (nscc > nb_devices)
  1408. {
  1409. /* The user requires more SCC devices than there is available */
  1410. _STARPU_DISP("Warning: %d SCC devices requested. Only %d available.\n", nscc, nb_devices);
  1411. nscc = nb_devices;
  1412. }
  1413. /* Let's make sure this value is OK. */
  1414. if (nscc > STARPU_MAXSCCDEVS)
  1415. {
  1416. _STARPU_DISP("Warning: %d SCC devices requested. Only %d enabled. Use configure option --enable-maxsccdev=xxx to update the maximum value of supported SCC devices.\n", nscc, STARPU_MAXSCCDEVS);
  1417. nscc = STARPU_MAXSCCDEVS;
  1418. }
  1419. }
  1420. }
  1421. /* Now we know how many SCC devices will be used */
  1422. topology->nsccdevices = nscc;
  1423. STARPU_ASSERT(topology->nsccdevices + topology->nworkers <= STARPU_NMAXWORKERS);
  1424. _starpu_initialize_workers_scc_deviceid(config);
  1425. unsigned sccdev;
  1426. for (sccdev = 0; sccdev < topology->nsccdevices; sccdev++)
  1427. {
  1428. config->workers[topology->nworkers + sccdev].arch = STARPU_SCC_WORKER;
  1429. int devid = _starpu_get_next_scc_deviceid(config);
  1430. _STARPU_MALLOC(config->workers[topology->nworkers + sccdev].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1431. config->workers[topology->nworkers + sccdev].perf_arch.ndevices = 1;
  1432. config->workers[topology->nworkers + sccdev].perf_arch.devices[0].type = STARPU_SCC_WORKER;
  1433. config->workers[topology->nworkers + sccdev].perf_arch.devices[0].devid = sccdev;
  1434. config->workers[topology->nworkers + sccdev].perf_arch.devices[0].ncores = 1;
  1435. config->workers[topology->nworkers + sccdev].subworkerid = 0;
  1436. config->workers[topology->nworkers + sccdev].devid = devid;
  1437. config->workers[topology->nworkers + sccdev].worker_mask = STARPU_SCC;
  1438. config->worker_mask |= STARPU_SCC;
  1439. }
  1440. for (; sccdev < nb_scc_nodes; ++sccdev)
  1441. _starpu_scc_exit_useless_node(sccdev);
  1442. topology->nworkers += topology->nsccdevices;
  1443. #endif /* STARPU_USE_SCC */
  1444. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  1445. _starpu_init_mp_config (config, &config->conf, no_mp_config);
  1446. #endif
	/* We handle the CPU workers after the accelerators: when an accelerator
	 * is found, one CPU core is devoted to driving it */
  1449. #if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
  1450. int ncpu = config->conf.ncpus;
  1451. if (ncpu != 0)
  1452. {
  1453. if (ncpu == -1)
  1454. {
  1455. unsigned mic_busy_cpus = 0;
  1456. int j = 0;
  1457. for (j = 0; j < STARPU_MAXMICDEVS; j++)
  1458. mic_busy_cpus += (topology->nmiccores[j] ? 1 : 0);
  1459. unsigned mpi_ms_busy_cpus = 0;
  1460. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1461. #ifdef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
  1462. for (j = 0; j < STARPU_MAXMPIDEVS; j++)
  1463. mpi_ms_busy_cpus += (topology->nmpicores[j] ? 1 : 0);
  1464. #else
  1465. mpi_ms_busy_cpus = 1; /* we launch one thread to control all slaves */
  1466. #endif
  1467. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  1468. unsigned cuda_busy_cpus = 0;
  1469. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1470. cuda_busy_cpus =
  1471. topology->cuda_th_per_dev == 0 && topology->cuda_th_per_stream == 0 ?
  1472. (topology->ncudagpus ? 1 : 0) :
  1473. topology->cuda_th_per_stream ?
  1474. (nworker_per_cuda * topology->ncudagpus) :
  1475. topology->ncudagpus;
  1476. #endif
  1477. unsigned already_busy_cpus = mpi_ms_busy_cpus + mic_busy_cpus
  1478. + cuda_busy_cpus
  1479. + topology->nopenclgpus + topology->nsccdevices;
  1480. long avail_cpus = (long) topology->nhwcpus - (long) already_busy_cpus;
  1481. if (avail_cpus < 0)
  1482. avail_cpus = 0;
  1483. int nth_per_core = starpu_get_env_number_default("STARPU_NTHREADS_PER_CORE", 1);
  1484. avail_cpus *= nth_per_core;
  1485. ncpu = STARPU_MIN(avail_cpus, STARPU_MAXCPUS);
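			/* Worked example (hypothetical machine): with 8 hardware CPUs,
			 * two CUDA devices driven by one thread each, one OpenCL device
			 * and the default STARPU_NTHREADS_PER_CORE=1, already_busy_cpus
			 * is 2 + 1 = 3, so ncpu = min(8 - 3, STARPU_MAXCPUS) = 5. */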
  1486. }
  1487. else
  1488. {
  1489. if (ncpu > STARPU_MAXCPUS)
  1490. {
  1491. _STARPU_DISP("Warning: %d CPU devices requested. Only %d enabled. Use configure option --enable-maxcpus=xxx to update the maximum value of supported CPU devices.\n", ncpu, STARPU_MAXCPUS);
  1492. ncpu = STARPU_MAXCPUS;
  1493. }
  1494. }
  1495. }
  1496. topology->ncpus = ncpu;
  1497. STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
  1498. unsigned cpu;
  1499. unsigned homogeneous = starpu_get_env_number_default("STARPU_PERF_MODEL_HOMOGENEOUS_CPU", 1);
  1500. for (cpu = 0; cpu < topology->ncpus; cpu++)
  1501. {
  1502. int worker_idx = topology->nworkers + cpu;
  1503. config->workers[worker_idx].arch = STARPU_CPU_WORKER;
  1504. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1505. config->workers[worker_idx].perf_arch.ndevices = 1;
  1506. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_CPU_WORKER;
  1507. config->workers[worker_idx].perf_arch.devices[0].devid = homogeneous ? 0 : cpu;
  1508. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1509. config->workers[worker_idx].subworkerid = 0;
  1510. config->workers[worker_idx].devid = cpu;
  1511. config->workers[worker_idx].worker_mask = STARPU_CPU;
  1512. config->worker_mask |= STARPU_CPU;
  1513. }
  1514. topology->nworkers += topology->ncpus;
  1515. #endif
  1516. if (topology->nworkers == 0)
  1517. {
  1518. _STARPU_DEBUG("No worker found, aborting ...\n");
  1519. return -ENODEV;
  1520. }
  1521. return 0;
  1522. }
  1523. void _starpu_destroy_machine_config(struct _starpu_machine_config *config)
  1524. {
  1525. _starpu_close_debug_logfile();
  1526. unsigned worker;
  1527. for (worker = 0; worker < config->topology.nworkers; worker++)
  1528. {
  1529. struct _starpu_worker *workerarg = &config->workers[worker];
  1530. int bindid = workerarg->bindid;
  1531. free(workerarg->perf_arch.devices);
  1532. #ifdef STARPU_HAVE_HWLOC
  1533. hwloc_bitmap_free(workerarg->hwloc_cpu_set);
  1534. if (bindid != -1)
  1535. {
  1536. hwloc_obj_t worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
  1537. config->pu_depth,
  1538. bindid);
  1539. struct _starpu_hwloc_userdata *data = worker_obj->userdata;
  1540. if (data->worker_list)
  1541. {
  1542. _starpu_worker_list_delete(data->worker_list);
  1543. data->worker_list = NULL;
  1544. }
  1545. }
  1546. #endif
  1547. if (bindid != -1)
  1548. {
  1549. free(config->bindid_workers[bindid].workerids);
  1550. config->bindid_workers[bindid].workerids = NULL;
  1551. }
  1552. }
  1553. free(config->bindid_workers);
  1554. config->bindid_workers = NULL;
  1555. config->nbindid = 0;
  1556. unsigned combined_worker_id;
  1557. for(combined_worker_id=0 ; combined_worker_id < config->topology.ncombinedworkers ; combined_worker_id++)
  1558. {
  1559. struct _starpu_combined_worker *combined_worker = &config->combined_workers[combined_worker_id];
  1560. #ifdef STARPU_HAVE_HWLOC
  1561. hwloc_bitmap_free(combined_worker->hwloc_cpu_set);
  1562. #endif
  1563. free(combined_worker->perf_arch.devices);
  1564. }
  1565. #ifdef STARPU_HAVE_HWLOC
  1566. _starpu_deallocate_topology_userdata(hwloc_get_root_obj(config->topology.hwtopology));
  1567. hwloc_topology_destroy(config->topology.hwtopology);
  1568. #endif
  1569. topology_is_initialized = 0;
  1570. #ifdef STARPU_USE_CUDA
  1571. struct handle_entry *entry, *tmp;
  1572. HASH_ITER(hh, devices_using_cuda, entry, tmp)
  1573. {
  1574. HASH_DEL(devices_using_cuda, entry);
  1575. free(entry);
  1576. }
  1577. devices_using_cuda = NULL;
  1578. #endif
  1579. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  1580. int i;
  1581. for (i=0; i<STARPU_NARCH; i++)
  1582. may_bind_automatically[i] = 0;
  1583. #endif
  1584. }
  1585. void
  1586. _starpu_bind_thread_on_cpu (
  1587. struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED,
  1588. int cpuid STARPU_ATTRIBUTE_UNUSED, int workerid STARPU_ATTRIBUTE_UNUSED)
  1589. {
  1590. #ifdef STARPU_SIMGRID
  1591. return;
  1592. #else
  1593. if (nobind > 0)
  1594. return;
  1595. if (cpuid < 0)
  1596. return;
  1597. if (workerid != STARPU_NOWORKERID && cpuid < STARPU_MAXCPUS)
  1598. {
  1599. int previous = cpu_worker[cpuid];
  1600. if (previous != STARPU_NOWORKERID && previous != workerid)
  1601. _STARPU_DISP("Warning: both workers %d and %d are bound to the same PU %d, this will strongly degrade performance\n", previous, workerid, cpuid);
  1602. else
  1603. cpu_worker[cpuid] = workerid;
  1604. }
  1605. #ifdef STARPU_HAVE_HWLOC
  1606. const struct hwloc_topology_support *support;
  1607. #ifdef STARPU_USE_OPENCL
  1608. _starpu_opencl_init();
  1609. #endif
  1610. #ifdef STARPU_USE_CUDA
  1611. _starpu_init_cuda();
  1612. #endif
  1613. _starpu_init_topology(config);
  1614. support = hwloc_topology_get_support (config->topology.hwtopology);
  1615. if (support->cpubind->set_thisthread_cpubind)
  1616. {
  1617. hwloc_obj_t obj =
  1618. hwloc_get_obj_by_depth (config->topology.hwtopology,
  1619. config->pu_depth, cpuid);
  1620. hwloc_bitmap_t set = obj->cpuset;
  1621. int ret;
  1622. hwloc_bitmap_singlify(set);
  1623. ret = hwloc_set_cpubind (config->topology.hwtopology, set,
  1624. HWLOC_CPUBIND_THREAD);
  1625. if (ret)
  1626. {
  1627. perror("hwloc_set_cpubind");
  1628. STARPU_ABORT();
  1629. }
  1630. }
  1631. #elif defined(HAVE_PTHREAD_SETAFFINITY_NP) && defined(__linux__)
  1632. int ret;
  1633. /* fix the thread on the correct cpu */
  1634. cpu_set_t aff_mask;
  1635. CPU_ZERO(&aff_mask);
  1636. CPU_SET(cpuid, &aff_mask);
  1637. starpu_pthread_t self = starpu_pthread_self();
  1638. ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
  1639. if (ret)
  1640. {
  1641. const char *msg = strerror(ret);
  1642. _STARPU_MSG("pthread_setaffinity_np: %s\n", msg);
  1643. STARPU_ABORT();
  1644. }
  1645. #elif defined(_WIN32)
  1646. DWORD mask = 1 << cpuid;
  1647. if (!SetThreadAffinityMask(GetCurrentThread(), mask))
  1648. {
		_STARPU_ERROR("SetThreadAffinityMask(%lx) failed\n", mask);
  1650. }
  1651. #else
  1652. #warning no CPU binding support
  1653. #endif
  1654. #endif
  1655. }
  1656. void
  1657. _starpu_bind_thread_on_cpus (
  1658. struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED,
  1659. struct _starpu_combined_worker *combined_worker STARPU_ATTRIBUTE_UNUSED)
  1660. {
  1661. #ifdef STARPU_SIMGRID
  1662. return;
  1663. #endif
  1664. #ifdef STARPU_HAVE_HWLOC
  1665. const struct hwloc_topology_support *support;
#ifdef STARPU_USE_OPENCL
  1667. _starpu_opencl_init();
  1668. #endif
  1669. #ifdef STARPU_USE_CUDA
  1670. _starpu_init_cuda();
  1671. #endif
  1672. _starpu_init_topology(config);
  1673. support = hwloc_topology_get_support(config->topology.hwtopology);
  1674. if (support->cpubind->set_thisthread_cpubind)
  1675. {
  1676. hwloc_bitmap_t set = combined_worker->hwloc_cpu_set;
  1677. int ret;
  1678. ret = hwloc_set_cpubind (config->topology.hwtopology, set,
  1679. HWLOC_CPUBIND_THREAD);
  1680. if (ret)
  1681. {
  1682. perror("binding thread");
  1683. STARPU_ABORT();
  1684. }
  1685. }
  1686. #else
  1687. #ifdef __GLIBC__
	sched_setaffinity(0, sizeof(combined_worker->cpu_set), &combined_worker->cpu_set);
  1689. #else
  1690. # warning no parallel worker CPU binding support
  1691. #endif
  1692. #endif
  1693. }
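/* Reserve a binding slot (one PU) for each CPU worker; accelerator workers
 * get their binding later, in _starpu_init_workers_binding_and_memory(). */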
  1694. static void _starpu_init_binding_cpu(struct _starpu_machine_config *config)
  1695. {
  1696. unsigned worker;
  1697. for (worker = 0; worker < config->topology.nworkers; worker++)
  1698. {
  1699. struct _starpu_worker *workerarg = &config->workers[worker];
  1700. switch (workerarg->arch)
  1701. {
  1702. case STARPU_CPU_WORKER:
  1703. {
  1704. /* Dedicate a cpu core to that worker */
  1705. workerarg->bindid = _starpu_get_next_bindid(config, NULL, 0);
  1706. break;
  1707. }
  1708. default:
  1709. /* Do nothing */
  1710. break;
  1711. }
  1712. }
  1713. }
  1714. //TODO : Check SIMGRID
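/* Register the NUMA memory nodes StarPU will use. When STARPU_USE_NUMA is
 * enabled, the code below first takes the NUMA nodes used by CPU workers,
 * then falls back to the NUMA nodes attached to CUDA/OpenCL devices;
 * otherwise (or when nothing was found) it registers all available NUMA
 * nodes, or a single STARPU_MAIN_RAM node when no more precise information
 * is available. */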
  1715. static void _starpu_init_numa_node(struct _starpu_machine_config *config)
  1716. {
  1717. nb_numa_nodes = 0;
  1718. unsigned i;
  1719. for (i = 0; i < STARPU_MAXNUMANODES; i++)
  1720. {
  1721. numa_memory_nodes_to_hwloclogid[i] = STARPU_NUMA_UNINITIALIZED;
  1722. numa_memory_nodes_to_physicalid[i] = STARPU_NUMA_UNINITIALIZED;
  1723. }
#ifdef STARPU_SIMGRID
	/* needed by the SimGrid-specific branches below */
	char name[16];
	msg_host_t host;
#endif
	char *state;
  1725. /* NUMA mode activated */
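	/* e.g. (hypothetical invocation) STARPU_USE_NUMA=1 ./app enables the
	 * per-NUMA-node detection below: one StarPU memory node is then
	 * registered per NUMA node actually used by workers or devices */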
  1726. if ((state = starpu_getenv("STARPU_USE_NUMA")) && atoi(state))
  1727. {
  1728. /* Take all NUMA nodes used by CPU workers */
  1729. unsigned worker;
  1730. for (worker = 0; worker < config->topology.nworkers; worker++)
  1731. {
  1732. struct _starpu_worker *workerarg = &config->workers[worker];
  1733. if (workerarg->arch == STARPU_CPU_WORKER)
  1734. {
  1735. int numa_logical_id = _starpu_get_logical_numa_node_worker(worker);
  1736. /* Convert logical id to StarPU id to check if this NUMA node is already saved or not */
  1737. int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(numa_logical_id);
  1738. /* This shouldn't happen */
  1739. if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
  1740. {
  1741. _STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
  1742. STARPU_ABORT();
  1743. }
  1744. if (numa_starpu_id == -1)
  1745. {
  1746. int devid = numa_logical_id == STARPU_NUMA_MAIN_RAM ? 0 : numa_logical_id;
  1747. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, devid);
  1748. STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
  1749. numa_memory_nodes_to_hwloclogid[memnode] = numa_logical_id;
  1750. int numa_physical_id = _starpu_get_physical_numa_node_worker(worker);
  1751. numa_memory_nodes_to_physicalid[memnode] = numa_physical_id;
  1752. nb_numa_nodes++;
  1753. #ifdef STARPU_SIMGRID
  1754. snprintf(name, sizeof(name), "RAM%d", memnode);
  1755. host = _starpu_simgrid_get_host_by_name(name);
  1756. STARPU_ASSERT(host);
  1757. _starpu_simgrid_memory_node_set_host(memnode, host);
  1758. #endif
  1759. }
  1760. }
  1761. }
  1762. /* If we found NUMA nodes from CPU workers, it's good */
  1763. if (nb_numa_nodes != 0)
  1764. return;
  1765. _STARPU_DISP("No NUMA nodes found when checking CPU workers...\n");
  1766. #if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && defined(STARPU_HAVE_HWLOC)
	_STARPU_DISP("Taking NUMA nodes attached to CUDA and OpenCL devices...\n");
  1768. #endif
  1769. #if defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_HWLOC)
  1770. for (i = 0; i < config->topology.ncudagpus; i++)
  1771. {
  1772. hwloc_obj_t obj = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, i);
  1773. /* Hwloc cannot recognize some devices */
  1774. if (!obj)
  1775. continue;
			while (obj && obj->type != HWLOC_OBJ_NODE)
				obj = obj->parent;
			/* If we did not find a "node" object before reaching the root,
			 * hwloc does not know whether there are NUMA nodes or not, so
			 * do not use per-node information for this device. */
			if (!obj)
				continue;
  1785. int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(obj->logical_index);
  1786. /* This shouldn't happen */
  1787. if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
  1788. {
  1789. _STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
  1790. STARPU_ABORT();
  1791. }
  1792. if (numa_starpu_id == -1)
  1793. {
  1794. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, obj->logical_index);
  1795. STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
  1796. numa_memory_nodes_to_hwloclogid[memnode] = obj->logical_index;
  1797. numa_memory_nodes_to_physicalid[memnode] = obj->os_index;
  1798. nb_numa_nodes++;
  1799. #ifdef STARPU_SIMGRID
  1800. snprintf(name, sizeof(name), "RAM%d", memnode);
  1801. host = _starpu_simgrid_get_host_by_name(name);
  1802. STARPU_ASSERT(host);
  1803. _starpu_simgrid_memory_node_set_host(memnode, host);
  1804. #endif
  1805. }
  1806. }
  1807. #endif
  1808. #if defined(STARPU_USE_OPENCL) && defined(STARPU_HAVE_HWLOC)
  1809. if (config->topology.nopenclgpus > 0)
  1810. {
  1811. cl_int err;
  1812. cl_platform_id platform_id[_STARPU_OPENCL_PLATFORM_MAX];
  1813. cl_uint nb_platforms;
  1814. unsigned platform;
  1815. unsigned nb_opencl_devices = 0, num = 0;
  1816. err = clGetPlatformIDs(_STARPU_OPENCL_PLATFORM_MAX, platform_id, &nb_platforms);
  1817. if (STARPU_UNLIKELY(err != CL_SUCCESS))
  1818. nb_platforms=0;
  1819. cl_device_type device_type = CL_DEVICE_TYPE_GPU|CL_DEVICE_TYPE_ACCELERATOR;
  1820. if (starpu_get_env_number("STARPU_OPENCL_ON_CPUS") > 0)
  1821. device_type |= CL_DEVICE_TYPE_CPU;
  1822. if (starpu_get_env_number("STARPU_OPENCL_ONLY_ON_CPUS") > 0)
  1823. device_type = CL_DEVICE_TYPE_CPU;
  1824. for (platform = 0; platform < nb_platforms ; platform++)
  1825. {
  1826. err = clGetDeviceIDs(platform_id[platform], device_type, 0, NULL, &num);
  1827. if (err != CL_SUCCESS)
  1828. num = 0;
  1829. nb_opencl_devices += num;
  1830. for (i = 0; i < num; i++)
  1831. {
  1832. hwloc_obj_t obj = hwloc_opencl_get_device_osdev_by_index(config->topology.hwtopology, platform, i);
  1833. /* Hwloc cannot recognize some devices */
  1834. if (!obj)
  1835. continue;
					while (obj && obj->type != HWLOC_OBJ_NODE)
						obj = obj->parent;
					/* If we did not find a "node" object before reaching the
					 * root, hwloc does not know whether there are NUMA nodes
					 * or not, so do not use per-node information for this
					 * device. */
					if (!obj)
						continue;
  1845. int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(obj->logical_index);
  1846. /* This shouldn't happen */
  1847. if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
  1848. {
  1849. _STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
  1850. STARPU_ABORT();
  1851. }
  1852. if (numa_starpu_id == -1)
  1853. {
  1854. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, obj->logical_index);
  1855. STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
  1856. numa_memory_nodes_to_hwloclogid[memnode] = obj->logical_index;
  1857. numa_memory_nodes_to_physicalid[memnode] = obj->os_index;
  1858. nb_numa_nodes++;
  1859. #ifdef STARPU_SIMGRID
  1860. snprintf(name, sizeof(name), "RAM%d", memnode);
  1861. host = _starpu_simgrid_get_host_by_name(name);
  1862. STARPU_ASSERT(host);
  1863. _starpu_simgrid_memory_node_set_host(memnode, host);
  1864. #endif
  1865. }
  1866. }
  1867. }
  1868. }
  1869. #endif
  1870. }
  1871. #if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && defined(STARPU_HAVE_HWLOC)
	/* If we found NUMA nodes from the GPU devices, we are done */
	if (nb_numa_nodes != 0)
		return;
	/* If no NUMA node was found from the nodes attached to GPUs either, take all of them */
	_STARPU_DISP("No NUMA nodes found when checking GPU devices...\n");
#endif
	_STARPU_DISP("Finally, taking all available NUMA nodes...\n");
  1879. unsigned nnuma = _starpu_topology_get_nnumanodes(config);
  1880. if (nnuma > STARPU_MAXNUMANODES)
  1881. {
  1882. _STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
  1883. nnuma = STARPU_MAXNUMANODES;
  1884. }
  1885. unsigned numa;
  1886. for (numa = 0; numa < nnuma; numa++)
  1887. {
  1888. #if defined(STARPU_HAVE_HWLOC)
  1889. if (nnuma > 1)
  1890. {
  1891. hwloc_obj_t obj = hwloc_get_obj_by_type(config->topology.hwtopology, HWLOC_OBJ_NUMANODE, numa);
  1892. unsigned numa_logical_id = obj->logical_index;
  1893. unsigned numa_physical_id = obj->os_index;
  1894. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, 0);
  1895. STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available) \n", memnode, STARPU_MAXNUMANODES);
  1896. numa_memory_nodes_to_hwloclogid[memnode] = numa_logical_id;
  1897. numa_memory_nodes_to_physicalid[memnode] = numa_physical_id;
  1898. nb_numa_nodes++;
  1899. #ifdef STARPU_SIMGRID
  1900. snprintf(name, sizeof(name), "RAM%d", memnode);
  1901. host = _starpu_simgrid_get_host_by_name(name);
  1902. STARPU_ASSERT(host);
  1903. _starpu_simgrid_memory_node_set_host(memnode, host);
  1904. #endif
  1905. }
  1906. else
  1907. #endif /* defined(STARPU_HAVE_HWLOC) */
  1908. {
  1909. /* In this case, nnuma has only one node */
  1910. int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, 0);
  1911. STARPU_ASSERT_MSG(memnode == STARPU_MAIN_RAM, "Wrong Memory Node : %d (expected %d) \n", memnode, STARPU_MAIN_RAM);
  1912. numa_memory_nodes_to_hwloclogid[memnode] = STARPU_NUMA_MAIN_RAM;
  1913. numa_memory_nodes_to_physicalid[memnode] = STARPU_NUMA_MAIN_RAM;
  1914. nb_numa_nodes++;
  1915. #ifdef STARPU_SIMGRID
			host = _starpu_simgrid_get_host_by_name("RAM");
  1918. STARPU_ASSERT(host);
  1919. _starpu_simgrid_memory_node_set_host(STARPU_MAIN_RAM, host);
  1920. #endif
  1921. }
  1922. }
  1923. STARPU_ASSERT_MSG(nb_numa_nodes > 0, "No NUMA node found... We need at least one memory node !\n");
  1924. }
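/* Register a bus between every ordered pair of distinct NUMA nodes, so that
 * NUMA-to-NUMA transfers can be modelled like any other bus. numa_bus_id is
 * indexed row-major: the entry for (i, j) is i*nb_numa_nodes + j. */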
static void _starpu_init_numa_bus()
{
	unsigned i, j;
	for (i = 0; i < nb_numa_nodes; i++)
		for (j = 0; j < nb_numa_nodes; j++)
			if (i != j)
				numa_bus_id[i*nb_numa_nodes+j] = _starpu_register_bus(i, j);
}
  1933. static void
  1934. _starpu_init_workers_binding_and_memory (struct _starpu_machine_config *config, int no_mp_config STARPU_ATTRIBUTE_UNUSED)
  1935. {
  1936. /* We will store all the busid of the different (src, dst)
  1937. * combinations in a matrix which we initialize here. */
	_starpu_initialize_busid_matrix();
#ifdef STARPU_SIMGRID
	/* needed by the SimGrid-specific branches below */
	char name[16];
	msg_host_t host;
#endif
  1939. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1940. unsigned cuda_init[STARPU_MAXCUDADEVS] = { };
  1941. unsigned cuda_memory_nodes[STARPU_MAXCUDADEVS];
  1942. unsigned cuda_bindid[STARPU_MAXCUDADEVS];
  1943. int cuda_globalbindid = -1;
  1944. #endif
  1945. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  1946. unsigned opencl_init[STARPU_MAXOPENCLDEVS] = { };
  1947. unsigned opencl_memory_nodes[STARPU_MAXOPENCLDEVS];
  1948. unsigned opencl_bindid[STARPU_MAXOPENCLDEVS];
  1949. #endif
  1950. #ifdef STARPU_USE_MIC
  1951. unsigned mic_init[STARPU_MAXMICDEVS] = { };
  1952. unsigned mic_memory_nodes[STARPU_MAXMICDEVS];
  1953. unsigned mic_bindid[STARPU_MAXMICDEVS];
  1954. #endif
  1955. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1956. unsigned mpi_init[STARPU_MAXMPIDEVS] = { };
  1957. unsigned mpi_memory_nodes[STARPU_MAXMPIDEVS];
  1958. unsigned mpi_bindid[STARPU_MAXMPIDEVS];
  1959. #endif
  1960. unsigned bindid;
  1961. for (bindid = 0; bindid < config->nbindid; bindid++)
  1962. {
  1963. free(config->bindid_workers[bindid].workerids);
  1964. config->bindid_workers[bindid].workerids = NULL;
  1965. config->bindid_workers[bindid].nworkers = 0;
  1966. }
  1967. /* Init CPU binding before NUMA nodes, because we use it to discover NUMA nodes */
  1968. _starpu_init_binding_cpu(config);
  1969. /* Initialize NUMA nodes */
  1970. _starpu_init_numa_node(config);
  1971. _starpu_init_numa_bus();
  1972. unsigned worker;
  1973. for (worker = 0; worker < config->topology.nworkers; worker++)
  1974. {
  1975. unsigned memory_node = -1;
  1976. struct _starpu_worker *workerarg = &config->workers[worker];
  1977. unsigned devid = workerarg->devid;
  1978. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  1979. /* Perhaps the worker has some "favourite" bindings */
  1980. int *preferred_binding = NULL;
  1981. int npreferred = 0;
  1982. #endif
  1983. /* select the memory node that contains worker's memory */
  1984. switch (workerarg->arch)
  1985. {
  1986. case STARPU_CPU_WORKER:
  1987. {
  1988. int numa_logical_id = _starpu_get_logical_numa_node_worker(worker);
  1989. int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(numa_logical_id);
  1990. if (numa_starpu_id >= STARPU_MAXNUMANODES)
  1991. numa_starpu_id = STARPU_MAIN_RAM;
  1992. workerarg->numa_memory_node = memory_node = numa_starpu_id;
  1993. _starpu_memory_node_add_nworkers(memory_node);
  1994. _starpu_worker_drives_memory_node(workerarg, numa_starpu_id);
  1995. break;
  1996. }
  1997. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1998. case STARPU_CUDA_WORKER:
  1999. {
  2000. unsigned numa;
  2001. #ifndef STARPU_SIMGRID
  2002. if (may_bind_automatically[STARPU_CUDA_WORKER])
  2003. {
  2004. /* StarPU is allowed to bind threads automatically */
  2005. preferred_binding = _starpu_get_cuda_affinity_vector(devid);
  2006. npreferred = config->topology.nhwpus;
  2007. }
  2008. #endif /* SIMGRID */
  2009. if (cuda_init[devid])
  2010. {
  2011. memory_node = cuda_memory_nodes[devid];
  2012. if (config->topology.cuda_th_per_stream == 0)
  2013. workerarg->bindid = cuda_bindid[devid];
  2014. else
  2015. workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  2016. }
  2017. else
  2018. {
  2019. cuda_init[devid] = 1;
  2020. if (config->topology.cuda_th_per_dev == 0 && config->topology.cuda_th_per_stream == 0)
  2021. {
  2022. if (cuda_globalbindid == -1)
  2023. cuda_globalbindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  2024. workerarg->bindid = cuda_bindid[devid] = cuda_globalbindid;
  2025. }
  2026. else
  2027. workerarg->bindid = cuda_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  2028. memory_node = cuda_memory_nodes[devid] = _starpu_memory_node_register(STARPU_CUDA_RAM, devid);
  2029. for (numa = 0; numa < nb_numa_nodes; numa++)
  2030. {
  2031. _starpu_cuda_bus_ids[numa][devid+STARPU_MAXNUMANODES] = _starpu_register_bus(numa, memory_node);
  2032. _starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][numa] = _starpu_register_bus(memory_node, numa);
  2033. }
  2034. #ifdef STARPU_SIMGRID
  2035. const char* cuda_memcpy_peer;
  2036. snprintf(name, sizeof(name), "CUDA%u", devid);
  2037. host = _starpu_simgrid_get_host_by_name(name);
  2038. STARPU_ASSERT(host);
  2039. _starpu_simgrid_memory_node_set_host(memory_node, host);
  2040. cuda_memcpy_peer = MSG_host_get_property_value(host, "memcpy_peer");
  2041. #endif /* SIMGRID */
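				/* When GPU-GPU transfers are possible (CUDA memcpy peer
				 * support, or a SimGrid platform advertising memcpy_peer),
				 * also register a direct bus between this device and each
				 * CUDA device seen so far, in both directions. */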
  2042. if (
  2043. #ifdef STARPU_SIMGRID
  2044. cuda_memcpy_peer && atoll(cuda_memcpy_peer)
  2045. #elif defined(HAVE_CUDA_MEMCPY_PEER)
  2046. 1
  2047. #else /* MEMCPY_PEER */
  2048. 0
  2049. #endif /* MEMCPY_PEER */
  2050. )
  2051. {
  2052. unsigned worker2;
  2053. for (worker2 = 0; worker2 < worker; worker2++)
  2054. {
  2055. struct _starpu_worker *workerarg2 = &config->workers[worker2];
  2056. int devid2 = workerarg2->devid;
  2057. if (workerarg2->arch == STARPU_CUDA_WORKER)
  2058. {
  2059. unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
  2060. _starpu_cuda_bus_ids[devid2+STARPU_MAXNUMANODES][devid+STARPU_MAXNUMANODES] = _starpu_register_bus(memory_node2, memory_node);
  2061. _starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][devid2+STARPU_MAXNUMANODES] = _starpu_register_bus(memory_node, memory_node2);
  2062. #ifndef STARPU_SIMGRID
  2063. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  2064. {
  2065. hwloc_obj_t obj, obj2, ancestor;
  2066. obj = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, devid);
  2067. obj2 = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, devid2);
  2068. ancestor = hwloc_get_common_ancestor_obj(config->topology.hwtopology, obj, obj2);
  2069. if (ancestor)
  2070. {
  2071. struct _starpu_hwloc_userdata *data = ancestor->userdata;
  2072. #ifdef STARPU_VERBOSE
  2073. {
  2074. char name[64];
  2075. hwloc_obj_type_snprintf(name, sizeof(name), ancestor, 0);
  2076. _STARPU_DEBUG("CUDA%u and CUDA%u are linked through %s, along %u GPUs\n", devid, devid2, name, data->ngpus);
  2077. }
  2078. #endif
  2079. starpu_bus_set_ngpus(_starpu_cuda_bus_ids[devid2+STARPU_MAXNUMANODES][devid+STARPU_MAXNUMANODES], data->ngpus);
  2080. starpu_bus_set_ngpus(_starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][devid2+STARPU_MAXNUMANODES], data->ngpus);
  2081. }
  2082. }
  2083. #endif
  2084. #endif
  2085. }
  2086. }
  2087. }
  2088. }
  2089. _starpu_memory_node_add_nworkers(memory_node);
  2090. //This worker can manage transfers on NUMA nodes
  2091. for (numa = 0; numa < nb_numa_nodes; numa++)
  2092. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
  2093. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
  2094. break;
  2095. }
  2096. #endif
  2097. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  2098. case STARPU_OPENCL_WORKER:
  2099. {
  2100. unsigned numa;
  2101. #ifndef STARPU_SIMGRID
  2102. if (may_bind_automatically[STARPU_OPENCL_WORKER])
  2103. {
  2104. /* StarPU is allowed to bind threads automatically */
  2105. preferred_binding = _starpu_get_opencl_affinity_vector(devid);
  2106. npreferred = config->topology.nhwpus;
  2107. }
  2108. #endif /* SIMGRID */
  2109. if (opencl_init[devid])
  2110. {
  2111. memory_node = opencl_memory_nodes[devid];
  2112. #ifndef STARPU_SIMGRID
  2113. workerarg->bindid = opencl_bindid[devid];
  2114. #endif /* SIMGRID */
  2115. }
  2116. else
  2117. {
  2118. opencl_init[devid] = 1;
  2119. workerarg->bindid = opencl_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  2120. memory_node = opencl_memory_nodes[devid] = _starpu_memory_node_register(STARPU_OPENCL_RAM, devid);
  2121. for (numa = 0; numa < nb_numa_nodes; numa++)
  2122. {
  2123. _starpu_register_bus(numa, memory_node);
  2124. _starpu_register_bus(memory_node, numa);
  2125. }
  2126. #ifdef STARPU_SIMGRID
  2127. snprintf(name, sizeof(name), "OpenCL%u", devid);
  2128. host = _starpu_simgrid_get_host_by_name(name);
  2129. STARPU_ASSERT(host);
  2130. _starpu_simgrid_memory_node_set_host(memory_node, host);
  2131. #endif /* SIMGRID */
  2132. }
  2133. _starpu_memory_node_add_nworkers(memory_node);
  2134. //This worker can manage transfers on NUMA nodes
  2135. for (numa = 0; numa < nb_numa_nodes; numa++)
  2136. _starpu_worker_drives_memory_node(workerarg, numa);
  2137. _starpu_worker_drives_memory_node(workerarg, memory_node);
  2138. break;
  2139. }
  2140. #endif
  2141. #ifdef STARPU_USE_MIC
  2142. case STARPU_MIC_WORKER:
  2143. {
  2144. unsigned numa;
  2145. if (mic_init[devid])
  2146. {
  2147. memory_node = mic_memory_nodes[devid];
  2148. }
  2149. else
  2150. {
  2151. mic_init[devid] = 1;
  2152. /* TODO */
  2153. //if (may_bind_automatically)
  2154. //{
  2155. // /* StarPU is allowed to bind threads automatically */
  2156. // preferred_binding = _starpu_get_mic_affinity_vector(devid);
  2157. // npreferred = config->topology.nhwpus;
  2158. //}
  2159. mic_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  2160. memory_node = mic_memory_nodes[devid] = _starpu_memory_node_register(STARPU_MIC_RAM, devid);
  2161. for (numa = 0; numa < nb_numa_nodes; numa++)
  2162. {
  2163. _starpu_register_bus(numa, memory_node);
  2164. _starpu_register_bus(memory_node, numa);
  2165. }
  2166. }
  2167. workerarg->bindid = mic_bindid[devid];
  2168. _starpu_memory_node_add_nworkers(memory_node);
  2169. //This worker can manage transfers on NUMA nodes
  2170. for (numa = 0; numa < nb_numa_nodes; numa++)
  2171. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
  2172. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
  2173. break;
  2174. }
  2175. #endif /* STARPU_USE_MIC */
  2176. #ifdef STARPU_USE_SCC
  2177. case STARPU_SCC_WORKER:
  2178. {
  2179. unsigned numa;
  2180. /* Node 0 represents the SCC shared memory when we're on SCC. */
  2181. struct _starpu_memory_node_descr *descr = _starpu_memory_node_get_description();
  2182. descr->nodes[ram_memory_node] = STARPU_SCC_SHM;
  2183. memory_node = ram_memory_node;
  2184. _starpu_memory_node_add_nworkers(memory_node);
  2185. //This worker can manage transfers on NUMA nodes
  2186. for (numa = 0; numa < nb_numa_nodes; numa++)
  2187. _starpu_worker_drives_memory_node(workerarg, numa);
  2188. _starpu_worker_drives_memory_node(workerarg, memory_node);
  2189. }
  2190. break;
  2191. #endif /* STARPU_USE_SCC */
  2192. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  2193. case STARPU_MPI_MS_WORKER:
  2194. {
  2195. unsigned numa;
  2196. if (mpi_init[devid])
  2197. {
  2198. memory_node = mpi_memory_nodes[devid];
  2199. }
  2200. else
  2201. {
  2202. mpi_init[devid] = 1;
  2203. mpi_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  2204. memory_node = mpi_memory_nodes[devid] = _starpu_memory_node_register(STARPU_MPI_MS_RAM, devid);
  2205. for (numa = 0; numa < nb_numa_nodes; numa++)
  2206. {
  2207. _starpu_register_bus(numa, memory_node);
  2208. _starpu_register_bus(memory_node, numa);
  2209. }
  2210. }
  2211. //This worker can manage transfers on NUMA nodes
  2212. for (numa = 0; numa < nb_numa_nodes; numa++)
  2213. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
  2214. _starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
  2215. #ifndef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
  2216. /* MPI driver thread can manage all slave memories if we disable the MPI multiple thread */
  2217. unsigned findworker;
  2218. for (findworker = 0; findworker < worker; findworker++)
  2219. {
  2220. struct _starpu_worker *findworkerarg = &config->workers[findworker];
  2221. if (findworkerarg->arch == STARPU_MPI_MS_WORKER)
  2222. {
  2223. _starpu_worker_drives_memory_node(workerarg, findworkerarg->memory_node);
  2224. _starpu_worker_drives_memory_node(findworkerarg, memory_node);
  2225. }
  2226. }
  2227. #endif
  2228. workerarg->bindid = mpi_bindid[devid];
  2229. _starpu_memory_node_add_nworkers(memory_node);
  2230. break;
  2231. }
  2232. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  2233. default:
  2234. STARPU_ABORT();
  2235. }
  2236. workerarg->memory_node = memory_node;
  2237. _STARPU_DEBUG("worker %u type %d devid %u bound to cpu %d, STARPU memory node %u\n", worker, workerarg->arch, devid, workerarg->bindid, memory_node);
  2238. #ifdef __GLIBC__
  2239. if (workerarg->bindid != -1)
  2240. {
  2241. /* Save the initial cpuset */
  2242. CPU_ZERO(&workerarg->cpu_set);
  2243. CPU_SET(workerarg->bindid, &workerarg->cpu_set);
  2244. }
  2245. #endif /* __GLIBC__ */
  2246. #ifdef STARPU_HAVE_HWLOC
  2247. if (workerarg->bindid == -1)
  2248. {
  2249. workerarg->hwloc_cpu_set = hwloc_bitmap_alloc();
  2250. }
  2251. else
  2252. {
  2253. /* Put the worker descriptor in the userdata field of the
  2254. * hwloc object describing the CPU */
  2255. hwloc_obj_t worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
  2256. config->pu_depth,
  2257. workerarg->bindid);
  2258. struct _starpu_hwloc_userdata *data = worker_obj->userdata;
  2259. if (data->worker_list == NULL)
  2260. data->worker_list = _starpu_worker_list_new();
  2261. _starpu_worker_list_push_front(data->worker_list, workerarg);
  2262. /* Clear the cpu set and set the cpu */
  2263. workerarg->hwloc_cpu_set = hwloc_bitmap_dup (worker_obj->cpuset);
  2264. }
  2265. #endif
  2266. if (workerarg->bindid != -1)
  2267. {
  2268. bindid = workerarg->bindid;
  2269. unsigned old_nbindid = config->nbindid;
  2270. if (bindid >= old_nbindid)
  2271. {
  2272. /* More room needed */
  2273. if (!old_nbindid)
  2274. config->nbindid = STARPU_NMAXWORKERS;
  2275. else
  2276. config->nbindid = 2 * old_nbindid;
  2277. _STARPU_REALLOC(config->bindid_workers, config->nbindid * sizeof(config->bindid_workers[0]));
  2278. memset(&config->bindid_workers[old_nbindid], 0, (config->nbindid - old_nbindid) * sizeof(config->bindid_workers[0]));
  2279. }
  2280. /* Add slot for this worker */
  2281. /* Don't care about amortizing the cost, there are usually very few workers sharing the same bindid */
  2282. config->bindid_workers[bindid].nworkers++;
  2283. _STARPU_REALLOC(config->bindid_workers[bindid].workerids, config->bindid_workers[bindid].nworkers * sizeof(config->bindid_workers[bindid].workerids[0]));
  2284. config->bindid_workers[bindid].workerids[config->bindid_workers[bindid].nworkers-1] = worker;
  2285. }
  2286. }
  2287. #ifdef STARPU_SIMGRID
  2288. _starpu_simgrid_count_ngpus();
  2289. #else
  2290. #ifdef STARPU_HAVE_HWLOC
  2291. _starpu_topology_count_ngpus(hwloc_get_root_obj(config->topology.hwtopology));
  2292. #endif
  2293. #endif
  2294. }
  2295. int
  2296. _starpu_build_topology (struct _starpu_machine_config *config, int no_mp_config)
  2297. {
  2298. int ret;
  2299. unsigned i;
  2300. ret = _starpu_init_machine_config(config, no_mp_config);
  2301. if (ret)
  2302. return ret;
  2303. /* for the data management library */
  2304. _starpu_memory_nodes_init();
  2305. _starpu_datastats_init();
  2306. _starpu_init_workers_binding_and_memory(config, no_mp_config);
  2307. config->cpus_nodeid = -1;
  2308. config->cuda_nodeid = -1;
  2309. config->opencl_nodeid = -1;
  2310. config->mic_nodeid = -1;
  2311. config->scc_nodeid = -1;
  2312. config->mpi_nodeid = -1;
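	/* For each driver type, record which memory node its workers use:
	 * -1 means no worker of that type was found, -2 means its workers are
	 * spread over several memory nodes, any other value is the common
	 * memory node id. */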
  2313. for (i = 0; i < starpu_worker_get_count(); i++)
  2314. {
  2315. switch (starpu_worker_get_type(i))
  2316. {
  2317. case STARPU_CPU_WORKER:
  2318. if (config->cpus_nodeid == -1)
  2319. config->cpus_nodeid = starpu_worker_get_memory_node(i);
  2320. else if (config->cpus_nodeid != (int) starpu_worker_get_memory_node(i))
  2321. config->cpus_nodeid = -2;
  2322. break;
  2323. case STARPU_CUDA_WORKER:
  2324. if (config->cuda_nodeid == -1)
  2325. config->cuda_nodeid = starpu_worker_get_memory_node(i);
  2326. else if (config->cuda_nodeid != (int) starpu_worker_get_memory_node(i))
  2327. config->cuda_nodeid = -2;
  2328. break;
  2329. case STARPU_OPENCL_WORKER:
  2330. if (config->opencl_nodeid == -1)
  2331. config->opencl_nodeid = starpu_worker_get_memory_node(i);
  2332. else if (config->opencl_nodeid != (int) starpu_worker_get_memory_node(i))
  2333. config->opencl_nodeid = -2;
  2334. break;
  2335. case STARPU_MIC_WORKER:
  2336. if (config->mic_nodeid == -1)
  2337. config->mic_nodeid = starpu_worker_get_memory_node(i);
  2338. else if (config->mic_nodeid != (int) starpu_worker_get_memory_node(i))
  2339. config->mic_nodeid = -2;
  2340. break;
  2341. case STARPU_SCC_WORKER:
  2342. if (config->scc_nodeid == -1)
  2343. config->scc_nodeid = starpu_worker_get_memory_node(i);
  2344. else if (config->scc_nodeid != (int) starpu_worker_get_memory_node(i))
  2345. config->scc_nodeid = -2;
  2346. break;
  2347. case STARPU_MPI_MS_WORKER:
  2348. if (config->mpi_nodeid == -1)
  2349. config->mpi_nodeid = starpu_worker_get_memory_node(i);
  2350. else if (config->mpi_nodeid != (int) starpu_worker_get_memory_node(i))
  2351. config->mpi_nodeid = -2;
  2352. break;
  2353. case STARPU_ANY_WORKER:
  2354. STARPU_ASSERT(0);
  2355. }
  2356. }
  2357. return 0;
  2358. }
  2359. void _starpu_destroy_topology(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED)
  2360. {
  2361. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  2362. _starpu_deinit_mp_config(config);
  2363. #endif
  2364. /* cleanup StarPU internal data structures */
  2365. _starpu_memory_nodes_deinit();
  2366. _starpu_destroy_machine_config(config);
  2367. }
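/* Print, for every hardware PU, its NUMA node, package and core, followed by
 * the StarPU workers (and combined workers) bound to it. */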
  2368. void
  2369. starpu_topology_print (FILE *output)
  2370. {
  2371. struct _starpu_machine_config *config = _starpu_get_machine_config();
  2372. struct _starpu_machine_topology *topology = &config->topology;
  2373. unsigned pu;
  2374. unsigned worker;
  2375. unsigned nworkers = starpu_worker_get_count();
  2376. unsigned ncombinedworkers = topology->ncombinedworkers;
  2377. unsigned nthreads_per_core = topology->nhwpus / topology->nhwcpus;
  2378. #ifdef STARPU_HAVE_HWLOC
  2379. hwloc_topology_t topo = topology->hwtopology;
  2380. hwloc_obj_t pu_obj;
  2381. hwloc_obj_t last_numa_obj = NULL, numa_obj;
  2382. hwloc_obj_t last_package_obj = NULL, package_obj;
  2383. #endif
  2384. for (pu = 0; pu < topology->nhwpus; pu++)
  2385. {
  2386. #ifdef STARPU_HAVE_HWLOC
  2387. pu_obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, pu);
  2388. numa_obj = hwloc_get_ancestor_obj_by_type(topo, HWLOC_OBJ_NODE, pu_obj);
  2389. if (numa_obj != last_numa_obj)
  2390. {
  2391. fprintf(output, "numa %u", numa_obj->logical_index);
  2392. last_numa_obj = numa_obj;
  2393. }
  2394. fprintf(output, "\t");
  2395. package_obj = hwloc_get_ancestor_obj_by_type(topo, HWLOC_OBJ_SOCKET, pu_obj);
  2396. if (package_obj != last_package_obj)
  2397. {
  2398. fprintf(output, "pack %u", package_obj->logical_index);
  2399. last_package_obj = package_obj;
  2400. }
  2401. fprintf(output, "\t");
  2402. #endif
  2403. if ((pu % nthreads_per_core) == 0)
  2404. fprintf(output, "core %u", pu / nthreads_per_core);
  2405. fprintf(output, "\tPU %u\t", pu);
  2406. for (worker = 0;
  2407. worker < nworkers + ncombinedworkers;
  2408. worker++)
  2409. {
  2410. if (worker < nworkers)
  2411. {
  2412. struct _starpu_worker *workerarg = &config->workers[worker];
  2413. if (workerarg->bindid == (int) pu)
  2414. {
  2415. char name[256];
  2416. starpu_worker_get_name (worker, name,
  2417. sizeof(name));
  2418. fprintf(output, "%s\t", name);
  2419. }
  2420. }
  2421. else
  2422. {
  2423. int worker_size, i;
  2424. int *combined_workerid;
  2425. starpu_combined_worker_get_description(worker, &worker_size, &combined_workerid);
  2426. for (i = 0; i < worker_size; i++)
  2427. {
  2428. if (topology->workers_bindid[combined_workerid[i]] == pu)
  2429. fprintf(output, "comb %u\t", worker-nworkers);
  2430. }
  2431. }
  2432. }
  2433. fprintf(output, "\n");
  2434. }
  2435. }