topology.c

  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2017 Université de Bordeaux
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 CNRS
  5. * Copyright (C) 2011, 2016, 2017 INRIA
  6. * Copyright (C) 2016 Uppsala University
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #include <stdlib.h>
  20. #include <stdio.h>
  21. #include <common/config.h>
  22. #include <core/workers.h>
  23. #include <core/debug.h>
  24. #include <core/topology.h>
  25. #include <drivers/cuda/driver_cuda.h>
  26. #include <drivers/mic/driver_mic_source.h>
  27. #include <drivers/scc/driver_scc_source.h>
  28. #include <drivers/mpi/driver_mpi_source.h>
  29. #include <drivers/mpi/driver_mpi_common.h>
  30. #include <drivers/mp_common/source_common.h>
  31. #include <drivers/opencl/driver_opencl.h>
  32. #include <drivers/opencl/driver_opencl_utils.h>
  33. #include <profiling/profiling.h>
  34. #include <datawizard/datastats.h>
  35. #include <datawizard/memory_nodes.h>
  36. #include <common/uthash.h>
  37. #ifdef STARPU_HAVE_HWLOC
  38. #include <hwloc.h>
  39. #ifndef HWLOC_API_VERSION
  40. #define HWLOC_OBJ_PU HWLOC_OBJ_PROC
  41. #endif
  42. #if HWLOC_API_VERSION < 0x00010b00
  43. #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
  44. #endif
  45. #endif
  46. #ifdef STARPU_HAVE_WINDOWS
  47. #include <windows.h>
  48. #endif
  49. #ifdef STARPU_SIMGRID
  50. #include <core/simgrid.h>
  51. #endif
  52. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  53. #include <hwloc/cuda.h>
  54. #endif
  55. #if defined(STARPU_HAVE_HWLOC) && defined(STARPU_USE_OPENCL)
  56. #include <hwloc/opencl.h>
  57. #endif
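/* Non-zero once _starpu_init_topology() has discovered and filled the topology */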
  58. static unsigned topology_is_initialized = 0;
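/* Value of the STARPU_WORKERS_NOBIND environment variable, read in _starpu_init_topology() */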
  59. static int nobind;
  60. /* For checking whether two workers share the same PU, indexed by PU number */
  61. static int cpu_worker[STARPU_MAXCPUS];
  62. static unsigned nb_numa_nodes = 0;
  63. static int numa_memory_nodes_to_hwloclogid[STARPU_MAXNUMANODES]; /* indexed by StarPU numa node to convert in hwloc logid */
  64. static int numa_memory_nodes_to_physicalid[STARPU_MAXNUMANODES]; /* indexed by StarPU numa node to convert in physical id */
  65. static unsigned numa_bus_id[STARPU_MAXNUMANODES*STARPU_MAXNUMANODES];
  66. static int _starpu_get_logical_numa_node_worker(unsigned workerid);
  67. #define STARPU_NUMA_UNINITIALIZED (-2)
  68. #define STARPU_NUMA_MAIN_RAM (-1)
  69. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  70. struct handle_entry
  71. {
  72. UT_hash_handle hh;
  73. unsigned gpuid;
  74. };
  75. # if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  76. /* Entry in the `devices_using_cuda' hash table. */
  77. static struct handle_entry *devices_using_cuda;
  78. # endif
  79. static unsigned may_bind_automatically[STARPU_NARCH] = { 0 };
#endif // STARPU_USE_CUDA || STARPU_USE_OPENCL || STARPU_USE_SCC || STARPU_SIMGRID || STARPU_USE_MPI_MASTER_SLAVE
  81. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
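/* Worker sets for the CUDA devices: one set per device, or a single shared set,
 * depending on STARPU_CUDA_THREAD_PER_DEV (see _starpu_init_machine_config()) */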
  82. static struct _starpu_worker_set cuda_worker_set[STARPU_MAXCUDADEVS];
  83. #endif
  84. #ifdef STARPU_USE_MIC
  85. static struct _starpu_worker_set mic_worker_set[STARPU_MAXMICDEVS];
  86. #endif
  87. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  88. struct _starpu_worker_set mpi_worker_set[STARPU_MAXMPIDEVS];
  89. #endif
  90. int starpu_memory_nodes_get_numa_count(void)
  91. {
  92. return nb_numa_nodes;
  93. }
  94. #if defined(STARPU_HAVE_HWLOC)
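/* Walk up from OBJ to the enclosing NUMA node and return its hwloc logical index,
 * or STARPU_NUMA_MAIN_RAM if hwloc did not report any NUMA node */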
  95. static int numa_get_logical_id(hwloc_obj_t obj)
  96. {
  97. STARPU_ASSERT(obj);
  98. while (obj->type != HWLOC_OBJ_NODE)
  99. {
  100. obj = obj->parent;
  101. /* If we don't find a "node" obj before the root, this means
  102. * hwloc does not know whether there are numa nodes or not, so
  103. * we should not use a per-node sampling in that case. */
  104. if (!obj)
  105. return STARPU_NUMA_MAIN_RAM;
  106. }
  107. return obj->logical_index;
  108. }
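/* Same as above, but return the OS (physical) index of the enclosing NUMA node */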
  109. static int numa_get_physical_id(hwloc_obj_t obj)
  110. {
  111. STARPU_ASSERT(obj);
  112. while (obj->type != HWLOC_OBJ_NODE)
  113. {
  114. obj = obj->parent;
  115. /* If we don't find a "node" obj before the root, this means
  116. * hwloc does not know whether there are numa nodes or not, so
  117. * we should not use a per-node sampling in that case. */
  118. if (!obj)
  119. return STARPU_NUMA_MAIN_RAM;
  120. }
  121. return obj->os_index;
  122. }
  123. #endif
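/* Return the logical index of the NUMA node the given CPU worker is bound to,
 * or STARPU_NUMA_MAIN_RAM when STARPU_USE_NUMA is disabled or hwloc is unavailable */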
  124. static int _starpu_get_logical_numa_node_worker(unsigned workerid)
  125. {
  126. #if defined(STARPU_HAVE_HWLOC)
  127. if (starpu_get_env_number_default("STARPU_USE_NUMA", 0))
  128. {
  129. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  130. struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config() ;
  131. struct _starpu_machine_topology *topology = &config->topology ;
  132. hwloc_obj_t obj;
  133. switch(worker->arch)
  134. {
  135. case STARPU_CPU_WORKER:
  136. obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, worker->bindid) ;
  137. break;
  138. default:
  139. STARPU_ABORT();
  140. }
  141. return numa_get_logical_id(obj);
  142. }
  143. else
  144. #endif
  145. {
  146. (void) workerid; /* unused */
  147. return STARPU_NUMA_MAIN_RAM;
  148. }
  149. }
  150. static int _starpu_get_physical_numa_node_worker(unsigned workerid)
  151. {
  152. #if defined(STARPU_HAVE_HWLOC)
  153. if (starpu_get_env_number_default("STARPU_USE_NUMA", 0))
  154. {
  155. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  156. struct _starpu_machine_config *config = (struct _starpu_machine_config *)_starpu_get_machine_config() ;
  157. struct _starpu_machine_topology *topology = &config->topology ;
  158. hwloc_obj_t obj;
  159. switch(worker->arch)
  160. {
  161. case STARPU_CPU_WORKER:
  162. obj = hwloc_get_obj_by_type(topology->hwtopology, HWLOC_OBJ_PU, worker->bindid) ;
  163. break;
  164. default:
  165. STARPU_ABORT();
  166. }
  167. return numa_get_physical_id(obj);
  168. }
  169. else
  170. #endif
  171. {
  172. (void) workerid; /* unused */
  173. return STARPU_NUMA_MAIN_RAM;
  174. }
  175. }
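/* Look up the internal worker structure matching the device described by D,
 * or return NULL if no worker drives that device */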
  176. struct _starpu_worker *_starpu_get_worker_from_driver(struct starpu_driver *d)
  177. {
  178. unsigned nworkers = starpu_worker_get_count();
  179. unsigned workerid;
  180. for (workerid = 0; workerid < nworkers; workerid++)
  181. {
  182. if (starpu_worker_get_type(workerid) == d->type)
  183. {
  184. struct _starpu_worker *worker;
  185. worker = _starpu_get_worker_struct(workerid);
  186. switch (d->type)
  187. {
  188. #ifdef STARPU_USE_CPU
  189. case STARPU_CPU_WORKER:
  190. if (worker->devid == d->id.cpu_id)
  191. return worker;
  192. break;
  193. #endif
  194. #ifdef STARPU_USE_OPENCL
  195. case STARPU_OPENCL_WORKER:
  196. {
  197. cl_device_id device;
  198. starpu_opencl_get_device(worker->devid, &device);
  199. if (device == d->id.opencl_id)
  200. return worker;
  201. break;
  202. }
  203. #endif
  204. #ifdef STARPU_USE_CUDA
  205. case STARPU_CUDA_WORKER:
  206. {
  207. if (worker->devid == d->id.cuda_id)
  208. return worker;
  209. break;
  210. }
  211. #endif
  212. default:
  213. (void) worker;
  214. _STARPU_DEBUG("Invalid device type\n");
  215. return NULL;
  216. }
  217. }
  218. }
  219. return NULL;
  220. }
  221. /*
  222. * Discover the topology of the machine
  223. */
  224. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  225. static void
  226. _starpu_initialize_workers_deviceid (int *explicit_workers_gpuid,
  227. int *current, int *workers_gpuid,
  228. const char *varname, unsigned nhwgpus,
  229. enum starpu_worker_archtype type)
  230. {
  231. char *strval;
  232. unsigned i;
  233. *current = 0;
/* conf->workers_gpuid indicates the successive GPU identifiers that
 * should be used to bind the workers. It is either filled
 * according to the user's explicit parameters (from starpu_conf) or
 * according to the STARPU_WORKERS_CUDAID env. variable. Otherwise, a
 * round-robin policy is used to distribute the workers over the
 * cores. */
  240. /* what do we use, explicit value, env. variable, or round-robin ? */
  241. strval = starpu_getenv(varname);
  242. if (strval)
  243. {
/* STARPU_WORKERS_CUDAID certainly contains fewer entries than
 * STARPU_NMAXWORKERS, so we reuse its entries in a round
 * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
 * 2". */
  248. unsigned wrap = 0;
  249. unsigned number_of_entries = 0;
  250. char *endptr;
  251. /* we use the content of the STARPU_WORKERS_CUDAID
  252. * env. variable */
  253. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  254. {
  255. if (!wrap)
  256. {
  257. long int val;
  258. val = strtol(strval, &endptr, 10);
  259. if (endptr != strval)
  260. {
  261. workers_gpuid[i] = (unsigned)val;
  262. strval = endptr;
  263. }
  264. else
  265. {
  266. /* there must be at least one entry */
  267. STARPU_ASSERT(i != 0);
  268. number_of_entries = i;
/* there are no more values in the
 * string */
  271. wrap = 1;
  272. workers_gpuid[i] = workers_gpuid[0];
  273. }
  274. }
  275. else
  276. {
  277. workers_gpuid[i] =
  278. workers_gpuid[i % number_of_entries];
  279. }
  280. }
  281. }
  282. else if (explicit_workers_gpuid)
  283. {
  284. /* we use the explicit value from the user */
  285. memcpy(workers_gpuid,
  286. explicit_workers_gpuid,
  287. STARPU_NMAXWORKERS*sizeof(unsigned));
  288. }
  289. else
  290. {
  291. /* by default, we take a round robin policy */
  292. if (nhwgpus > 0)
  293. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  294. workers_gpuid[i] = (unsigned)(i % nhwgpus);
  295. /* StarPU can use sampling techniques to bind threads
  296. * correctly */
  297. may_bind_automatically[type] = 1;
  298. }
  299. }
  300. #endif
  301. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  302. static void
  303. _starpu_initialize_workers_cuda_gpuid (struct _starpu_machine_config *config)
  304. {
  305. struct _starpu_machine_topology *topology = &config->topology;
  306. struct starpu_conf *uconf = &config->conf;
  307. _starpu_initialize_workers_deviceid (
  308. uconf->use_explicit_workers_cuda_gpuid == 0
  309. ? NULL
  310. : (int *)uconf->workers_cuda_gpuid,
  311. &(config->current_cuda_gpuid),
  312. (int *)topology->workers_cuda_gpuid,
  313. "STARPU_WORKERS_CUDAID",
  314. topology->nhwcudagpus,
  315. STARPU_CUDA_WORKER);
  316. }
  317. static inline int
  318. _starpu_get_next_cuda_gpuid (struct _starpu_machine_config *config)
  319. {
  320. unsigned i =
  321. ((config->current_cuda_gpuid++) % config->topology.ncudagpus);
  322. return (int)config->topology.workers_cuda_gpuid[i];
  323. }
  324. #endif
  325. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  326. static void
  327. _starpu_initialize_workers_opencl_gpuid (struct _starpu_machine_config*config)
  328. {
  329. struct _starpu_machine_topology *topology = &config->topology;
  330. struct starpu_conf *uconf = &config->conf;
  331. _starpu_initialize_workers_deviceid(
  332. uconf->use_explicit_workers_opencl_gpuid == 0
  333. ? NULL
  334. : (int *)uconf->workers_opencl_gpuid,
  335. &(config->current_opencl_gpuid),
  336. (int *)topology->workers_opencl_gpuid,
  337. "STARPU_WORKERS_OPENCLID",
  338. topology->nhwopenclgpus,
  339. STARPU_OPENCL_WORKER);
  340. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  341. // Detect devices which are already used with CUDA
  342. {
  343. unsigned tmp[STARPU_NMAXWORKERS];
  344. unsigned nb=0;
  345. int i;
  346. for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
  347. {
  348. struct handle_entry *entry;
  349. int devid = config->topology.workers_opencl_gpuid[i];
  350. HASH_FIND_INT(devices_using_cuda, &devid, entry);
  351. if (entry == NULL)
  352. {
  353. tmp[nb] = topology->workers_opencl_gpuid[i];
  354. nb++;
  355. }
  356. }
  357. for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
  358. tmp[i] = -1;
  359. memcpy (topology->workers_opencl_gpuid, tmp,
  360. sizeof(unsigned)*STARPU_NMAXWORKERS);
  361. }
#endif /* STARPU_USE_CUDA || STARPU_SIMGRID */
  363. {
  364. // Detect identical devices
  365. struct handle_entry *devices_already_used = NULL;
  366. unsigned tmp[STARPU_NMAXWORKERS];
  367. unsigned nb=0;
  368. int i;
  369. for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
  370. {
  371. int devid = topology->workers_opencl_gpuid[i];
  372. struct handle_entry *entry;
  373. HASH_FIND_INT(devices_already_used, &devid, entry);
  374. if (entry == NULL)
  375. {
  376. struct handle_entry *entry2;
  377. _STARPU_MALLOC(entry2, sizeof(*entry2));
  378. entry2->gpuid = devid;
  379. HASH_ADD_INT(devices_already_used, gpuid,
  380. entry2);
  381. tmp[nb] = devid;
  382. nb ++;
  383. }
  384. }
  385. struct handle_entry *entry, *tempo;
  386. HASH_ITER(hh, devices_already_used, entry, tempo)
  387. {
  388. HASH_DEL(devices_already_used, entry);
  389. free(entry);
  390. }
  391. for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
  392. tmp[i] = -1;
  393. memcpy (topology->workers_opencl_gpuid, tmp,
  394. sizeof(unsigned)*STARPU_NMAXWORKERS);
  395. }
  396. }
  397. static inline int
  398. _starpu_get_next_opencl_gpuid (struct _starpu_machine_config *config)
  399. {
  400. unsigned i =
  401. ((config->current_opencl_gpuid++) % config->topology.nopenclgpus);
  402. return (int)config->topology.workers_opencl_gpuid[i];
  403. }
  404. #endif
  405. #if 0
  406. #if defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID)
  407. static void _starpu_initialize_workers_mic_deviceid(struct _starpu_machine_config *config)
  408. {
  409. struct _starpu_machine_topology *topology = &config->topology;
  410. struct starpu_conf *uconf = &config->conf;
  411. _starpu_initialize_workers_deviceid(
  412. uconf->use_explicit_workers_mic_deviceid == 0
  413. ? NULL
  414. : (int *)config->user_conf->workers_mic_deviceid,
  415. &(config->current_mic_deviceid),
  416. (int *)topology->workers_mic_deviceid,
  417. "STARPU_WORKERS_MICID",
  418. topology->nhwmiccores,
  419. STARPU_MIC_WORKER);
  420. }
  421. #endif
  422. #endif
  423. #ifdef STARPU_USE_SCC
  424. static void _starpu_initialize_workers_scc_deviceid(struct _starpu_machine_config *config)
  425. {
  426. struct _starpu_machine_topology *topology = &config->topology;
  427. struct starpu_conf *uconf = &config->conf;
  428. _starpu_initialize_workers_deviceid(
  429. uconf->use_explicit_workers_scc_deviceid == 0
  430. ? NULL
  431. : (int *) uconf->workers_scc_deviceid,
  432. &(config->current_scc_deviceid),
  433. (int *)topology->workers_scc_deviceid,
  434. "STARPU_WORKERS_SCCID",
  435. topology->nhwscc,
  436. STARPU_SCC_WORKER);
  437. }
  438. #endif /* STARPU_USE_SCC */
  439. #if 0
  440. #ifdef STARPU_USE_MIC
  441. static inline int _starpu_get_next_mic_deviceid(struct _starpu_machine_config *config)
  442. {
  443. unsigned i = ((config->current_mic_deviceid++) % config->topology.nmicdevices);
  444. return (int)config->topology.workers_mic_deviceid[i];
  445. }
  446. #endif
  447. #endif
  448. #ifdef STARPU_USE_SCC
  449. static inline int _starpu_get_next_scc_deviceid(struct _starpu_machine_config *config)
  450. {
  451. unsigned i = ((config->current_scc_deviceid++) % config->topology.nsccdevices);
  452. return (int)config->topology.workers_scc_deviceid[i];
  453. }
  454. #endif
  455. #ifdef STARPU_USE_MPI_MASTER_SLAVE
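/* Pick the identifier of the next MPI master-slave device in a round-robin fashion */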
  456. static inline int _starpu_get_next_mpi_deviceid(struct _starpu_machine_config *config)
  457. {
  458. unsigned i = ((config->current_mpi_deviceid++) % config->topology.nmpidevices);
  459. return (int)config->topology.workers_mpi_ms_deviceid[i];
  460. }
  461. static void
  462. _starpu_init_mpi_topology (struct _starpu_machine_config *config, long mpi_idx)
  463. {
/* Discover the topology of the MPI node identified by MPI_IDX. That
 * is, make this StarPU instance aware of the number of cores available
 * on this MPI device. Update the `nhwmpicores' topology field
 * accordingly. */
  468. struct _starpu_machine_topology *topology = &config->topology;
  469. int nbcores;
  470. _starpu_src_common_sink_nbcores (mpi_ms_nodes[mpi_idx], &nbcores);
  471. topology->nhwmpicores[mpi_idx] = nbcores;
  472. }
  473. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  474. #ifdef STARPU_USE_MIC
  475. static void
  476. _starpu_init_mic_topology (struct _starpu_machine_config *config, long mic_idx)
  477. {
/* Discover the topology of the MIC node identified by MIC_IDX. That
 * is, make this StarPU instance aware of the number of cores available
 * on this MIC device. Update the `nhwmiccores' topology field
 * accordingly. */
  482. struct _starpu_machine_topology *topology = &config->topology;
  483. int nbcores;
  484. _starpu_src_common_sink_nbcores (mic_nodes[mic_idx], &nbcores);
  485. topology->nhwmiccores[mic_idx] = nbcores;
  486. }
  487. static int
  488. _starpu_init_mic_node (struct _starpu_machine_config *config, int mic_idx,
  489. COIENGINE *coi_handle, COIPROCESS *coi_process)
  490. {
  491. /* Initialize the MIC node of index MIC_IDX. */
  492. struct starpu_conf *user_conf = &config->conf;
  493. char ***argv = _starpu_get_argv();
  494. const char *suffixes[] = {"-mic", "_mic", NULL};
/* Environment variables to send to the Sink; they tell it what kind
 * of node it is (architecture and type), as it has no way to discover
 * this itself */
  498. char mic_idx_env[32];
  499. snprintf(mic_idx_env, sizeof(mic_idx_env), "_STARPU_MIC_DEVID=%d", mic_idx);
  500. /* XXX: this is currently necessary so that the remote process does not
  501. * segfault. */
  502. char nb_mic_env[32];
  503. snprintf(nb_mic_env, sizeof(nb_mic_env), "_STARPU_MIC_NB=%d", 2);
  504. const char *mic_sink_env[] = {"STARPU_SINK=STARPU_MIC", mic_idx_env, nb_mic_env, NULL};
  505. char mic_sink_program_path[1024];
  506. /* Let's get the helper program to run on the MIC device */
  507. int mic_file_found =
  508. _starpu_src_common_locate_file (mic_sink_program_path,
  509. sizeof(mic_sink_program_path),
  510. starpu_getenv("STARPU_MIC_SINK_PROGRAM_NAME"),
  511. starpu_getenv("STARPU_MIC_SINK_PROGRAM_PATH"),
  512. user_conf->mic_sink_program_path,
  513. (argv ? (*argv)[0] : NULL),
  514. suffixes);
  515. if (0 != mic_file_found)
  516. {
  517. _STARPU_MSG("No MIC program specified, use the environment\n"
  518. "variable STARPU_MIC_SINK_PROGRAM_NAME or the environment\n"
  519. "or the field 'starpu_conf.mic_sink_program_path'\n"
  520. "to define it.\n");
  521. return -1;
  522. }
  523. COIRESULT res;
  524. /* Let's get the handle which let us manage the remote MIC device */
  525. res = COIEngineGetHandle(COI_ISA_MIC, mic_idx, coi_handle);
  526. if (STARPU_UNLIKELY(res != COI_SUCCESS))
  527. STARPU_MIC_SRC_REPORT_COI_ERROR(res);
  528. /* We launch the helper on the MIC device, which will wait for us
  529. * to give it work to do.
 * As we will communicate further with the device through scif, we
 * don't need to keep the process pointer */
  532. res = COIProcessCreateFromFile(*coi_handle, mic_sink_program_path, 0, NULL, 0,
  533. mic_sink_env, 1, NULL, 0, NULL,
  534. coi_process);
  535. if (STARPU_UNLIKELY(res != COI_SUCCESS))
  536. STARPU_MIC_SRC_REPORT_COI_ERROR(res);
/* Let's create the node structure through which we will communicate
 * with the peer over scif */
  539. mic_nodes[mic_idx] =
  540. _starpu_mp_common_node_create(STARPU_NODE_MIC_SOURCE, mic_idx);
  541. return 0;
  542. }
  543. #endif
  544. #ifndef STARPU_SIMGRID
  545. #ifdef STARPU_HAVE_HWLOC
  546. static void
  547. _starpu_allocate_topology_userdata(hwloc_obj_t obj)
  548. {
  549. unsigned i;
  550. _STARPU_CALLOC(obj->userdata, 1, sizeof(struct _starpu_hwloc_userdata));
  551. for (i = 0; i < obj->arity; i++)
  552. _starpu_allocate_topology_userdata(obj->children[i]);
  553. }
  554. static void
  555. _starpu_deallocate_topology_userdata(hwloc_obj_t obj)
  556. {
  557. unsigned i;
  558. struct _starpu_hwloc_userdata *data = obj->userdata;
  559. STARPU_ASSERT(!data->worker_list || data->worker_list == (void*)-1);
  560. free(data);
  561. for (i = 0; i < obj->arity; i++)
  562. _starpu_deallocate_topology_userdata(obj->children[i]);
  563. }
  564. #endif
  565. #endif
  566. static void
  567. _starpu_init_topology (struct _starpu_machine_config *config)
  568. {
  569. /* Discover the topology, meaning finding all the available PUs for
  570. the compiled drivers. These drivers MUST have been initialized
  571. before calling this function. The discovered topology is filled in
  572. CONFIG. */
  573. struct _starpu_machine_topology *topology = &config->topology;
  574. if (topology_is_initialized)
  575. return;
  576. nobind = starpu_get_env_number("STARPU_WORKERS_NOBIND");
  577. topology->nhwcpus = 0;
  578. topology->nhwpus = 0;
  579. #ifndef STARPU_SIMGRID
  580. #ifdef STARPU_HAVE_HWLOC
  581. hwloc_topology_init(&topology->hwtopology);
  582. _starpu_topology_filter(topology->hwtopology);
  583. hwloc_topology_load(topology->hwtopology);
  584. _starpu_allocate_topology_userdata(hwloc_get_root_obj(topology->hwtopology));
  585. #endif
  586. #endif
  587. #ifdef STARPU_SIMGRID
  588. config->topology.nhwcpus = config->topology.nhwpus = _starpu_simgrid_get_nbhosts("CPU");
  589. #elif defined(STARPU_HAVE_HWLOC)
/* Discover the CPUs through the hwloc interface and fill CONFIG
 * accordingly. */
  592. config->cpu_depth = hwloc_get_type_depth (topology->hwtopology,
  593. HWLOC_OBJ_CORE);
  594. config->pu_depth = hwloc_get_type_depth (topology->hwtopology,
  595. HWLOC_OBJ_PU);
  596. /* Would be very odd */
  597. STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);
  598. if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
  599. {
/* unknown, using logical processors as fallback */
  601. _STARPU_DISP("Warning: The OS did not report CPU cores. Assuming there is only one hardware thread per core.\n");
  602. config->cpu_depth = hwloc_get_type_depth(topology->hwtopology,
  603. HWLOC_OBJ_PU);
  604. }
  605. topology->nhwcpus = hwloc_get_nbobjs_by_depth (topology->hwtopology,
  606. config->cpu_depth);
  607. topology->nhwpus = hwloc_get_nbobjs_by_depth (topology->hwtopology,
  608. config->pu_depth);
  609. #elif defined(HAVE_SYSCONF)
/* Discover the CPUs through the sysconf(3) function and fill
 * CONFIG accordingly. */
  612. config->topology.nhwcpus = config->topology.nhwpus = sysconf(_SC_NPROCESSORS_ONLN);
  613. #elif defined(_WIN32)
  614. /* Discover the CPUs on Cygwin and MinGW systems. */
  615. SYSTEM_INFO sysinfo;
  616. GetSystemInfo(&sysinfo);
  617. config->topology.nhwcpus = config->topology.nhwpus = sysinfo.dwNumberOfProcessors;
  618. #else
  619. #warning no way to know number of cores, assuming 1
  620. config->topology.nhwcpus = config->topology.nhwpus = 1;
  621. #endif
  622. _starpu_cuda_discover_devices(config);
  623. _starpu_opencl_discover_devices(config);
  624. #ifdef STARPU_USE_SCC
  625. config->topology.nhwscc = _starpu_scc_src_get_device_count();
  626. #endif
  627. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  628. config->topology.nhwmpi = _starpu_mpi_src_get_device_count();
  629. #endif
  630. topology_is_initialized = 1;
  631. }
  632. /*
  633. * Bind workers on the different processors
  634. */
  635. static void
  636. _starpu_initialize_workers_bindid (struct _starpu_machine_config *config)
  637. {
  638. char *strval;
  639. unsigned i;
  640. struct _starpu_machine_topology *topology = &config->topology;
  641. config->current_bindid = 0;
/* conf->workers_bindid indicates the successive logical PU identifiers that
 * should be used to bind the workers. It is either filled
 * according to the user's explicit parameters (from starpu_conf) or
 * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
 * round-robin policy is used to distribute the workers over the
 * cores. */
  648. /* what do we use, explicit value, env. variable, or round-robin ? */
  649. strval = starpu_getenv("STARPU_WORKERS_CPUID");
  650. if (strval)
  651. {
/* STARPU_WORKERS_CPUID certainly contains fewer entries than
 * STARPU_NMAXWORKERS, so we reuse its entries in a round
 * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
 * 2". */
  656. unsigned wrap = 0;
  657. unsigned number_of_entries = 0;
  658. char *endptr;
  659. /* we use the content of the STARPU_WORKERS_CPUID
  660. * env. variable */
  661. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  662. {
  663. if (!wrap)
  664. {
  665. long int val;
  666. val = strtol(strval, &endptr, 10);
  667. if (endptr != strval)
  668. {
  669. topology->workers_bindid[i] =
  670. (unsigned)(val % topology->nhwpus);
  671. strval = endptr;
  672. if (*strval == '-')
  673. {
  674. /* range of values */
  675. long int endval;
  676. strval++;
  677. if (*strval && *strval != ' ' && *strval != ',')
  678. {
  679. endval = strtol(strval, &endptr, 10);
  680. strval = endptr;
  681. }
  682. else
  683. {
  684. endval = topology->nhwpus-1;
  685. if (*strval)
  686. strval++;
  687. }
  688. for (val++; val <= endval && i < STARPU_NMAXWORKERS-1; val++)
  689. {
  690. i++;
  691. topology->workers_bindid[i] =
  692. (unsigned)(val % topology->nhwpus);
  693. }
  694. }
  695. if (*strval == ',')
  696. strval++;
  697. }
  698. else
  699. {
  700. /* there must be at least one entry */
  701. STARPU_ASSERT(i != 0);
  702. number_of_entries = i;
/* there are no more values in the
 * string */
  705. wrap = 1;
  706. topology->workers_bindid[i] =
  707. topology->workers_bindid[0];
  708. }
  709. }
  710. else
  711. {
  712. topology->workers_bindid[i] =
  713. topology->workers_bindid[i % number_of_entries];
  714. }
  715. }
  716. }
  717. else if (config->conf.use_explicit_workers_bindid)
  718. {
  719. /* we use the explicit value from the user */
  720. memcpy(topology->workers_bindid,
  721. config->conf.workers_bindid,
  722. STARPU_NMAXWORKERS*sizeof(unsigned));
  723. }
  724. else
  725. {
  726. int nth_per_core = starpu_get_env_number_default("STARPU_NTHREADS_PER_CORE", 1);
  727. int k;
  728. int nbindids=0;
  729. int nhyperthreads = topology->nhwpus / topology->nhwcpus;
  730. STARPU_ASSERT_MSG(nth_per_core > 0 && nth_per_core <= nhyperthreads , "Incorrect number of hyperthreads");
  731. i = 0; /* PU number currently assigned */
  732. k = 0; /* Number of threads already put on the current core */
  733. while(nbindids < STARPU_NMAXWORKERS)
  734. {
  735. if (k >= nth_per_core)
  736. {
  737. /* We have already put enough workers on this
  738. * core, skip remaining PUs from this core, and
  739. * proceed with next core */
  740. i += nhyperthreads-nth_per_core;
  741. k = 0;
  742. continue;
  743. }
  744. /* Add a worker to this core, by using this logical PU */
  745. topology->workers_bindid[nbindids++] =
  746. (unsigned)(i % topology->nhwpus);
  747. k++;
  748. i++;
  749. }
  750. }
  751. for (i = 0; i < STARPU_MAXCPUS;i++)
  752. cpu_worker[i] = STARPU_NOWORKERID;
  753. /* no binding yet */
  754. memset(&config->currently_bound, 0, sizeof(config->currently_bound));
  755. }
  756. /* This function gets the identifier of the next core on which to bind a
  757. * worker. In case a list of preferred cores was specified (logical indexes),
 * we look for an available core among the list if possible, otherwise a
  759. * round-robin policy is used. */
  760. static inline int
  761. _starpu_get_next_bindid (struct _starpu_machine_config *config,
  762. int *preferred_binding, int npreferred)
  763. {
  764. struct _starpu_machine_topology *topology = &config->topology;
  765. int current_preferred;
  766. int nhyperthreads = topology->nhwpus / topology->nhwcpus;
  767. unsigned i;
  768. if (npreferred)
  769. {
  770. STARPU_ASSERT_MSG(preferred_binding, "Passing NULL pointer for parameter preferred_binding with a non-0 value of parameter npreferred");
  771. }
  772. /* loop over the preference list */
  773. for (current_preferred = 0;
  774. current_preferred < npreferred;
  775. current_preferred++)
  776. {
  777. /* Try to get this core */
  778. unsigned requested_core = preferred_binding[current_preferred];
  779. unsigned requested_bindid = requested_core * nhyperthreads;
  780. /* can we bind the worker on the preferred core ? */
  781. unsigned ind;
  782. /* Look at the remaining cores to be bound to */
  783. for (ind = 0;
  784. ind < topology->nhwpus / nhyperthreads;
  785. ind++)
  786. {
  787. if (topology->workers_bindid[ind] == requested_bindid && !config->currently_bound[ind])
  788. {
  789. /* the cpu is available, we use it ! */
  790. config->currently_bound[ind] = 1;
  791. return requested_bindid;
  792. }
  793. }
  794. }
  795. for (i = config->current_bindid; i < topology->nhwpus / nhyperthreads; i++)
  796. if (!config->currently_bound[i])
  797. /* Found a cpu ready for use, use it! */
  798. break;
  799. STARPU_ASSERT(i < topology->nhwpus / nhyperthreads);
  800. int bindid = topology->workers_bindid[i];
  801. config->currently_bound[i] = 1;
  802. i++;
  803. if (i == topology->nhwpus / nhyperthreads)
  804. {
  805. /* Finished binding on all cpus, restart from start in
  806. * case the user really wants overloading */
  807. memset(&config->currently_bound, 0, sizeof(config->currently_bound));
  808. i = 0;
  809. }
  810. config->current_bindid = i;
  811. return bindid;
  812. }
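/* Return the number of hardware CPU cores, initializing the enabled drivers and the topology first if needed */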
  813. unsigned
  814. _starpu_topology_get_nhwcpu (struct _starpu_machine_config *config)
  815. {
  816. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  817. _starpu_opencl_init();
  818. #endif
  819. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  820. _starpu_init_cuda();
  821. #endif
  822. _starpu_init_topology(config);
  823. return config->topology.nhwcpus;
  824. }
  825. unsigned
  826. _starpu_topology_get_nhwpu (struct _starpu_machine_config *config)
  827. {
  828. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  829. _starpu_opencl_init();
  830. #endif
  831. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  832. _starpu_init_cuda();
  833. #endif
  834. _starpu_init_topology(config);
  835. return config->topology.nhwpus;
  836. }
  837. unsigned _starpu_topology_get_nnumanodes(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED)
  838. {
  839. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  840. _starpu_opencl_init();
  841. #endif
  842. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  843. _starpu_init_cuda();
  844. #endif
  845. _starpu_init_topology(config);
  846. int res;
  847. #if defined(STARPU_HAVE_HWLOC)
  848. if (starpu_get_env_number_default("STARPU_USE_NUMA", 0))
  849. {
  850. struct _starpu_machine_topology *topology = &config->topology ;
  851. int nnumanodes = hwloc_get_nbobjs_by_type(topology->hwtopology, HWLOC_OBJ_NODE) ;
  852. res = nnumanodes > 0 ? nnumanodes : 1 ;
  853. }
  854. else
  855. #endif
  856. {
  857. res = 1;
  858. }
STARPU_ASSERT_MSG(res <= STARPU_MAXNUMANODES, "Number of NUMA nodes discovered is higher than the maximum accepted! Use the configure option --enable-maxnumanodes=xxx to increase the maximum number of supported NUMA nodes.\n");
  860. return res;
  861. }
// TODO: change this into an array lookup
  863. int starpu_memory_nodes_numa_hwloclogid_to_id(int logid)
  864. {
  865. unsigned n;
  866. for (n = 0; n < nb_numa_nodes; n++)
  867. if (numa_memory_nodes_to_hwloclogid[n] == logid)
  868. return n;
  869. return -1;
  870. }
  871. int starpu_memory_nodes_numa_id_to_hwloclogid(unsigned id)
  872. {
  873. STARPU_ASSERT(id < STARPU_MAXNUMANODES);
  874. return numa_memory_nodes_to_hwloclogid[id];
  875. }
  876. int starpu_memory_nodes_numa_devid_to_id(unsigned id)
  877. {
  878. STARPU_ASSERT(id < STARPU_MAXNUMANODES);
  879. return numa_memory_nodes_to_physicalid[id];
  880. }
// TODO: change this into an array lookup
  882. int starpu_memory_nodes_numa_id_to_devid(int osid)
  883. {
  884. unsigned n;
  885. for (n = 0; n < nb_numa_nodes; n++)
  886. if (numa_memory_nodes_to_physicalid[n] == osid)
  887. return n;
  888. return -1;
  889. }
  890. #ifdef STARPU_HAVE_HWLOC
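/* Ask hwloc to keep the I/O objects (PCI devices and bridges) when discovering the topology */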
  891. void _starpu_topology_filter(hwloc_topology_t topology)
  892. {
  893. #if HWLOC_API_VERSION >= 0x20000
  894. hwloc_topology_set_io_types_filter(topology, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
  895. #else
  896. hwloc_topology_set_flags(topology, HWLOC_TOPOLOGY_FLAG_IO_DEVICES | HWLOC_TOPOLOGY_FLAG_IO_BRIDGES);
  897. #endif
  898. }
  899. #endif
  900. #ifdef STARPU_USE_MIC
  901. static void
  902. _starpu_init_mic_config (struct _starpu_machine_config *config,
  903. struct starpu_conf *user_conf,
  904. unsigned mic_idx)
  905. {
  906. // Configure the MIC device of index MIC_IDX.
  907. struct _starpu_machine_topology *topology = &config->topology;
  908. topology->nhwmiccores[mic_idx] = 0;
  909. _starpu_init_mic_topology (config, mic_idx);
  910. int nmiccores;
  911. nmiccores = starpu_get_env_number("STARPU_NMICTHREADS");
  912. if (nmiccores == -1)
  913. {
/* Nothing was specified, so let's use the number of
 * detected MIC cores. */
  916. nmiccores = topology->nhwmiccores[mic_idx];
  917. }
  918. else
  919. {
  920. if ((unsigned) nmiccores > topology->nhwmiccores[mic_idx])
  921. {
/* The user requires more MIC cores than are available */
  923. _STARPU_MSG("# Warning: %d MIC cores requested. Only %u available.\n", nmiccores, topology->nhwmiccores[mic_idx]);
  924. nmiccores = topology->nhwmiccores[mic_idx];
  925. }
  926. }
  927. topology->nmiccores[mic_idx] = nmiccores;
  928. STARPU_ASSERT_MSG(topology->nmiccores[mic_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
  929. "topology->nmiccores[mic_idx(%u)] (%u) + topology->nworkers (%u) <= STARPU_NMAXWORKERS (%d)",
  930. mic_idx, topology->nmiccores[mic_idx], topology->nworkers, STARPU_NMAXWORKERS);
  931. /* _starpu_initialize_workers_mic_deviceid (config); */
  932. mic_worker_set[mic_idx].workers = &config->workers[topology->nworkers];
  933. mic_worker_set[mic_idx].nworkers = topology->nmiccores[mic_idx];
  934. unsigned miccore_id;
  935. for (miccore_id = 0; miccore_id < topology->nmiccores[mic_idx]; miccore_id++)
  936. {
  937. int worker_idx = topology->nworkers + miccore_id;
  938. config->workers[worker_idx].set = &mic_worker_set[mic_idx];
  939. config->workers[worker_idx].arch = STARPU_MIC_WORKER;
  940. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  941. config->workers[worker_idx].perf_arch.ndevices = 1;
  942. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_MIC_WORKER;
  943. config->workers[worker_idx].perf_arch.devices[0].devid = mic_idx;
  944. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  945. config->workers[worker_idx].devid = mic_idx;
  946. config->workers[worker_idx].subworkerid = miccore_id;
  947. config->workers[worker_idx].worker_mask = STARPU_MIC;
  948. config->worker_mask |= STARPU_MIC;
  949. }
  950. topology->nworkers += topology->nmiccores[mic_idx];
  951. }
  952. static COIENGINE mic_handles[STARPU_MAXMICDEVS];
  953. COIPROCESS _starpu_mic_process[STARPU_MAXMICDEVS];
  954. #endif
  955. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  956. static void
  957. _starpu_init_mpi_config (struct _starpu_machine_config *config,
  958. struct starpu_conf *user_conf,
  959. unsigned mpi_idx)
  960. {
  961. struct _starpu_machine_topology *topology = &config->topology;
  962. topology->nhwmpicores[mpi_idx] = 0;
  963. _starpu_init_mpi_topology (config, mpi_idx);
  964. int nmpicores;
  965. nmpicores = starpu_get_env_number("STARPU_NMPIMSTHREADS");
  966. if (nmpicores == -1)
  967. {
/* Nothing was specified, so let's use the number of
 * detected MPI cores. */
  970. nmpicores = topology->nhwmpicores[mpi_idx];
  971. }
  972. else
  973. {
  974. if ((unsigned) nmpicores > topology->nhwmpicores[mpi_idx])
  975. {
/* The user requires more MPI cores than are available */
  977. _STARPU_MSG("# Warning: %d MPI cores requested. Only %u available.\n",
  978. nmpicores, topology->nhwmpicores[mpi_idx]);
  979. nmpicores = topology->nhwmpicores[mpi_idx];
  980. }
  981. }
  982. topology->nmpicores[mpi_idx] = nmpicores;
  983. STARPU_ASSERT_MSG(topology->nmpicores[mpi_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
  984. "topology->nmpicores[mpi_idx(%u)] (%u) + topology->nworkers (%u) <= STARPU_NMAXWORKERS (%d)",
  985. mpi_idx, topology->nmpicores[mpi_idx], topology->nworkers, STARPU_NMAXWORKERS);
  986. mpi_worker_set[mpi_idx].workers = &config->workers[topology->nworkers];
  987. mpi_worker_set[mpi_idx].nworkers = topology->nmpicores[mpi_idx];
  988. unsigned mpicore_id;
  989. for (mpicore_id = 0; mpicore_id < topology->nmpicores[mpi_idx]; mpicore_id++)
  990. {
  991. int worker_idx = topology->nworkers + mpicore_id;
  992. config->workers[worker_idx].set = &mpi_worker_set[mpi_idx];
  993. config->workers[worker_idx].arch = STARPU_MPI_MS_WORKER;
  994. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  995. config->workers[worker_idx].perf_arch.ndevices = 1;
  996. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_MPI_MS_WORKER;
  997. config->workers[worker_idx].perf_arch.devices[0].devid = mpi_idx;
  998. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  999. config->workers[worker_idx].devid = mpi_idx;
  1000. config->workers[worker_idx].subworkerid = mpicore_id;
  1001. config->workers[worker_idx].worker_mask = STARPU_MPI_MS;
  1002. config->worker_mask |= STARPU_MPI_MS;
  1003. }
  1004. mpi_ms_nodes[mpi_idx]->baseworkerid = topology->nworkers;
  1005. topology->nworkers += topology->nmpicores[mpi_idx];
  1006. }
  1007. #endif
  1008. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  1009. static void
  1010. _starpu_init_mp_config (struct _starpu_machine_config *config,
  1011. struct starpu_conf *user_conf, int no_mp_config)
  1012. {
  1013. /* Discover and configure the mp topology. That means:
  1014. * - discover the number of mp nodes;
  1015. * - initialize each discovered node;
  1016. * - discover the local topology (number of PUs/devices) of each node;
  1017. * - configure the workers accordingly.
  1018. */
  1019. #ifdef STARPU_USE_MIC
  1020. if (!no_mp_config)
  1021. {
  1022. struct _starpu_machine_topology *topology = &config->topology;
  1023. /* Discover and initialize the number of MIC nodes through the mp
  1024. * infrastructure. */
  1025. unsigned nhwmicdevices = _starpu_mic_src_get_device_count();
  1026. int reqmicdevices = starpu_get_env_number("STARPU_NMIC");
  1027. if (reqmicdevices == -1 && user_conf)
  1028. reqmicdevices = user_conf->nmic;
  1029. if (reqmicdevices == -1)
/* Nothing was specified, so let's use the number of
 * detected MIC devices. */
  1032. reqmicdevices = nhwmicdevices;
  1033. if (reqmicdevices != -1)
  1034. {
  1035. if ((unsigned) reqmicdevices > nhwmicdevices)
  1036. {
/* The user requires more MIC devices than are available */
  1038. _STARPU_MSG("# Warning: %d MIC devices requested. Only %u available.\n", reqmicdevices, nhwmicdevices);
  1039. reqmicdevices = nhwmicdevices;
  1040. }
  1041. }
  1042. topology->nmicdevices = 0;
  1043. unsigned i;
  1044. for (i = 0; i < (unsigned) reqmicdevices; i++)
  1045. if (0 == _starpu_init_mic_node (config, i, &mic_handles[i], &_starpu_mic_process[i]))
  1046. topology->nmicdevices++;
  1047. for (i = 0; i < topology->nmicdevices; i++)
  1048. _starpu_init_mic_config (config, user_conf, i);
  1049. }
  1050. #endif
  1051. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1052. {
  1053. struct _starpu_machine_topology *topology = &config->topology;
  1054. /* Discover and initialize the number of MPI nodes through the mp
  1055. * infrastructure. */
  1056. unsigned nhwmpidevices = _starpu_mpi_src_get_device_count();
  1057. int reqmpidevices = starpu_get_env_number("STARPU_NMPI_MS");
  1058. if (reqmpidevices == -1 && user_conf)
  1059. reqmpidevices = user_conf->nmpi_ms;
  1060. if (reqmpidevices == -1)
/* Nothing was specified, so let's use the number of
 * detected MPI devices. */
  1063. reqmpidevices = nhwmpidevices;
  1064. if (reqmpidevices != -1)
  1065. {
  1066. if ((unsigned) reqmpidevices > nhwmpidevices)
  1067. {
/* The user requires more MPI Master-Slave devices than are available */
  1069. _STARPU_MSG("# Warning: %d MPI Master-Slave devices requested. Only %u available.\n",
  1070. reqmpidevices, nhwmpidevices);
  1071. reqmpidevices = nhwmpidevices;
  1072. }
  1073. }
  1074. topology->nmpidevices = reqmpidevices;
/* if the user does not want to use MPI slaves, close the slave processes */
  1076. if (no_mp_config && topology->nmpidevices == 0)
  1077. {
  1078. _starpu_mpi_common_mp_deinit();
  1079. exit(0);
  1080. }
  1081. if (!no_mp_config)
  1082. {
  1083. unsigned i;
  1084. for (i = 0; i < topology->nmpidevices; i++)
  1085. mpi_ms_nodes[i] = _starpu_mp_common_node_create(STARPU_NODE_MPI_SOURCE, i);
  1086. for (i = 0; i < topology->nmpidevices; i++)
  1087. _starpu_init_mpi_config (config, user_conf, i);
  1088. }
  1089. }
  1090. #endif
  1091. }
  1092. #endif
  1093. #ifdef STARPU_USE_MIC
  1094. static void
  1095. _starpu_deinit_mic_node (unsigned mic_idx)
  1096. {
  1097. _starpu_mp_common_send_command(mic_nodes[mic_idx], STARPU_MP_COMMAND_EXIT, NULL, 0);
  1098. COIProcessDestroy(_starpu_mic_process[mic_idx], -1, 0, NULL, NULL);
  1099. _starpu_mp_common_node_destroy(mic_nodes[mic_idx]);
  1100. }
  1101. #endif
  1102. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1103. static void _starpu_deinit_mpi_node(int devid)
  1104. {
  1105. _starpu_mp_common_send_command(mpi_ms_nodes[devid], STARPU_MP_COMMAND_EXIT, NULL, 0);
  1106. _starpu_mp_common_node_destroy(mpi_ms_nodes[devid]);
  1107. }
  1108. #endif
  1109. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  1110. static void
  1111. _starpu_deinit_mp_config (struct _starpu_machine_config *config)
  1112. {
  1113. struct _starpu_machine_topology *topology = &config->topology;
  1114. unsigned i;
  1115. #ifdef STARPU_USE_MIC
  1116. for (i = 0; i < topology->nmicdevices; i++)
  1117. _starpu_deinit_mic_node (i);
  1118. _starpu_mic_clear_kernels();
  1119. #endif
  1120. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1121. for (i = 0; i < topology->nmpidevices; i++)
  1122. _starpu_deinit_mpi_node (i);
  1123. #endif
  1124. }
  1125. #endif
  1126. #ifdef STARPU_HAVE_HWLOC
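/* Recursively accumulate, in each hwloc object's userdata, the number of GPUs found below that object */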
  1127. static unsigned
  1128. _starpu_topology_count_ngpus(hwloc_obj_t obj)
  1129. {
  1130. struct _starpu_hwloc_userdata *data = obj->userdata;
  1131. unsigned n = data->ngpus;
  1132. unsigned i;
  1133. for (i = 0; i < obj->arity; i++)
  1134. n += _starpu_topology_count_ngpus(obj->children[i]);
  1135. data->ngpus = n;
  1136. #ifdef STARPU_VERBOSE
  1137. {
  1138. char name[64];
  1139. hwloc_obj_type_snprintf(name, sizeof(name), obj, 0);
  1140. _STARPU_DEBUG("hwloc obj %s has %u GPUs below\n", name, n);
  1141. }
  1142. #endif
  1143. return n;
  1144. }
  1145. #endif
  1146. static int
  1147. _starpu_init_machine_config(struct _starpu_machine_config *config, int no_mp_config STARPU_ATTRIBUTE_UNUSED)
  1148. {
  1149. int i;
  1150. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  1151. {
  1152. config->workers[i].workerid = i;
  1153. config->workers[i].set = NULL;
  1154. }
  1155. struct _starpu_machine_topology *topology = &config->topology;
  1156. topology->nworkers = 0;
  1157. topology->ncombinedworkers = 0;
  1158. topology->nsched_ctxs = 0;
  1159. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  1160. _starpu_opencl_init();
  1161. #endif
  1162. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1163. _starpu_init_cuda();
  1164. #endif
  1165. _starpu_init_topology(config);
  1166. _starpu_initialize_workers_bindid(config);
  1167. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1168. for (i = 0; i < (int) (sizeof(cuda_worker_set)/sizeof(cuda_worker_set[0])); i++)
  1169. cuda_worker_set[i].workers = NULL;
  1170. #endif
  1171. #ifdef STARPU_USE_MIC
  1172. for (i = 0; i < (int) (sizeof(mic_worker_set)/sizeof(mic_worker_set[0])); i++)
  1173. mic_worker_set[i].workers = NULL;
  1174. #endif
  1175. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1176. for (i = 0; i < (int) (sizeof(mpi_worker_set)/sizeof(mpi_worker_set[0])); i++)
  1177. mpi_worker_set[i].workers = NULL;
  1178. #endif
  1179. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1180. int ncuda = config->conf.ncuda;
  1181. int nworker_per_cuda = starpu_get_env_number_default("STARPU_NWORKER_PER_CUDA", 1);
  1182. STARPU_ASSERT_MSG(nworker_per_cuda > 0, "STARPU_NWORKER_PER_CUDA has to be > 0");
  1183. STARPU_ASSERT_MSG(nworker_per_cuda < STARPU_NMAXWORKERS, "STARPU_NWORKER_PER_CUDA (%d) cannot be higher than STARPU_NMAXWORKERS (%d)\n", nworker_per_cuda, STARPU_NMAXWORKERS);
  1184. #ifndef STARPU_NON_BLOCKING_DRIVERS
  1185. if (nworker_per_cuda > 1)
  1186. {
  1187. _STARPU_DISP("Warning: reducing STARPU_NWORKER_PER_CUDA to 1 because blocking drivers are enabled\n");
  1188. nworker_per_cuda = 1;
  1189. }
  1190. #endif
  1191. if (ncuda != 0)
  1192. {
  1193. /* The user did not disable CUDA. We need to initialize CUDA
  1194. * early to count the number of devices */
  1195. _starpu_init_cuda();
  1196. int nb_devices = _starpu_get_cuda_device_count();
  1197. if (ncuda == -1)
  1198. {
  1199. /* Nothing was specified, so let's choose ! */
  1200. ncuda = nb_devices;
  1201. }
  1202. else
  1203. {
  1204. if (ncuda > nb_devices)
  1205. {
/* The user requires more CUDA devices than
 * are available */
  1208. _STARPU_DISP("Warning: %d CUDA devices requested. Only %d available.\n", ncuda, nb_devices);
  1209. ncuda = nb_devices;
  1210. }
  1211. }
  1212. }
  1213. /* Now we know how many CUDA devices will be used */
  1214. topology->ncudagpus = ncuda;
  1215. topology->nworkerpercuda = nworker_per_cuda;
  1216. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  1217. _starpu_initialize_workers_cuda_gpuid(config);
  1218. /* allow having one worker per stream */
  1219. topology->cuda_th_per_stream = starpu_get_env_number_default("STARPU_CUDA_THREAD_PER_WORKER", -1);
  1220. topology->cuda_th_per_dev = starpu_get_env_number_default("STARPU_CUDA_THREAD_PER_DEV", -1);
  1221. /* per device by default */
  1222. if (topology->cuda_th_per_dev == -1)
  1223. {
  1224. if (topology->cuda_th_per_stream == 1)
  1225. topology->cuda_th_per_dev = 0;
  1226. else
  1227. topology->cuda_th_per_dev = 1;
  1228. }
  1229. /* Not per stream by default */
  1230. if (topology->cuda_th_per_stream == -1)
  1231. {
  1232. topology->cuda_th_per_stream = 0;
  1233. }
  1234. STARPU_ASSERT_MSG(topology->cuda_th_per_dev != 1 || topology->cuda_th_per_stream != 1, "It does not make sense to set both STARPU_CUDA_THREAD_PER_WORKER and STARPU_CUDA_THREAD_PER_DEV to 1, please choose either per worker or per device or none");
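	/* Summary of the resulting layouts: with cuda_th_per_dev (the default),
	 * each CUDA device gets its own worker set; with neither flag set, all
	 * CUDA workers share a single set; with cuda_th_per_stream, every CUDA
	 * worker gets its own set, i.e. one driver thread per stream. */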
	if (!topology->cuda_th_per_dev)
	{
		cuda_worker_set[0].workers = &config->workers[topology->nworkers];
		cuda_worker_set[0].nworkers = topology->ncudagpus * nworker_per_cuda;
	}
	unsigned cudagpu;
	for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
	{
		int devid = _starpu_get_next_cuda_gpuid(config);
		int worker_idx0 = topology->nworkers + cudagpu * nworker_per_cuda;
		struct _starpu_worker_set *worker_set;
		if (topology->cuda_th_per_dev)
		{
			worker_set = &cuda_worker_set[devid];
			worker_set->workers = &config->workers[worker_idx0];
			worker_set->nworkers = nworker_per_cuda;
		}
		else
		{
			/* Same worker set for all devices */
			worker_set = &cuda_worker_set[0];
		}
		for (i = 0; i < nworker_per_cuda; i++)
		{
			int worker_idx = worker_idx0 + i;
			if (topology->cuda_th_per_stream)
			{
				/* Just one worker in the set */
				_STARPU_CALLOC(config->workers[worker_idx].set, 1, sizeof(struct _starpu_worker_set));
				config->workers[worker_idx].set->workers = &config->workers[worker_idx];
				config->workers[worker_idx].set->nworkers = 1;
			}
			else
				config->workers[worker_idx].set = worker_set;
			config->workers[worker_idx].arch = STARPU_CUDA_WORKER;
			_STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
			config->workers[worker_idx].perf_arch.ndevices = 1;
			config->workers[worker_idx].perf_arch.devices[0].type = STARPU_CUDA_WORKER;
			config->workers[worker_idx].perf_arch.devices[0].devid = devid;
			// TODO: fix perfmodels etc.
			//config->workers[worker_idx].perf_arch.ncore = nworker_per_cuda - 1;
			config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
			config->workers[worker_idx].devid = devid;
			config->workers[worker_idx].subworkerid = i;
			config->workers[worker_idx].worker_mask = STARPU_CUDA;
			config->worker_mask |= STARPU_CUDA;
			struct handle_entry *entry;
			HASH_FIND_INT(devices_using_cuda, &devid, entry);
			if (!entry)
			{
				_STARPU_MALLOC(entry, sizeof(*entry));
				entry->gpuid = devid;
				HASH_ADD_INT(devices_using_cuda, gpuid, entry);
			}
		}
#ifndef STARPU_SIMGRID
#if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
		{
			hwloc_obj_t obj = hwloc_cuda_get_device_osdev_by_index(topology->hwtopology, devid);
			if (obj)
			{
				struct _starpu_hwloc_userdata *data = obj->userdata;
				data->ngpus++;
			}
			else
			{
				_STARPU_DISP("Warning: could not find location of CUDA%d, do you have the hwloc CUDA plugin installed?\n", devid);
			}
		}
#endif
#endif
	}
	topology->nworkers += topology->ncudagpus * nworker_per_cuda;
#endif
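	/* Detect OpenCL devices and create one worker per device. */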
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	int nopencl = config->conf.nopencl;
	if (nopencl != 0)
	{
		/* The user did not disable OpenCL. We need to initialize
		 * OpenCL early to count the number of devices */
		_starpu_opencl_init();
		int nb_devices;
		nb_devices = _starpu_opencl_get_device_count();
		if (nopencl == -1)
		{
			/* Nothing was specified, so let's choose! */
			nopencl = nb_devices;
			if (nopencl > STARPU_MAXOPENCLDEVS)
			{
				_STARPU_DISP("Warning: %d OpenCL devices available. Only %d enabled. Use configure option --enable-maxopencldev=xxx to update the maximum value of supported OpenCL devices.\n", nb_devices, STARPU_MAXOPENCLDEVS);
				nopencl = STARPU_MAXOPENCLDEVS;
			}
		}
		else
		{
			/* Let's make sure this value is OK. */
			if (nopencl > nb_devices)
			{
				/* The user requested more OpenCL devices than
				 * there are available */
				_STARPU_DISP("Warning: %d OpenCL devices requested. Only %d available.\n", nopencl, nb_devices);
				nopencl = nb_devices;
			}
			/* Let's make sure this value is OK. */
			if (nopencl > STARPU_MAXOPENCLDEVS)
			{
				_STARPU_DISP("Warning: %d OpenCL devices requested. Only %d enabled. Use configure option --enable-maxopencldev=xxx to update the maximum value of supported OpenCL devices.\n", nopencl, STARPU_MAXOPENCLDEVS);
				nopencl = STARPU_MAXOPENCLDEVS;
			}
		}
	}
	topology->nopenclgpus = nopencl;
	STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
	_starpu_initialize_workers_opencl_gpuid(config);
	unsigned openclgpu;
	for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
	{
		int worker_idx = topology->nworkers + openclgpu;
		int devid = _starpu_get_next_opencl_gpuid(config);
		if (devid == -1)
		{
			// There are no more devices left
			topology->nopenclgpus = openclgpu;
			break;
		}
		config->workers[worker_idx].arch = STARPU_OPENCL_WORKER;
		_STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
		config->workers[worker_idx].perf_arch.ndevices = 1;
		config->workers[worker_idx].perf_arch.devices[0].type = STARPU_OPENCL_WORKER;
		config->workers[worker_idx].perf_arch.devices[0].devid = devid;
		config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
		config->workers[worker_idx].subworkerid = 0;
		config->workers[worker_idx].devid = devid;
		config->workers[worker_idx].worker_mask = STARPU_OPENCL;
		config->worker_mask |= STARPU_OPENCL;
	}
	topology->nworkers += topology->nopenclgpus;
#endif
#ifdef STARPU_USE_SCC
	int nscc = config->conf.nscc;
	unsigned nb_scc_nodes = _starpu_scc_src_get_device_count();
	if (nscc != 0)
	{
		/* The user did not disable SCC. We need to count
		 * the number of devices */
		int nb_devices = nb_scc_nodes;
		if (nscc == -1)
		{
			/* Nothing was specified, so let's choose! */
			nscc = nb_devices;
			if (nscc > STARPU_MAXSCCDEVS)
			{
				_STARPU_DISP("Warning: %d SCC devices available. Only %d enabled. Use configure option --enable-maxsccdev=xxx to update the maximum value of supported SCC devices.\n", nb_devices, STARPU_MAXSCCDEVS);
				nscc = STARPU_MAXSCCDEVS;
			}
		}
		else
		{
			/* Let's make sure this value is OK. */
			if (nscc > nb_devices)
			{
				/* The user requested more SCC devices than there are available */
				_STARPU_DISP("Warning: %d SCC devices requested. Only %d available.\n", nscc, nb_devices);
				nscc = nb_devices;
			}
			/* Let's make sure this value is OK. */
			if (nscc > STARPU_MAXSCCDEVS)
			{
				_STARPU_DISP("Warning: %d SCC devices requested. Only %d enabled. Use configure option --enable-maxsccdev=xxx to update the maximum value of supported SCC devices.\n", nscc, STARPU_MAXSCCDEVS);
				nscc = STARPU_MAXSCCDEVS;
			}
		}
	}
	/* Now we know how many SCC devices will be used */
	topology->nsccdevices = nscc;
	STARPU_ASSERT(topology->nsccdevices + topology->nworkers <= STARPU_NMAXWORKERS);
	_starpu_initialize_workers_scc_deviceid(config);
	unsigned sccdev;
	for (sccdev = 0; sccdev < topology->nsccdevices; sccdev++)
	{
		config->workers[topology->nworkers + sccdev].arch = STARPU_SCC_WORKER;
		int devid = _starpu_get_next_scc_deviceid(config);
		_STARPU_MALLOC(config->workers[topology->nworkers + sccdev].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
		config->workers[topology->nworkers + sccdev].perf_arch.ndevices = 1;
		config->workers[topology->nworkers + sccdev].perf_arch.devices[0].type = STARPU_SCC_WORKER;
		config->workers[topology->nworkers + sccdev].perf_arch.devices[0].devid = sccdev;
		config->workers[topology->nworkers + sccdev].perf_arch.devices[0].ncores = 1;
		config->workers[topology->nworkers + sccdev].subworkerid = 0;
		config->workers[topology->nworkers + sccdev].devid = devid;
		config->workers[topology->nworkers + sccdev].worker_mask = STARPU_SCC;
		config->worker_mask |= STARPU_SCC;
	}
	for (; sccdev < nb_scc_nodes; ++sccdev)
		_starpu_scc_exit_useless_node(sccdev);
	topology->nworkers += topology->nsccdevices;
#endif /* STARPU_USE_SCC */
#if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
	_starpu_init_mp_config (config, &config->conf, no_mp_config);
#endif
	/* We put the CPU section after the accelerators: in case an
	 * accelerator was found, we devote one CPU to it */
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
	int ncpu = config->conf.ncpus;
	if (ncpu != 0)
	{
		if (ncpu == -1)
		{
			unsigned mic_busy_cpus = 0;
			int j = 0;
			for (j = 0; j < STARPU_MAXMICDEVS; j++)
				mic_busy_cpus += (topology->nmiccores[j] ? 1 : 0);
			unsigned mpi_ms_busy_cpus = 0;
#ifdef STARPU_USE_MPI_MASTER_SLAVE
#ifdef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
			for (j = 0; j < STARPU_MAXMPIDEVS; j++)
				mpi_ms_busy_cpus += (topology->nmpicores[j] ? 1 : 0);
#else
			mpi_ms_busy_cpus = 1; /* we launch one thread to control all slaves */
#endif
#endif /* STARPU_USE_MPI_MASTER_SLAVE */
			unsigned cuda_busy_cpus = 0;
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
			cuda_busy_cpus =
				topology->cuda_th_per_dev == 0 && topology->cuda_th_per_stream == 0 ?
					(topology->ncudagpus ? 1 : 0) :
				topology->cuda_th_per_stream ?
					(nworker_per_cuda * topology->ncudagpus) :
					topology->ncudagpus;
#endif
			unsigned already_busy_cpus = mpi_ms_busy_cpus + mic_busy_cpus
				+ cuda_busy_cpus
				+ topology->nopenclgpus + topology->nsccdevices;
			long avail_cpus = (long) topology->nhwcpus - (long) already_busy_cpus;
			if (avail_cpus < 0)
				avail_cpus = 0;
			int nth_per_core = starpu_get_env_number_default("STARPU_NTHREADS_PER_CORE", 1);
			avail_cpus *= nth_per_core;
			ncpu = STARPU_MIN(avail_cpus, STARPU_MAXCPUS);
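			/* For instance, on a 12-core machine with 2 CUDA devices driven
			 * by one thread each (cuda_th_per_dev), already_busy_cpus is 2,
			 * so 10 CPU workers are created (with STARPU_NTHREADS_PER_CORE=1
			 * and STARPU_MAXCPUS large enough). */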
		}
		else
		{
			if (ncpu > STARPU_MAXCPUS)
			{
				_STARPU_DISP("Warning: %d CPU devices requested. Only %d enabled. Use configure option --enable-maxcpus=xxx to update the maximum value of supported CPU devices.\n", ncpu, STARPU_MAXCPUS);
				ncpu = STARPU_MAXCPUS;
			}
		}
	}
	topology->ncpus = ncpu;
	STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
	unsigned cpu;
	unsigned homogeneous = starpu_get_env_number_default("STARPU_PERF_MODEL_HOMOGENEOUS_CPU", 1);
	for (cpu = 0; cpu < topology->ncpus; cpu++)
	{
		int worker_idx = topology->nworkers + cpu;
		config->workers[worker_idx].arch = STARPU_CPU_WORKER;
		_STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
		config->workers[worker_idx].perf_arch.ndevices = 1;
		config->workers[worker_idx].perf_arch.devices[0].type = STARPU_CPU_WORKER;
		config->workers[worker_idx].perf_arch.devices[0].devid = homogeneous ? 0 : cpu;
		config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
		config->workers[worker_idx].subworkerid = 0;
		config->workers[worker_idx].devid = cpu;
		config->workers[worker_idx].worker_mask = STARPU_CPU;
		config->worker_mask |= STARPU_CPU;
	}
	topology->nworkers += topology->ncpus;
#endif
	if (topology->nworkers == 0)
	{
		_STARPU_DEBUG("No worker found, aborting ...\n");
		return -ENODEV;
	}
	return 0;
}
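/* Release everything allocated by _starpu_init_machine_config: per-worker
 * perfmodel device arrays, hwloc cpusets and worker lists, the bindid/worker
 * mapping, and the hwloc topology itself. */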
void _starpu_destroy_machine_config(struct _starpu_machine_config *config)
{
	_starpu_close_debug_logfile();
	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
		struct _starpu_worker *workerarg = &config->workers[worker];
		int bindid = workerarg->bindid;
		free(workerarg->perf_arch.devices);
#ifdef STARPU_HAVE_HWLOC
		hwloc_bitmap_free(workerarg->hwloc_cpu_set);
		if (bindid != -1)
		{
			hwloc_obj_t worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
									config->pu_depth,
									bindid);
			struct _starpu_hwloc_userdata *data = worker_obj->userdata;
			if (data->worker_list)
			{
				_starpu_worker_list_delete(data->worker_list);
				data->worker_list = NULL;
			}
		}
#endif
		if (bindid != -1)
		{
			free(config->bindid_workers[bindid].workerids);
			config->bindid_workers[bindid].workerids = NULL;
		}
	}
	free(config->bindid_workers);
	config->bindid_workers = NULL;
	config->nbindid = 0;
	unsigned combined_worker_id;
	for (combined_worker_id = 0; combined_worker_id < config->topology.ncombinedworkers; combined_worker_id++)
	{
		struct _starpu_combined_worker *combined_worker = &config->combined_workers[combined_worker_id];
#ifdef STARPU_HAVE_HWLOC
		hwloc_bitmap_free(combined_worker->hwloc_cpu_set);
#endif
		free(combined_worker->perf_arch.devices);
	}
#ifdef STARPU_HAVE_HWLOC
	_starpu_deallocate_topology_userdata(hwloc_get_root_obj(config->topology.hwtopology));
	hwloc_topology_destroy(config->topology.hwtopology);
#endif
	topology_is_initialized = 0;
#ifdef STARPU_USE_CUDA
	struct handle_entry *entry, *tmp;
	HASH_ITER(hh, devices_using_cuda, entry, tmp)
	{
		HASH_DEL(devices_using_cuda, entry);
		free(entry);
	}
	devices_using_cuda = NULL;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	int i;
	for (i = 0; i < STARPU_NARCH; i++)
		may_bind_automatically[i] = 0;
#endif
}
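/* Bind the calling thread to PU cpuid, using hwloc when available and falling
 * back to pthread_setaffinity_np or SetThreadAffinityMask otherwise.  Also
 * record which worker owns the PU so that double bindings can be reported. */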
void
_starpu_bind_thread_on_cpu (
	int cpuid STARPU_ATTRIBUTE_UNUSED, int workerid STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_SIMGRID
	return;
#else
	if (nobind > 0)
		return;
	if (cpuid < 0)
		return;
	if (workerid != STARPU_NOWORKERID && cpuid < STARPU_MAXCPUS)
	{
		int previous = cpu_worker[cpuid];
		if (previous != STARPU_NOWORKERID && previous != workerid)
			_STARPU_DISP("Warning: both workers %d and %d are bound to the same PU %d, this will strongly degrade performance. Maybe check starpu_machine_display's output to determine which binding went wrong\n", previous, workerid, cpuid);
		else
			cpu_worker[cpuid] = workerid;
	}
#ifdef STARPU_HAVE_HWLOC
	const struct hwloc_topology_support *support;
#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_init_topology(config);
	support = hwloc_topology_get_support (config->topology.hwtopology);
	if (support->cpubind->set_thisthread_cpubind)
	{
		hwloc_obj_t obj =
			hwloc_get_obj_by_depth (config->topology.hwtopology,
						config->pu_depth, cpuid);
		hwloc_bitmap_t set = obj->cpuset;
		int ret;
		hwloc_bitmap_singlify(set);
		ret = hwloc_set_cpubind (config->topology.hwtopology, set,
					 HWLOC_CPUBIND_THREAD);
		if (ret)
		{
			perror("hwloc_set_cpubind");
			STARPU_ABORT();
		}
	}
#elif defined(HAVE_PTHREAD_SETAFFINITY_NP) && defined(__linux__)
	int ret;
	/* fix the thread on the correct cpu */
	cpu_set_t aff_mask;
	CPU_ZERO(&aff_mask);
	CPU_SET(cpuid, &aff_mask);
	starpu_pthread_t self = starpu_pthread_self();
	ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
	if (ret)
	{
		const char *msg = strerror(ret);
		_STARPU_MSG("pthread_setaffinity_np: %s\n", msg);
		STARPU_ABORT();
	}
#elif defined(_WIN32)
	DWORD mask = 1 << cpuid;
	if (!SetThreadAffinityMask(GetCurrentThread(), mask))
	{
		_STARPU_ERROR("SetThreadAffinityMask(%lx) failed\n", mask);
	}
#else
#warning no CPU binding support
#endif
#endif
}
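/* Bind the calling thread to the whole set of PUs assigned to a combined
 * (parallel) worker, rather than to a single PU. */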
void
_starpu_bind_thread_on_cpus (
	struct _starpu_combined_worker *combined_worker STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_SIMGRID
	return;
#endif
#ifdef STARPU_HAVE_HWLOC
	const struct hwloc_topology_support *support;
#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_init_topology(config);
	support = hwloc_topology_get_support(config->topology.hwtopology);
	if (support->cpubind->set_thisthread_cpubind)
	{
		hwloc_bitmap_t set = combined_worker->hwloc_cpu_set;
		int ret;
		ret = hwloc_set_cpubind (config->topology.hwtopology, set,
					 HWLOC_CPUBIND_THREAD);
		if (ret)
		{
			perror("binding thread");
			STARPU_ABORT();
		}
	}
#else
#ifdef __GLIBC__
	sched_setaffinity(0, sizeof(combined_worker->cpu_set), &combined_worker->cpu_set);
#else
# warning no parallel worker CPU binding support
#endif
#endif
}
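/* Reserve one binding slot (i.e. one PU) for each CPU worker; accelerator
 * workers get their binding later, in _starpu_init_workers_binding_and_memory. */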
static void _starpu_init_binding_cpu(struct _starpu_machine_config *config)
{
	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
		struct _starpu_worker *workerarg = &config->workers[worker];
		switch (workerarg->arch)
		{
			case STARPU_CPU_WORKER:
			{
				/* Dedicate a cpu core to that worker */
				workerarg->bindid = _starpu_get_next_bindid(config, NULL, 0);
				break;
			}
			default:
				/* Do nothing */
				break;
		}
	}
}
// TODO: Check SIMGRID
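/* Register the NUMA memory nodes StarPU will use.  When STARPU_USE_NUMA is
 * set, first take the NUMA nodes of the CPU workers, then the NUMA nodes
 * attached to CUDA/OpenCL devices; if nothing was found (or NUMA support is
 * disabled), fall back to registering the available NUMA nodes, or a single
 * main RAM node when there is only one. */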
static void _starpu_init_numa_node(struct _starpu_machine_config *config)
{
	nb_numa_nodes = 0;
	unsigned i;
	for (i = 0; i < STARPU_MAXNUMANODES; i++)
	{
		numa_memory_nodes_to_hwloclogid[i] = STARPU_NUMA_UNINITIALIZED;
		numa_memory_nodes_to_physicalid[i] = STARPU_NUMA_UNINITIALIZED;
	}
#ifdef STARPU_SIMGRID
	char name[16];
	msg_host_t host;
#endif
	int numa_enabled = starpu_get_env_number_default("STARPU_USE_NUMA", 0);
	/* NUMA mode activated */
	if (numa_enabled)
	{
		/* Take all NUMA nodes used by CPU workers */
		unsigned worker;
		for (worker = 0; worker < config->topology.nworkers; worker++)
		{
			struct _starpu_worker *workerarg = &config->workers[worker];
			if (workerarg->arch == STARPU_CPU_WORKER)
			{
				int numa_logical_id = _starpu_get_logical_numa_node_worker(worker);
				/* Convert logical id to StarPU id to check if this NUMA node is already saved or not */
				int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(numa_logical_id);
				/* This shouldn't happen */
				if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
				{
					_STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
					STARPU_ABORT();
				}
				if (numa_starpu_id == -1)
				{
					int devid = numa_logical_id == STARPU_NUMA_MAIN_RAM ? 0 : numa_logical_id;
					int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, devid);
					STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
					numa_memory_nodes_to_hwloclogid[memnode] = numa_logical_id;
					int numa_physical_id = _starpu_get_physical_numa_node_worker(worker);
					numa_memory_nodes_to_physicalid[memnode] = numa_physical_id;
					nb_numa_nodes++;
#ifdef STARPU_SIMGRID
					snprintf(name, sizeof(name), "RAM%d", memnode);
					host = _starpu_simgrid_get_host_by_name(name);
					STARPU_ASSERT(host);
					_starpu_simgrid_memory_node_set_host(memnode, host);
#endif
				}
			}
		}
		/* If we found NUMA nodes from CPU workers, it's good */
		if (nb_numa_nodes != 0)
			return;
		_STARPU_DISP("No NUMA nodes found when checking CPU workers...\n");
#if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && defined(STARPU_HAVE_HWLOC)
		_STARPU_DISP("Take NUMA nodes attached to CUDA and OpenCL devices...\n");
#endif
#if defined(STARPU_USE_CUDA) && defined(STARPU_HAVE_HWLOC)
		for (i = 0; i < config->topology.ncudagpus; i++)
		{
			hwloc_obj_t obj = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, i);
			/* If we don't find a "node" obj before the root, this means
			 * hwloc does not know whether there are numa nodes or not, so
			 * we should not use a per-node sampling in that case. */
			while (obj && obj->type != HWLOC_OBJ_NODE)
				obj = obj->parent;
			/* Hwloc cannot recognize some devices */
			if (!obj)
				continue;
			int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(obj->logical_index);
			/* This shouldn't happen */
			if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
			{
				_STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
				STARPU_ABORT();
			}
			if (numa_starpu_id == -1)
			{
				int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, obj->logical_index);
				STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
				numa_memory_nodes_to_hwloclogid[memnode] = obj->logical_index;
				numa_memory_nodes_to_physicalid[memnode] = obj->os_index;
				nb_numa_nodes++;
#ifdef STARPU_SIMGRID
				snprintf(name, sizeof(name), "RAM%d", memnode);
				host = _starpu_simgrid_get_host_by_name(name);
				STARPU_ASSERT(host);
				_starpu_simgrid_memory_node_set_host(memnode, host);
#endif
			}
		}
#endif
#if defined(STARPU_USE_OPENCL) && defined(STARPU_HAVE_HWLOC)
		if (config->topology.nopenclgpus > 0)
		{
			cl_int err;
			cl_platform_id platform_id[_STARPU_OPENCL_PLATFORM_MAX];
			cl_uint nb_platforms;
			unsigned platform;
			unsigned nb_opencl_devices = 0, num = 0;
			err = clGetPlatformIDs(_STARPU_OPENCL_PLATFORM_MAX, platform_id, &nb_platforms);
			if (STARPU_UNLIKELY(err != CL_SUCCESS))
				nb_platforms = 0;
			cl_device_type device_type = CL_DEVICE_TYPE_GPU|CL_DEVICE_TYPE_ACCELERATOR;
			if (starpu_get_env_number("STARPU_OPENCL_ON_CPUS") > 0)
				device_type |= CL_DEVICE_TYPE_CPU;
			if (starpu_get_env_number("STARPU_OPENCL_ONLY_ON_CPUS") > 0)
				device_type = CL_DEVICE_TYPE_CPU;
			for (platform = 0; platform < nb_platforms; platform++)
			{
				err = clGetDeviceIDs(platform_id[platform], device_type, 0, NULL, &num);
				if (err != CL_SUCCESS)
					num = 0;
				nb_opencl_devices += num;
				for (i = 0; i < num; i++)
				{
					hwloc_obj_t obj = hwloc_opencl_get_device_osdev_by_index(config->topology.hwtopology, platform, i);
					/* If we don't find a "node" obj before the root, this means
					 * hwloc does not know whether there are numa nodes or not, so
					 * we should not use a per-node sampling in that case. */
					while (obj && obj->type != HWLOC_OBJ_NODE)
						obj = obj->parent;
					/* Hwloc cannot recognize some devices */
					if (!obj)
						continue;
					int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(obj->logical_index);
					/* This shouldn't happen */
					if (numa_starpu_id == -1 && nb_numa_nodes == STARPU_MAXNUMANODES)
					{
						_STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
						STARPU_ABORT();
					}
					if (numa_starpu_id == -1)
					{
						int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, obj->logical_index);
						STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available)", memnode, STARPU_MAXNUMANODES);
						numa_memory_nodes_to_hwloclogid[memnode] = obj->logical_index;
						numa_memory_nodes_to_physicalid[memnode] = obj->os_index;
						nb_numa_nodes++;
#ifdef STARPU_SIMGRID
						snprintf(name, sizeof(name), "RAM%d", memnode);
						host = _starpu_simgrid_get_host_by_name(name);
						STARPU_ASSERT(host);
						_starpu_simgrid_memory_node_set_host(memnode, host);
#endif
					}
				}
			}
		}
#endif
	}
#if (defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)) && defined(STARPU_HAVE_HWLOC)
	/* We found NUMA nodes from the GPU devices, we are done */
	if (nb_numa_nodes != 0)
		return;
	/* In case we do not find any NUMA node attached to GPUs, we take all of them */
	if (numa_enabled)
		_STARPU_DISP("No NUMA nodes found when checking GPU devices...\n");
#endif
	if (numa_enabled)
		_STARPU_DISP("Finally, take all NUMA nodes available... \n");
	unsigned nnuma = _starpu_topology_get_nnumanodes(config);
	if (nnuma > STARPU_MAXNUMANODES)
	{
		_STARPU_MSG("Warning: %u NUMA nodes available. Only %u enabled. Use configure option --enable-maxnumanodes=xxx to update the maximum value of supported NUMA nodes.\n", _starpu_topology_get_nnumanodes(config), STARPU_MAXNUMANODES);
		nnuma = STARPU_MAXNUMANODES;
	}
	unsigned numa;
	for (numa = 0; numa < nnuma; numa++)
	{
#if defined(STARPU_HAVE_HWLOC)
		if (nnuma > 1)
		{
			hwloc_obj_t obj = hwloc_get_obj_by_type(config->topology.hwtopology, HWLOC_OBJ_NUMANODE, numa);
			unsigned numa_logical_id = obj->logical_index;
			unsigned numa_physical_id = obj->os_index;
			int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, 0);
			STARPU_ASSERT_MSG(memnode < STARPU_MAXNUMANODES, "Wrong Memory Node : %d (only %d available) \n", memnode, STARPU_MAXNUMANODES);
			numa_memory_nodes_to_hwloclogid[memnode] = numa_logical_id;
			numa_memory_nodes_to_physicalid[memnode] = numa_physical_id;
			nb_numa_nodes++;
#ifdef STARPU_SIMGRID
			snprintf(name, sizeof(name), "RAM%d", memnode);
			host = _starpu_simgrid_get_host_by_name(name);
			STARPU_ASSERT(host);
			_starpu_simgrid_memory_node_set_host(memnode, host);
#endif
		}
		else
#endif /* defined(STARPU_HAVE_HWLOC) */
		{
			/* In this case there is only one NUMA node */
			int memnode = _starpu_memory_node_register(STARPU_CPU_RAM, 0);
			STARPU_ASSERT_MSG(memnode == STARPU_MAIN_RAM, "Wrong Memory Node : %d (expected %d) \n", memnode, STARPU_MAIN_RAM);
			numa_memory_nodes_to_hwloclogid[memnode] = STARPU_NUMA_MAIN_RAM;
			numa_memory_nodes_to_physicalid[memnode] = STARPU_NUMA_MAIN_RAM;
			nb_numa_nodes++;
#ifdef STARPU_SIMGRID
			host = _starpu_simgrid_get_host_by_name("RAM");
			STARPU_ASSERT(host);
			_starpu_simgrid_memory_node_set_host(STARPU_MAIN_RAM, host);
#endif
		}
	}
	STARPU_ASSERT_MSG(nb_numa_nodes > 0, "No NUMA node found... We need at least one memory node!\n");
}
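/* Register a bus between each ordered pair of distinct NUMA nodes. */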
static void _starpu_init_numa_bus()
{
	unsigned i, j;
	for (i = 0; i < nb_numa_nodes; i++)
		for (j = 0; j < nb_numa_nodes; j++)
			if (i != j)
				numa_bus_id[i*nb_numa_nodes+j] = _starpu_register_bus(i, j);
}
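/* For each worker, pick (or create) the memory node backing its device,
 * register the buses between that node and the NUMA nodes (and between CUDA
 * devices when peer copies are possible), and record the PU the worker is
 * bound to in the bindid/worker mapping. */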
static void
_starpu_init_workers_binding_and_memory (struct _starpu_machine_config *config, int no_mp_config STARPU_ATTRIBUTE_UNUSED)
{
	/* We will store all the busid of the different (src, dst)
	 * combinations in a matrix which we initialize here. */
	_starpu_initialize_busid_matrix();
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	unsigned cuda_init[STARPU_MAXCUDADEVS] = { };
	unsigned cuda_memory_nodes[STARPU_MAXCUDADEVS];
	unsigned cuda_bindid[STARPU_MAXCUDADEVS];
	int cuda_globalbindid = -1;
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	unsigned opencl_init[STARPU_MAXOPENCLDEVS] = { };
	unsigned opencl_memory_nodes[STARPU_MAXOPENCLDEVS];
	unsigned opencl_bindid[STARPU_MAXOPENCLDEVS];
#endif
#ifdef STARPU_USE_MIC
	unsigned mic_init[STARPU_MAXMICDEVS] = { };
	unsigned mic_memory_nodes[STARPU_MAXMICDEVS];
	unsigned mic_bindid[STARPU_MAXMICDEVS];
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
	unsigned mpi_init[STARPU_MAXMPIDEVS] = { };
	unsigned mpi_memory_nodes[STARPU_MAXMPIDEVS];
	unsigned mpi_bindid[STARPU_MAXMPIDEVS];
#endif
	unsigned bindid;
	for (bindid = 0; bindid < config->nbindid; bindid++)
	{
		free(config->bindid_workers[bindid].workerids);
		config->bindid_workers[bindid].workerids = NULL;
		config->bindid_workers[bindid].nworkers = 0;
	}
	/* Init CPU binding before NUMA nodes, because we use it to discover NUMA nodes */
	_starpu_init_binding_cpu(config);
	/* Initialize NUMA nodes */
	_starpu_init_numa_node(config);
	_starpu_init_numa_bus();
	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
		unsigned memory_node = -1;
		struct _starpu_worker *workerarg = &config->workers[worker];
		unsigned devid STARPU_ATTRIBUTE_UNUSED = workerarg->devid;
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
		/* Perhaps the worker has some "favourite" bindings */
		int *preferred_binding = NULL;
		int npreferred = 0;
#endif
		/* select the memory node that contains worker's memory */
		switch (workerarg->arch)
		{
			case STARPU_CPU_WORKER:
			{
				int numa_logical_id = _starpu_get_logical_numa_node_worker(worker);
				int numa_starpu_id = starpu_memory_nodes_numa_hwloclogid_to_id(numa_logical_id);
				if (numa_starpu_id < 0 || numa_starpu_id >= STARPU_MAXNUMANODES)
					numa_starpu_id = STARPU_MAIN_RAM;
				workerarg->numa_memory_node = memory_node = numa_starpu_id;
				_starpu_memory_node_add_nworkers(memory_node);
				_starpu_worker_drives_memory_node(workerarg, numa_starpu_id);
				break;
			}
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
			case STARPU_CUDA_WORKER:
			{
				unsigned numa;
#ifndef STARPU_SIMGRID
				if (may_bind_automatically[STARPU_CUDA_WORKER])
				{
					/* StarPU is allowed to bind threads automatically */
					preferred_binding = _starpu_get_cuda_affinity_vector(devid);
					npreferred = config->topology.nhwpus;
				}
#endif /* SIMGRID */
				if (cuda_init[devid])
				{
					memory_node = cuda_memory_nodes[devid];
					if (config->topology.cuda_th_per_stream == 0)
						workerarg->bindid = cuda_bindid[devid];
					else
						workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
				}
				else
				{
					cuda_init[devid] = 1;
					if (config->topology.cuda_th_per_dev == 0 && config->topology.cuda_th_per_stream == 0)
					{
						if (cuda_globalbindid == -1)
							cuda_globalbindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
						workerarg->bindid = cuda_bindid[devid] = cuda_globalbindid;
					}
					else
						workerarg->bindid = cuda_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
					memory_node = cuda_memory_nodes[devid] = _starpu_memory_node_register(STARPU_CUDA_RAM, devid);
					for (numa = 0; numa < nb_numa_nodes; numa++)
					{
						_starpu_cuda_bus_ids[numa][devid+STARPU_MAXNUMANODES] = _starpu_register_bus(numa, memory_node);
						_starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][numa] = _starpu_register_bus(memory_node, numa);
					}
#ifdef STARPU_SIMGRID
					const char* cuda_memcpy_peer;
					char name[16];
					snprintf(name, sizeof(name), "CUDA%u", devid);
					msg_host_t host = _starpu_simgrid_get_host_by_name(name);
					STARPU_ASSERT(host);
					_starpu_simgrid_memory_node_set_host(memory_node, host);
					cuda_memcpy_peer = MSG_host_get_property_value(host, "memcpy_peer");
#endif /* SIMGRID */
					if (
#ifdef STARPU_SIMGRID
						cuda_memcpy_peer && atoll(cuda_memcpy_peer)
#elif defined(HAVE_CUDA_MEMCPY_PEER)
						1
#else /* MEMCPY_PEER */
						0
#endif /* MEMCPY_PEER */
					   )
					{
						unsigned worker2;
						for (worker2 = 0; worker2 < worker; worker2++)
						{
							struct _starpu_worker *workerarg2 = &config->workers[worker2];
							int devid2 = workerarg2->devid;
							if (workerarg2->arch == STARPU_CUDA_WORKER)
							{
								unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
								_starpu_cuda_bus_ids[devid2+STARPU_MAXNUMANODES][devid+STARPU_MAXNUMANODES] = _starpu_register_bus(memory_node2, memory_node);
								_starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][devid2+STARPU_MAXNUMANODES] = _starpu_register_bus(memory_node, memory_node2);
#ifndef STARPU_SIMGRID
#if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
								{
									hwloc_obj_t obj, obj2, ancestor;
									obj = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, devid);
									obj2 = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, devid2);
									ancestor = hwloc_get_common_ancestor_obj(config->topology.hwtopology, obj, obj2);
									if (ancestor)
									{
										struct _starpu_hwloc_userdata *data = ancestor->userdata;
#ifdef STARPU_VERBOSE
										{
											char name[64];
											hwloc_obj_type_snprintf(name, sizeof(name), ancestor, 0);
											_STARPU_DEBUG("CUDA%u and CUDA%u are linked through %s, along %u GPUs\n", devid, devid2, name, data->ngpus);
										}
#endif
										starpu_bus_set_ngpus(_starpu_cuda_bus_ids[devid2+STARPU_MAXNUMANODES][devid+STARPU_MAXNUMANODES], data->ngpus);
										starpu_bus_set_ngpus(_starpu_cuda_bus_ids[devid+STARPU_MAXNUMANODES][devid2+STARPU_MAXNUMANODES], data->ngpus);
									}
								}
#endif
#endif
							}
						}
					}
				}
				_starpu_memory_node_add_nworkers(memory_node);
				// This worker can manage transfers on NUMA nodes
				for (numa = 0; numa < nb_numa_nodes; numa++)
					_starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
				_starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
				break;
			}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
			case STARPU_OPENCL_WORKER:
			{
				unsigned numa;
#ifndef STARPU_SIMGRID
				if (may_bind_automatically[STARPU_OPENCL_WORKER])
				{
					/* StarPU is allowed to bind threads automatically */
					preferred_binding = _starpu_get_opencl_affinity_vector(devid);
					npreferred = config->topology.nhwpus;
				}
#endif /* SIMGRID */
				if (opencl_init[devid])
				{
					memory_node = opencl_memory_nodes[devid];
#ifndef STARPU_SIMGRID
					workerarg->bindid = opencl_bindid[devid];
#endif /* SIMGRID */
				}
				else
				{
					opencl_init[devid] = 1;
					workerarg->bindid = opencl_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
					memory_node = opencl_memory_nodes[devid] = _starpu_memory_node_register(STARPU_OPENCL_RAM, devid);
					for (numa = 0; numa < nb_numa_nodes; numa++)
					{
						_starpu_register_bus(numa, memory_node);
						_starpu_register_bus(memory_node, numa);
					}
#ifdef STARPU_SIMGRID
					char name[16];
					snprintf(name, sizeof(name), "OpenCL%u", devid);
					msg_host_t host = _starpu_simgrid_get_host_by_name(name);
					STARPU_ASSERT(host);
					_starpu_simgrid_memory_node_set_host(memory_node, host);
#endif /* SIMGRID */
				}
				_starpu_memory_node_add_nworkers(memory_node);
				// This worker can manage transfers on NUMA nodes
				for (numa = 0; numa < nb_numa_nodes; numa++)
					_starpu_worker_drives_memory_node(workerarg, numa);
				_starpu_worker_drives_memory_node(workerarg, memory_node);
				break;
			}
#endif
#ifdef STARPU_USE_MIC
			case STARPU_MIC_WORKER:
			{
				unsigned numa;
				if (mic_init[devid])
				{
					memory_node = mic_memory_nodes[devid];
				}
				else
				{
					mic_init[devid] = 1;
					/* TODO */
					//if (may_bind_automatically)
					//{
					//	/* StarPU is allowed to bind threads automatically */
					//	preferred_binding = _starpu_get_mic_affinity_vector(devid);
					//	npreferred = config->topology.nhwpus;
					//}
					mic_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
					memory_node = mic_memory_nodes[devid] = _starpu_memory_node_register(STARPU_MIC_RAM, devid);
					for (numa = 0; numa < nb_numa_nodes; numa++)
					{
						_starpu_register_bus(numa, memory_node);
						_starpu_register_bus(memory_node, numa);
					}
				}
				workerarg->bindid = mic_bindid[devid];
				_starpu_memory_node_add_nworkers(memory_node);
				// This worker can manage transfers on NUMA nodes
				for (numa = 0; numa < nb_numa_nodes; numa++)
					_starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
				_starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
				break;
			}
#endif /* STARPU_USE_MIC */
#ifdef STARPU_USE_SCC
			case STARPU_SCC_WORKER:
			{
				unsigned numa;
				/* Node 0 represents the SCC shared memory when we're on SCC. */
				struct _starpu_memory_node_descr *descr = _starpu_memory_node_get_description();
				descr->nodes[ram_memory_node] = STARPU_SCC_SHM;
				memory_node = ram_memory_node;
				_starpu_memory_node_add_nworkers(memory_node);
				// This worker can manage transfers on NUMA nodes
				for (numa = 0; numa < nb_numa_nodes; numa++)
					_starpu_worker_drives_memory_node(workerarg, numa);
				_starpu_worker_drives_memory_node(workerarg, memory_node);
			}
			break;
#endif /* STARPU_USE_SCC */
#ifdef STARPU_USE_MPI_MASTER_SLAVE
			case STARPU_MPI_MS_WORKER:
			{
				unsigned numa;
				if (mpi_init[devid])
				{
					memory_node = mpi_memory_nodes[devid];
				}
				else
				{
					mpi_init[devid] = 1;
					mpi_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
					memory_node = mpi_memory_nodes[devid] = _starpu_memory_node_register(STARPU_MPI_MS_RAM, devid);
					for (numa = 0; numa < nb_numa_nodes; numa++)
					{
						_starpu_register_bus(numa, memory_node);
						_starpu_register_bus(memory_node, numa);
					}
				}
				// This worker can manage transfers on NUMA nodes
				for (numa = 0; numa < nb_numa_nodes; numa++)
					_starpu_worker_drives_memory_node(&workerarg->set->workers[0], numa);
				_starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
#ifndef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
				/* MPI driver thread can manage all slave memories if we disable the MPI multiple thread */
				unsigned findworker;
				for (findworker = 0; findworker < worker; findworker++)
				{
					struct _starpu_worker *findworkerarg = &config->workers[findworker];
					if (findworkerarg->arch == STARPU_MPI_MS_WORKER)
					{
						_starpu_worker_drives_memory_node(workerarg, findworkerarg->memory_node);
						_starpu_worker_drives_memory_node(findworkerarg, memory_node);
					}
				}
#endif
				workerarg->bindid = mpi_bindid[devid];
				_starpu_memory_node_add_nworkers(memory_node);
				break;
			}
#endif /* STARPU_USE_MPI_MASTER_SLAVE */
			default:
				STARPU_ABORT();
		}
		workerarg->memory_node = memory_node;
		_STARPU_DEBUG("worker %u type %d devid %u bound to cpu %d, STARPU memory node %u\n", worker, workerarg->arch, devid, workerarg->bindid, memory_node);
#ifdef __GLIBC__
		if (workerarg->bindid != -1)
		{
			/* Save the initial cpuset */
			CPU_ZERO(&workerarg->cpu_set);
			CPU_SET(workerarg->bindid, &workerarg->cpu_set);
		}
#endif /* __GLIBC__ */
#ifdef STARPU_HAVE_HWLOC
		if (workerarg->bindid == -1)
		{
			workerarg->hwloc_cpu_set = hwloc_bitmap_alloc();
		}
		else
		{
			/* Put the worker descriptor in the userdata field of the
			 * hwloc object describing the CPU */
			hwloc_obj_t worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
									config->pu_depth,
									workerarg->bindid);
			struct _starpu_hwloc_userdata *data = worker_obj->userdata;
			if (data->worker_list == NULL)
				data->worker_list = _starpu_worker_list_new();
			_starpu_worker_list_push_front(data->worker_list, workerarg);
			/* Clear the cpu set and set the cpu */
			workerarg->hwloc_cpu_set = hwloc_bitmap_dup (worker_obj->cpuset);
		}
#endif
		if (workerarg->bindid != -1)
		{
			bindid = workerarg->bindid;
			unsigned old_nbindid = config->nbindid;
			if (bindid >= old_nbindid)
			{
				/* More room needed */
				if (!old_nbindid)
					config->nbindid = STARPU_NMAXWORKERS;
				else
					config->nbindid = 2 * old_nbindid;
				if (bindid > config->nbindid)
				{
					config->nbindid = bindid+1;
				}
				_STARPU_REALLOC(config->bindid_workers, config->nbindid * sizeof(config->bindid_workers[0]));
				memset(&config->bindid_workers[old_nbindid], 0, (config->nbindid - old_nbindid) * sizeof(config->bindid_workers[0]));
			}
			/* Add a slot for this worker */
			/* Don't care about amortizing the cost, there are usually very few workers sharing the same bindid */
			config->bindid_workers[bindid].nworkers++;
			_STARPU_REALLOC(config->bindid_workers[bindid].workerids, config->bindid_workers[bindid].nworkers * sizeof(config->bindid_workers[bindid].workerids[0]));
			config->bindid_workers[bindid].workerids[config->bindid_workers[bindid].nworkers-1] = worker;
		}
	}
#ifdef STARPU_SIMGRID
	_starpu_simgrid_count_ngpus();
#else
#ifdef STARPU_HAVE_HWLOC
	_starpu_topology_count_ngpus(hwloc_get_root_obj(config->topology.hwtopology));
#endif
#endif
}
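/* Build the whole machine description: detect the devices, initialize the
 * memory nodes and data management, bind the workers, and record, for each
 * worker architecture, the memory node shared by its workers (-1 when there
 * is none, -2 when they use several different nodes). */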
int
_starpu_build_topology (struct _starpu_machine_config *config, int no_mp_config)
{
	int ret;
	unsigned i;
	ret = _starpu_init_machine_config(config, no_mp_config);
	if (ret)
		return ret;
	/* for the data management library */
	_starpu_memory_nodes_init();
	_starpu_datastats_init();
	_starpu_init_workers_binding_and_memory(config, no_mp_config);
	config->cpus_nodeid = -1;
	config->cuda_nodeid = -1;
	config->opencl_nodeid = -1;
	config->mic_nodeid = -1;
	config->scc_nodeid = -1;
	config->mpi_nodeid = -1;
	for (i = 0; i < starpu_worker_get_count(); i++)
	{
		switch (starpu_worker_get_type(i))
		{
			case STARPU_CPU_WORKER:
				if (config->cpus_nodeid == -1)
					config->cpus_nodeid = starpu_worker_get_memory_node(i);
				else if (config->cpus_nodeid != (int) starpu_worker_get_memory_node(i))
					config->cpus_nodeid = -2;
				break;
			case STARPU_CUDA_WORKER:
				if (config->cuda_nodeid == -1)
					config->cuda_nodeid = starpu_worker_get_memory_node(i);
				else if (config->cuda_nodeid != (int) starpu_worker_get_memory_node(i))
					config->cuda_nodeid = -2;
				break;
			case STARPU_OPENCL_WORKER:
				if (config->opencl_nodeid == -1)
					config->opencl_nodeid = starpu_worker_get_memory_node(i);
				else if (config->opencl_nodeid != (int) starpu_worker_get_memory_node(i))
					config->opencl_nodeid = -2;
				break;
			case STARPU_MIC_WORKER:
				if (config->mic_nodeid == -1)
					config->mic_nodeid = starpu_worker_get_memory_node(i);
				else if (config->mic_nodeid != (int) starpu_worker_get_memory_node(i))
					config->mic_nodeid = -2;
				break;
			case STARPU_SCC_WORKER:
				if (config->scc_nodeid == -1)
					config->scc_nodeid = starpu_worker_get_memory_node(i);
				else if (config->scc_nodeid != (int) starpu_worker_get_memory_node(i))
					config->scc_nodeid = -2;
				break;
			case STARPU_MPI_MS_WORKER:
				if (config->mpi_nodeid == -1)
					config->mpi_nodeid = starpu_worker_get_memory_node(i);
				else if (config->mpi_nodeid != (int) starpu_worker_get_memory_node(i))
					config->mpi_nodeid = -2;
				break;
			case STARPU_ANY_WORKER:
				STARPU_ASSERT(0);
		}
	}
	return 0;
}
void _starpu_destroy_topology(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED)
{
#if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
	_starpu_deinit_mp_config(config);
#endif
	/* cleanup StarPU internal data structures */
	_starpu_memory_nodes_deinit();
	_starpu_destroy_machine_config(config);
}
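/* Print, for each PU of the machine, the NUMA node, package and core it
 * belongs to, together with the StarPU workers (and combined workers) bound
 * to it. */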
void
starpu_topology_print (FILE *output)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	struct _starpu_machine_topology *topology = &config->topology;
	unsigned pu;
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	unsigned ncombinedworkers = topology->ncombinedworkers;
	unsigned nthreads_per_core = topology->nhwpus / topology->nhwcpus;
#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_t topo = topology->hwtopology;
	hwloc_obj_t pu_obj;
	hwloc_obj_t last_numa_obj = NULL, numa_obj;
	hwloc_obj_t last_package_obj = NULL, package_obj;
#endif
	for (pu = 0; pu < topology->nhwpus; pu++)
	{
#ifdef STARPU_HAVE_HWLOC
		pu_obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, pu);
		numa_obj = hwloc_get_ancestor_obj_by_type(topo, HWLOC_OBJ_NODE, pu_obj);
		if (numa_obj != last_numa_obj)
		{
			fprintf(output, "numa %u", numa_obj->logical_index);
			last_numa_obj = numa_obj;
		}
		fprintf(output, "\t");
		package_obj = hwloc_get_ancestor_obj_by_type(topo, HWLOC_OBJ_SOCKET, pu_obj);
		if (package_obj != last_package_obj)
		{
			fprintf(output, "pack %u", package_obj->logical_index);
			last_package_obj = package_obj;
		}
		fprintf(output, "\t");
#endif
		if ((pu % nthreads_per_core) == 0)
			fprintf(output, "core %u", pu / nthreads_per_core);
		fprintf(output, "\tPU %u\t", pu);
		for (worker = 0;
		     worker < nworkers + ncombinedworkers;
		     worker++)
		{
			if (worker < nworkers)
			{
				struct _starpu_worker *workerarg = &config->workers[worker];
				if (workerarg->bindid == (int) pu)
				{
					char name[256];
					starpu_worker_get_name (worker, name,
								sizeof(name));
					fprintf(output, "%s\t", name);
				}
			}
			else
			{
				int worker_size, i;
				int *combined_workerid;
				starpu_combined_worker_get_description(worker, &worker_size, &combined_workerid);
				for (i = 0; i < worker_size; i++)
				{
					if (topology->workers_bindid[combined_workerid[i]] == pu)
						fprintf(output, "comb %u\t", worker-nworkers);
				}
			}
		}
		fprintf(output, "\n");
	}
}