topology.c

  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2017 Université de Bordeaux
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 CNRS
  5. * Copyright (C) 2011, 2016, 2017 INRIA
  6. * Copyright (C) 2016 Uppsala University
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #include <stdlib.h>
  20. #include <stdio.h>
  21. #include <common/config.h>
  22. #include <core/workers.h>
  23. #include <core/debug.h>
  24. #include <core/topology.h>
  25. #include <drivers/cuda/driver_cuda.h>
  26. #include <drivers/mic/driver_mic_source.h>
  27. #include <drivers/scc/driver_scc_source.h>
  28. #include <drivers/mpi/driver_mpi_source.h>
  29. #include <drivers/mpi/driver_mpi_common.h>
  30. #include <drivers/mp_common/source_common.h>
  31. #include <drivers/opencl/driver_opencl.h>
  32. #include <profiling/profiling.h>
  33. #include <datawizard/datastats.h>
  34. #include <datawizard/memory_nodes.h>
  35. #include <common/uthash.h>
  36. #ifdef STARPU_HAVE_HWLOC
  37. #include <hwloc.h>
  38. #ifndef HWLOC_API_VERSION
  39. #define HWLOC_OBJ_PU HWLOC_OBJ_PROC
  40. #endif
  41. #endif
  42. #ifdef STARPU_HAVE_WINDOWS
  43. #include <windows.h>
  44. #endif
  45. #ifdef STARPU_SIMGRID
  46. #include <core/simgrid.h>
  47. #endif
  48. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  49. #include <hwloc/cuda.h>
  50. #endif
  51. static unsigned topology_is_initialized = 0;
  52. static int nobind;
  53. /* For checking whether two workers share the same PU, indexed by PU number */
  54. static int cpu_worker[STARPU_MAXCPUS];
  55. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  56. struct handle_entry
  57. {
  58. UT_hash_handle hh;
  59. unsigned gpuid;
  60. };
  61. # if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  62. /* Entry in the `devices_using_cuda' hash table. */
  63. static struct handle_entry *devices_using_cuda;
  64. # endif
  65. static unsigned may_bind_automatically[STARPU_NARCH] = { 0 };
  66. #endif // STARPU_USE_CUDA || STARPU_USE_OPENCL || STARPU_USE_SCC || STARPU_SIMGRID || STARPU_USE_MPI_MASTER_SLAVE
  67. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  68. static struct _starpu_worker_set cuda_worker_set[STARPU_MAXCUDADEVS];
  69. #endif
  70. #ifdef STARPU_USE_MIC
  71. static struct _starpu_worker_set mic_worker_set[STARPU_MAXMICDEVS];
  72. #endif
  73. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  74. struct _starpu_worker_set mpi_worker_set[STARPU_MAXMPIDEVS];
  75. #endif
  76. struct _starpu_worker *_starpu_get_worker_from_driver(struct starpu_driver *d)
  77. {
  78. unsigned nworkers = starpu_worker_get_count();
  79. unsigned workerid;
  80. for (workerid = 0; workerid < nworkers; workerid++)
  81. {
  82. if (starpu_worker_get_type(workerid) == d->type)
  83. {
  84. struct _starpu_worker *worker;
  85. worker = _starpu_get_worker_struct(workerid);
  86. switch (d->type)
  87. {
  88. #ifdef STARPU_USE_CPU
  89. case STARPU_CPU_WORKER:
  90. if (worker->devid == d->id.cpu_id)
  91. return worker;
  92. break;
  93. #endif
  94. #ifdef STARPU_USE_OPENCL
  95. case STARPU_OPENCL_WORKER:
  96. {
  97. cl_device_id device;
  98. starpu_opencl_get_device(worker->devid, &device);
  99. if (device == d->id.opencl_id)
  100. return worker;
  101. break;
  102. }
  103. #endif
  104. #ifdef STARPU_USE_CUDA
  105. case STARPU_CUDA_WORKER:
  106. {
  107. if (worker->devid == d->id.cuda_id)
  108. return worker;
  109. break;
  110. }
  111. #endif
  112. default:
  113. (void) worker;
  114. _STARPU_DEBUG("Invalid device type\n");
  115. return NULL;
  116. }
  117. }
  118. }
  119. return NULL;
  120. }
  121. /*
  122. * Discover the topology of the machine
  123. */
  124. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_SCC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  125. static void
  126. _starpu_initialize_workers_deviceid (int *explicit_workers_gpuid,
  127. int *current, int *workers_gpuid,
  128. const char *varname, unsigned nhwgpus,
  129. enum starpu_worker_archtype type)
  130. {
  131. char *strval;
  132. unsigned i;
  133. *current = 0;
  134. /* conf->workers_gpuid indicates the successive GPU identifiers that
  135. * should be used to bind the workers. It should be either filled
  136. * according to the user's explicit parameters (from starpu_conf) or
  137. * according to the STARPU_WORKERS_CUDAID env. variable. Otherwise, a
  138. * round-robin policy is used to distribute the workers over the
  139. * cores. */
  140. /* what do we use, explicit value, env. variable, or round-robin ? */
  141. if ((strval = starpu_getenv(varname)))
  142. {
  143. /* STARPU_WORKERS_CUDAID certainly contains fewer entries than
  144. * STARPU_NMAXWORKERS, so we reuse its entries in a round
  145. * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
  146. * 2". */
  147. unsigned wrap = 0;
  148. unsigned number_of_entries = 0;
  149. char *endptr;
  150. /* we use the content of the STARPU_WORKERS_CUDAID
  151. * env. variable */
  152. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  153. {
  154. if (!wrap)
  155. {
  156. long int val;
  157. val = strtol(strval, &endptr, 10);
  158. if (endptr != strval)
  159. {
  160. workers_gpuid[i] = (unsigned)val;
  161. strval = endptr;
  162. }
  163. else
  164. {
  165. /* there must be at least one entry */
  166. STARPU_ASSERT(i != 0);
  167. number_of_entries = i;
  168. /* there are no more values in the
  169. * string */
  170. wrap = 1;
  171. workers_gpuid[i] = workers_gpuid[0];
  172. }
  173. }
  174. else
  175. {
  176. workers_gpuid[i] =
  177. workers_gpuid[i % number_of_entries];
  178. }
  179. }
  180. }
  181. else if (explicit_workers_gpuid)
  182. {
  183. /* we use the explicit value from the user */
  184. memcpy(workers_gpuid,
  185. explicit_workers_gpuid,
  186. STARPU_NMAXWORKERS*sizeof(unsigned));
  187. }
  188. else
  189. {
  190. /* by default, we take a round robin policy */
  191. if (nhwgpus > 0)
  192. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  193. workers_gpuid[i] = (unsigned)(i % nhwgpus);
  194. /* StarPU can use sampling techniques to bind threads
  195. * correctly */
  196. may_bind_automatically[type] = 1;
  197. }
  198. }
  199. #endif
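/* To summarize the three cases above: the environment variable named by
 * VARNAME (e.g. STARPU_WORKERS_CUDAID) takes precedence, its entries being
 * reused round-robin once exhausted; otherwise the explicit list from
 * starpu_conf is copied verbatim; otherwise device i % nhwgpus is assigned
 * to worker i and may_bind_automatically is set so that sampling may later
 * refine the binding. */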
  200. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  201. static void
  202. _starpu_initialize_workers_cuda_gpuid (struct _starpu_machine_config *config)
  203. {
  204. struct _starpu_machine_topology *topology = &config->topology;
  205. struct starpu_conf *uconf = &config->conf;
  206. _starpu_initialize_workers_deviceid (
  207. uconf->use_explicit_workers_cuda_gpuid == 0
  208. ? NULL
  209. : (int *)uconf->workers_cuda_gpuid,
  210. &(config->current_cuda_gpuid),
  211. (int *)topology->workers_cuda_gpuid,
  212. "STARPU_WORKERS_CUDAID",
  213. topology->nhwcudagpus,
  214. STARPU_CUDA_WORKER);
  215. }
  216. static inline int
  217. _starpu_get_next_cuda_gpuid (struct _starpu_machine_config *config)
  218. {
  219. unsigned i =
  220. ((config->current_cuda_gpuid++) % config->topology.ncudagpus);
  221. return (int)config->topology.workers_cuda_gpuid[i];
  222. }
  223. #endif
  224. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  225. static void
  226. _starpu_initialize_workers_opencl_gpuid (struct _starpu_machine_config*config)
  227. {
  228. struct _starpu_machine_topology *topology = &config->topology;
  229. struct starpu_conf *uconf = &config->conf;
  230. _starpu_initialize_workers_deviceid(
  231. uconf->use_explicit_workers_opencl_gpuid == 0
  232. ? NULL
  233. : (int *)uconf->workers_opencl_gpuid,
  234. &(config->current_opencl_gpuid),
  235. (int *)topology->workers_opencl_gpuid,
  236. "STARPU_WORKERS_OPENCLID",
  237. topology->nhwopenclgpus,
  238. STARPU_OPENCL_WORKER);
  239. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  240. // Detect devices which are already used with CUDA
  241. {
  242. unsigned tmp[STARPU_NMAXWORKERS];
  243. unsigned nb=0;
  244. int i;
  245. for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
  246. {
  247. struct handle_entry *entry;
  248. int devid = config->topology.workers_opencl_gpuid[i];
  249. HASH_FIND_INT(devices_using_cuda, &devid, entry);
  250. if (entry == NULL)
  251. {
  252. tmp[nb] = topology->workers_opencl_gpuid[i];
  253. nb++;
  254. }
  255. }
  256. for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
  257. tmp[i] = -1;
  258. memcpy (topology->workers_opencl_gpuid, tmp,
  259. sizeof(unsigned)*STARPU_NMAXWORKERS);
  260. }
  261. #endif /* STARPU_USE_CUDA || STARPU_SIMGRID */
  262. {
  263. // Detect identical devices
  264. struct handle_entry *devices_already_used = NULL;
  265. unsigned tmp[STARPU_NMAXWORKERS];
  266. unsigned nb=0;
  267. int i;
  268. for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
  269. {
  270. int devid = topology->workers_opencl_gpuid[i];
  271. struct handle_entry *entry;
  272. HASH_FIND_INT(devices_already_used, &devid, entry);
  273. if (entry == NULL)
  274. {
  275. struct handle_entry *entry2;
  276. _STARPU_MALLOC(entry2, sizeof(*entry2));
  277. entry2->gpuid = devid;
  278. HASH_ADD_INT(devices_already_used, gpuid,
  279. entry2);
  280. tmp[nb] = devid;
  281. nb ++;
  282. }
  283. }
  284. struct handle_entry *entry, *tempo;
  285. HASH_ITER(hh, devices_already_used, entry, tempo)
  286. {
  287. HASH_DEL(devices_already_used, entry);
  288. free(entry);
  289. }
  290. for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
  291. tmp[i] = -1;
  292. memcpy (topology->workers_opencl_gpuid, tmp,
  293. sizeof(unsigned)*STARPU_NMAXWORKERS);
  294. }
  295. }
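/* Illustration of the filtering above: if CUDA already drives device 0 and
 * the OpenCL list starts with "0 1 1 2", device 0 is dropped first, then
 * the duplicate entry for device 1 is removed, leaving "1 2"; the remaining
 * slots are padded with -1. */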
  296. static inline int
  297. _starpu_get_next_opencl_gpuid (struct _starpu_machine_config *config)
  298. {
  299. unsigned i =
  300. ((config->current_opencl_gpuid++) % config->topology.nopenclgpus);
  301. return (int)config->topology.workers_opencl_gpuid[i];
  302. }
  303. #endif
  304. #if 0
  305. #if defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID)
  306. static void _starpu_initialize_workers_mic_deviceid(struct _starpu_machine_config *config)
  307. {
  308. struct _starpu_machine_topology *topology = &config->topology;
  309. struct starpu_conf *uconf = &config->conf;
  310. _starpu_initialize_workers_deviceid(
  311. uconf->use_explicit_workers_mic_deviceid == 0
  312. ? NULL
  313. : (int *)config->user_conf->workers_mic_deviceid,
  314. &(config->current_mic_deviceid),
  315. (int *)topology->workers_mic_deviceid,
  316. "STARPU_WORKERS_MICID",
  317. topology->nhwmiccores,
  318. STARPU_MIC_WORKER);
  319. }
  320. #endif
  321. #endif
  322. #ifdef STARPU_USE_SCC
  323. static void _starpu_initialize_workers_scc_deviceid(struct _starpu_machine_config *config)
  324. {
  325. struct _starpu_machine_topology *topology = &config->topology;
  326. struct starpu_conf *uconf = &config->conf;
  327. _starpu_initialize_workers_deviceid(
  328. uconf->use_explicit_workers_scc_deviceid == 0
  329. ? NULL
  330. : (int *) uconf->workers_scc_deviceid,
  331. &(config->current_scc_deviceid),
  332. (int *)topology->workers_scc_deviceid,
  333. "STARPU_WORKERS_SCCID",
  334. topology->nhwscc,
  335. STARPU_SCC_WORKER);
  336. }
  337. #endif /* STARPU_USE_SCC */
  338. #if 0
  339. #ifdef STARPU_USE_MIC
  340. static inline int _starpu_get_next_mic_deviceid(struct _starpu_machine_config *config)
  341. {
  342. unsigned i = ((config->current_mic_deviceid++) % config->topology.nmicdevices);
  343. return (int)config->topology.workers_mic_deviceid[i];
  344. }
  345. #endif
  346. #endif
  347. #ifdef STARPU_USE_SCC
  348. static inline int _starpu_get_next_scc_deviceid(struct _starpu_machine_config *config)
  349. {
  350. unsigned i = ((config->current_scc_deviceid++) % config->topology.nsccdevices);
  351. return (int)config->topology.workers_scc_deviceid[i];
  352. }
  353. #endif
  354. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  355. static inline int _starpu_get_next_mpi_deviceid(struct _starpu_machine_config *config)
  356. {
  357. unsigned i = ((config->current_mpi_deviceid++) % config->topology.nmpidevices);
  358. return (int)config->topology.workers_mpi_ms_deviceid[i];
  359. }
  360. static void
  361. _starpu_init_mpi_topology (struct _starpu_machine_config *config, long mpi_idx)
  362. {
  363. /* Discover the topology of the MPI node identified by MPI_IDX. That
  364. * is, make this StarPU instance aware of the number of cores available
  365. * on this MPI device. Update the `nhwmpicores' topology field
  366. * accordingly. */
  367. struct _starpu_machine_topology *topology = &config->topology;
  368. int nbcores;
  369. _starpu_src_common_sink_nbcores (mpi_ms_nodes[mpi_idx], &nbcores);
  370. topology->nhwmpicores[mpi_idx] = nbcores;
  371. }
  372. #endif /* STARPU_USE_MPI_MASTER_SLAVE */
  373. #ifdef STARPU_USE_MIC
  374. static void
  375. _starpu_init_mic_topology (struct _starpu_machine_config *config, long mic_idx)
  376. {
  377. /* Discover the topology of the MIC node identified by MIC_IDX. That
  378. * is, make this StarPU instance aware of the number of cores available
  379. * on this MIC device. Update the `nhwmiccores' topology field
  380. * accordingly. */
  381. struct _starpu_machine_topology *topology = &config->topology;
  382. int nbcores;
  383. _starpu_src_common_sink_nbcores (mic_nodes[mic_idx], &nbcores);
  384. topology->nhwmiccores[mic_idx] = nbcores;
  385. }
  386. static int
  387. _starpu_init_mic_node (struct _starpu_machine_config *config, int mic_idx,
  388. COIENGINE *coi_handle, COIPROCESS *coi_process)
  389. {
  390. /* Initialize the MIC node of index MIC_IDX. */
  391. struct starpu_conf *user_conf = &config->conf;
  392. char ***argv = _starpu_get_argv();
  393. const char *suffixes[] = {"-mic", "_mic", NULL};
  394. /* Environment variables to send to the Sink; they tell it what kind
  395. * of node it is (architecture and type), as it has no way to discover
  396. * this by itself */
  397. char mic_idx_env[32];
  398. sprintf(mic_idx_env, "_STARPU_MIC_DEVID=%d", mic_idx);
  399. /* XXX: this is currently necessary so that the remote process does not
  400. * segfault. */
  401. char nb_mic_env[32];
  402. sprintf(nb_mic_env, "_STARPU_MIC_NB=%d", 2);
  403. const char *mic_sink_env[] = {"STARPU_SINK=STARPU_MIC", mic_idx_env, nb_mic_env, NULL};
  404. char mic_sink_program_path[1024];
  405. /* Let's get the helper program to run on the MIC device */
  406. int mic_file_found =
  407. _starpu_src_common_locate_file (mic_sink_program_path,
  408. starpu_getenv("STARPU_MIC_SINK_PROGRAM_NAME"),
  409. starpu_getenv("STARPU_MIC_SINK_PROGRAM_PATH"),
  410. user_conf->mic_sink_program_path,
  411. (argv ? (*argv)[0] : NULL),
  412. suffixes);
  413. if (0 != mic_file_found)
  414. {
  415. _STARPU_MSG("No MIC program specified, use the environment\n"
  416. "variable STARPU_MIC_SINK_PROGRAM_NAME or the environment\n"
  417. "or the field 'starpu_conf.mic_sink_program_path'\n"
  418. "to define it.\n");
  419. return -1;
  420. }
  421. COIRESULT res;
  422. /* Let's get the handle which lets us manage the remote MIC device */
  423. res = COIEngineGetHandle(COI_ISA_MIC, mic_idx, coi_handle);
  424. if (STARPU_UNLIKELY(res != COI_SUCCESS))
  425. STARPU_MIC_SRC_REPORT_COI_ERROR(res);
  426. /* We launch the helper on the MIC device, which will wait for us
  427. * to give it work to do.
  428. * As we will communicate further with the device through SCIF, we
  429. * don't need to keep the process pointer */
  430. res = COIProcessCreateFromFile(*coi_handle, mic_sink_program_path, 0, NULL, 0,
  431. mic_sink_env, 1, NULL, 0, NULL,
  432. coi_process);
  433. if (STARPU_UNLIKELY(res != COI_SUCCESS))
  434. STARPU_MIC_SRC_REPORT_COI_ERROR(res);
  435. /* Let's create the node structure, we'll communicate with the peer
  436. * through SCIF thanks to it */
  437. mic_nodes[mic_idx] =
  438. _starpu_mp_common_node_create(STARPU_NODE_MIC_SOURCE, mic_idx);
  439. return 0;
  440. }
  441. #endif
  442. #ifndef STARPU_SIMGRID
  443. #ifdef STARPU_HAVE_HWLOC
  444. static void
  445. _starpu_allocate_topology_userdata(hwloc_obj_t obj)
  446. {
  447. unsigned i;
  448. _STARPU_CALLOC(obj->userdata, 1, sizeof(struct _starpu_hwloc_userdata));
  449. for (i = 0; i < obj->arity; i++)
  450. _starpu_allocate_topology_userdata(obj->children[i]);
  451. }
  452. static void
  453. _starpu_deallocate_topology_userdata(hwloc_obj_t obj)
  454. {
  455. unsigned i;
  456. struct _starpu_hwloc_userdata *data = obj->userdata;
  457. STARPU_ASSERT(!data->worker_list || data->worker_list == (void*)-1);
  458. free(data);
  459. for (i = 0; i < obj->arity; i++)
  460. _starpu_deallocate_topology_userdata(obj->children[i]);
  461. }
  462. #endif
  463. #endif
  464. static void
  465. _starpu_init_topology (struct _starpu_machine_config *config)
  466. {
  467. /* Discover the topology, meaning finding all the available PUs for
  468. the compiled drivers. These drivers MUST have been initialized
  469. before calling this function. The discovered topology is filled in
  470. CONFIG. */
  471. struct _starpu_machine_topology *topology = &config->topology;
  472. if (topology_is_initialized)
  473. return;
  474. nobind = starpu_get_env_number("STARPU_WORKERS_NOBIND");
  475. topology->nhwcpus = 0;
  476. topology->nhwpus = 0;
  477. #ifndef STARPU_SIMGRID
  478. #ifdef STARPU_HAVE_HWLOC
  479. hwloc_topology_init(&topology->hwtopology);
  480. _starpu_topology_filter(topology->hwtopology);
  481. hwloc_topology_load(topology->hwtopology);
  482. _starpu_allocate_topology_userdata(hwloc_get_root_obj(topology->hwtopology));
  483. #endif
  484. #endif
  485. #ifdef STARPU_SIMGRID
  486. config->topology.nhwcpus = config->topology.nhwpus = _starpu_simgrid_get_nbhosts("CPU");
  487. #elif defined(STARPU_HAVE_HWLOC)
  488. /* Discover the CPUs relying on the hwloc interface and fill CONFIG
  489. * accordingly. */
  490. config->cpu_depth = hwloc_get_type_depth (topology->hwtopology,
  491. HWLOC_OBJ_CORE);
  492. config->pu_depth = hwloc_get_type_depth (topology->hwtopology,
  493. HWLOC_OBJ_PU);
  494. /* Would be very odd */
  495. STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);
  496. if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
  497. {
  498. /* unknown, using logical processors as fallback */
  499. _STARPU_DISP("Warning: The OS did not report CPU cores. Assuming there is only one hardware thread per core.\n");
  500. config->cpu_depth = hwloc_get_type_depth(topology->hwtopology,
  501. HWLOC_OBJ_PU);
  502. }
  503. topology->nhwcpus = hwloc_get_nbobjs_by_depth (topology->hwtopology,
  504. config->cpu_depth);
  505. topology->nhwpus = hwloc_get_nbobjs_by_depth (topology->hwtopology,
  506. config->pu_depth);
  507. #elif defined(HAVE_SYSCONF)
  508. /* Discover the CPUs relying on the sysconf(3) function and fill
  509. * CONFIG accordingly. */
  510. config->topology.nhwcpus = config->topology.nhwpus = sysconf(_SC_NPROCESSORS_ONLN);
  511. #elif defined(_WIN32)
  512. /* Discover the CPUs on Cygwin and MinGW systems. */
  513. SYSTEM_INFO sysinfo;
  514. GetSystemInfo(&sysinfo);
  515. config->topology.nhwcpus = config->topology.nhwpus = sysinfo.dwNumberOfProcessors;
  516. #else
  517. #warning no way to know number of cores, assuming 1
  518. config->topology.nhwcpus = config->topology.nhwpus = 1;
  519. #endif
  520. _starpu_cuda_discover_devices(config);
  521. _starpu_opencl_discover_devices(config);
  522. #ifdef STARPU_USE_SCC
  523. config->topology.nhwscc = _starpu_scc_src_get_device_count();
  524. #endif
  525. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  526. config->topology.nhwmpi = _starpu_mpi_src_get_device_count();
  527. #endif
  528. topology_is_initialized = 1;
  529. }
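/* For instance, on a machine with 16 cores each exposing 2 hardware
 * threads, the hwloc path above yields nhwcpus = 16 and nhwpus = 32,
 * whereas the sysconf() and Windows fallbacks set both fields to the
 * number of logical processors. */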
  530. /*
  531. * Bind workers on the different processors
  532. */
  533. static void
  534. _starpu_initialize_workers_bindid (struct _starpu_machine_config *config)
  535. {
  536. char *strval;
  537. unsigned i;
  538. struct _starpu_machine_topology *topology = &config->topology;
  539. config->current_bindid = 0;
  540. /* conf->workers_bindid indicates the successive logical PU identifiers that
  541. * should be used to bind the workers. It should be either filled
  542. * according to the user's explicit parameters (from starpu_conf) or
  543. * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
  544. * round-robin policy is used to distribute the workers over the
  545. * cores. */
  546. /* what do we use, explicit value, env. variable, or round-robin ? */
  547. if ((strval = starpu_getenv("STARPU_WORKERS_CPUID")))
  548. {
  549. /* STARPU_WORKERS_CPUID certainly contains fewer entries than
  550. * STARPU_NMAXWORKERS, so we reuse its entries in a round
  551. * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
  552. * 2". */
  553. unsigned wrap = 0;
  554. unsigned number_of_entries = 0;
  555. char *endptr;
  556. /* we use the content of the STARPU_WORKERS_CPUID
  557. * env. variable */
  558. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  559. {
  560. if (!wrap)
  561. {
  562. long int val;
  563. val = strtol(strval, &endptr, 10);
  564. if (endptr != strval)
  565. {
  566. topology->workers_bindid[i] =
  567. (unsigned)(val % topology->nhwpus);
  568. strval = endptr;
  569. if (*strval == '-')
  570. {
  571. /* range of values */
  572. long int endval;
  573. strval++;
  574. if (*strval && *strval != ' ' && *strval != ',')
  575. {
  576. endval = strtol(strval, &endptr, 10);
  577. strval = endptr;
  578. }
  579. else
  580. {
  581. endval = topology->nhwpus-1;
  582. if (*strval)
  583. strval++;
  584. }
  585. for (val++; val <= endval && i < STARPU_NMAXWORKERS-1; val++)
  586. {
  587. i++;
  588. topology->workers_bindid[i] =
  589. (unsigned)(val % topology->nhwpus);
  590. }
  591. }
  592. if (*strval == ',')
  593. strval++;
  594. }
  595. else
  596. {
  597. /* there must be at least one entry */
  598. STARPU_ASSERT(i != 0);
  599. number_of_entries = i;
  600. /* there are no more values in the
  601. * string */
  602. wrap = 1;
  603. topology->workers_bindid[i] =
  604. topology->workers_bindid[0];
  605. }
  606. }
  607. else
  608. {
  609. topology->workers_bindid[i] =
  610. topology->workers_bindid[i % number_of_entries];
  611. }
  612. }
  613. }
  614. else if (config->conf.use_explicit_workers_bindid)
  615. {
  616. /* we use the explicit value from the user */
  617. memcpy(topology->workers_bindid,
  618. config->conf.workers_bindid,
  619. STARPU_NMAXWORKERS*sizeof(unsigned));
  620. }
  621. else
  622. {
  623. int nth_per_core = starpu_get_env_number_default("STARPU_NTHREADS_PER_CORE", 1);
  624. int k;
  625. int nbindids=0;
  626. int nhyperthreads = topology->nhwpus / topology->nhwcpus;
  627. STARPU_ASSERT_MSG(nth_per_core > 0 && nth_per_core <= nhyperthreads , "Incorrect number of hyperthreads");
  628. i = 0; /* PU number currently assigned */
  629. k = 0; /* Number of threads already put on the current core */
  630. while(nbindids < STARPU_NMAXWORKERS)
  631. {
  632. if (k >= nth_per_core)
  633. {
  634. /* We have already put enough workers on this
  635. * core, skip remaining PUs from this core, and
  636. * proceed with next core */
  637. i += nhyperthreads-nth_per_core;
  638. k = 0;
  639. continue;
  640. }
  641. /* Add a worker to this core, by using this logical PU */
  642. topology->workers_bindid[nbindids++] =
  643. (unsigned)(i % topology->nhwpus);
  644. k++;
  645. i++;
  646. }
  647. }
  648. for (i = 0; i < STARPU_MAXCPUS;i++)
  649. cpu_worker[i] = STARPU_NOWORKERID;
  650. }
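/* Example: STARPU_WORKERS_CPUID="0-2 4" binds successive workers to logical
 * PUs 0, 1, 2, 4, 0, 1, 2, 4, ... (ranges are expanded and the list is
 * reused round-robin once exhausted). Without this variable and without an
 * explicit list in starpu_conf, only STARPU_NTHREADS_PER_CORE hardware
 * threads of each core are used, in order. */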
  651. /* This function gets the identifier of the next core on which to bind a
  652. * worker. In case a list of preferred cores was specified (logical indexes),
  653. * we look for an available core among the list if possible, otherwise a
  654. * round-robin policy is used. */
  655. static inline int
  656. _starpu_get_next_bindid (struct _starpu_machine_config *config,
  657. int *preferred_binding, int npreferred)
  658. {
  659. struct _starpu_machine_topology *topology = &config->topology;
  660. unsigned found = 0;
  661. int current_preferred;
  662. int nhyperthreads = topology->nhwpus / topology->nhwcpus;
  663. /* loop over the preference list */
  664. for (current_preferred = 0;
  665. current_preferred < npreferred;
  666. current_preferred++)
  667. {
  668. if (found)
  669. break;
  670. /* Try to get this core */
  671. unsigned requested_core = preferred_binding[current_preferred];
  672. /* can we bind the worker on the preferred core ? */
  673. unsigned ind;
  674. /* Look at the remaining cores to be bound to */
  675. for (ind = config->current_bindid;
  676. ind < topology->nhwpus / nhyperthreads;
  677. ind++)
  678. {
  679. if (topology->workers_bindid[ind] == requested_core * nhyperthreads)
  680. {
  681. /* the cpu is available, we use it ! In order
  682. * to make sure that it will not be used again
  683. * later on, we exchange it with the next bindid we were supposed to use */
  684. topology->workers_bindid[ind] =
  685. topology->workers_bindid[config->current_bindid];
  686. topology->workers_bindid[config->current_bindid] = requested_core * nhyperthreads;
  687. found = 1;
  688. break;
  689. }
  690. }
  691. }
  692. unsigned i = ((config->current_bindid++) % STARPU_NMAXWORKERS);
  693. return (int)topology->workers_bindid[i];
  694. }
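/* Example of the preference handling above: if workers_bindid still holds
 * {0, 1, 2, 3, ...} and the caller prefers core 2, the entry equal to
 * 2 * nhyperthreads is swapped into position current_bindid, so it is
 * handed out now and will not be picked again by later round-robin calls. */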
  695. unsigned
  696. _starpu_topology_get_nhwcpu (struct _starpu_machine_config *config)
  697. {
  698. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  699. _starpu_opencl_init();
  700. #endif
  701. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  702. _starpu_init_cuda();
  703. #endif
  704. _starpu_init_topology(config);
  705. return config->topology.nhwcpus;
  706. }
  707. unsigned
  708. _starpu_topology_get_nhwpu (struct _starpu_machine_config *config)
  709. {
  710. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  711. _starpu_opencl_init();
  712. #endif
  713. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  714. _starpu_init_cuda();
  715. #endif
  716. _starpu_init_topology(config);
  717. return config->topology.nhwpus;
  718. }
  719. #ifdef STARPU_HAVE_HWLOC
  720. void _starpu_topology_filter(hwloc_topology_t topology)
  721. {
  722. #if HWLOC_API_VERSION >= 0x20000
  723. hwloc_topology_set_io_types_filter(topology, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
  724. #else
  725. hwloc_topology_set_flags(topology, HWLOC_TOPOLOGY_FLAG_IO_DEVICES | HWLOC_TOPOLOGY_FLAG_IO_BRIDGES);
  726. #endif
  727. }
  728. #endif
  729. #ifdef STARPU_USE_MIC
  730. static void
  731. _starpu_init_mic_config (struct _starpu_machine_config *config,
  732. struct starpu_conf *user_conf,
  733. unsigned mic_idx)
  734. {
  735. // Configure the MIC device of index MIC_IDX.
  736. struct _starpu_machine_topology *topology = &config->topology;
  737. topology->nhwmiccores[mic_idx] = 0;
  738. _starpu_init_mic_topology (config, mic_idx);
  739. int nmiccores;
  740. nmiccores = starpu_get_env_number("STARPU_NMICTHREADS");
  741. if (nmiccores == -1)
  742. {
  743. /* Nothing was specified, so let's use the number of
  744. * detected MIC cores. */
  745. nmiccores = topology->nhwmiccores[mic_idx];
  746. }
  747. else
  748. {
  749. if ((unsigned) nmiccores > topology->nhwmiccores[mic_idx])
  750. {
  751. /* The user requested more MIC cores than are available */
  752. _STARPU_MSG("# Warning: %d MIC cores requested. Only %d available.\n", nmiccores, topology->nhwmiccores[mic_idx]);
  753. nmiccores = topology->nhwmiccores[mic_idx];
  754. }
  755. }
  756. topology->nmiccores[mic_idx] = nmiccores;
  757. STARPU_ASSERT_MSG(topology->nmiccores[mic_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
  758. "topology->nmiccores[mic_idx(%d)] (%d) + topology->nworkers (%d) <= STARPU_NMAXWORKERS (%d)",
  759. mic_idx, topology->nmiccores[mic_idx], topology->nworkers, STARPU_NMAXWORKERS);
  760. /* _starpu_initialize_workers_mic_deviceid (config); */
  761. mic_worker_set[mic_idx].workers = &config->workers[topology->nworkers];
  762. mic_worker_set[mic_idx].nworkers = topology->nmiccores[mic_idx];
  763. unsigned miccore_id;
  764. for (miccore_id = 0; miccore_id < topology->nmiccores[mic_idx]; miccore_id++)
  765. {
  766. int worker_idx = topology->nworkers + miccore_id;
  767. config->workers[worker_idx].set = &mic_worker_set[mic_idx];
  768. config->workers[worker_idx].arch = STARPU_MIC_WORKER;
  769. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  770. config->workers[worker_idx].perf_arch.ndevices = 1;
  771. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_MIC_WORKER;
  772. config->workers[worker_idx].perf_arch.devices[0].devid = mic_idx;
  773. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  774. config->workers[worker_idx].devid = mic_idx;
  775. config->workers[worker_idx].subworkerid = miccore_id;
  776. config->workers[worker_idx].worker_mask = STARPU_MIC;
  777. config->worker_mask |= STARPU_MIC;
  778. }
  779. topology->nworkers += topology->nmiccores[mic_idx];
  780. }
  781. static COIENGINE mic_handles[STARPU_MAXMICDEVS];
  782. COIPROCESS _starpu_mic_process[STARPU_MAXMICDEVS];
  783. #endif
  784. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  785. static void
  786. _starpu_init_mpi_config (struct _starpu_machine_config *config,
  787. struct starpu_conf *user_conf,
  788. unsigned mpi_idx)
  789. {
  790. struct _starpu_machine_topology *topology = &config->topology;
  791. topology->nhwmpicores[mpi_idx] = 0;
  792. _starpu_init_mpi_topology (config, mpi_idx);
  793. int nmpicores;
  794. nmpicores = starpu_get_env_number("STARPU_NMPIMSTHREADS");
  795. if (nmpicores == -1)
  796. {
  797. /* Nothing was specified, so let's use the number of
  798. * detected MPI cores. */
  799. nmpicores = topology->nhwmpicores[mpi_idx];
  800. }
  801. else
  802. {
  803. if ((unsigned) nmpicores > topology->nhwmpicores[mpi_idx])
  804. {
  805. /* The user requested more MPI cores than are available */
  806. fprintf(stderr,
  807. "# Warning: %d MPI cores requested. Only %d available.\n",
  808. nmpicores, topology->nhwmpicores[mpi_idx]);
  809. nmpicores = topology->nhwmpicores[mpi_idx];
  810. }
  811. }
  812. topology->nmpicores[mpi_idx] = nmpicores;
  813. STARPU_ASSERT_MSG(topology->nmpicores[mpi_idx] + topology->nworkers <= STARPU_NMAXWORKERS,
  814. "topology->nmpicores[mpi_idx(%d)] (%d) + topology->nworkers (%d) <= STARPU_NMAXWORKERS (%d)",
  815. mpi_idx, topology->nmpicores[mpi_idx], topology->nworkers, STARPU_NMAXWORKERS);
  816. mpi_worker_set[mpi_idx].workers = &config->workers[topology->nworkers];
  817. mpi_worker_set[mpi_idx].nworkers = topology->nmpicores[mpi_idx];
  818. unsigned mpicore_id;
  819. for (mpicore_id = 0; mpicore_id < topology->nmpicores[mpi_idx]; mpicore_id++)
  820. {
  821. int worker_idx = topology->nworkers + mpicore_id;
  822. config->workers[worker_idx].set = &mpi_worker_set[mpi_idx];
  823. config->workers[worker_idx].arch = STARPU_MPI_MS_WORKER;
  824. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  825. config->workers[worker_idx].perf_arch.ndevices = 1;
  826. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_MPI_MS_WORKER;
  827. config->workers[worker_idx].perf_arch.devices[0].devid = mpi_idx;
  828. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  829. config->workers[worker_idx].devid = mpi_idx;
  830. config->workers[worker_idx].subworkerid = mpicore_id;
  831. config->workers[worker_idx].worker_mask = STARPU_MPI_MS;
  832. config->worker_mask |= STARPU_MPI_MS;
  833. }
  834. mpi_ms_nodes[mpi_idx]->baseworkerid = topology->nworkers;
  835. topology->nworkers += topology->nmpicores[mpi_idx];
  836. }
  837. #endif
  838. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  839. static void
  840. _starpu_init_mp_config (struct _starpu_machine_config *config,
  841. struct starpu_conf *user_conf, int no_mp_config)
  842. {
  843. /* Discover and configure the mp topology. That means:
  844. * - discover the number of mp nodes;
  845. * - initialize each discovered node;
  846. * - discover the local topology (number of PUs/devices) of each node;
  847. * - configure the workers accordingly.
  848. */
  849. #ifdef STARPU_USE_MIC
  850. if (!no_mp_config)
  851. {
  852. struct _starpu_machine_topology *topology = &config->topology;
  853. /* Discover and initialize the number of MIC nodes through the mp
  854. * infrastructure. */
  855. unsigned nhwmicdevices = _starpu_mic_src_get_device_count();
  856. int reqmicdevices = starpu_get_env_number("STARPU_NMIC");
  857. if (reqmicdevices == -1 && user_conf)
  858. reqmicdevices = user_conf->nmic;
  859. if (reqmicdevices == -1)
  860. /* Nothing was specified, so let's use the number of
  861. * detected MIC devices. */
  862. reqmicdevices = nhwmicdevices;
  863. if (reqmicdevices != -1)
  864. {
  865. if ((unsigned) reqmicdevices > nhwmicdevices)
  866. {
  867. /* The user requested more MIC devices than are available */
  868. _STARPU_MSG("# Warning: %d MIC devices requested. Only %d available.\n", reqmicdevices, nhwmicdevices);
  869. reqmicdevices = nhwmicdevices;
  870. }
  871. }
  872. topology->nmicdevices = 0;
  873. unsigned i;
  874. for (i = 0; i < (unsigned) reqmicdevices; i++)
  875. if (0 == _starpu_init_mic_node (config, i, &mic_handles[i], &_starpu_mic_process[i]))
  876. topology->nmicdevices++;
  877. for (i = 0; i < topology->nmicdevices; i++)
  878. _starpu_init_mic_config (config, user_conf, i);
  879. }
  880. #endif
  881. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  882. {
  883. struct _starpu_machine_topology *topology = &config->topology;
  884. /* Discover and initialize the number of MPI nodes through the mp
  885. * infrastructure. */
  886. unsigned nhwmpidevices = _starpu_mpi_src_get_device_count();
  887. int reqmpidevices = starpu_get_env_number("STARPU_NMPI_MS");
  888. if (reqmpidevices == -1 && user_conf)
  889. reqmpidevices = user_conf->nmpi_ms;
  890. if (reqmpidevices == -1)
  891. /* Nothing was specified, so let's use the number of
  892. * detected MPI devices. */
  893. reqmpidevices = nhwmpidevices;
  894. if (reqmpidevices != -1)
  895. {
  896. if ((unsigned) reqmpidevices > nhwmpidevices)
  897. {
  898. /* The user requested more MPI devices than are available */
  899. _STARPU_MSG("# Warning: %d MPI Master-Slave devices requested. Only %d available.\n",
  900. reqmpidevices, nhwmpidevices);
  901. reqmpidevices = nhwmpidevices;
  902. }
  903. }
  904. topology->nmpidevices = reqmpidevices;
  905. /* if the user does not want to use MPI slaves, we close the slave processes */
  906. if (no_mp_config && topology->nmpidevices == 0)
  907. {
  908. _starpu_mpi_common_mp_deinit();
  909. exit(0);
  910. }
  911. if (!no_mp_config)
  912. {
  913. unsigned i;
  914. for (i = 0; i < topology->nmpidevices; i++)
  915. mpi_ms_nodes[i] = _starpu_mp_common_node_create(STARPU_NODE_MPI_SOURCE, i);
  916. for (i = 0; i < topology->nmpidevices; i++)
  917. _starpu_init_mpi_config (config, user_conf, i);
  918. }
  919. }
  920. #endif
  921. }
  922. #endif
  923. #ifdef STARPU_USE_MIC
  924. static void
  925. _starpu_deinit_mic_node (unsigned mic_idx)
  926. {
  927. _starpu_mp_common_send_command(mic_nodes[mic_idx], STARPU_MP_COMMAND_EXIT, NULL, 0);
  928. COIProcessDestroy(_starpu_mic_process[mic_idx], -1, 0, NULL, NULL);
  929. _starpu_mp_common_node_destroy(mic_nodes[mic_idx]);
  930. }
  931. #endif
  932. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  933. static void _starpu_deinit_mpi_node(int devid)
  934. {
  935. _starpu_mp_common_send_command(mpi_ms_nodes[devid], STARPU_MP_COMMAND_EXIT, NULL, 0);
  936. _starpu_mp_common_node_destroy(mpi_ms_nodes[devid]);
  937. }
  938. #endif
  939. #if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
  940. static void
  941. _starpu_deinit_mp_config (struct _starpu_machine_config *config)
  942. {
  943. struct _starpu_machine_topology *topology = &config->topology;
  944. unsigned i;
  945. #ifdef STARPU_USE_MIC
  946. for (i = 0; i < topology->nmicdevices; i++)
  947. _starpu_deinit_mic_node (i);
  948. _starpu_mic_clear_kernels();
  949. #endif
  950. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  951. for (i = 0; i < topology->nmpidevices; i++)
  952. _starpu_deinit_mpi_node (i);
  953. #endif
  954. }
  955. #endif
  956. #ifdef STARPU_HAVE_HWLOC
  957. static unsigned
  958. _starpu_topology_count_ngpus(hwloc_obj_t obj)
  959. {
  960. struct _starpu_hwloc_userdata *data = obj->userdata;
  961. unsigned n = data->ngpus;
  962. unsigned i;
  963. for (i = 0; i < obj->arity; i++)
  964. n += _starpu_topology_count_ngpus(obj->children[i]);
  965. data->ngpus = n;
  966. #ifdef STARPU_VERBOSE
  967. {
  968. char name[64];
  969. hwloc_obj_type_snprintf(name, sizeof(name), obj, 0);
  970. _STARPU_DEBUG("hwloc obj %s has %u GPUs below\n", name, n);
  971. }
  972. #endif
  973. return n;
  974. }
  975. #endif
  976. static int
  977. _starpu_init_machine_config(struct _starpu_machine_config *config, int no_mp_config STARPU_ATTRIBUTE_UNUSED)
  978. {
  979. int i;
  980. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  981. {
  982. config->workers[i].workerid = i;
  983. config->workers[i].set = NULL;
  984. }
  985. struct _starpu_machine_topology *topology = &config->topology;
  986. topology->nworkers = 0;
  987. topology->ncombinedworkers = 0;
  988. topology->nsched_ctxs = 0;
  989. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  990. _starpu_opencl_init();
  991. #endif
  992. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  993. _starpu_init_cuda();
  994. #endif
  995. _starpu_init_topology(config);
  996. _starpu_initialize_workers_bindid(config);
  997. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  998. for (i = 0; i < (int) (sizeof(cuda_worker_set)/sizeof(cuda_worker_set[0])); i++)
  999. cuda_worker_set[i].workers = NULL;
  1000. #endif
  1001. #ifdef STARPU_USE_MIC
  1002. for (i = 0; i < (int) (sizeof(mic_worker_set)/sizeof(mic_worker_set[0])); i++)
  1003. mic_worker_set[i].workers = NULL;
  1004. #endif
  1005. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1006. for (i = 0; i < (int) (sizeof(mpi_worker_set)/sizeof(mpi_worker_set[0])); i++)
  1007. mpi_worker_set[i].workers = NULL;
  1008. #endif
  1009. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1010. int ncuda = config->conf.ncuda;
  1011. int nworker_per_cuda = starpu_get_env_number_default("STARPU_NWORKER_PER_CUDA", 1);
  1012. STARPU_ASSERT_MSG(nworker_per_cuda > 0, "STARPU_NWORKER_PER_CUDA has to be > 0");
  1013. STARPU_ASSERT_MSG(nworker_per_cuda < STARPU_NMAXWORKERS, "STARPU_NWORKER_PER_CUDA (%d) cannot be higher than STARPU_NMAXWORKERS (%d)\n", nworker_per_cuda, STARPU_NMAXWORKERS);
  1014. #ifndef STARPU_NON_BLOCKING_DRIVERS
  1015. if (nworker_per_cuda > 1)
  1016. {
  1017. _STARPU_DISP("Warning: reducing STARPU_NWORKER_PER_CUDA to 1 because blocking drivers are enabled\n");
  1018. nworker_per_cuda = 1;
  1019. }
  1020. #endif
  1021. if (ncuda != 0)
  1022. {
  1023. /* The user did not disable CUDA. We need to initialize CUDA
  1024. * early to count the number of devices */
  1025. _starpu_init_cuda();
  1026. int nb_devices = _starpu_get_cuda_device_count();
  1027. if (ncuda == -1)
  1028. {
  1029. /* Nothing was specified, so let's choose ! */
  1030. ncuda = nb_devices;
  1031. }
  1032. else
  1033. {
  1034. if (ncuda > nb_devices)
  1035. {
  1036. /* The user requested more CUDA devices than
  1037. * are available */
  1038. _STARPU_DISP("Warning: %d CUDA devices requested. Only %d available.\n", ncuda, nb_devices);
  1039. ncuda = nb_devices;
  1040. }
  1041. }
  1042. }
  1043. /* Now we know how many CUDA devices will be used */
  1044. topology->ncudagpus = ncuda;
  1045. topology->nworkerpercuda = nworker_per_cuda;
  1046. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  1047. _starpu_initialize_workers_cuda_gpuid(config);
  1048. /* allow having one worker per stream */
  1049. topology->cuda_th_per_stream = starpu_get_env_number_default("STARPU_CUDA_THREAD_PER_WORKER", -1);
  1050. topology->cuda_th_per_dev = starpu_get_env_number_default("STARPU_CUDA_THREAD_PER_DEV", -1);
  1051. /* per device by default */
  1052. if (topology->cuda_th_per_dev == -1)
  1053. {
  1054. if (topology->cuda_th_per_stream == 1)
  1055. topology->cuda_th_per_dev = 0;
  1056. else
  1057. topology->cuda_th_per_dev = 1;
  1058. }
  1059. /* Not per stream by default */
  1060. if (topology->cuda_th_per_stream == -1)
  1061. {
  1062. topology->cuda_th_per_stream = 0;
  1063. }
  1064. STARPU_ASSERT_MSG(topology->cuda_th_per_dev != 1 || topology->cuda_th_per_stream != 1, "It does not make sense to set both STARPU_CUDA_THREAD_PER_WORKER and STARPU_CUDA_THREAD_PER_DEV to 1, please choose either per worker or per device or none");
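/* Summary of the resulting driver-thread layouts:
 * - cuda_th_per_stream == 1: one driver thread per CUDA worker (stream);
 * - cuda_th_per_dev == 1 (the default): one driver thread per CUDA device,
 *   shared by its STARPU_NWORKER_PER_CUDA workers;
 * - both set to 0: a single driver thread handles all CUDA devices. */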
  1065. if (!topology->cuda_th_per_dev)
  1066. {
  1067. cuda_worker_set[0].workers = &config->workers[topology->nworkers];
  1068. cuda_worker_set[0].nworkers = topology->ncudagpus * nworker_per_cuda;
  1069. }
  1070. unsigned cudagpu;
  1071. for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
  1072. {
  1073. int devid = _starpu_get_next_cuda_gpuid(config);
  1074. int worker_idx0 = topology->nworkers + cudagpu * nworker_per_cuda;
  1075. struct _starpu_worker_set *worker_set;
  1076. if (topology->cuda_th_per_dev)
  1077. {
  1078. worker_set = &cuda_worker_set[devid];
  1079. worker_set->workers = &config->workers[worker_idx0];
  1080. worker_set->nworkers = nworker_per_cuda;
  1081. }
  1082. else
  1083. {
  1084. /* Same worker set for all devices */
  1085. worker_set = &cuda_worker_set[0];
  1086. }
  1087. for (i = 0; i < nworker_per_cuda; i++)
  1088. {
  1089. int worker_idx = worker_idx0 + i;
  1090. if(topology->cuda_th_per_stream)
  1091. {
  1092. /* Just one worker in the set */
  1093. _STARPU_CALLOC(config->workers[worker_idx].set, 1, sizeof(struct _starpu_worker_set));
  1094. config->workers[worker_idx].set->workers = &config->workers[worker_idx];
  1095. config->workers[worker_idx].set->nworkers = 1;
  1096. }
  1097. else
  1098. config->workers[worker_idx].set = worker_set;
  1099. config->workers[worker_idx].arch = STARPU_CUDA_WORKER;
  1100. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1101. config->workers[worker_idx].perf_arch.ndevices = 1;
  1102. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_CUDA_WORKER;
  1103. config->workers[worker_idx].perf_arch.devices[0].devid = devid;
  1104. // TODO: fix perfmodels etc.
  1105. //config->workers[worker_idx].perf_arch.ncore = nworker_per_cuda - 1;
  1106. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1107. config->workers[worker_idx].devid = devid;
  1108. config->workers[worker_idx].subworkerid = i;
  1109. config->workers[worker_idx].worker_mask = STARPU_CUDA;
  1110. config->worker_mask |= STARPU_CUDA;
  1111. struct handle_entry *entry;
  1112. HASH_FIND_INT(devices_using_cuda, &devid, entry);
  1113. if (!entry)
  1114. {
  1115. _STARPU_MALLOC(entry, sizeof(*entry));
  1116. entry->gpuid = devid;
  1117. HASH_ADD_INT(devices_using_cuda, gpuid, entry);
  1118. }
  1119. }
  1120. #ifndef STARPU_SIMGRID
  1121. #if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
  1122. {
  1123. hwloc_obj_t obj = hwloc_cuda_get_device_osdev_by_index(topology->hwtopology, devid);
  1124. if (obj)
  1125. {
  1126. struct _starpu_hwloc_userdata *data = obj->userdata;
  1127. data->ngpus++;
  1128. }
  1129. else
  1130. {
  1131. _STARPU_DISP("Warning: could not find location of CUDA%u, do you have the hwloc CUDA plugin installed?\n", devid);
  1132. }
  1133. }
  1134. #endif
  1135. #endif
  1136. }
  1137. topology->nworkers += topology->ncudagpus * nworker_per_cuda;
  1138. #endif
  1139. #if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
  1140. int nopencl = config->conf.nopencl;
  1141. if (nopencl != 0)
  1142. {
  1143. /* The user did not disable OPENCL. We need to initialize
  1144. * OpenCL early to count the number of devices */
  1145. _starpu_opencl_init();
  1146. int nb_devices;
  1147. nb_devices = _starpu_opencl_get_device_count();
  1148. if (nopencl == -1)
  1149. {
  1150. /* Nothing was specified, so let's choose ! */
  1151. nopencl = nb_devices;
  1152. if (nopencl > STARPU_MAXOPENCLDEVS)
  1153. {
  1154. _STARPU_DISP("Warning: %d OpenCL devices available. Only %d enabled. Use configure option --enable-maxopencldadev=xxx to update the maximum value of supported OpenCL devices.\n", nb_devices, STARPU_MAXOPENCLDEVS);
  1155. nopencl = STARPU_MAXOPENCLDEVS;
  1156. }
  1157. }
  1158. else
  1159. {
  1160. /* Let's make sure this value is OK. */
  1161. if (nopencl > nb_devices)
  1162. {
  1163. /* The user requested more OpenCL devices than
  1164. * are available */
  1165. _STARPU_DISP("Warning: %d OpenCL devices requested. Only %d available.\n", nopencl, nb_devices);
  1166. nopencl = nb_devices;
  1167. }
  1168. /* Let's make sure this value is OK. */
  1169. if (nopencl > STARPU_MAXOPENCLDEVS)
  1170. {
  1171. _STARPU_DISP("Warning: %d OpenCL devices requested. Only %d enabled. Use configure option --enable-maxopencldev=xxx to update the maximum value of supported OpenCL devices.\n", nopencl, STARPU_MAXOPENCLDEVS);
  1172. nopencl = STARPU_MAXOPENCLDEVS;
  1173. }
  1174. }
  1175. }
  1176. topology->nopenclgpus = nopencl;
  1177. STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
  1178. _starpu_initialize_workers_opencl_gpuid(config);
  1179. unsigned openclgpu;
  1180. for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
  1181. {
  1182. int worker_idx = topology->nworkers + openclgpu;
  1183. int devid = _starpu_get_next_opencl_gpuid(config);
  1184. if (devid == -1)
  1185. {
  1186. // There are no more devices left
  1187. topology->nopenclgpus = openclgpu;
  1188. break;
  1189. }
  1190. config->workers[worker_idx].arch = STARPU_OPENCL_WORKER;
  1191. _STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1192. config->workers[worker_idx].perf_arch.ndevices = 1;
  1193. config->workers[worker_idx].perf_arch.devices[0].type = STARPU_OPENCL_WORKER;
  1194. config->workers[worker_idx].perf_arch.devices[0].devid = devid;
  1195. config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
  1196. config->workers[worker_idx].subworkerid = 0;
  1197. config->workers[worker_idx].devid = devid;
  1198. config->workers[worker_idx].worker_mask = STARPU_OPENCL;
  1199. config->worker_mask |= STARPU_OPENCL;
  1200. }
  1201. topology->nworkers += topology->nopenclgpus;
  1202. #endif
  1203. #ifdef STARPU_USE_SCC
  1204. int nscc = config->conf.nscc;
  1205. unsigned nb_scc_nodes = _starpu_scc_src_get_device_count();
  1206. if (nscc != 0)
  1207. {
  1208. /* The user did not disable SCC. We need to count
  1209. * the number of devices */
  1210. int nb_devices = nb_scc_nodes;
  1211. if (nscc == -1)
  1212. {
  1213. /* Nothing was specified, so let's choose ! */
  1214. nscc = nb_devices;
  1215. if (nscc > STARPU_MAXSCCDEVS)
  1216. {
  1217. _STARPU_DISP("Warning: %d SCC devices available. Only %d enabled. Use configuration option --enable-maxsccdev=xxx to update the maximum value of supported SCC devices.\n", nb_devices, STARPU_MAXSCCDEVS);
  1218. nscc = STARPU_MAXSCCDEVS;
  1219. }
  1220. }
  1221. else
  1222. {
  1223. /* Let's make sure this value is OK. */
  1224. if (nscc > nb_devices)
  1225. {
  1226. /* The user requested more SCC devices than are available */
  1227. _STARPU_DISP("Warning: %d SCC devices requested. Only %d available.\n", nscc, nb_devices);
  1228. nscc = nb_devices;
  1229. }
  1230. /* Let's make sure this value is OK. */
  1231. if (nscc > STARPU_MAXSCCDEVS)
  1232. {
  1233. _STARPU_DISP("Warning: %d SCC devices requested. Only %d enabled. Use configure option --enable-maxsccdev=xxx to update the maximum value of supported SCC devices.\n", nscc, STARPU_MAXSCCDEVS);
  1234. nscc = STARPU_MAXSCCDEVS;
  1235. }
  1236. }
  1237. }
  1238. /* Now we know how many SCC devices will be used */
  1239. topology->nsccdevices = nscc;
  1240. STARPU_ASSERT(topology->nsccdevices + topology->nworkers <= STARPU_NMAXWORKERS);
  1241. _starpu_initialize_workers_scc_deviceid(config);
  1242. unsigned sccdev;
  1243. for (sccdev = 0; sccdev < topology->nsccdevices; sccdev++)
  1244. {
  1245. config->workers[topology->nworkers + sccdev].arch = STARPU_SCC_WORKER;
  1246. int devid = _starpu_get_next_scc_deviceid(config);
  1247. _STARPU_MALLOC(config->workers[topology->nworkers + sccdev].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
  1248. config->workers[topology->nworkers + sccdev].perf_arch.ndevices = 1;
  1249. config->workers[topology->nworkers + sccdev].perf_arch.devices[0].type = STARPU_SCC_WORKER;
  1250. config->workers[topology->nworkers + sccdev].perf_arch.devices[0].devid = sccdev;
  1251. config->workers[topology->nworkers + sccdev].perf_arch.devices[0].ncores = 1;
  1252. config->workers[topology->nworkers + sccdev].subworkerid = 0;
  1253. config->workers[topology->nworkers + sccdev].devid = devid;
  1254. config->workers[topology->nworkers + sccdev].worker_mask = STARPU_SCC;
  1255. config->worker_mask |= STARPU_SCC;
  1256. }
  1257. for (; sccdev < nb_scc_nodes; ++sccdev)
  1258. _starpu_scc_exit_useless_node(sccdev);
  1259. topology->nworkers += topology->nsccdevices;
  1260. #endif /* STARPU_USE_SCC */
#if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
	_starpu_init_mp_config (config, &config->conf, no_mp_config);
#endif
	/* We put the CPU section after the accelerators: if an accelerator was
	 * found, we devote one CPU core to driving it. */
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
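	/* conf.ncpus follows the usual convention: -1 means "choose
	 * automatically", 0 disables CPU workers, and any other value is an
	 * explicit request (it is typically taken from starpu_conf::ncpus,
	 * possibly set from the STARPU_NCPU environment variable). */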
	int ncpu = config->conf.ncpus;
	if (ncpu != 0)
	{
		if (ncpu == -1)
		{
			unsigned mic_busy_cpus = 0;
			unsigned j = 0;
			for (j = 0; j < STARPU_MAXMICDEVS; j++)
				mic_busy_cpus += (topology->nmiccores[j] ? 1 : 0);
			unsigned mpi_ms_busy_cpus = 0;
#ifdef STARPU_USE_MPI_MASTER_SLAVE
#ifdef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
			for (j = 0; j < STARPU_MAXMPIDEVS; j++)
				mpi_ms_busy_cpus += (topology->nmpicores[j] ? 1 : 0);
#else
			mpi_ms_busy_cpus = 1; /* we launch one thread to control all slaves */
#endif
#endif /* STARPU_USE_MPI_MASTER_SLAVE */
			unsigned cuda_busy_cpus = 0;
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
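			/* Number of CPU cores consumed by CUDA driver threads: a
			 * single core when one thread drives all devices, one core
			 * per stream when cuda_th_per_stream is set, otherwise one
			 * core per device. */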
			cuda_busy_cpus =
				topology->cuda_th_per_dev == 0 && topology->cuda_th_per_stream == 0 ?
					(topology->ncudagpus ? 1 : 0) :
				topology->cuda_th_per_stream ?
					(nworker_per_cuda * topology->ncudagpus) :
					topology->ncudagpus;
#endif
			unsigned already_busy_cpus = mpi_ms_busy_cpus + mic_busy_cpus
				+ cuda_busy_cpus
				+ topology->nopenclgpus + topology->nsccdevices;
			long avail_cpus = (long) topology->nhwcpus - (long) already_busy_cpus;
			if (avail_cpus < 0)
				avail_cpus = 0;
			int nth_per_core = starpu_get_env_number_default("STARPU_NTHREADS_PER_CORE", 1);
			avail_cpus *= nth_per_core;
			ncpu = STARPU_MIN(avail_cpus, STARPU_MAXCPUS);
		}
		else
		{
			if (ncpu > STARPU_MAXCPUS)
			{
				_STARPU_DISP("Warning: %d CPU devices requested. Only %d enabled. Use configure option --enable-maxcpus=xxx to update the maximum value of supported CPU devices.\n", ncpu, STARPU_MAXCPUS);
				ncpu = STARPU_MAXCPUS;
			}
		}
	}
	topology->ncpus = ncpu;
	STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
	unsigned cpu;
	for (cpu = 0; cpu < topology->ncpus; cpu++)
	{
		int worker_idx = topology->nworkers + cpu;
		config->workers[worker_idx].arch = STARPU_CPU_WORKER;
		_STARPU_MALLOC(config->workers[worker_idx].perf_arch.devices, sizeof(struct starpu_perfmodel_device));
		config->workers[worker_idx].perf_arch.ndevices = 1;
		config->workers[worker_idx].perf_arch.devices[0].type = STARPU_CPU_WORKER;
		config->workers[worker_idx].perf_arch.devices[0].devid = 0;
		config->workers[worker_idx].perf_arch.devices[0].ncores = 1;
		config->workers[worker_idx].subworkerid = 0;
		config->workers[worker_idx].devid = cpu;
		config->workers[worker_idx].worker_mask = STARPU_CPU;
		config->worker_mask |= STARPU_CPU;
	}
	topology->nworkers += topology->ncpus;
#endif
	if (topology->nworkers == 0)
	{
		_STARPU_DEBUG("No worker found, aborting ...\n");
		return -ENODEV;
	}
	return 0;
}
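/* Release everything allocated by _starpu_init_machine_config() and
 * _starpu_init_workers_binding(): per-worker perf_arch device descriptions,
 * hwloc cpusets and the per-bindid worker lists. */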
void _starpu_destroy_machine_config(struct _starpu_machine_config *config)
{
	_starpu_close_debug_logfile();
	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
		struct _starpu_worker *workerarg = &config->workers[worker];
		int bindid = workerarg->bindid;
		free(workerarg->perf_arch.devices);
#ifdef STARPU_HAVE_HWLOC
		hwloc_bitmap_free(workerarg->hwloc_cpu_set);
		if (bindid != -1)
		{
			hwloc_obj_t worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
									config->pu_depth,
									bindid);
			struct _starpu_hwloc_userdata *data = worker_obj->userdata;
			if (data->worker_list)
			{
				_starpu_worker_list_delete(data->worker_list);
				data->worker_list = NULL;
			}
		}
#endif
		if (bindid != -1)
		{
			free(config->bindid_workers[bindid].workerids);
			config->bindid_workers[bindid].workerids = NULL;
		}
	}
	free(config->bindid_workers);
	config->bindid_workers = NULL;
	config->nbindid = 0;
	unsigned combined_worker_id;
	for (combined_worker_id = 0; combined_worker_id < config->topology.ncombinedworkers; combined_worker_id++)
	{
		struct _starpu_combined_worker *combined_worker = &config->combined_workers[combined_worker_id];
#ifdef STARPU_HAVE_HWLOC
		hwloc_bitmap_free(combined_worker->hwloc_cpu_set);
#endif
		free(combined_worker->perf_arch.devices);
	}
#ifdef STARPU_HAVE_HWLOC
	_starpu_deallocate_topology_userdata(hwloc_get_root_obj(config->topology.hwtopology));
	hwloc_topology_destroy(config->topology.hwtopology);
#endif
	topology_is_initialized = 0;
#ifdef STARPU_USE_CUDA
	struct handle_entry *entry, *tmp;
	HASH_ITER(hh, devices_using_cuda, entry, tmp)
	{
		HASH_DEL(devices_using_cuda, entry);
		free(entry);
	}
	devices_using_cuda = NULL;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	int i;
	for (i = 0; i < STARPU_NARCH; i++)
		may_bind_automatically[i] = 0;
#endif
}
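/* Bind the calling thread to the processing unit designated by cpuid, and
 * record which worker is bound there so that double bindings can be reported.
 * Binding is skipped under simgrid and when binding was disabled (the nobind
 * flag, typically driven by the STARPU_WORKERS_NOBIND environment variable). */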
void
_starpu_bind_thread_on_cpu (
	struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED,
	int cpuid STARPU_ATTRIBUTE_UNUSED, int workerid STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_SIMGRID
	return;
#else
	if (nobind > 0)
		return;
	if (cpuid < 0)
		return;
	if (workerid != STARPU_NOWORKERID && cpuid < STARPU_MAXCPUS)
	{
		int previous = cpu_worker[cpuid];
		if (previous != STARPU_NOWORKERID && previous != workerid)
			_STARPU_DISP("Warning: both workers %d and %d are bound to the same PU %d, this will strongly degrade performance\n", previous, workerid, cpuid);
		else
			cpu_worker[cpuid] = workerid;
	}
#ifdef STARPU_HAVE_HWLOC
	const struct hwloc_topology_support *support;
#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);
	support = hwloc_topology_get_support (config->topology.hwtopology);
	if (support->cpubind->set_thisthread_cpubind)
	{
		hwloc_obj_t obj =
			hwloc_get_obj_by_depth (config->topology.hwtopology,
						config->pu_depth, cpuid);
		hwloc_bitmap_t set = obj->cpuset;
		int ret;
		hwloc_bitmap_singlify(set);
		ret = hwloc_set_cpubind (config->topology.hwtopology, set,
					 HWLOC_CPUBIND_THREAD);
		if (ret)
		{
			perror("hwloc_set_cpubind");
			STARPU_ABORT();
		}
	}
#elif defined(HAVE_PTHREAD_SETAFFINITY_NP) && defined(__linux__)
	int ret;
	/* pin the thread to the requested CPU */
	cpu_set_t aff_mask;
	CPU_ZERO(&aff_mask);
	CPU_SET(cpuid, &aff_mask);
	starpu_pthread_t self = starpu_pthread_self();
	ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
	if (ret)
	{
		const char *msg = strerror(ret);
		_STARPU_MSG("pthread_setaffinity_np: %s\n", msg);
		STARPU_ABORT();
	}
#elif defined(_WIN32)
	DWORD mask = 1 << cpuid;
	if (!SetThreadAffinityMask(GetCurrentThread(), mask))
	{
		_STARPU_ERROR("SetThreadAffinityMask(%lx) failed\n", mask);
	}
#else
#warning no CPU binding support
#endif
#endif
}
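/* Bind the calling thread to the whole hwloc cpuset of a combined (parallel)
 * worker rather than to a single PU. */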
void
_starpu_bind_thread_on_cpus (
	struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED,
	struct _starpu_combined_worker *combined_worker STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_SIMGRID
	return;
#endif
#ifdef STARPU_HAVE_HWLOC
	const struct hwloc_topology_support *support;
#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);
	support = hwloc_topology_get_support(config->topology.hwtopology);
	if (support->cpubind->set_thisthread_cpubind)
	{
		hwloc_bitmap_t set = combined_worker->hwloc_cpu_set;
		int ret;
		ret = hwloc_set_cpubind (config->topology.hwtopology, set,
					 HWLOC_CPUBIND_THREAD);
		if (ret)
		{
			perror("binding thread");
			STARPU_ABORT();
		}
	}
#else
#ifdef __GLIBC__
	sched_setaffinity(0, sizeof(combined_worker->cpu_set), &combined_worker->cpu_set);
#else
# warning no parallel worker CPU binding support
#endif
#endif
}
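/* Give each worker a memory node and a binding id: the memory node is
 * registered on first use of the underlying device, and the binding id is the
 * PU the driver thread will later be bound to. */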
static void
_starpu_init_workers_binding (struct _starpu_machine_config *config, int no_mp_config STARPU_ATTRIBUTE_UNUSED)
{
	/* launch one thread per CPU */
	unsigned ram_memory_node;
	/* note that even if no CPU workers are used, we always have a RAM
	 * node */
	/* TODO: support NUMA ;) */
	ram_memory_node = _starpu_memory_node_register(STARPU_CPU_RAM, 0);
	STARPU_ASSERT(ram_memory_node == STARPU_MAIN_RAM);
#ifdef STARPU_SIMGRID
	char name[16];
	msg_host_t host = _starpu_simgrid_get_host_by_name("RAM");
	STARPU_ASSERT(host);
	_starpu_simgrid_memory_node_set_host(STARPU_MAIN_RAM, host);
#endif
	/* We will store all the busid of the different (src, dst)
	 * combinations in a matrix which we initialize here. */
	_starpu_initialize_busid_matrix();
	/* Each device is initialized,
	 * giving it a memory node and a core bind id.
	 */
	/* TODO: STARPU_MAXNUMANODES */
	unsigned numa_init[1] = { 1 };
	unsigned numa_memory_nodes[1] = { ram_memory_node };
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	unsigned cuda_init[STARPU_MAXCUDADEVS] = { };
	unsigned cuda_memory_nodes[STARPU_MAXCUDADEVS];
	unsigned cuda_bindid[STARPU_MAXCUDADEVS];
	int cuda_globalbindid = -1;
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	unsigned opencl_init[STARPU_MAXOPENCLDEVS] = { };
	unsigned opencl_memory_nodes[STARPU_MAXOPENCLDEVS];
	unsigned opencl_bindid[STARPU_MAXOPENCLDEVS];
#endif
#ifdef STARPU_USE_MIC
	unsigned mic_init[STARPU_MAXMICDEVS] = { };
	unsigned mic_memory_nodes[STARPU_MAXMICDEVS];
	unsigned mic_bindid[STARPU_MAXMICDEVS];
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
	unsigned mpi_init[STARPU_MAXMPIDEVS] = { };
	unsigned mpi_memory_nodes[STARPU_MAXMPIDEVS];
	unsigned mpi_bindid[STARPU_MAXMPIDEVS];
#endif
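	/* The *_init arrays above record which devices already had their memory
	 * node registered: the first worker of a device registers the node and
	 * picks a bindid, subsequent workers of the same device reuse them. */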
	unsigned bindid;
	for (bindid = 0; bindid < config->nbindid; bindid++)
	{
		free(config->bindid_workers[bindid].workerids);
		config->bindid_workers[bindid].workerids = NULL;
		config->bindid_workers[bindid].nworkers = 0;
	}
	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
		unsigned memory_node = -1;
		struct _starpu_worker *workerarg = &config->workers[worker];
		unsigned devid = workerarg->devid;
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) || defined(STARPU_USE_MIC) || defined(STARPU_SIMGRID) || defined(STARPU_USE_MPI_MASTER_SLAVE)
		/* Perhaps the worker has some "favourite" bindings */
		int *preferred_binding = NULL;
		int npreferred = 0;
#endif
		/* select the memory node that contains worker's memory */
		switch (workerarg->arch)
		{
			case STARPU_CPU_WORKER:
			{
				/* TODO: NUMA */
				int numaid = 0;
				/* "dedicate" a cpu core to that worker */
				if (numa_init[numaid])
				{
					memory_node = numa_memory_nodes[numaid];
				}
				else
				{
					numa_init[numaid] = 1;
					memory_node = numa_memory_nodes[numaid] = _starpu_memory_node_register(STARPU_CPU_RAM, numaid);
#ifdef STARPU_SIMGRID
					snprintf(name, sizeof(name), "RAM%d", numaid);
					host = _starpu_simgrid_get_host_by_name(name);
					STARPU_ASSERT(host);
					_starpu_simgrid_memory_node_set_host(memory_node, host);
#endif
				}
				workerarg->bindid = _starpu_get_next_bindid(config, NULL, 0);
				_starpu_memory_node_add_nworkers(memory_node);
				_starpu_worker_drives_memory_node(workerarg, STARPU_MAIN_RAM);
				if (memory_node != STARPU_MAIN_RAM)
					_starpu_worker_drives_memory_node(workerarg, memory_node);
				break;
			}
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
			case STARPU_CUDA_WORKER:
#ifndef STARPU_SIMGRID
				if (may_bind_automatically[STARPU_CUDA_WORKER])
				{
					/* StarPU is allowed to bind threads automatically */
					preferred_binding = _starpu_get_cuda_affinity_vector(devid);
					npreferred = config->topology.nhwpus;
				}
#endif /* SIMGRID */
				if (cuda_init[devid])
				{
					memory_node = cuda_memory_nodes[devid];
					if (config->topology.cuda_th_per_stream == 0)
						workerarg->bindid = cuda_bindid[devid];
					else
						workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
				}
				else
				{
					cuda_init[devid] = 1;
					if (config->topology.cuda_th_per_dev == 0 && config->topology.cuda_th_per_stream == 0)
					{
						if (cuda_globalbindid == -1)
							cuda_globalbindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
						workerarg->bindid = cuda_bindid[devid] = cuda_globalbindid;
					}
					else
						workerarg->bindid = cuda_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
					memory_node = cuda_memory_nodes[devid] = _starpu_memory_node_register(STARPU_CUDA_RAM, devid);
					_starpu_cuda_bus_ids[0][devid+1] = _starpu_register_bus(STARPU_MAIN_RAM, memory_node);
					_starpu_cuda_bus_ids[devid+1][0] = _starpu_register_bus(memory_node, STARPU_MAIN_RAM);
#ifdef STARPU_SIMGRID
					const char *cuda_memcpy_peer;
					snprintf(name, sizeof(name), "CUDA%d", devid);
					host = _starpu_simgrid_get_host_by_name(name);
					STARPU_ASSERT(host);
					_starpu_simgrid_memory_node_set_host(memory_node, host);
					cuda_memcpy_peer = MSG_host_get_property_value(host, "memcpy_peer");
#endif /* SIMGRID */
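					/* GPU-GPU buses are only registered when peer memory
					 * copies are usable (CUDA memcpy peer support, or the
					 * SimGrid platform advertising the memcpy_peer
					 * property). */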
					if (
#ifdef STARPU_SIMGRID
						cuda_memcpy_peer && atoll(cuda_memcpy_peer)
#elif defined(HAVE_CUDA_MEMCPY_PEER)
						1
#else /* MEMCPY_PEER */
						0
#endif /* MEMCPY_PEER */
					)
					{
						unsigned worker2;
						for (worker2 = 0; worker2 < worker; worker2++)
						{
							struct _starpu_worker *workerarg2 = &config->workers[worker2];
							int devid2 = workerarg2->devid;
							if (workerarg2->arch == STARPU_CUDA_WORKER)
							{
								unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
								_starpu_cuda_bus_ids[devid2][devid] = _starpu_register_bus(memory_node2, memory_node);
								_starpu_cuda_bus_ids[devid][devid2] = _starpu_register_bus(memory_node, memory_node2);
#ifndef STARPU_SIMGRID
#if defined(HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX) && HAVE_DECL_HWLOC_CUDA_GET_DEVICE_OSDEV_BY_INDEX
								{
									hwloc_obj_t obj, obj2, ancestor;
									obj = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, devid);
									obj2 = hwloc_cuda_get_device_osdev_by_index(config->topology.hwtopology, devid2);
									ancestor = hwloc_get_common_ancestor_obj(config->topology.hwtopology, obj, obj2);
									if (ancestor)
									{
										struct _starpu_hwloc_userdata *data = ancestor->userdata;
#ifdef STARPU_VERBOSE
										{
											char name[64];
											hwloc_obj_type_snprintf(name, sizeof(name), ancestor, 0);
											_STARPU_DEBUG("CUDA%u and CUDA%u are linked through %s, along %u GPUs\n", devid, devid2, name, data->ngpus);
										}
#endif
										starpu_bus_set_ngpus(_starpu_cuda_bus_ids[devid2][devid], data->ngpus);
										starpu_bus_set_ngpus(_starpu_cuda_bus_ids[devid][devid2], data->ngpus);
									}
								}
#endif
#endif
							}
						}
					}
				}
				_starpu_memory_node_add_nworkers(memory_node);
				_starpu_worker_drives_memory_node(&workerarg->set->workers[0], STARPU_MAIN_RAM);
				if (memory_node != STARPU_MAIN_RAM)
					_starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
				break;
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
			case STARPU_OPENCL_WORKER:
#ifndef STARPU_SIMGRID
				if (may_bind_automatically[STARPU_OPENCL_WORKER])
				{
					/* StarPU is allowed to bind threads automatically */
					preferred_binding = _starpu_get_opencl_affinity_vector(devid);
					npreferred = config->topology.nhwpus;
				}
#endif /* SIMGRID */
				if (opencl_init[devid])
				{
					memory_node = opencl_memory_nodes[devid];
#ifndef STARPU_SIMGRID
					workerarg->bindid = opencl_bindid[devid];
#endif /* SIMGRID */
				}
				else
				{
					opencl_init[devid] = 1;
					workerarg->bindid = opencl_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
					memory_node = opencl_memory_nodes[devid] = _starpu_memory_node_register(STARPU_OPENCL_RAM, devid);
					_starpu_register_bus(STARPU_MAIN_RAM, memory_node);
					_starpu_register_bus(memory_node, STARPU_MAIN_RAM);
#ifdef STARPU_SIMGRID
					snprintf(name, sizeof(name), "OpenCL%d", devid);
					host = _starpu_simgrid_get_host_by_name(name);
					STARPU_ASSERT(host);
					_starpu_simgrid_memory_node_set_host(memory_node, host);
#endif /* SIMGRID */
				}
				_starpu_memory_node_add_nworkers(memory_node);
				_starpu_worker_drives_memory_node(workerarg, STARPU_MAIN_RAM);
				if (memory_node != STARPU_MAIN_RAM)
					_starpu_worker_drives_memory_node(workerarg, memory_node);
				break;
#endif
#ifdef STARPU_USE_MIC
			case STARPU_MIC_WORKER:
				if (mic_init[devid])
				{
					memory_node = mic_memory_nodes[devid];
				}
				else
				{
					mic_init[devid] = 1;
					/* TODO */
					//if (may_bind_automatically)
					//{
					//	/* StarPU is allowed to bind threads automatically */
					//	preferred_binding = _starpu_get_mic_affinity_vector(devid);
					//	npreferred = config->topology.nhwpus;
					//}
					mic_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
					memory_node = mic_memory_nodes[devid] = _starpu_memory_node_register(STARPU_MIC_RAM, devid);
					_starpu_register_bus(STARPU_MAIN_RAM, memory_node);
					_starpu_register_bus(memory_node, STARPU_MAIN_RAM);
				}
				workerarg->bindid = mic_bindid[devid];
				_starpu_memory_node_add_nworkers(memory_node);
				_starpu_worker_drives_memory_node(&workerarg->set->workers[0], STARPU_MAIN_RAM);
				if (memory_node != STARPU_MAIN_RAM)
					_starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
				break;
#endif /* STARPU_USE_MIC */
#ifdef STARPU_USE_SCC
			case STARPU_SCC_WORKER:
			{
				/* Node 0 represents the SCC shared memory when we're on SCC. */
				struct _starpu_memory_node_descr *descr = _starpu_memory_node_get_description();
				descr->nodes[ram_memory_node] = STARPU_SCC_SHM;
				memory_node = ram_memory_node;
				_starpu_memory_node_add_nworkers(memory_node);
				_starpu_worker_drives_memory_node(workerarg, STARPU_MAIN_RAM);
				if (memory_node != STARPU_MAIN_RAM)
					_starpu_worker_drives_memory_node(workerarg, memory_node);
			}
			break;
#endif /* STARPU_USE_SCC */
#ifdef STARPU_USE_MPI_MASTER_SLAVE
			case STARPU_MPI_MS_WORKER:
			{
				if (mpi_init[devid])
				{
					memory_node = mpi_memory_nodes[devid];
				}
				else
				{
					mpi_init[devid] = 1;
					mpi_bindid[devid] = _starpu_get_next_bindid(config, preferred_binding, npreferred);
					memory_node = mpi_memory_nodes[devid] = _starpu_memory_node_register(STARPU_MPI_MS_RAM, devid);
					_starpu_register_bus(STARPU_MAIN_RAM, memory_node);
					_starpu_register_bus(memory_node, STARPU_MAIN_RAM);
				}
				_starpu_worker_drives_memory_node(&workerarg->set->workers[0], STARPU_MAIN_RAM);
				if (memory_node != STARPU_MAIN_RAM)
					_starpu_worker_drives_memory_node(&workerarg->set->workers[0], memory_node);
#ifndef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
				/* The single MPI driver thread manages all slave memory nodes when MPI multiple-thread support is disabled */
				unsigned findworker;
				for (findworker = 0; findworker < worker; findworker++)
				{
					struct _starpu_worker *findworkerarg = &config->workers[findworker];
					if (findworkerarg->arch == STARPU_MPI_MS_WORKER)
					{
						_starpu_worker_drives_memory_node(workerarg, findworkerarg->memory_node);
						_starpu_worker_drives_memory_node(findworkerarg, memory_node);
					}
				}
#endif
				workerarg->bindid = mpi_bindid[devid];
				_starpu_memory_node_add_nworkers(memory_node);
				break;
			}
#endif /* STARPU_USE_MPI_MASTER_SLAVE */
			default:
				STARPU_ABORT();
		}
		workerarg->memory_node = memory_node;
		_STARPU_DEBUG("worker %d type %d devid %d bound to cpu %d, STARPU memory node %d\n", worker, workerarg->arch, devid, workerarg->bindid, memory_node);
#ifdef __GLIBC__
		if (workerarg->bindid != -1)
		{
			/* Save the initial cpuset */
			CPU_ZERO(&workerarg->cpu_set);
			CPU_SET(workerarg->bindid, &workerarg->cpu_set);
		}
#endif /* __GLIBC__ */
#ifdef STARPU_HAVE_HWLOC
		if (workerarg->bindid == -1)
		{
			workerarg->hwloc_cpu_set = hwloc_bitmap_alloc();
		}
		else
		{
			/* Put the worker descriptor in the userdata field of the
			 * hwloc object describing the CPU */
			hwloc_obj_t worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
									config->pu_depth,
									workerarg->bindid);
			struct _starpu_hwloc_userdata *data = worker_obj->userdata;
			if (data->worker_list == NULL)
				data->worker_list = _starpu_worker_list_new();
			_starpu_worker_list_push_front(data->worker_list, workerarg);
			/* Take a copy of the cpuset of the PU this worker is bound to */
			workerarg->hwloc_cpu_set = hwloc_bitmap_dup (worker_obj->cpuset);
		}
#endif
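		/* Register the worker in the bindid -> workers table, growing the
		 * table geometrically when the bindid is beyond its current size. */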
		if (workerarg->bindid != -1)
		{
			bindid = workerarg->bindid;
			unsigned old_nbindid = config->nbindid;
			if (bindid >= old_nbindid)
			{
				/* More room needed */
				if (!old_nbindid)
					config->nbindid = STARPU_NMAXWORKERS;
				else
					config->nbindid = 2 * old_nbindid;
				_STARPU_REALLOC(config->bindid_workers, config->nbindid * sizeof(config->bindid_workers[0]));
				memset(&config->bindid_workers[old_nbindid], 0, (config->nbindid - old_nbindid) * sizeof(config->bindid_workers[0]));
			}
			/* Add a slot for this worker */
			/* Don't care about amortizing the cost, there are usually very few workers sharing the same bindid */
			config->bindid_workers[bindid].nworkers++;
			_STARPU_REALLOC(config->bindid_workers[bindid].workerids, config->bindid_workers[bindid].nworkers * sizeof(config->bindid_workers[bindid].workerids[0]));
			config->bindid_workers[bindid].workerids[config->bindid_workers[bindid].nworkers-1] = worker;
		}
	}
#ifdef STARPU_SIMGRID
	_starpu_simgrid_count_ngpus();
#else
#ifdef STARPU_HAVE_HWLOC
	_starpu_topology_count_ngpus(hwloc_get_root_obj(config->topology.hwtopology));
#endif
#endif
}
int
_starpu_build_topology (struct _starpu_machine_config *config, int no_mp_config)
{
	int ret;
	unsigned i;
	ret = _starpu_init_machine_config(config, no_mp_config);
	if (ret)
		return ret;
	/* for the data management library */
	_starpu_memory_nodes_init();
	_starpu_datastats_init();
	_starpu_init_workers_binding(config, no_mp_config);
	config->cpus_nodeid = -1;
	config->cuda_nodeid = -1;
	config->opencl_nodeid = -1;
	config->mic_nodeid = -1;
	config->scc_nodeid = -1;
	config->mpi_nodeid = -1;
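	/* Compute one memory node id per worker type: left at -1 if no worker of
	 * that type exists, set to -2 if its workers span several memory nodes. */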
	for (i = 0; i < starpu_worker_get_count(); i++)
	{
		switch (starpu_worker_get_type(i))
		{
			case STARPU_CPU_WORKER:
				if (config->cpus_nodeid == -1)
					config->cpus_nodeid = starpu_worker_get_memory_node(i);
				else if (config->cpus_nodeid != (int) starpu_worker_get_memory_node(i))
					config->cpus_nodeid = -2;
				break;
			case STARPU_CUDA_WORKER:
				if (config->cuda_nodeid == -1)
					config->cuda_nodeid = starpu_worker_get_memory_node(i);
				else if (config->cuda_nodeid != (int) starpu_worker_get_memory_node(i))
					config->cuda_nodeid = -2;
				break;
			case STARPU_OPENCL_WORKER:
				if (config->opencl_nodeid == -1)
					config->opencl_nodeid = starpu_worker_get_memory_node(i);
				else if (config->opencl_nodeid != (int) starpu_worker_get_memory_node(i))
					config->opencl_nodeid = -2;
				break;
			case STARPU_MIC_WORKER:
				if (config->mic_nodeid == -1)
					config->mic_nodeid = starpu_worker_get_memory_node(i);
				else if (config->mic_nodeid != (int) starpu_worker_get_memory_node(i))
					config->mic_nodeid = -2;
				break;
			case STARPU_SCC_WORKER:
				if (config->scc_nodeid == -1)
					config->scc_nodeid = starpu_worker_get_memory_node(i);
				else if (config->scc_nodeid != (int) starpu_worker_get_memory_node(i))
					config->scc_nodeid = -2;
				break;
			case STARPU_MPI_MS_WORKER:
				if (config->mpi_nodeid == -1)
					config->mpi_nodeid = starpu_worker_get_memory_node(i);
				else if (config->mpi_nodeid != (int) starpu_worker_get_memory_node(i))
					config->mpi_nodeid = -2;
				break;
			case STARPU_ANY_WORKER:
				STARPU_ASSERT(0);
		}
	}
	return 0;
}
void _starpu_destroy_topology(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED)
{
#if defined(STARPU_USE_MIC) || defined(STARPU_USE_MPI_MASTER_SLAVE)
	_starpu_deinit_mp_config(config);
#endif
	/* cleanup StarPU internal data structures */
	_starpu_memory_nodes_deinit();
	_starpu_destroy_machine_config(config);
}
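/* Print one line per hardware PU, listing the StarPU workers (and combined
 * workers) bound to it. */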
void
starpu_topology_print (FILE *output)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	struct _starpu_machine_topology *topology = &config->topology;
	unsigned pu;
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	unsigned ncombinedworkers = topology->ncombinedworkers;
	unsigned nthreads_per_core = topology->nhwpus / topology->nhwcpus;
	for (pu = 0; pu < topology->nhwpus; pu++)
	{
		if ((pu % nthreads_per_core) == 0)
			fprintf(output, "core %u", pu / nthreads_per_core);
		fprintf(output, "\tPU %u\t", pu);
		for (worker = 0;
		     worker < nworkers + ncombinedworkers;
		     worker++)
		{
			if (worker < nworkers)
			{
				struct _starpu_worker *workerarg = &config->workers[worker];
				if (workerarg->bindid == (int) pu)
				{
					char name[256];
					starpu_worker_get_name (worker, name,
								sizeof(name));
					fprintf(output, "%s\t", name);
				}
			}
			else
			{
				int worker_size, i;
				int *combined_workerid;
				starpu_combined_worker_get_description(worker, &worker_size, &combined_workerid);
				for (i = 0; i < worker_size; i++)
				{
					if (topology->workers_bindid[combined_workerid[i]] == pu)
						fprintf(output, "comb %u\t", worker - nworkers);
				}
			}
		}
		fprintf(output, "\n");
	}
}