topology.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012 Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
 * Copyright (C) 2011 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h> /* memcpy */
#include <common/config.h>
#include <core/workers.h>
#include <core/debug.h>
#include <core/topology.h>
#include <drivers/cuda/driver_cuda.h>
#include <drivers/opencl/driver_opencl.h>
#include <profiling/profiling.h>
#include <common/uthash.h>

#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#ifndef HWLOC_API_VERSION
#define HWLOC_OBJ_PU HWLOC_OBJ_PROC
#endif
#endif

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif
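
/* Set once the machine topology has been discovered by
 * _starpu_init_topology(); reset by _starpu_destroy_topology(). */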
static unsigned topology_is_initialized = 0;

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
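
/* Hash table entry (uthash), keyed by GPU device id. Used to remember which
 * GPU devices have already been assigned to a worker. */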
struct handle_entry
{
	UT_hash_handle hh;
	unsigned gpuid;
};

# ifdef STARPU_USE_CUDA
/* Entry in the `devices_using_cuda' hash table. */
static struct handle_entry *devices_using_cuda;
# endif
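
/* Set when neither the user nor the environment specified which GPUs the
 * workers should use, in which case StarPU may also pick the CPU binding of
 * the corresponding workers from its own affinity information. */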
static unsigned may_bind_automatically = 0;

#endif // defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)

/*
 * Discover the topology of the machine
 */

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
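/* Fill workers_gpuid[] with the GPU identifiers the successive workers should
 * use, taken either from the varname environment variable, from the explicit
 * list given in starpu_conf, or from a round-robin distribution over the
 * nhwgpus devices detected on the machine. */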
static void
_starpu_initialize_workers_gpuid (int *explicit_workers_gpuid,
				  int *current, int *workers_gpuid,
				  const char *varname, unsigned nhwgpus)
{
	char *strval;
	unsigned i;

	*current = 0;

	/* workers_gpuid indicates the successive GPU identifiers that should
	 * be used by the workers. It is either filled according to the
	 * user's explicit parameters (from starpu_conf) or according to the
	 * varname env. variable. Otherwise, a round-robin policy is used to
	 * distribute the workers over the GPUs. */

	/* what do we use, explicit value, env. variable, or round-robin ? */
	if ((strval = getenv(varname)))
	{
		/* The env. variable certainly contains fewer entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round
		 * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
		 * 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;

		char *endptr;
		/* we use the content of the env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap)
			{
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					workers_gpuid[i] = (unsigned)val;
					strval = endptr;
				}
				else
				{
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;

					/* there are no more values in the
					 * string */
					wrap = 1;

					workers_gpuid[i] = workers_gpuid[0];
				}
			}
			else
			{
				workers_gpuid[i] =
					workers_gpuid[i % number_of_entries];
			}
		}
	}
	else if (explicit_workers_gpuid)
	{
		/* we use the explicit value from the user */
		memcpy(workers_gpuid,
		       explicit_workers_gpuid,
		       STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else
	{
		/* by default, we take a round robin policy */
		if (nhwgpus > 0)
			for (i = 0; i < STARPU_NMAXWORKERS; i++)
				workers_gpuid[i] = (unsigned)(i % nhwgpus);

		/* StarPU can use sampling techniques to bind threads
		 * correctly */
		may_bind_automatically = 1;
	}
}
#endif

#ifdef STARPU_USE_CUDA
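/* Determine which CUDA devices the CUDA workers will drive, from the
 * STARPU_WORKERS_CUDAID environment variable, the starpu_conf fields, or a
 * round-robin distribution over the detected devices. */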
static void
_starpu_initialize_workers_cuda_gpuid (struct _starpu_machine_config *config)
{
	struct starpu_machine_topology *topology = &config->topology;
	struct starpu_conf *uconf = config->conf;

	_starpu_initialize_workers_gpuid (
		uconf->use_explicit_workers_cuda_gpuid == 0
		? NULL
		: (int *)uconf->workers_cuda_gpuid,
		&(config->current_cuda_gpuid),
		(int *)topology->workers_cuda_gpuid,
		"STARPU_WORKERS_CUDAID",
		topology->nhwcudagpus);
}
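
/* Return the CUDA device id that the next CUDA worker should drive. */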
static inline int
_starpu_get_next_cuda_gpuid (struct _starpu_machine_config *config)
{
	unsigned i =
		((config->current_cuda_gpuid++) % config->topology.ncudagpus);

	return (int)config->topology.workers_cuda_gpuid[i];
}
#endif

#ifdef STARPU_USE_OPENCL
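/* Determine which OpenCL devices the OpenCL workers will drive, then filter
 * out devices that are already driven through CUDA as well as duplicate
 * entries. */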
static void
_starpu_initialize_workers_opencl_gpuid (struct _starpu_machine_config *config)
{
	struct starpu_machine_topology *topology = &config->topology;
	struct starpu_conf *uconf = config->conf;

	_starpu_initialize_workers_gpuid(
		uconf->use_explicit_workers_opencl_gpuid == 0
		? NULL
		: (int *)uconf->workers_opencl_gpuid,
		&(config->current_opencl_gpuid),
		(int *)topology->workers_opencl_gpuid,
		"STARPU_WORKERS_OPENCLID",
		topology->nhwopenclgpus);

#ifdef STARPU_USE_CUDA
	// Detect devices which are already used with CUDA
	{
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;
		for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
		{
			struct handle_entry *entry;
			int devid = config->topology.workers_opencl_gpuid[i];

			HASH_FIND_INT(devices_using_cuda, &devid, entry);
			if (entry == NULL)
			{
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb++;
			}
		}
		for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
			tmp[i] = -1;
		memcpy (topology->workers_opencl_gpuid, tmp,
			sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
#endif /* STARPU_USE_CUDA */
	{
		// Detect identical devices
		struct handle_entry *devices_already_used = NULL;
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;

		for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
		{
			int devid = topology->workers_opencl_gpuid[i];
			struct handle_entry *entry;
			HASH_FIND_INT(devices_already_used, &devid, entry);
			if (entry == NULL)
			{
				struct handle_entry *entry2;
				entry2 = (struct handle_entry *) malloc(sizeof(*entry2));
				STARPU_ASSERT(entry2 != NULL);
				entry2->gpuid = devid;
				HASH_ADD_INT(devices_already_used, gpuid,
					     entry2);
				tmp[nb] = devid;
				nb ++;
			}
		}
		for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
			tmp[i] = -1;
		memcpy (topology->workers_opencl_gpuid, tmp,
			sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
}
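
/* Return the OpenCL device id that the next OpenCL worker should drive, or -1
 * if no device is left. */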
static inline int
_starpu_get_next_opencl_gpuid (struct _starpu_machine_config *config)
{
	unsigned i =
		((config->current_opencl_gpuid++) % config->topology.nopenclgpus);

	return (int)config->topology.workers_opencl_gpuid[i];
}
#endif

static void
_starpu_init_topology (struct _starpu_machine_config *config)
{
	/* Discover the topology, meaning finding all the available PUs for
	   the compiled drivers. These drivers MUST have been initialized
	   before calling this function. The discovered topology is filled in
	   CONFIG. */

	struct starpu_machine_topology *topology = &config->topology;

	if (topology_is_initialized)
		return;

	topology->nhwcpus = 0;

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&topology->hwtopology);
	hwloc_topology_load(topology->hwtopology);
#endif

	_starpu_cpu_discover_devices(config);
	_starpu_cuda_discover_devices(config);
	_starpu_opencl_discover_devices(config);

	topology_is_initialized = 1;
}

/*
 * Bind workers on the different processors
 */
static void
_starpu_initialize_workers_bindid (struct _starpu_machine_config *config)
{
	char *strval;
	unsigned i;

	struct starpu_machine_topology *topology = &config->topology;

	config->current_bindid = 0;

	/* conf->workers_bindid indicates the successive cpu identifiers that
	 * should be used to bind the workers. It should either be filled
	 * according to the user's explicit parameters (from starpu_conf) or
	 * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
	 * round-robin policy is used to distribute the workers over the
	 * cpus. */

	/* what do we use, explicit value, env. variable, or round-robin ? */
	if ((strval = getenv("STARPU_WORKERS_CPUID")))
	{
		/* STARPU_WORKERS_CPUID certainly contains fewer entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round
		 * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
		 * 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;

		char *endptr;
		/* we use the content of the STARPU_WORKERS_CPUID
		 * env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap)
			{
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					topology->workers_bindid[i] =
						(unsigned)(val % topology->nhwcpus);
					strval = endptr;
				}
				else
				{
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;

					/* there are no more values in the
					 * string */
					wrap = 1;

					topology->workers_bindid[i] =
						topology->workers_bindid[0];
				}
			}
			else
			{
				topology->workers_bindid[i] =
					topology->workers_bindid[i % number_of_entries];
			}
		}
	}
	else if (config->conf->use_explicit_workers_bindid)
	{
		/* we use the explicit value from the user */
		memcpy(topology->workers_bindid,
		       config->conf->workers_bindid,
		       STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else
	{
		/* by default, we take a round robin policy */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
			topology->workers_bindid[i] =
				(unsigned)(i % topology->nhwcpus);
	}
}

/* This function gets the identifier of the next cpu on which to bind a
 * worker. In case a list of preferred cpus was specified, we look for an
 * available cpu among the list if possible, otherwise a round-robin policy is
 * used. */
static inline int
_starpu_get_next_bindid (struct _starpu_machine_config *config,
			 int *preferred_binding, int npreferred)
{
	struct starpu_machine_topology *topology = &config->topology;

	unsigned found = 0;
	int current_preferred;

	for (current_preferred = 0;
	     current_preferred < npreferred;
	     current_preferred++)
	{
		if (found)
			break;

		unsigned requested_cpu = preferred_binding[current_preferred];

		/* can we bind the worker on the requested cpu ? */
		unsigned ind;
		for (ind = config->current_bindid;
		     ind < topology->nhwcpus;
		     ind++)
		{
			if (topology->workers_bindid[ind] == requested_cpu)
			{
				/* the cpu is available, we use it ! In order
				 * to make sure that it will not be used again
				 * later on, we remove the entry from the
				 * list */
				topology->workers_bindid[ind] =
					topology->workers_bindid[config->current_bindid];
				topology->workers_bindid[config->current_bindid] = requested_cpu;

				found = 1;

				break;
			}
		}
	}

	unsigned i = ((config->current_bindid++) % STARPU_NMAXWORKERS);

	return (int)topology->workers_bindid[i];
}
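
/* Return the number of hardware CPUs detected on the machine, making sure the
 * drivers and the topology have been initialized first. */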
unsigned
_starpu_topology_get_nhwcpu (struct _starpu_machine_config *config)
{
#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);

	return config->topology.nhwcpus;
}
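
/* Discover the devices available on the machine and fill the machine
 * configuration with one worker per CUDA device, OpenCL device, Gordon SPU
 * and CPU core that will be used. Returns -ENODEV if no worker could be
 * created. */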
static int
_starpu_init_machine_config (struct _starpu_machine_config *config)
{
	int i;
	for (i = 0; i < STARPU_NMAXWORKERS; i++)
		config->workers[i].workerid = i;

	struct starpu_machine_topology *topology = &config->topology;

	topology->nworkers = 0;
	topology->ncombinedworkers = 0;
	topology->nsched_ctxs = 0;

#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);

	_starpu_initialize_workers_bindid(config);

#ifdef STARPU_USE_CUDA
	int ncuda = config->conf->ncuda;

	if (ncuda != 0)
	{
		/* The user did not disable CUDA. We need to initialize CUDA
		 * early to count the number of devices */
		_starpu_init_cuda();
		int nb_devices = _starpu_get_cuda_device_count();

		if (ncuda == -1)
		{
			/* Nothing was specified, so let's choose ! */
			ncuda = nb_devices;
		}
		else
		{
			if (ncuda > nb_devices)
			{
				/* The user requires more CUDA devices than
				 * there is available */
				fprintf(stderr,
					"# Warning: %d CUDA devices "
					"requested. Only %d available.\n",
					ncuda, nb_devices);
				ncuda = nb_devices;
			}
		}
	}

	/* Now we know how many CUDA devices will be used */
	topology->ncudagpus = ncuda;
	STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);

	_starpu_initialize_workers_cuda_gpuid(config);

	unsigned cudagpu;
	for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
	{
		int worker_idx = topology->nworkers + cudagpu;

		config->workers[worker_idx].arch = STARPU_CUDA_WORKER;
		int devid = _starpu_get_next_cuda_gpuid(config);
		enum starpu_perf_archtype arch =
			(enum starpu_perf_archtype)((int)STARPU_CUDA_DEFAULT + devid);
		config->workers[worker_idx].devid = devid;
		config->workers[worker_idx].perf_arch = arch;
		config->workers[worker_idx].worker_mask = STARPU_CUDA;
		_starpu_init_sched_ctx_for_worker(config->workers[topology->nworkers + cudagpu].workerid);
		config->worker_mask |= STARPU_CUDA;

		struct handle_entry *entry;
		entry = (struct handle_entry *) malloc(sizeof(*entry));
		STARPU_ASSERT(entry != NULL);
		entry->gpuid = devid;
		HASH_ADD_INT(devices_using_cuda, gpuid, entry);
	}

	topology->nworkers += topology->ncudagpus;
#endif

#ifdef STARPU_USE_OPENCL
	int nopencl = config->conf->nopencl;

	if (nopencl != 0)
	{
		/* The user did not disable OPENCL. We need to initialize
		 * OpenCL early to count the number of devices */
		_starpu_opencl_init();
		int nb_devices;
		nb_devices = _starpu_opencl_get_device_count();

		if (nopencl == -1)
		{
			/* Nothing was specified, so let's choose ! */
			nopencl = nb_devices;
			if (nopencl > STARPU_MAXOPENCLDEVS)
			{
				fprintf(stderr,
					"# Warning: %d OpenCL devices "
					"available. Only %d enabled. "
					"Use configure option "
					"--enable-maxopencldev=xxx to "
					"update the maximum value of "
					"supported OpenCL devices.\n",
					nb_devices, STARPU_MAXOPENCLDEVS);
				nopencl = STARPU_MAXOPENCLDEVS;
			}
		}
		else
		{
			/* Let's make sure this value is OK. */
			if (nopencl > nb_devices)
			{
				/* The user requires more OpenCL devices than
				 * there is available */
				fprintf(stderr,
					"# Warning: %d OpenCL devices "
					"requested. Only %d available.\n",
					nopencl, nb_devices);
				nopencl = nb_devices;
			}
			/* Let's make sure this value is OK. */
			if (nopencl > STARPU_MAXOPENCLDEVS)
			{
				fprintf(stderr,
					"# Warning: %d OpenCL devices "
					"requested. Only %d enabled. Use "
					"configure option "
					"--enable-maxopencldev=xxx to update "
					"the maximum value of supported "
					"OpenCL devices.\n",
					nopencl, STARPU_MAXOPENCLDEVS);
				nopencl = STARPU_MAXOPENCLDEVS;
			}
		}
	}

	topology->nopenclgpus = nopencl;
	STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);

	_starpu_initialize_workers_opencl_gpuid(config);

	unsigned openclgpu;
	for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
	{
		int worker_idx = topology->nworkers + openclgpu;
		int devid = _starpu_get_next_opencl_gpuid(config);

		if (devid == -1)
		{ // There are no more devices left
			topology->nopenclgpus = openclgpu;
			break;
		}
		config->workers[worker_idx].arch = STARPU_OPENCL_WORKER;
		enum starpu_perf_archtype arch =
			(enum starpu_perf_archtype)((int)STARPU_OPENCL_DEFAULT + devid);
		config->workers[worker_idx].devid = devid;
		config->workers[worker_idx].perf_arch = arch;
		config->workers[worker_idx].worker_mask = STARPU_OPENCL;
		_starpu_init_sched_ctx_for_worker(config->workers[topology->nworkers + openclgpu].workerid);
		config->worker_mask |= STARPU_OPENCL;
	}

	topology->nworkers += topology->nopenclgpus;
#endif

#ifdef STARPU_USE_GORDON
	int ngordon = config->conf->ngordon;

	if (ngordon != 0)
	{
		if (ngordon == -1)
		{
			/* Nothing was specified, so let's choose ! */
			ngordon = spe_cpu_info_get(SPE_COUNT_USABLE_SPES, -1);
		}
		else
		{
			STARPU_ASSERT(ngordon <= NMAXGORDONSPUS);
			if (ngordon > STARPU_MAXGORDONSPUS)
			{
				fprintf(stderr,
					"# Warning: %d Gordon CPUs devices "
					"requested. Only %d supported\n",
					ngordon, NMAXGORDONSPUS);
				ngordon = NMAXGORDONSPUS;
			}
		}
	}

	topology->ngordon_spus = ngordon;
	STARPU_ASSERT(topology->ngordon_spus + topology->nworkers <= STARPU_NMAXWORKERS);

	unsigned spu;
	for (spu = 0; spu < topology->ngordon_spus; spu++)
	{
		int worker_idx = topology->nworkers + spu;
		config->workers[worker_idx].arch = STARPU_GORDON_WORKER;
		config->workers[worker_idx].perf_arch = STARPU_GORDON_DEFAULT;
		config->workers[worker_idx].id = spu;
		config->workers[worker_idx].worker_is_running = 0;
		config->workers[worker_idx].worker_mask = STARPU_GORDON;
		_starpu_init_sched_ctx_for_worker(config->workers[topology->nworkers + spu].workerid);
		config->worker_mask |= STARPU_GORDON;
	}

	topology->nworkers += topology->ngordon_spus;
#endif

	/* we put the CPU section after the accelerators : in case an
	 * accelerator was found, we devote one cpu to it */
#ifdef STARPU_USE_CPU
	int ncpu = config->conf->ncpus;

	if (ncpu != 0)
	{
		if (ncpu == -1)
		{
			unsigned already_busy_cpus =
				(topology->ngordon_spus ? 1 : 0) + topology->ncudagpus + topology->nopenclgpus;
			long avail_cpus = topology->nhwcpus - already_busy_cpus;
			if (avail_cpus < 0)
				avail_cpus = 0;
			ncpu = STARPU_MIN(avail_cpus, STARPU_MAXCPUS);
		}
		else
		{
			if (ncpu > STARPU_MAXCPUS)
			{
				fprintf(stderr,
					"# Warning: %d CPU devices requested."
					" Only %d enabled. Use configure "
					"option --enable-maxcpus=xxx to "
					"update the maximum value of "
					"supported CPU devices.\n",
					ncpu, STARPU_MAXCPUS);
				ncpu = STARPU_MAXCPUS;
			}
		}
	}

	topology->ncpus = ncpu;
	STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);

	unsigned cpu;
	for (cpu = 0; cpu < topology->ncpus; cpu++)
	{
		int worker_idx = topology->nworkers + cpu;
		config->workers[worker_idx].arch = STARPU_CPU_WORKER;
		config->workers[worker_idx].perf_arch = STARPU_CPU_DEFAULT;
		config->workers[worker_idx].devid = cpu;
		config->workers[worker_idx].worker_mask = STARPU_CPU;
		config->worker_mask |= STARPU_CPU;
		_starpu_init_sched_ctx_for_worker(config->workers[topology->nworkers + cpu].workerid);
	}

	topology->nworkers += topology->ncpus;
#endif

	if (topology->nworkers == 0)
	{
		_STARPU_DEBUG("No worker found, aborting ...\n");
		return -ENODEV;
	}

	return 0;
}
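
/* Bind the calling thread on the CPU identified by cpuid, using hwloc,
 * pthread_setaffinity_np or the Windows API depending on what is available.
 * Binding can be disabled with the STARPU_WORKERS_NOBIND environment
 * variable. */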
void
_starpu_bind_thread_on_cpu (
	struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED,
	unsigned cpuid)
{
	if (starpu_get_env_number("STARPU_WORKERS_NOBIND") > 0)
		return;
#ifdef STARPU_HAVE_HWLOC
	const struct hwloc_topology_support *support;

#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);

	support = hwloc_topology_get_support (config->topology.hwtopology);
	if (support->cpubind->set_thisthread_cpubind)
	{
		hwloc_obj_t obj =
			hwloc_get_obj_by_depth (config->topology.hwtopology,
						config->cpu_depth, cpuid);
		hwloc_bitmap_t set = obj->cpuset;
		int ret;

		hwloc_bitmap_singlify(set);
		ret = hwloc_set_cpubind (config->topology.hwtopology, set,
					 HWLOC_CPUBIND_THREAD);
		if (ret)
		{
			perror("binding thread");
			STARPU_ABORT();
		}
	}

#elif defined(HAVE_PTHREAD_SETAFFINITY_NP) && defined(__linux__)
	int ret;
	/* fix the thread on the correct cpu */
	cpu_set_t aff_mask;
	CPU_ZERO(&aff_mask);
	CPU_SET(cpuid, &aff_mask);

	pthread_t self = pthread_self();

	ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
	if (ret)
	{
		perror("binding thread");
		STARPU_ABORT();
	}

#elif defined(__MINGW32__) || defined(__CYGWIN__)
	DWORD mask = 1 << cpuid;
	if (!SetThreadAffinityMask(GetCurrentThread(), mask))
	{
		fprintf(stderr, "SetThreadAffinityMask(%lx) failed\n", mask);
		STARPU_ABORT();
	}
#else
#warning no CPU binding support
#endif
}
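
/* Bind the calling thread on the set of CPUs covered by the given combined
 * worker (only supported when hwloc is available). */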
void
_starpu_bind_thread_on_cpus (
	struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED,
	struct _starpu_combined_worker *combined_worker)
{
#ifdef STARPU_HAVE_HWLOC
	const struct hwloc_topology_support *support;

#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);

	support = hwloc_topology_get_support(config->topology.hwtopology);
	if (support->cpubind->set_thisthread_cpubind)
	{
		hwloc_bitmap_t set = combined_worker->hwloc_cpu_set;
		int ret;

		ret = hwloc_set_cpubind (config->topology.hwtopology, set,
					 HWLOC_CPUBIND_THREAD);
		if (ret)
		{
			perror("binding thread");
			STARPU_ABORT();
		}
	}
#else
#warning no parallel worker CPU binding support
#endif
}
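
/* For each worker, choose the CPU it will be bound to and the memory node it
 * will work on, registering the corresponding memory nodes and buses along
 * the way. */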
static void
_starpu_init_workers_binding (struct _starpu_machine_config *config)
{
	/* launch one thread per CPU */
	unsigned ram_memory_node;

	/* a single cpu is dedicated for the accelerators */
	int accelerator_bindid = -1;

	/* note that even if the CPU workers are not used, we always have a RAM
	 * node */
	/* TODO : support NUMA ;) */
	ram_memory_node = _starpu_register_memory_node(STARPU_CPU_RAM, -1);

	/* We will store all the busid of the different (src, dst)
	 * combinations in a matrix which we initialize here. */
	_starpu_initialize_busid_matrix();

	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
		unsigned memory_node = -1;
		unsigned is_a_set_of_accelerators = 0;
		struct _starpu_worker *workerarg = &config->workers[worker];

		/* Perhaps the worker has some "favourite" bindings */
		int *preferred_binding = NULL;
		int npreferred = 0;

		/* select the memory node that contains worker's memory */
		switch (workerarg->arch)
		{
			case STARPU_CPU_WORKER:
				/* "dedicate" a cpu core to that worker */
				is_a_set_of_accelerators = 0;
				memory_node = ram_memory_node;
				_starpu_memory_node_worker_add(ram_memory_node);
				break;
#ifdef STARPU_USE_GORDON
			case STARPU_GORDON_WORKER:
				is_a_set_of_accelerators = 1;
				memory_node = ram_memory_node;
				_starpu_memory_node_worker_add(ram_memory_node);
				break;
#endif
#ifdef STARPU_USE_CUDA
			case STARPU_CUDA_WORKER:
				if (may_bind_automatically)
				{
					/* StarPU is allowed to bind threads automatically */
					preferred_binding = _starpu_get_cuda_affinity_vector(workerarg->devid);
					npreferred = config->topology.nhwcpus;
				}
				is_a_set_of_accelerators = 0;
				memory_node = _starpu_register_memory_node(STARPU_CUDA_RAM, workerarg->devid);
				_starpu_memory_node_worker_add(memory_node);

				_starpu_register_bus(0, memory_node);
				_starpu_register_bus(memory_node, 0);
#ifdef HAVE_CUDA_MEMCPY_PEER
				unsigned worker2;
				for (worker2 = 0; worker2 < worker; worker2++)
				{
					struct _starpu_worker *workerarg2 = &config->workers[worker2];
					if (workerarg2->arch == STARPU_CUDA_WORKER)
					{
						unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
						_starpu_register_bus(memory_node2, memory_node);
						_starpu_register_bus(memory_node, memory_node2);
					}
				}
#endif
				break;
#endif
#ifdef STARPU_USE_OPENCL
			case STARPU_OPENCL_WORKER:
				if (may_bind_automatically)
				{
					/* StarPU is allowed to bind threads automatically */
					preferred_binding = _starpu_get_opencl_affinity_vector(workerarg->devid);
					npreferred = config->topology.nhwcpus;
				}
				is_a_set_of_accelerators = 0;
				memory_node = _starpu_register_memory_node(STARPU_OPENCL_RAM, workerarg->devid);
				_starpu_memory_node_worker_add(memory_node);
				_starpu_register_bus(0, memory_node);
				_starpu_register_bus(memory_node, 0);
				break;
#endif
			default:
				STARPU_ABORT();
		}

		if (is_a_set_of_accelerators)
		{
			if (accelerator_bindid == -1)
				accelerator_bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);

			workerarg->bindid = accelerator_bindid;
		}
		else
		{
			workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
		}

		workerarg->memory_node = memory_node;

#ifdef __GLIBC__
		/* Save the initial cpuset */
		CPU_ZERO(&workerarg->initial_cpu_set);
		CPU_SET(workerarg->bindid, &workerarg->initial_cpu_set);
		CPU_ZERO(&workerarg->current_cpu_set);
		CPU_SET(workerarg->bindid, &workerarg->current_cpu_set);
#endif /* __GLIBC__ */

#ifdef STARPU_HAVE_HWLOC
		/* Put the worker descriptor in the userdata field of the
		 * hwloc object describing the CPU */
		hwloc_obj_t worker_obj;
		worker_obj =
			hwloc_get_obj_by_depth (config->topology.hwtopology,
						config->cpu_depth,
						workerarg->bindid);
		worker_obj->userdata = &config->workers[worker];

		/* Clear the cpu set and set the cpu */
		workerarg->initial_hwloc_cpu_set =
			hwloc_bitmap_dup (worker_obj->cpuset);
		workerarg->current_hwloc_cpu_set =
			hwloc_bitmap_dup (worker_obj->cpuset);
#endif
	}
}
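
/* Detect the machine configuration, initialize the memory nodes and choose
 * the binding and memory node of each worker. Returns 0 on success, -ENODEV
 * if no worker could be created. */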
int
_starpu_build_topology (struct _starpu_machine_config *config)
{
	int ret;

	ret = _starpu_init_machine_config(config);
	if (ret)
		return ret;

	/* for the data management library */
	_starpu_init_memory_nodes();

	_starpu_init_workers_binding(config);

	return 0;
}
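
/* Release the resources allocated for the topology: memory nodes, hwloc data
 * and the hash table of CUDA devices in use. */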
void
_starpu_destroy_topology (
	struct _starpu_machine_config *config __attribute__ ((unused)))
{
	/* cleanup StarPU internal data structures */
	_starpu_deinit_memory_nodes();

	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
#ifdef STARPU_HAVE_HWLOC
		struct _starpu_worker *workerarg = &config->workers[worker];
		hwloc_bitmap_free(workerarg->initial_hwloc_cpu_set);
		hwloc_bitmap_free(workerarg->current_hwloc_cpu_set);
#endif
	}

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(config->topology.hwtopology);
#endif

	topology_is_initialized = 0;
#ifdef STARPU_USE_CUDA
	struct handle_entry *entry, *tmp;
	HASH_ITER(hh, devices_using_cuda, entry, tmp)
	{
		HASH_DEL(devices_using_cuda, entry);
		free(entry);
	}
	devices_using_cuda = NULL;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	may_bind_automatically = 0;
#endif
}
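
/* Print, for each hardware CPU, the (possibly combined) workers bound to it. */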
void
starpu_topology_print (FILE *output)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	struct starpu_machine_topology *topology = &config->topology;

	unsigned core;
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	unsigned ncombinedworkers = topology->ncombinedworkers;

	for (core = 0; core < topology->nhwcpus; core++)
	{
		fprintf(output, "core %u\t", core);
		for (worker = 0;
		     worker < nworkers + ncombinedworkers;
		     worker++)
		{
			if (worker < nworkers)
			{
				if (topology->workers_bindid[worker] == core)
				{
					char name[256];
					starpu_worker_get_name (worker, name,
								sizeof(name));
					fprintf(output, "%s\t", name);
				}
			}
			else
			{
				int worker_size, i;
				int *combined_workerid;
				starpu_combined_worker_get_description(worker, &worker_size, &combined_workerid);
				for (i = 0; i < worker_size; i++)
				{
					if (topology->workers_bindid[combined_workerid[i]] == core)
						fprintf(output, "comb %u\t", worker-nworkers);
				}
			}
		}
		fprintf(output, "\n");
	}
  901. }