topology.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009, 2010-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
#include <stdlib.h>
#include <stdio.h>
#include <common/config.h>
#include <core/workers.h>
#include <core/debug.h>
#include <core/topology.h>
#include <drivers/cuda/driver_cuda.h>
#include <starpu_hash.h>
#include <profiling/profiling.h>

#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#ifndef HWLOC_API_VERSION
#define HWLOC_OBJ_PU HWLOC_OBJ_PROC
#endif
#endif

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#ifndef HWLOC_BITMAP_H
/* hwloc <1.1 does not offer the bitmap API yet */
#define hwloc_bitmap_alloc hwloc_cpuset_alloc
#define hwloc_bitmap_dup hwloc_cpuset_dup
#define hwloc_bitmap_free hwloc_cpuset_free
#define hwloc_bitmap_only hwloc_cpuset_cpu
#define hwloc_bitmap_singlify hwloc_cpuset_singlify
#endif
  41. static unsigned topology_is_initialized = 0;
  42. static void _starpu_initialize_workers_bindid(struct _starpu_machine_config *config);
  43. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  44. # ifdef STARPU_USE_CUDA
  45. static void _starpu_initialize_workers_cuda_gpuid(struct _starpu_machine_config *config);
  46. static struct starpu_htbl32_node *devices_using_cuda = NULL;
  47. # endif
  48. # ifdef STARPU_USE_OPENCL
  49. static void _starpu_initialize_workers_opencl_gpuid(struct _starpu_machine_config *config);
  50. # endif
  51. static void _starpu_initialize_workers_gpuid(int use_explicit_workers_gpuid, int *explicit_workers_gpuid,
  52. int *current, int *workers_gpuid, const char *varname, unsigned nhwgpus);
  53. static unsigned may_bind_automatically = 0;
  54. #endif
  55. /*
  56. * Discover the topology of the machine
  57. */
  58. #ifdef STARPU_USE_CUDA
  59. static void _starpu_initialize_workers_cuda_gpuid(struct _starpu_machine_config *config)
  60. {
  61. struct starpu_machine_topology *topology = &config->topology;
  62. _starpu_initialize_workers_gpuid(config->user_conf==NULL?0:config->user_conf->use_explicit_workers_cuda_gpuid,
  63. config->user_conf==NULL?NULL:(int *)config->user_conf->workers_cuda_gpuid,
  64. &(config->current_cuda_gpuid), (int *)topology->workers_cuda_gpuid, "STARPU_WORKERS_CUDAID",
  65. topology->nhwcudagpus);
  66. }
  67. #endif
  68. #ifdef STARPU_USE_OPENCL
/* Fill the OpenCL device-id table of the topology, then filter it:
 * devices already driven by CUDA workers are removed, and duplicate
 * entries are compacted so each device id appears at most once.
 * Freed slots at the tail are padded with -1 ("no device"). */
static void _starpu_initialize_workers_opencl_gpuid(struct _starpu_machine_config *config)
{
	struct starpu_machine_topology *topology = &config->topology;

	/* Source priority: explicit user list, STARPU_WORKERS_OPENCLID,
	 * or round-robin over the detected devices. */
	_starpu_initialize_workers_gpuid(config->user_conf==NULL?0:config->user_conf->use_explicit_workers_opencl_gpuid,
		config->user_conf==NULL?NULL:(int *)config->user_conf->workers_opencl_gpuid,
		&(config->current_opencl_gpuid), (int *)topology->workers_opencl_gpuid, "STARPU_WORKERS_OPENCLID",
		topology->nhwopenclgpus);

#ifdef STARPU_USE_CUDA
	// Detect devices which are already used with CUDA
	{
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;
		for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
		{
			/* devices_using_cuda was populated when the CUDA workers
			 * were created (see _starpu_init_machine_config); keep
			 * only ids that CUDA did not claim.
			 * NOTE(review): this assumes CUDA and OpenCL enumerate
			 * the same physical devices under the same ids -- TODO
			 * confirm. */
			uint32_t key = starpu_crc32_be(config->topology.workers_opencl_gpuid[i], 0);
			if (_starpu_htbl_search_32(devices_using_cuda, key) == NULL)
			{
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb++;
			}
		}
		/* pad the freed tail slots with -1 ("no device") */
		for(i=nb ; i<STARPU_NMAXWORKERS ; i++) tmp[i] = -1;
		memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
#endif /* STARPU_USE_CUDA */
	{
		// Detect identical devices
		/* Keep only the first occurrence of each device id; the hash
		 * table just records which keys were seen. */
		struct starpu_htbl32_node *devices_already_used = NULL;
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;

		for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
		{
			uint32_t key = starpu_crc32_be(topology->workers_opencl_gpuid[i], 0);
			if (_starpu_htbl_search_32(devices_already_used, key) == NULL)
			{
				/* the stored value is irrelevant, only presence matters */
				_starpu_htbl_insert_32(&devices_already_used, key, config);
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb ++;
			}
		}
		for(i=nb ; i<STARPU_NMAXWORKERS ; i++) tmp[i] = -1;
		memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
}
  115. #endif
  116. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  117. static void _starpu_initialize_workers_gpuid(int use_explicit_workers_gpuid, int *explicit_workers_gpuid,
  118. int *current, int *workers_gpuid, const char *varname, unsigned nhwgpus)
  119. {
  120. char *strval;
  121. unsigned i;
  122. *current = 0;
  123. /* conf->workers_bindid indicates the successive cpu identifier that
  124. * should be used to bind the workers. It should be either filled
  125. * according to the user's explicit parameters (from starpu_conf) or
  126. * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
  127. * round-robin policy is used to distributed the workers over the
  128. * cpus. */
  129. /* what do we use, explicit value, env. variable, or round-robin ? */
  130. if (use_explicit_workers_gpuid)
  131. {
  132. /* we use the explicit value from the user */
  133. memcpy(workers_gpuid,
  134. explicit_workers_gpuid,
  135. STARPU_NMAXWORKERS*sizeof(unsigned));
  136. }
  137. else if ((strval = getenv(varname)))
  138. {
  139. /* STARPU_WORKERS_CUDAID certainly contains less entries than
  140. * STARPU_NMAXWORKERS, so we reuse its entries in a round robin
  141. * fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1 2". */
  142. unsigned wrap = 0;
  143. unsigned number_of_entries = 0;
  144. char *endptr;
  145. /* we use the content of the STARPU_WORKERS_CUDAID env. variable */
  146. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  147. {
  148. if (!wrap)
  149. {
  150. long int val;
  151. val = strtol(strval, &endptr, 10);
  152. if (endptr != strval)
  153. {
  154. workers_gpuid[i] = (unsigned)val;
  155. strval = endptr;
  156. }
  157. else
  158. {
  159. /* there must be at least one entry */
  160. STARPU_ASSERT(i != 0);
  161. number_of_entries = i;
  162. /* there is no more values in the string */
  163. wrap = 1;
  164. workers_gpuid[i] = workers_gpuid[0];
  165. }
  166. }
  167. else
  168. {
  169. workers_gpuid[i] = workers_gpuid[i % number_of_entries];
  170. }
  171. }
  172. }
  173. else
  174. {
  175. /* by default, we take a round robin policy */
  176. if (nhwgpus > 0)
  177. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  178. workers_gpuid[i] = (unsigned)(i % nhwgpus);
  179. /* StarPU can use sampling techniques to bind threads correctly */
  180. may_bind_automatically = 1;
  181. }
  182. }
  183. #endif
  184. #ifdef STARPU_USE_CUDA
  185. static inline int _starpu_get_next_cuda_gpuid(struct _starpu_machine_config *config)
  186. {
  187. unsigned i = ((config->current_cuda_gpuid++) % config->topology.ncudagpus);
  188. return (int)config->topology.workers_cuda_gpuid[i];
  189. }
  190. #endif
  191. #ifdef STARPU_USE_OPENCL
  192. static inline int _starpu_get_next_opencl_gpuid(struct _starpu_machine_config *config)
  193. {
  194. unsigned i = ((config->current_opencl_gpuid++) % config->topology.nopenclgpus);
  195. return (int)config->topology.workers_opencl_gpuid[i];
  196. }
  197. #endif
/* Discover the hardware once: count CPU cores (hwloc, Windows, sysconf,
 * or a last-resort constant of 1) and, when enabled, CUDA/OpenCL devices.
 * The result is cached behind the file-scope topology_is_initialized
 * flag, so repeated calls are cheap no-ops. */
static void _starpu_init_topology(struct _starpu_machine_config *config)
{
	struct starpu_machine_topology *topology = &config->topology;

	if (!topology_is_initialized)
	{
		topology->nhwcpus = 0;
#ifdef STARPU_HAVE_HWLOC
		/* preferred path: count physical cores via hwloc */
		hwloc_topology_init(&topology->hwtopology);
		hwloc_topology_load(topology->hwtopology);
		config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_CORE);

		/* Would be very odd */
		STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);

		if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
			/* unknown, using logical processors as fallback */
			config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_PU);

		topology->nhwcpus = hwloc_get_nbobjs_by_depth(topology->hwtopology, config->cpu_depth);
#elif defined(__MINGW32__) || defined(__CYGWIN__)
		/* Windows fallback */
		SYSTEM_INFO sysinfo;
		GetSystemInfo(&sysinfo);
		topology->nhwcpus += sysinfo.dwNumberOfProcessors;
#elif defined(HAVE_SYSCONF)
		/* POSIX fallback: processors currently online */
		topology->nhwcpus = sysconf(_SC_NPROCESSORS_ONLN);
#else
#warning no way to know number of cores, assuming 1
		topology->nhwcpus = 1;
#endif

#ifdef STARPU_USE_CUDA
		config->topology.nhwcudagpus = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
		config->topology.nhwopenclgpus = _starpu_opencl_get_device_count();
#endif
		topology_is_initialized = 1;
	}
}
  233. unsigned _starpu_topology_get_nhwcpu(struct _starpu_machine_config *config)
  234. {
  235. _starpu_init_topology(config);
  236. return config->topology.nhwcpus;
  237. }
  238. static int _starpu_init_machine_config(struct _starpu_machine_config *config,
  239. struct starpu_conf *user_conf)
  240. {
  241. int explicitval STARPU_ATTRIBUTE_UNUSED;
  242. unsigned use_accelerator = 0;
  243. int i;
  244. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  245. config->workers[i].workerid = i;
  246. struct starpu_machine_topology *topology = &config->topology;
  247. topology->nworkers = 0;
  248. topology->ncombinedworkers = 0;
  249. _starpu_init_topology(config);
  250. _starpu_initialize_workers_bindid(config);
  251. #ifdef STARPU_USE_CUDA
  252. if (user_conf && (user_conf->ncuda == 0))
  253. {
  254. /* the user explicitely disabled CUDA */
  255. topology->ncudagpus = 0;
  256. }
  257. else
  258. {
  259. /* we need to initialize CUDA early to count the number of devices */
  260. _starpu_init_cuda();
  261. if (user_conf && (user_conf->ncuda != -1))
  262. {
  263. explicitval = user_conf->ncuda;
  264. }
  265. else
  266. {
  267. explicitval = starpu_get_env_number("STARPU_NCUDA");
  268. }
  269. if (explicitval < 0)
  270. {
  271. config->topology.ncudagpus =
  272. STARPU_MIN(_starpu_get_cuda_device_count(), STARPU_MAXCUDADEVS);
  273. }
  274. else
  275. {
  276. /* use the specified value */
  277. if (explicitval > STARPU_MAXCUDADEVS)
  278. {
  279. fprintf(stderr,"# Warning: %d CUDA devices requested. Only %d enabled. Use configure option --enable-maxcudadev=xxx to update the maximum value of supported CUDA devices.\n", explicitval, STARPU_MAXCUDADEVS);
  280. explicitval = STARPU_MAXCUDADEVS;
  281. }
  282. topology->ncudagpus = (unsigned)explicitval;
  283. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  284. }
  285. STARPU_ASSERT(config->topology.ncudagpus + config->topology.nworkers <= STARPU_NMAXWORKERS);
  286. }
  287. if (topology->ncudagpus > 0)
  288. use_accelerator = 1;
  289. _starpu_initialize_workers_cuda_gpuid(config);
  290. unsigned cudagpu;
  291. for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
  292. {
  293. config->workers[topology->nworkers + cudagpu].arch = STARPU_CUDA_WORKER;
  294. int devid = _starpu_get_next_cuda_gpuid(config);
  295. enum starpu_perf_archtype arch = STARPU_CUDA_DEFAULT + devid;
  296. config->workers[topology->nworkers + cudagpu].devid = devid;
  297. config->workers[topology->nworkers + cudagpu].perf_arch = arch;
  298. config->workers[topology->nworkers + cudagpu].worker_mask = STARPU_CUDA;
  299. config->worker_mask |= STARPU_CUDA;
  300. uint32_t key = starpu_crc32_be(devid, 0);
  301. _starpu_htbl_insert_32(&devices_using_cuda, key, config);
  302. }
  303. topology->nworkers += topology->ncudagpus;
  304. #endif
  305. #ifdef STARPU_USE_OPENCL
  306. if (user_conf && (user_conf->nopencl == 0))
  307. {
  308. /* the user explicitely disabled OpenCL */
  309. topology->nopenclgpus = 0;
  310. }
  311. else
  312. {
  313. /* we need to initialize OpenCL early to count the number of devices */
  314. int nb_devices;
  315. _starpu_opencl_init();
  316. nb_devices = STARPU_MIN(_starpu_opencl_get_device_count(), STARPU_MAXOPENCLDEVS);
  317. if (user_conf && (user_conf->nopencl != -1))
  318. {
  319. explicitval = user_conf->nopencl;
  320. }
  321. else
  322. {
  323. explicitval = starpu_get_env_number("STARPU_NOPENCL");
  324. }
  325. if (explicitval < 0)
  326. {
  327. topology->nopenclgpus = nb_devices;
  328. }
  329. else
  330. {
  331. if (explicitval > nb_devices)
  332. {
  333. /* The user requires more OpenCL devices than there is available */
  334. topology->nopenclgpus = nb_devices;
  335. }
  336. else
  337. {
  338. /* use the specified value */
  339. topology->nopenclgpus = (unsigned)explicitval;
  340. }
  341. STARPU_ASSERT(topology->nopenclgpus <= STARPU_MAXOPENCLDEVS);
  342. }
  343. STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
  344. }
  345. if (topology->nopenclgpus > 0)
  346. use_accelerator = 1;
  347. // TODO: use_accelerator pour les OpenCL?
  348. _starpu_initialize_workers_opencl_gpuid(config);
  349. unsigned openclgpu;
  350. for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
  351. {
  352. int devid = _starpu_get_next_opencl_gpuid(config);
  353. if (devid == -1)
  354. { // There is no more devices left
  355. topology->nopenclgpus = openclgpu;
  356. break;
  357. }
  358. config->workers[topology->nworkers + openclgpu].arch = STARPU_OPENCL_WORKER;
  359. enum starpu_perf_archtype arch = STARPU_OPENCL_DEFAULT + devid;
  360. config->workers[topology->nworkers + openclgpu].devid = devid;
  361. config->workers[topology->nworkers + openclgpu].perf_arch = arch;
  362. config->workers[topology->nworkers + openclgpu].worker_mask = STARPU_OPENCL;
  363. config->worker_mask |= STARPU_OPENCL;
  364. }
  365. topology->nworkers += topology->nopenclgpus;
  366. #endif
  367. #ifdef STARPU_USE_GORDON
  368. if (user_conf && (user_conf->ncuda != -1))
  369. {
  370. explicitval = user_conf->ncuda;
  371. }
  372. else
  373. {
  374. explicitval = starpu_get_env_number("STARPU_NGORDON");
  375. }
  376. if (explicitval < 0)
  377. {
  378. topology->ngordon_spus = spe_cpu_info_get(SPE_COUNT_USABLE_SPES, -1);
  379. }
  380. else
  381. {
  382. /* use the specified value */
  383. topology->ngordon_spus = (unsigned)explicitval;
  384. STARPU_ASSERT(topology->ngordon_spus <= NMAXGORDONSPUS);
  385. }
  386. STARPU_ASSERT(topology->ngordon_spus + topology->nworkers <= STARPU_NMAXWORKERS);
  387. if (topology->ngordon_spus > 0)
  388. use_accelerator = 1;
  389. unsigned spu;
  390. for (spu = 0; spu < config->ngordon_spus; spu++)
  391. {
  392. config->workers[topology->nworkers + spu].arch = STARPU_GORDON_WORKER;
  393. config->workers[topology->nworkers + spu].perf_arch = STARPU_GORDON_DEFAULT;
  394. config->workers[topology->nworkers + spu].id = spu;
  395. config->workers[topology->nworkers + spu].worker_is_running = 0;
  396. config->workers[topology->nworkers + spu].worker_mask = STARPU_GORDON;
  397. config->worker_mask |= STARPU_GORDON;
  398. }
  399. topology->nworkers += topology->ngordon_spus;
  400. #endif
  401. /* we put the CPU section after the accelerator : in case there was an
  402. * accelerator found, we devote one cpu */
  403. #ifdef STARPU_USE_CPU
  404. if (user_conf && (user_conf->ncpus != -1))
  405. {
  406. explicitval = user_conf->ncpus;
  407. }
  408. else
  409. {
  410. explicitval = starpu_get_env_number("STARPU_NCPUS");
  411. }
  412. if (explicitval < 0)
  413. {
  414. unsigned already_busy_cpus = (topology->ngordon_spus?1:0) + topology->ncudagpus + topology->nopenclgpus;
  415. long avail_cpus = topology->nhwcpus - (use_accelerator?already_busy_cpus:0);
  416. if (avail_cpus < 0)
  417. avail_cpus = 0;
  418. topology->ncpus = STARPU_MIN(avail_cpus, STARPU_MAXCPUS);
  419. }
  420. else
  421. {
  422. /* use the specified value */
  423. topology->ncpus = (unsigned)explicitval;
  424. STARPU_ASSERT(topology->ncpus <= STARPU_MAXCPUS);
  425. }
  426. STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
  427. unsigned cpu;
  428. for (cpu = 0; cpu < topology->ncpus; cpu++)
  429. {
  430. config->workers[topology->nworkers + cpu].arch = STARPU_CPU_WORKER;
  431. config->workers[topology->nworkers + cpu].perf_arch = STARPU_CPU_DEFAULT;
  432. config->workers[topology->nworkers + cpu].devid = cpu;
  433. config->workers[topology->nworkers + cpu].worker_mask = STARPU_CPU;
  434. config->worker_mask |= STARPU_CPU;
  435. }
  436. topology->nworkers += topology->ncpus;
  437. #endif
  438. if (topology->nworkers == 0)
  439. {
  440. _STARPU_DEBUG("No worker found, aborting ...\n");
  441. return -ENODEV;
  442. }
  443. return 0;
  444. }
  445. /*
  446. * Bind workers on the different processors
  447. */
  448. static void _starpu_initialize_workers_bindid(struct _starpu_machine_config *config)
  449. {
  450. char *strval;
  451. unsigned i;
  452. struct starpu_machine_topology *topology = &config->topology;
  453. config->current_bindid = 0;
  454. /* conf->workers_bindid indicates the successive cpu identifier that
  455. * should be used to bind the workers. It should be either filled
  456. * according to the user's explicit parameters (from starpu_conf) or
  457. * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
  458. * round-robin policy is used to distributed the workers over the
  459. * cpus. */
  460. /* what do we use, explicit value, env. variable, or round-robin ? */
  461. if (config->user_conf && config->user_conf->use_explicit_workers_bindid)
  462. {
  463. /* we use the explicit value from the user */
  464. memcpy(topology->workers_bindid,
  465. config->user_conf->workers_bindid,
  466. STARPU_NMAXWORKERS*sizeof(unsigned));
  467. }
  468. else if ((strval = getenv("STARPU_WORKERS_CPUID")))
  469. {
  470. /* STARPU_WORKERS_CPUID certainly contains less entries than
  471. * STARPU_NMAXWORKERS, so we reuse its entries in a round robin
  472. * fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1 2". */
  473. unsigned wrap = 0;
  474. unsigned number_of_entries = 0;
  475. char *endptr;
  476. /* we use the content of the STARPU_WORKERS_CUDAID env. variable */
  477. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  478. {
  479. if (!wrap)
  480. {
  481. long int val;
  482. val = strtol(strval, &endptr, 10);
  483. if (endptr != strval)
  484. {
  485. topology->workers_bindid[i] = (unsigned)(val % topology->nhwcpus);
  486. strval = endptr;
  487. }
  488. else
  489. {
  490. /* there must be at least one entry */
  491. STARPU_ASSERT(i != 0);
  492. number_of_entries = i;
  493. /* there is no more values in the string */
  494. wrap = 1;
  495. topology->workers_bindid[i] = topology->workers_bindid[0];
  496. }
  497. }
  498. else
  499. {
  500. topology->workers_bindid[i] = topology->workers_bindid[i % number_of_entries];
  501. }
  502. }
  503. }
  504. else
  505. {
  506. /* by default, we take a round robin policy */
  507. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  508. topology->workers_bindid[i] = (unsigned)(i % topology->nhwcpus);
  509. }
  510. }
/* This function gets the identifier of the next cpu on which to bind a
 * worker. In case a list of preferred cpus was specified, we look for a an
 * available cpu among the list if possible, otherwise a round-robin policy is
 * used. */
static inline int _starpu_get_next_bindid(struct _starpu_machine_config *config,
				int *preferred_binding, int npreferred)
{
	struct starpu_machine_topology *topology = &config->topology;

	unsigned found = 0;
	int current_preferred;

	/* Try to honour one of the preferred cpus: if a not-yet-consumed
	 * slot of workers_bindid holds that cpu, swap it to the front
	 * (position current_bindid) so it is the slot consumed below. */
	for (current_preferred = 0; current_preferred < npreferred; current_preferred++)
	{
		if (found)
			break;

		unsigned requested_cpu = preferred_binding[current_preferred];

		/* can we bind the worker on the requested cpu ? */
		unsigned ind;
		/* NOTE(review): this scan stops at nhwcpus while the final
		 * index below wraps modulo STARPU_NMAXWORKERS -- the two
		 * bounds look inconsistent; confirm which is intended. */
		for (ind = config->current_bindid; ind < topology->nhwcpus; ind++)
		{
			if (topology->workers_bindid[ind] == requested_cpu)
			{
				/* the cpu is available, we use it ! In order
				 * to make sure that it will not be used again
				 * later on, we remove the entry from the list
				 * */
				topology->workers_bindid[ind] =
					topology->workers_bindid[config->current_bindid];
				topology->workers_bindid[config->current_bindid] = requested_cpu;

				found = 1;

				break;
			}
		}
	}

	/* consume the next slot in round-robin order */
	unsigned i = ((config->current_bindid++) % STARPU_NMAXWORKERS);
	return (int)topology->workers_bindid[i];
}
  547. void _starpu_bind_thread_on_cpu(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED, unsigned cpuid)
  548. {
  549. #ifdef STARPU_HAVE_HWLOC
  550. const struct hwloc_topology_support *support;
  551. _starpu_init_topology(config);
  552. support = hwloc_topology_get_support(config->topology.hwtopology);
  553. if (support->cpubind->set_thisthread_cpubind)
  554. {
  555. hwloc_obj_t obj = hwloc_get_obj_by_depth(config->topology.hwtopology, config->cpu_depth, cpuid);
  556. hwloc_cpuset_t set = obj->cpuset;
  557. int ret;
  558. hwloc_bitmap_singlify(set);
  559. ret = hwloc_set_cpubind(config->topology.hwtopology, set, HWLOC_CPUBIND_THREAD);
  560. if (ret)
  561. {
  562. perror("binding thread");
  563. STARPU_ABORT();
  564. }
  565. }
  566. #elif defined(HAVE_PTHREAD_SETAFFINITY_NP) && defined(__linux__)
  567. int ret;
  568. /* fix the thread on the correct cpu */
  569. cpu_set_t aff_mask;
  570. CPU_ZERO(&aff_mask);
  571. CPU_SET(cpuid, &aff_mask);
  572. pthread_t self = pthread_self();
  573. ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
  574. if (ret)
  575. {
  576. perror("binding thread");
  577. STARPU_ABORT();
  578. }
  579. #elif defined(__MINGW32__) || defined(__CYGWIN__)
  580. DWORD mask = 1 << cpuid;
  581. if (!SetThreadAffinityMask(GetCurrentThread(), mask))
  582. {
  583. fprintf(stderr,"SetThreadMaskAffinity(%lx) failed\n", mask);
  584. STARPU_ABORT();
  585. }
  586. #else
  587. #warning no CPU binding support
  588. #endif
  589. }
  590. static void _starpu_init_workers_binding(struct _starpu_machine_config *config)
  591. {
  592. /* launch one thread per CPU */
  593. unsigned ram_memory_node;
  594. /* a single cpu is dedicated for the accelerators */
  595. int accelerator_bindid = -1;
  596. /* note that even if the CPU cpu are not used, we always have a RAM node */
  597. /* TODO : support NUMA ;) */
  598. ram_memory_node = _starpu_register_memory_node(STARPU_CPU_RAM, -1);
  599. /* We will store all the busid of the different (src, dst) combinations
  600. * in a matrix which we initialize here. */
  601. _starpu_initialize_busid_matrix();
  602. unsigned worker;
  603. for (worker = 0; worker < config->topology.nworkers; worker++)
  604. {
  605. unsigned memory_node = -1;
  606. unsigned is_a_set_of_accelerators = 0;
  607. struct _starpu_worker *workerarg = &config->workers[worker];
  608. /* Perhaps the worker has some "favourite" bindings */
  609. int *preferred_binding = NULL;
  610. int npreferred = 0;
  611. /* select the memory node that contains worker's memory */
  612. switch (workerarg->arch)
  613. {
  614. case STARPU_CPU_WORKER:
  615. /* "dedicate" a cpu cpu to that worker */
  616. is_a_set_of_accelerators = 0;
  617. memory_node = ram_memory_node;
  618. _starpu_memory_node_worker_add(ram_memory_node);
  619. break;
  620. #ifdef STARPU_USE_GORDON
  621. case STARPU_GORDON_WORKER:
  622. is_a_set_of_accelerators = 1;
  623. memory_node = ram_memory_node;
  624. _starpu_memory_node_worker_add(ram_memory_node);
  625. break;
  626. #endif
  627. #ifdef STARPU_USE_CUDA
  628. case STARPU_CUDA_WORKER:
  629. if (may_bind_automatically)
  630. {
  631. /* StarPU is allowed to bind threads automatically */
  632. preferred_binding = _starpu_get_cuda_affinity_vector(workerarg->devid);
  633. npreferred = config->topology.nhwcpus;
  634. }
  635. is_a_set_of_accelerators = 0;
  636. memory_node = _starpu_register_memory_node(STARPU_CUDA_RAM, workerarg->devid);
  637. _starpu_memory_node_worker_add(memory_node);
  638. _starpu_register_bus(0, memory_node);
  639. _starpu_register_bus(memory_node, 0);
  640. #ifdef HAVE_CUDA_MEMCPY_PEER
  641. unsigned worker2;
  642. for (worker2 = 0; worker2 < worker; worker2++)
  643. {
  644. struct _starpu_worker *workerarg = &config->workers[worker];
  645. if (workerarg->arch == STARPU_CUDA_WORKER)
  646. {
  647. unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
  648. _starpu_register_bus(memory_node2, memory_node);
  649. _starpu_register_bus(memory_node, memory_node2);
  650. }
  651. }
  652. #endif
  653. break;
  654. #endif
  655. #ifdef STARPU_USE_OPENCL
  656. case STARPU_OPENCL_WORKER:
  657. if (may_bind_automatically)
  658. {
  659. /* StarPU is allowed to bind threads automatically */
  660. preferred_binding = _starpu_get_opencl_affinity_vector(workerarg->devid);
  661. npreferred = config->topology.nhwcpus;
  662. }
  663. is_a_set_of_accelerators = 0;
  664. memory_node = _starpu_register_memory_node(STARPU_OPENCL_RAM, workerarg->devid);
  665. _starpu_memory_node_worker_add(memory_node);
  666. _starpu_register_bus(0, memory_node);
  667. _starpu_register_bus(memory_node, 0);
  668. break;
  669. #endif
  670. default:
  671. STARPU_ABORT();
  672. }
  673. if (is_a_set_of_accelerators)
  674. {
  675. if (accelerator_bindid == -1)
  676. accelerator_bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  677. workerarg->bindid = accelerator_bindid;
  678. }
  679. else
  680. {
  681. workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  682. }
  683. workerarg->memory_node = memory_node;
  684. #ifdef __GLIBC__
  685. /* Save the initial cpuset */
  686. CPU_ZERO(&workerarg->initial_cpu_set);
  687. CPU_SET(workerarg->bindid, &workerarg->initial_cpu_set);
  688. CPU_ZERO(&workerarg->current_cpu_set);
  689. CPU_SET(workerarg->bindid, &workerarg->current_cpu_set);
  690. #endif /* __GLIBC__ */
  691. #ifdef STARPU_HAVE_HWLOC
  692. /* Clear the cpu set and set the cpu */
  693. workerarg->initial_hwloc_cpu_set = hwloc_bitmap_alloc();
  694. hwloc_bitmap_only(workerarg->initial_hwloc_cpu_set, workerarg->bindid);
  695. workerarg->current_hwloc_cpu_set = hwloc_bitmap_alloc();
  696. hwloc_bitmap_only(workerarg->current_hwloc_cpu_set, workerarg->bindid);
  697. /* Put the worker descriptor in the userdata field of the hwloc object describing the CPU */
  698. hwloc_obj_t worker_obj;
  699. worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
  700. config->cpu_depth, workerarg->bindid);
  701. worker_obj->userdata = &config->workers[worker];
  702. #endif
  703. }
  704. }
  705. int _starpu_build_topology(struct _starpu_machine_config *config)
  706. {
  707. int ret;
  708. struct starpu_conf *user_conf = config->user_conf;
  709. ret = _starpu_init_machine_config(config, user_conf);
  710. if (ret)
  711. return ret;
  712. /* for the data management library */
  713. _starpu_init_memory_nodes();
  714. _starpu_init_workers_binding(config);
  715. return 0;
  716. }
  717. void _starpu_destroy_topology(struct _starpu_machine_config *config __attribute__ ((unused)))
  718. {
  719. /* cleanup StarPU internal data structures */
  720. _starpu_deinit_memory_nodes();
  721. unsigned worker;
  722. for (worker = 0; worker < config->topology.nworkers; worker++)
  723. {
  724. #ifdef STARPU_HAVE_HWLOC
  725. struct _starpu_worker *workerarg = &config->workers[worker];
  726. hwloc_bitmap_free(workerarg->initial_hwloc_cpu_set);
  727. hwloc_bitmap_free(workerarg->current_hwloc_cpu_set);
  728. #endif
  729. }
  730. #ifdef STARPU_HAVE_HWLOC
  731. hwloc_topology_destroy(config->topology.hwtopology);
  732. #endif
  733. topology_is_initialized = 0;
  734. #ifdef STARPU_USE_CUDA
  735. devices_using_cuda = NULL;
  736. #endif
  737. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  738. may_bind_automatically = 0;
  739. #endif
  740. }