topology.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009, 2010-2011 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <common/config.h>
#include <core/workers.h>
#include <core/debug.h>
#include <core/topology.h>
#include <drivers/cuda/driver_cuda.h>
#include <common/hash.h>
#include <profiling/profiling.h>
#ifdef HAVE_SYSCONF
#include <unistd.h>
#endif
#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#ifndef HWLOC_API_VERSION
#define HWLOC_OBJ_PU HWLOC_OBJ_PROC
#endif
#endif
#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif
  35. #ifndef HWLOC_BITMAP_H
  36. /* hwloc <1.1 does not offer the bitmap API yet */
  37. #define hwloc_bitmap_alloc hwloc_cpuset_alloc
  38. #define hwloc_bitmap_only hwloc_cpuset_cpu
  39. #define hwloc_bitmap_singlify hwloc_cpuset_singlify
  40. #endif
  41. static unsigned topology_is_initialized = 0;
  42. static void _starpu_initialize_workers_bindid(struct starpu_machine_config_s *config);
  43. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  44. # ifdef STARPU_USE_CUDA
  45. static void _starpu_initialize_workers_cuda_gpuid(struct starpu_machine_config_s *config);
  46. static struct starpu_htbl32_node *devices_using_cuda = NULL;
  47. # endif
  48. # ifdef STARPU_USE_OPENCL
  49. static void _starpu_initialize_workers_opencl_gpuid(struct starpu_machine_config_s *config);
  50. # endif
  51. static void _starpu_initialize_workers_gpuid(int use_explicit_workers_gpuid, int *explicit_workers_gpuid,
  52. int *current, int *workers_gpuid, const char *varname, unsigned nhwgpus);
  53. static unsigned may_bind_automatically = 0;
  54. #endif
  55. /*
  56. * Discover the topology of the machine
  57. */
  58. #ifdef STARPU_USE_CUDA
  59. static void _starpu_initialize_workers_cuda_gpuid(struct starpu_machine_config_s *config)
  60. {
  61. struct starpu_machine_topology_s *topology = &config->topology;
  62. _starpu_initialize_workers_gpuid(config->user_conf==NULL?0:config->user_conf->use_explicit_workers_cuda_gpuid,
  63. config->user_conf==NULL?NULL:(int *)config->user_conf->workers_cuda_gpuid,
  64. &(config->current_cuda_gpuid), (int *)topology->workers_cuda_gpuid, "STARPU_WORKERS_CUDAID",
  65. topology->nhwcudagpus);
  66. }
  67. #endif
#ifdef STARPU_USE_OPENCL
/* Fill topology->workers_opencl_gpuid from the user's explicit list, the
 * STARPU_WORKERS_OPENCLID environment variable, or round-robin; then filter
 * out devices already claimed by CUDA, and finally remove duplicate entries.
 * Removed slots are filled with (unsigned)-1 sentinels at the tail. */
static void _starpu_initialize_workers_opencl_gpuid(struct starpu_machine_config_s *config)
{
	struct starpu_machine_topology_s *topology = &config->topology;
	_starpu_initialize_workers_gpuid(config->user_conf==NULL?0:config->user_conf->use_explicit_workers_opencl_gpuid,
		config->user_conf==NULL?NULL:(int *)config->user_conf->workers_opencl_gpuid,
		&(config->current_opencl_gpuid), (int *)topology->workers_opencl_gpuid, "STARPU_WORKERS_OPENCLID",
		topology->nhwopenclgpus);
#ifdef STARPU_USE_CUDA
	// Detect devices which are already used with CUDA
	{
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;
		for(i=0 ; i<STARPU_NMAXWORKERS ; i++) {
			/* devices_using_cuda was populated by the CUDA section of
			 * _starpu_init_machine_config; keep only ids absent from it */
			uint32_t key = _starpu_crc32_be(config->topology.workers_opencl_gpuid[i], 0);
			if (_starpu_htbl_search_32(devices_using_cuda, key) == NULL) {
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb++;
			}
		}
		/* pad the tail with -1 sentinels (wraps to UINT_MAX in unsigned) */
		for(i=nb ; i<STARPU_NMAXWORKERS ; i++) tmp[i] = -1;
		memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
#endif /* STARPU_USE_CUDA */
	{
		// Detect identical devices
		/* NOTE(review): devices_already_used nodes are never freed — no
		 * htbl-free API is visible from here; confirm whether this leaks. */
		struct starpu_htbl32_node *devices_already_used = NULL;
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;
		for(i=0 ; i<STARPU_NMAXWORKERS ; i++) {
			uint32_t key = _starpu_crc32_be(topology->workers_opencl_gpuid[i], 0);
			if (_starpu_htbl_search_32(devices_already_used, key) == NULL) {
				/* first occurrence: remember it and keep the entry */
				_starpu_htbl_insert_32(&devices_already_used, key, config);
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb ++;
			}
		}
		for(i=nb ; i<STARPU_NMAXWORKERS ; i++) tmp[i] = -1;
		memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
}
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
/* Fill workers_gpuid[0..STARPU_NMAXWORKERS-1] with the GPU id each worker
 * slot should use, and reset *current (the round-robin cursor read by the
 * _starpu_get_next_*_gpuid helpers) to 0.
 *
 * The list is taken, in order of priority, from:
 *  - the user's explicit list (use_explicit_workers_gpuid != 0),
 *  - the environment variable named by varname (e.g. STARPU_WORKERS_CUDAID),
 *  - a round-robin distribution over the nhwgpus detected devices. */
static void _starpu_initialize_workers_gpuid(int use_explicit_workers_gpuid, int *explicit_workers_gpuid,
				int *current, int *workers_gpuid, const char *varname, unsigned nhwgpus)
{
	char *strval;
	unsigned i;
	*current = 0;
	/* what do we use, explicit value, env. variable, or round-robin ? */
	if (use_explicit_workers_gpuid)
	{
		/* we use the explicit value from the user */
		memcpy(workers_gpuid,
			explicit_workers_gpuid,
			STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else if ((strval = getenv(varname)))
	{
		/* The variable certainly contains less entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round robin
		 * fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;
		char *endptr;
		/* we use the content of the env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap) {
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					/* one more value parsed; advance past it */
					workers_gpuid[i] = (unsigned)val;
					strval = endptr;
				}
				else {
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;
					/* there are no more values in the string:
					 * wrap around to the start of the list */
					wrap = 1;
					workers_gpuid[i] = workers_gpuid[0];
				}
			}
			else {
				workers_gpuid[i] = workers_gpuid[i % number_of_entries];
			}
		}
	}
	else
	{
		/* by default, we take a round robin policy */
		if (nhwgpus > 0)
			for (i = 0; i < STARPU_NMAXWORKERS; i++)
				workers_gpuid[i] = (unsigned)(i % nhwgpus);
		/* StarPU can use sampling techniques to bind threads correctly */
		may_bind_automatically = 1;
	}
}
#endif
  177. #ifdef STARPU_USE_CUDA
  178. static inline int _starpu_get_next_cuda_gpuid(struct starpu_machine_config_s *config)
  179. {
  180. unsigned i = ((config->current_cuda_gpuid++) % config->topology.ncudagpus);
  181. return (int)config->topology.workers_cuda_gpuid[i];
  182. }
  183. #endif
  184. #ifdef STARPU_USE_OPENCL
  185. static inline int _starpu_get_next_opencl_gpuid(struct starpu_machine_config_s *config)
  186. {
  187. unsigned i = ((config->current_opencl_gpuid++) % config->topology.nopenclgpus);
  188. return (int)config->topology.workers_opencl_gpuid[i];
  189. }
  190. #endif
/* Discover the hardware once (guarded by topology_is_initialized): count the
 * CPUs via hwloc, the Win32 API or sysconf depending on the platform, and the
 * CUDA/OpenCL devices when those drivers are compiled in.  Safe to call
 * repeatedly; only the first call does work. */
static void _starpu_init_topology(struct starpu_machine_config_s *config)
{
	struct starpu_machine_topology_s *topology = &config->topology;
	if (!topology_is_initialized)
	{
		topology->nhwcpus = 0;
#ifdef STARPU_HAVE_HWLOC
		hwloc_topology_init(&topology->hwtopology);
		hwloc_topology_load(topology->hwtopology);
		/* count full cores, not hyperthreaded logical PUs */
		config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_CORE);
		/* Would be very odd */
		STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);
		if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
			/* unknown, using logical processors as fallback */
			config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_PU);
		topology->nhwcpus = hwloc_get_nbobjs_by_depth(topology->hwtopology, config->cpu_depth);
#elif defined(__MINGW32__) || defined(__CYGWIN__)
		SYSTEM_INFO sysinfo;
		GetSystemInfo(&sysinfo);
		topology->nhwcpus += sysinfo.dwNumberOfProcessors;
#elif defined(HAVE_SYSCONF)
		topology->nhwcpus = sysconf(_SC_NPROCESSORS_ONLN);
#else
#warning no way to know number of cores, assuming 1
		topology->nhwcpus = 1;
#endif
#ifdef STARPU_USE_CUDA
		config->topology.nhwcudagpus = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
		config->topology.nhwopenclgpus = _starpu_opencl_get_device_count();
#endif
		topology_is_initialized = 1;
	}
}
  226. unsigned _starpu_topology_get_nhwcpu(struct starpu_machine_config_s *config)
  227. {
  228. _starpu_init_topology(config);
  229. return config->topology.nhwcpus;
  230. }
  231. static int _starpu_init_machine_config(struct starpu_machine_config_s *config,
  232. struct starpu_conf *user_conf)
  233. {
  234. int explicitval STARPU_ATTRIBUTE_UNUSED;
  235. unsigned use_accelerator = 0;
  236. int i;
  237. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  238. config->workers[i].workerid = i;
  239. struct starpu_machine_topology_s *topology = &config->topology;
  240. topology->nworkers = 0;
  241. topology->ncombinedworkers = 0;
  242. _starpu_init_topology(config);
  243. _starpu_initialize_workers_bindid(config);
  244. #ifdef STARPU_USE_CUDA
  245. if (user_conf && (user_conf->ncuda == 0))
  246. {
  247. /* the user explicitely disabled CUDA */
  248. topology->ncudagpus = 0;
  249. }
  250. else {
  251. /* we need to initialize CUDA early to count the number of devices */
  252. _starpu_init_cuda();
  253. if (user_conf && (user_conf->ncuda != -1))
  254. {
  255. explicitval = user_conf->ncuda;
  256. }
  257. else {
  258. explicitval = starpu_get_env_number("STARPU_NCUDA");
  259. }
  260. if (explicitval < 0) {
  261. config->topology.ncudagpus =
  262. STARPU_MIN(_starpu_get_cuda_device_count(), STARPU_MAXCUDADEVS);
  263. } else {
  264. /* use the specified value */
  265. topology->ncudagpus = (unsigned)explicitval;
  266. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  267. }
  268. STARPU_ASSERT(config->topology.ncudagpus + config->topology.nworkers <= STARPU_NMAXWORKERS);
  269. }
  270. if (topology->ncudagpus > 0)
  271. use_accelerator = 1;
  272. _starpu_initialize_workers_cuda_gpuid(config);
  273. unsigned cudagpu;
  274. for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
  275. {
  276. config->workers[topology->nworkers + cudagpu].arch = STARPU_CUDA_WORKER;
  277. int devid = _starpu_get_next_cuda_gpuid(config);
  278. enum starpu_perf_archtype arch = STARPU_CUDA_DEFAULT + devid;
  279. config->workers[topology->nworkers + cudagpu].devid = devid;
  280. config->workers[topology->nworkers + cudagpu].perf_arch = arch;
  281. config->workers[topology->nworkers + cudagpu].worker_mask = STARPU_CUDA;
  282. config->worker_mask |= STARPU_CUDA;
  283. uint32_t key = _starpu_crc32_be(devid, 0);
  284. _starpu_htbl_insert_32(&devices_using_cuda, key, config);
  285. }
  286. topology->nworkers += topology->ncudagpus;
  287. #endif
  288. #ifdef STARPU_USE_OPENCL
  289. if (user_conf && (user_conf->nopencl == 0))
  290. {
  291. /* the user explicitely disabled OpenCL */
  292. topology->nopenclgpus = 0;
  293. }
  294. else {
  295. /* we need to initialize OpenCL early to count the number of devices */
  296. int nb_devices;
  297. _starpu_opencl_init();
  298. nb_devices = STARPU_MIN(_starpu_opencl_get_device_count(), STARPU_MAXOPENCLDEVS);
  299. if (user_conf && (user_conf->nopencl != -1))
  300. {
  301. explicitval = user_conf->nopencl;
  302. }
  303. else {
  304. explicitval = starpu_get_env_number("STARPU_NOPENCL");
  305. }
  306. if (explicitval < 0) {
  307. topology->nopenclgpus = nb_devices;
  308. }
  309. else {
  310. if (explicitval > nb_devices) {
  311. /* The user requires more OpenCL devices than there is available */
  312. topology->nopenclgpus = nb_devices;
  313. }
  314. else {
  315. /* use the specified value */
  316. topology->nopenclgpus = (unsigned)explicitval;
  317. }
  318. STARPU_ASSERT(topology->nopenclgpus <= STARPU_MAXOPENCLDEVS);
  319. }
  320. STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
  321. }
  322. if (topology->nopenclgpus > 0)
  323. use_accelerator = 1;
  324. // TODO: use_accelerator pour les OpenCL?
  325. _starpu_initialize_workers_opencl_gpuid(config);
  326. unsigned openclgpu;
  327. for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
  328. {
  329. int devid = _starpu_get_next_opencl_gpuid(config);
  330. if (devid == -1) { // There is no more devices left
  331. topology->nopenclgpus = openclgpu;
  332. break;
  333. }
  334. config->workers[topology->nworkers + openclgpu].arch = STARPU_OPENCL_WORKER;
  335. enum starpu_perf_archtype arch = STARPU_OPENCL_DEFAULT + devid;
  336. config->workers[topology->nworkers + openclgpu].devid = devid;
  337. config->workers[topology->nworkers + openclgpu].perf_arch = arch;
  338. config->workers[topology->nworkers + openclgpu].worker_mask = STARPU_OPENCL;
  339. config->worker_mask |= STARPU_OPENCL;
  340. }
  341. topology->nworkers += topology->nopenclgpus;
  342. #endif
  343. #ifdef STARPU_USE_GORDON
  344. if (user_conf && (user_conf->ncuda != -1)) {
  345. explicitval = user_conf->ncuda;
  346. }
  347. else {
  348. explicitval = starpu_get_env_number("STARPU_NGORDON");
  349. }
  350. if (explicitval < 0) {
  351. topology->ngordon_spus = spe_cpu_info_get(SPE_COUNT_USABLE_SPES, -1);
  352. } else {
  353. /* use the specified value */
  354. topology->ngordon_spus = (unsigned)explicitval;
  355. STARPU_ASSERT(topology->ngordon_spus <= NMAXGORDONSPUS);
  356. }
  357. STARPU_ASSERT(topology->ngordon_spus + topology->nworkers <= STARPU_NMAXWORKERS);
  358. if (topology->ngordon_spus > 0)
  359. use_accelerator = 1;
  360. unsigned spu;
  361. for (spu = 0; spu < config->ngordon_spus; spu++)
  362. {
  363. config->workers[topology->nworkers + spu].arch = STARPU_GORDON_WORKER;
  364. config->workers[topology->nworkers + spu].perf_arch = STARPU_GORDON_DEFAULT;
  365. config->workers[topology->nworkers + spu].id = spu;
  366. config->workers[topology->nworkers + spu].worker_is_running = 0;
  367. config->workers[topology->nworkers + spu].worker_mask = STARPU_GORDON;
  368. config->worker_mask |= STARPU_GORDON;
  369. }
  370. topology->nworkers += topology->ngordon_spus;
  371. #endif
  372. /* we put the CPU section after the accelerator : in case there was an
  373. * accelerator found, we devote one cpu */
  374. #ifdef STARPU_USE_CPU
  375. if (user_conf && (user_conf->ncpus != -1)) {
  376. explicitval = user_conf->ncpus;
  377. }
  378. else {
  379. explicitval = starpu_get_env_number("STARPU_NCPUS");
  380. }
  381. if (explicitval < 0) {
  382. unsigned already_busy_cpus = (topology->ngordon_spus?1:0) + topology->ncudagpus + topology->nopenclgpus;
  383. long avail_cpus = topology->nhwcpus - (use_accelerator?already_busy_cpus:0);
  384. if (avail_cpus < 0)
  385. avail_cpus = 0;
  386. topology->ncpus = STARPU_MIN(avail_cpus, STARPU_MAXCPUS);
  387. } else {
  388. /* use the specified value */
  389. topology->ncpus = (unsigned)explicitval;
  390. STARPU_ASSERT(topology->ncpus <= STARPU_MAXCPUS);
  391. }
  392. STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
  393. unsigned cpu;
  394. for (cpu = 0; cpu < topology->ncpus; cpu++)
  395. {
  396. config->workers[topology->nworkers + cpu].arch = STARPU_CPU_WORKER;
  397. config->workers[topology->nworkers + cpu].perf_arch = STARPU_CPU_DEFAULT;
  398. config->workers[topology->nworkers + cpu].devid = cpu;
  399. config->workers[topology->nworkers + cpu].worker_mask = STARPU_CPU;
  400. config->worker_mask |= STARPU_CPU;
  401. }
  402. topology->nworkers += topology->ncpus;
  403. #endif
  404. if (topology->nworkers == 0)
  405. {
  406. _STARPU_DEBUG("No worker found, aborting ...\n");
  407. return -ENODEV;
  408. }
  409. return 0;
  410. }
  411. /*
  412. * Bind workers on the different processors
  413. */
/* Fill topology->workers_bindid with the successive CPU identifiers workers
 * should be bound to, and reset config->current_bindid.  The list comes, in
 * order of priority, from the user's explicit parameters (starpu_conf), the
 * STARPU_WORKERS_CPUID environment variable, or a round-robin distribution
 * over the detected CPUs. */
static void _starpu_initialize_workers_bindid(struct starpu_machine_config_s *config)
{
	char *strval;
	unsigned i;
	struct starpu_machine_topology_s *topology = &config->topology;
	config->current_bindid = 0;
	/* what do we use, explicit value, env. variable, or round-robin ? */
	if (config->user_conf && config->user_conf->use_explicit_workers_bindid)
	{
		/* we use the explicit value from the user */
		memcpy(topology->workers_bindid,
			config->user_conf->workers_bindid,
			STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else if ((strval = getenv("STARPU_WORKERS_CPUID")))
	{
		/* STARPU_WORKERS_CPUID certainly contains less entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round robin
		 * fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;
		char *endptr;
		/* we use the content of the STARPU_WORKERS_CPUID env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap) {
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					/* clamp into the valid CPU range */
					topology->workers_bindid[i] = (unsigned)(val % topology->nhwcpus);
					strval = endptr;
				}
				else {
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;
					/* there are no more values in the string:
					 * wrap around to the start of the list */
					wrap = 1;
					topology->workers_bindid[i] = topology->workers_bindid[0];
				}
			}
			else {
				topology->workers_bindid[i] = topology->workers_bindid[i % number_of_entries];
			}
		}
	}
	else
	{
		/* by default, we take a round robin policy */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
			topology->workers_bindid[i] = (unsigned)(i % topology->nhwcpus);
	}
}
  474. /* This function gets the identifier of the next cpu on which to bind a
  475. * worker. In case a list of preferred cpus was specified, we look for a an
  476. * available cpu among the list if possible, otherwise a round-robin policy is
  477. * used. */
/* Get the identifier of the next cpu on which to bind a worker.  If a list
 * of npreferred cpus is given, try to pick an unused entry of
 * workers_bindid matching one of them; otherwise plain round-robin.
 * Consumed entries are swapped to the front of workers_bindid so they are
 * not handed out twice. */
static inline int _starpu_get_next_bindid(struct starpu_machine_config_s *config,
				int *preferred_binding, int npreferred)
{
	struct starpu_machine_topology_s *topology = &config->topology;
	unsigned found = 0;
	int current_preferred;
	for (current_preferred = 0; current_preferred < npreferred; current_preferred++)
	{
		if (found)
			break;
		unsigned requested_cpu = preferred_binding[current_preferred];
		/* can we bind the worker on the requested cpu ?  Only the
		 * entries from current_bindid onward are still available. */
		unsigned ind;
		for (ind = config->current_bindid; ind < topology->nhwcpus; ind++)
		{
			if (topology->workers_bindid[ind] == requested_cpu)
			{
				/* the cpu is available, we use it ! In order
				 * to make sure that it will not be used again
				 * later on, we remove the entry from the list
				 * */
				topology->workers_bindid[ind] =
					topology->workers_bindid[config->current_bindid];
				topology->workers_bindid[config->current_bindid] = requested_cpu;
				found = 1;
				break;
			}
		}
	}
	/* either the swapped-in preferred cpu, or plain round-robin */
	unsigned i = ((config->current_bindid++) % STARPU_NMAXWORKERS);
	return (int)topology->workers_bindid[i];
}
  510. void _starpu_bind_thread_on_cpu(struct starpu_machine_config_s *config STARPU_ATTRIBUTE_UNUSED, unsigned cpuid)
  511. {
  512. #ifdef STARPU_HAVE_HWLOC
  513. int ret;
  514. _starpu_init_topology(config);
  515. hwloc_obj_t obj = hwloc_get_obj_by_depth(config->topology.hwtopology, config->cpu_depth, cpuid);
  516. hwloc_cpuset_t set = obj->cpuset;
  517. hwloc_bitmap_singlify(set);
  518. ret = hwloc_set_cpubind(config->topology.hwtopology, set, HWLOC_CPUBIND_THREAD);
  519. if (ret)
  520. {
  521. perror("binding thread");
  522. STARPU_ABORT();
  523. }
  524. #elif defined(HAVE_PTHREAD_SETAFFINITY_NP)
  525. int ret;
  526. /* fix the thread on the correct cpu */
  527. cpu_set_t aff_mask;
  528. CPU_ZERO(&aff_mask);
  529. CPU_SET(cpuid, &aff_mask);
  530. pthread_t self = pthread_self();
  531. ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
  532. if (ret)
  533. {
  534. perror("binding thread");
  535. STARPU_ABORT();
  536. }
  537. #elif defined(__MINGW32__) || defined(__CYGWIN__)
  538. DWORD mask = 1 << cpuid;
  539. if (!SetThreadAffinityMask(GetCurrentThread(), mask)) {
  540. fprintf(stderr,"SetThreadMaskAffinity(%lx) failed\n", mask);
  541. STARPU_ABORT();
  542. }
  543. #else
  544. #warning no CPU binding support
  545. #endif
  546. }
  547. static void _starpu_init_workers_binding(struct starpu_machine_config_s *config)
  548. {
  549. /* launch one thread per CPU */
  550. unsigned ram_memory_node;
  551. /* a single cpu is dedicated for the accelerators */
  552. int accelerator_bindid = -1;
  553. /* note that even if the CPU cpu are not used, we always have a RAM node */
  554. /* TODO : support NUMA ;) */
  555. ram_memory_node = _starpu_register_memory_node(STARPU_CPU_RAM, -1);
  556. /* We will store all the busid of the different (src, dst) combinations
  557. * in a matrix which we initialize here. */
  558. _starpu_initialize_busid_matrix();
  559. unsigned worker;
  560. for (worker = 0; worker < config->topology.nworkers; worker++)
  561. {
  562. unsigned memory_node = -1;
  563. unsigned is_a_set_of_accelerators = 0;
  564. struct starpu_worker_s *workerarg = &config->workers[worker];
  565. /* Perhaps the worker has some "favourite" bindings */
  566. int *preferred_binding = NULL;
  567. int npreferred = 0;
  568. /* select the memory node that contains worker's memory */
  569. switch (workerarg->arch) {
  570. case STARPU_CPU_WORKER:
  571. /* "dedicate" a cpu cpu to that worker */
  572. is_a_set_of_accelerators = 0;
  573. memory_node = ram_memory_node;
  574. _starpu_memory_node_worker_add(ram_memory_node);
  575. break;
  576. #ifdef STARPU_USE_GORDON
  577. case STARPU_GORDON_WORKER:
  578. is_a_set_of_accelerators = 1;
  579. memory_node = ram_memory_node;
  580. _starpu_memory_node_worker_add(ram_memory_node);
  581. break;
  582. #endif
  583. #ifdef STARPU_USE_CUDA
  584. case STARPU_CUDA_WORKER:
  585. if (may_bind_automatically)
  586. {
  587. /* StarPU is allowed to bind threads automatically */
  588. preferred_binding = _starpu_get_cuda_affinity_vector(workerarg->devid);
  589. npreferred = config->topology.nhwcpus;
  590. }
  591. is_a_set_of_accelerators = 0;
  592. memory_node = _starpu_register_memory_node(STARPU_CUDA_RAM, workerarg->devid);
  593. _starpu_memory_node_worker_add(memory_node);
  594. _starpu_register_bus(0, memory_node);
  595. _starpu_register_bus(memory_node, 0);
  596. #ifdef HAVE_CUDA_MEMCPY_PEER
  597. unsigned worker2;
  598. for (worker2 = 0; worker2 < worker; worker2++)
  599. {
  600. struct starpu_worker_s *workerarg = &config->workers[worker];
  601. if (workerarg->arch == STARPU_CUDA_WORKER) {
  602. unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
  603. _starpu_register_bus(memory_node2, memory_node);
  604. _starpu_register_bus(memory_node, memory_node2);
  605. }
  606. }
  607. #endif
  608. break;
  609. #endif
  610. #ifdef STARPU_USE_OPENCL
  611. case STARPU_OPENCL_WORKER:
  612. if (may_bind_automatically)
  613. {
  614. /* StarPU is allowed to bind threads automatically */
  615. preferred_binding = _starpu_get_opencl_affinity_vector(workerarg->devid);
  616. npreferred = config->topology.nhwcpus;
  617. }
  618. is_a_set_of_accelerators = 0;
  619. memory_node = _starpu_register_memory_node(STARPU_OPENCL_RAM, workerarg->devid);
  620. _starpu_memory_node_worker_add(memory_node);
  621. _starpu_register_bus(0, memory_node);
  622. _starpu_register_bus(memory_node, 0);
  623. break;
  624. #endif
  625. default:
  626. STARPU_ABORT();
  627. }
  628. if (is_a_set_of_accelerators) {
  629. if (accelerator_bindid == -1)
  630. accelerator_bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  631. workerarg->bindid = accelerator_bindid;
  632. }
  633. else {
  634. workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  635. }
  636. workerarg->memory_node = memory_node;
  637. #ifdef __GLIBC__
  638. /* Save the initial cpuset */
  639. CPU_ZERO(&workerarg->initial_cpu_set);
  640. CPU_SET(workerarg->bindid, &workerarg->initial_cpu_set);
  641. CPU_ZERO(&workerarg->current_cpu_set);
  642. CPU_SET(workerarg->bindid, &workerarg->current_cpu_set);
  643. #endif /* __GLIBC__ */
  644. #ifdef STARPU_HAVE_HWLOC
  645. /* Clear the cpu set and set the cpu */
  646. workerarg->initial_hwloc_cpu_set = hwloc_bitmap_alloc();
  647. hwloc_bitmap_only(workerarg->initial_hwloc_cpu_set, workerarg->bindid);
  648. workerarg->current_hwloc_cpu_set = hwloc_bitmap_alloc();
  649. hwloc_bitmap_only(workerarg->current_hwloc_cpu_set, workerarg->bindid);
  650. /* Put the worker descriptor in the userdata field of the hwloc object describing the CPU */
  651. hwloc_obj_t worker_obj;
  652. worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
  653. config->cpu_depth, workerarg->bindid);
  654. worker_obj->userdata = &config->workers[worker];
  655. #endif
  656. }
  657. }
  658. int _starpu_build_topology(struct starpu_machine_config_s *config)
  659. {
  660. int ret;
  661. struct starpu_conf *user_conf = config->user_conf;
  662. ret = _starpu_init_machine_config(config, user_conf);
  663. if (ret)
  664. return ret;
  665. /* for the data management library */
  666. _starpu_init_memory_nodes();
  667. _starpu_init_workers_binding(config);
  668. return 0;
  669. }
/* Tear down everything _starpu_build_topology set up: memory nodes, the
 * per-worker hwloc cpusets, the hwloc topology itself, and the file-scope
 * state so that a later re-initialization starts from scratch. */
void _starpu_destroy_topology(struct starpu_machine_config_s *config __attribute__ ((unused)))
{
	/* cleanup StarPU internal data structures */
	_starpu_deinit_memory_nodes();
	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
#ifdef STARPU_HAVE_HWLOC
		struct starpu_worker_s *workerarg = &config->workers[worker];
		hwloc_bitmap_free(workerarg->initial_hwloc_cpu_set);
		hwloc_bitmap_free(workerarg->current_hwloc_cpu_set);
#endif
	}
#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(config->topology.hwtopology);
#endif
	/* allow _starpu_init_topology to run again */
	topology_is_initialized = 0;
#ifdef STARPU_USE_CUDA
	/* NOTE(review): the hashtable nodes are dropped, not freed — no
	 * htbl-free API is visible from here; confirm whether this leaks. */
	devices_using_cuda = NULL;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	may_bind_automatically = 0;
#endif
}