topology.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009, 2010 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include <stdlib.h>
  18. #include <stdio.h>
  19. #include <common/config.h>
  20. #include <core/workers.h>
  21. #include <core/debug.h>
  22. #include <core/topology.h>
  23. #include <drivers/cuda/driver_cuda.h>
  24. #include <common/hash.h>
  25. #include <profiling/profiling.h>
  26. #ifdef STARPU_HAVE_HWLOC
  27. #include <hwloc.h>
  28. #ifndef HWLOC_API_VERSION
  29. #define HWLOC_OBJ_PU HWLOC_OBJ_PROC
  30. #endif
  31. #endif
  32. #ifdef STARPU_HAVE_WINDOWS
  33. #include <windows.h>
  34. #endif
  35. #ifndef HWLOC_BITMAP_H
  36. /* hwloc <1.1 does not offer the bitmap API yet */
  37. #define hwloc_bitmap_alloc hwloc_cpuset_alloc
  38. #define hwloc_bitmap_only hwloc_cpuset_cpu
  39. #define hwloc_bitmap_singlify hwloc_cpuset_singlify
  40. #endif
/* Set once the hardware topology has been probed; cleared again by
 * _starpu_destroy_topology() so a re-initialization re-probes. */
static unsigned topology_is_initialized = 0;
static void _starpu_initialize_workers_bindid(struct starpu_machine_config_s *config);
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
# ifdef STARPU_USE_CUDA
static void _starpu_initialize_workers_cuda_gpuid(struct starpu_machine_config_s *config);
/* Hash table keyed by crc32(devid): records which device ids were handed to
 * CUDA workers, so the OpenCL setup can avoid reusing the same devices. */
static struct starpu_htbl32_node_s *devices_using_cuda = NULL;
# endif
# ifdef STARPU_USE_OPENCL
static void _starpu_initialize_workers_opencl_gpuid(struct starpu_machine_config_s *config);
# endif
static void _starpu_initialize_workers_gpuid(int use_explicit_workers_gpuid, int *explicit_workers_gpuid,
int *current, int *workers_gpuid, const char *varname, unsigned nhwgpus);
/* Non-zero when device ids were assigned round-robin (no explicit user
 * choice), in which case StarPU may also pick CPU bindings automatically. */
static unsigned may_bind_automatically = 0;
#endif
  55. /*
  56. * Discover the topology of the machine
  57. */
  58. #ifdef STARPU_USE_CUDA
  59. static void _starpu_initialize_workers_cuda_gpuid(struct starpu_machine_config_s *config)
  60. {
  61. struct starpu_machine_topology_s *topology = &config->topology;
  62. _starpu_initialize_workers_gpuid(config->user_conf==NULL?0:config->user_conf->use_explicit_workers_cuda_gpuid,
  63. config->user_conf==NULL?NULL:(int *)config->user_conf->workers_cuda_gpuid,
  64. &(config->current_cuda_gpuid), (int *)topology->workers_cuda_gpuid, "STARPU_WORKERS_CUDAID",
  65. topology->nhwcudagpus);
  66. }
  67. #endif
#ifdef STARPU_USE_OPENCL
/* Fill topology->workers_opencl_gpuid with the OpenCL device ids the OpenCL
 * workers will use (user configuration, STARPU_WORKERS_OPENCLID, or a
 * round-robin default), then filter the resulting list:
 *   1. (when CUDA is also enabled) drop ids already claimed by CUDA workers;
 *   2. drop duplicate ids.
 * Surviving entries are compacted to the front; the tail is padded with -1,
 * which _starpu_get_next_opencl_gpuid()'s caller treats as "no device". */
static void _starpu_initialize_workers_opencl_gpuid(struct starpu_machine_config_s *config)
{
	struct starpu_machine_topology_s *topology = &config->topology;
	_starpu_initialize_workers_gpuid(config->user_conf==NULL?0:config->user_conf->use_explicit_workers_opencl_gpuid,
			config->user_conf==NULL?NULL:(int *)config->user_conf->workers_opencl_gpuid,
			&(config->current_opencl_gpuid), (int *)topology->workers_opencl_gpuid, "STARPU_WORKERS_OPENCLID",
			topology->nhwopenclgpus);
#ifdef STARPU_USE_CUDA
	// Detect devices which are already used with CUDA
	{
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;
		for(i=0 ; i<STARPU_NMAXWORKERS ; i++) {
			/* devices_using_cuda is keyed by crc32(devid); a hit means a
			 * CUDA worker already owns this device id. */
			uint32_t key = _starpu_crc32_be(config->topology.workers_opencl_gpuid[i], 0);
			if (_starpu_htbl_search_32(devices_using_cuda, key) == NULL) {
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb++;
			}
		}
		/* pad the tail with -1 ("no device") */
		for(i=nb ; i<STARPU_NMAXWORKERS ; i++) tmp[i] = -1;
		memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
#endif /* STARPU_USE_CUDA */
	{
		// Detect identical devices
		/* temporary hash set: keeps only the first occurrence of each id */
		struct starpu_htbl32_node_s *devices_already_used = NULL;
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;
		for(i=0 ; i<STARPU_NMAXWORKERS ; i++) {
			uint32_t key = _starpu_crc32_be(topology->workers_opencl_gpuid[i], 0);
			if (_starpu_htbl_search_32(devices_already_used, key) == NULL) {
				_starpu_htbl_insert_32(&devices_already_used, key, config);
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb ++;
			}
		}
		/* pad the tail with -1 ("no device") */
		for(i=nb ; i<STARPU_NMAXWORKERS ; i++) tmp[i] = -1;
		memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
}
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
/* Fill workers_gpuid[0..STARPU_NMAXWORKERS-1] with the device ids that GPU
 * workers should use, and reset *current (the next-id cursor used by
 * _starpu_get_next_{cuda,opencl}_gpuid).
 *
 * The list is filled, in decreasing priority, from:
 *  - the user's explicit list (explicit_workers_gpuid) when
 *    use_explicit_workers_gpuid is set;
 *  - the environment variable named by varname (e.g. STARPU_WORKERS_CUDAID),
 *    whose entries are reused round-robin if it has fewer than
 *    STARPU_NMAXWORKERS values;
 *  - otherwise a plain round-robin over the nhwgpus detected devices, in
 *    which case may_bind_automatically is raised so StarPU may also choose
 *    CPU bindings itself. */
static void _starpu_initialize_workers_gpuid(int use_explicit_workers_gpuid, int *explicit_workers_gpuid,
int *current, int *workers_gpuid, const char *varname, unsigned nhwgpus)
{
	char *strval;
	unsigned i;
	*current = 0;
	/* what do we use, explicit value, env. variable, or round-robin ? */
	if (use_explicit_workers_gpuid)
	{
		/* we use the explicit value from the user */
		memcpy(workers_gpuid,
			explicit_workers_gpuid,
			STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else if ((strval = getenv(varname)))
	{
		/* The env. variable certainly contains less entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round robin
		 * fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;
		char *endptr;
		/* we use the content of the env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap) {
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					workers_gpuid[i] = (unsigned)val;
					strval = endptr;
				}
				else {
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;
					/* there is no more values in the string */
					wrap = 1;
					workers_gpuid[i] = workers_gpuid[0];
				}
			}
			else {
				/* recycle the parsed entries round-robin */
				workers_gpuid[i] = workers_gpuid[i % number_of_entries];
			}
		}
	}
	else
	{
		/* by default, we take a round robin policy */
		if (nhwgpus > 0)
			for (i = 0; i < STARPU_NMAXWORKERS; i++)
				workers_gpuid[i] = (unsigned)(i % nhwgpus);
		/* StarPU can use sampling techniques to bind threads correctly */
		may_bind_automatically = 1;
	}
}
#endif
  177. #ifdef STARPU_USE_CUDA
  178. static inline int _starpu_get_next_cuda_gpuid(struct starpu_machine_config_s *config)
  179. {
  180. unsigned i = ((config->current_cuda_gpuid++) % config->topology.ncudagpus);
  181. return (int)config->topology.workers_cuda_gpuid[i];
  182. }
  183. #endif
  184. #ifdef STARPU_USE_OPENCL
  185. static inline int _starpu_get_next_opencl_gpuid(struct starpu_machine_config_s *config)
  186. {
  187. unsigned i = ((config->current_opencl_gpuid++) % config->topology.nopenclgpus);
  188. return (int)config->topology.workers_opencl_gpuid[i];
  189. }
  190. #endif
/* Probe the machine once: count the hardware CPUs (via hwloc, the Windows
 * API, or sysconf, in that preference order) and, when the corresponding
 * drivers are compiled in, the CUDA and OpenCL devices. Idempotent: guarded
 * by topology_is_initialized, which _starpu_destroy_topology() resets. */
static void _starpu_init_topology(struct starpu_machine_config_s *config)
{
	struct starpu_machine_topology_s *topology = &config->topology;
	if (!topology_is_initialized)
	{
		topology->nhwcpus = 0;
#ifdef STARPU_HAVE_HWLOC
		hwloc_topology_init(&topology->hwtopology);
		hwloc_topology_load(topology->hwtopology);
		/* count physical cores; their depth is reused later to look
		 * objects up when binding threads */
		config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_CORE);
		/* Would be very odd */
		STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);
		if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
			/* unknown, using logical procesors as fallback */
			config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_PU);
		topology->nhwcpus = hwloc_get_nbobjs_by_depth(topology->hwtopology, config->cpu_depth);
#elif defined(__MINGW32__) || defined(__CYGWIN__)
		SYSTEM_INFO sysinfo;
		GetSystemInfo(&sysinfo);
		topology->nhwcpus += sysinfo.dwNumberOfProcessors;
#elif defined(HAVE_SYSCONF)
		topology->nhwcpus = sysconf(_SC_NPROCESSORS_ONLN);
#else
#warning no way to know number of cores, assuming 1
		topology->nhwcpus = 1;
#endif
#ifdef STARPU_USE_CUDA
		config->topology.nhwcudagpus = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
		config->topology.nhwopenclgpus = _starpu_opencl_get_device_count();
#endif
		topology_is_initialized = 1;
	}
}
  226. unsigned _starpu_topology_get_nhwcpu(struct starpu_machine_config_s *config)
  227. {
  228. _starpu_init_topology(config);
  229. return config->topology.nhwcpus;
  230. }
  231. static int _starpu_init_machine_config(struct starpu_machine_config_s *config,
  232. struct starpu_conf *user_conf)
  233. {
  234. int explicitval __attribute__((unused));
  235. unsigned use_accelerator = 0;
  236. int i;
  237. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  238. config->workers[i].workerid = i;
  239. struct starpu_machine_topology_s *topology = &config->topology;
  240. topology->nworkers = 0;
  241. topology->ncombinedworkers = 0;
  242. _starpu_init_topology(config);
  243. _starpu_initialize_workers_bindid(config);
  244. #ifdef STARPU_USE_CUDA
  245. if (user_conf && (user_conf->ncuda == 0))
  246. {
  247. /* the user explicitely disabled CUDA */
  248. topology->ncudagpus = 0;
  249. }
  250. else {
  251. /* we need to initialize CUDA early to count the number of devices */
  252. _starpu_init_cuda();
  253. if (user_conf && (user_conf->ncuda != -1))
  254. {
  255. explicitval = user_conf->ncuda;
  256. }
  257. else {
  258. explicitval = starpu_get_env_number("STARPU_NCUDA");
  259. }
  260. if (explicitval < 0) {
  261. config->topology.ncudagpus =
  262. STARPU_MIN(_starpu_get_cuda_device_count(), STARPU_MAXCUDADEVS);
  263. } else {
  264. /* use the specified value */
  265. topology->ncudagpus = (unsigned)explicitval;
  266. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  267. }
  268. STARPU_ASSERT(config->topology.ncudagpus + config->topology.nworkers <= STARPU_NMAXWORKERS);
  269. }
  270. if (topology->ncudagpus > 0)
  271. use_accelerator = 1;
  272. _starpu_initialize_workers_cuda_gpuid(config);
  273. unsigned cudagpu;
  274. for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
  275. {
  276. config->workers[topology->nworkers + cudagpu].arch = STARPU_CUDA_WORKER;
  277. int devid = _starpu_get_next_cuda_gpuid(config);
  278. enum starpu_perf_archtype arch = STARPU_CUDA_DEFAULT + devid;
  279. config->workers[topology->nworkers + cudagpu].devid = devid;
  280. config->workers[topology->nworkers + cudagpu].perf_arch = arch;
  281. config->workers[topology->nworkers + cudagpu].worker_mask = STARPU_CUDA;
  282. config->worker_mask |= STARPU_CUDA;
  283. uint32_t key = _starpu_crc32_be(devid, 0);
  284. _starpu_htbl_insert_32(&devices_using_cuda, key, config);
  285. }
  286. topology->nworkers += topology->ncudagpus;
  287. #endif
  288. #ifdef STARPU_USE_OPENCL
  289. if (user_conf && (user_conf->nopencl == 0))
  290. {
  291. /* the user explicitely disabled OpenCL */
  292. topology->nopenclgpus = 0;
  293. }
  294. else {
  295. /* we need to initialize OpenCL early to count the number of devices */
  296. int nb_devices;
  297. _starpu_opencl_init();
  298. nb_devices = STARPU_MIN(_starpu_opencl_get_device_count(), STARPU_MAXOPENCLDEVS);
  299. if (user_conf && (user_conf->nopencl != -1))
  300. {
  301. explicitval = user_conf->nopencl;
  302. }
  303. else {
  304. explicitval = starpu_get_env_number("STARPU_NOPENCL");
  305. }
  306. if (explicitval < 0) {
  307. topology->nopenclgpus = nb_devices;
  308. }
  309. else {
  310. if (explicitval > nb_devices) {
  311. /* The user requires more OpenCL devices than there is available */
  312. topology->nopenclgpus = nb_devices;
  313. }
  314. else {
  315. /* use the specified value */
  316. topology->nopenclgpus = (unsigned)explicitval;
  317. }
  318. STARPU_ASSERT(topology->nopenclgpus <= STARPU_MAXOPENCLDEVS);
  319. }
  320. STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
  321. }
  322. if (topology->nopenclgpus > 0)
  323. use_accelerator = 1;
  324. // TODO: use_accelerator pour les OpenCL?
  325. _starpu_initialize_workers_opencl_gpuid(config);
  326. unsigned openclgpu;
  327. for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
  328. {
  329. int devid = _starpu_get_next_opencl_gpuid(config);
  330. if (devid == -1) { // There is no more devices left
  331. topology->nopenclgpus = openclgpu;
  332. break;
  333. }
  334. config->workers[topology->nworkers + openclgpu].arch = STARPU_OPENCL_WORKER;
  335. enum starpu_perf_archtype arch = STARPU_OPENCL_DEFAULT + devid;
  336. config->workers[topology->nworkers + openclgpu].devid = devid;
  337. config->workers[topology->nworkers + openclgpu].perf_arch = arch;
  338. config->workers[topology->nworkers + openclgpu].worker_mask = STARPU_OPENCL;
  339. config->worker_mask |= STARPU_OPENCL;
  340. }
  341. topology->nworkers += topology->nopenclgpus;
  342. #endif
  343. #ifdef STARPU_USE_GORDON
  344. if (user_conf && (user_conf->ncuda != -1)) {
  345. explicitval = user_conf->ncuda;
  346. }
  347. else {
  348. explicitval = starpu_get_env_number("STARPU_NGORDON");
  349. }
  350. if (explicitval < 0) {
  351. topology->ngordon_spus = spe_cpu_info_get(SPE_COUNT_USABLE_SPES, -1);
  352. } else {
  353. /* use the specified value */
  354. topology->ngordon_spus = (unsigned)explicitval;
  355. STARPU_ASSERT(topology->ngordon_spus <= NMAXGORDONSPUS);
  356. }
  357. STARPU_ASSERT(topology->ngordon_spus + topology->nworkers <= STARPU_NMAXWORKERS);
  358. if (topology->ngordon_spus > 0)
  359. use_accelerator = 1;
  360. unsigned spu;
  361. for (spu = 0; spu < config->ngordon_spus; spu++)
  362. {
  363. config->workers[topology->nworkers + spu].arch = STARPU_GORDON_WORKER;
  364. config->workers[topology->nworkers + spu].perf_arch = STARPU_GORDON_DEFAULT;
  365. config->workers[topology->nworkers + spu].id = spu;
  366. config->workers[topology->nworkers + spu].worker_is_running = 0;
  367. config->workers[topology->nworkers + spu].worker_mask = STARPU_GORDON;
  368. config->worker_mask |= STARPU_GORDON;
  369. }
  370. topology->nworkers += topology->ngordon_spus;
  371. #endif
  372. /* we put the CPU section after the accelerator : in case there was an
  373. * accelerator found, we devote one cpu */
  374. #ifdef STARPU_USE_CPU
  375. if (user_conf && (user_conf->ncpus != -1)) {
  376. explicitval = user_conf->ncpus;
  377. }
  378. else {
  379. explicitval = starpu_get_env_number("STARPU_NCPUS");
  380. }
  381. if (explicitval < 0) {
  382. unsigned already_busy_cpus = (topology->ngordon_spus?1:0) + topology->ncudagpus;
  383. long avail_cpus = topology->nhwcpus - (use_accelerator?already_busy_cpus:0);
  384. topology->ncpus = STARPU_MIN(avail_cpus, STARPU_NMAXCPUS);
  385. } else {
  386. /* use the specified value */
  387. topology->ncpus = (unsigned)explicitval;
  388. STARPU_ASSERT(topology->ncpus <= STARPU_NMAXCPUS);
  389. }
  390. STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
  391. unsigned cpu;
  392. for (cpu = 0; cpu < topology->ncpus; cpu++)
  393. {
  394. config->workers[topology->nworkers + cpu].arch = STARPU_CPU_WORKER;
  395. config->workers[topology->nworkers + cpu].perf_arch = STARPU_CPU_DEFAULT;
  396. config->workers[topology->nworkers + cpu].devid = cpu;
  397. config->workers[topology->nworkers + cpu].worker_mask = STARPU_CPU;
  398. config->worker_mask |= STARPU_CPU;
  399. }
  400. topology->nworkers += topology->ncpus;
  401. #endif
  402. if (topology->nworkers == 0)
  403. {
  404. _STARPU_DEBUG("No worker found, aborting ...\n");
  405. return -ENODEV;
  406. }
  407. return 0;
  408. }
  409. /*
  410. * Bind workers on the different processors
  411. */
/* Fill topology->workers_bindid[0..STARPU_NMAXWORKERS-1] with the CPU ids
 * that successive workers will be bound to, and reset the binding cursor.
 * The list comes, in decreasing priority, from the user's explicit
 * starpu_conf list, the STARPU_WORKERS_CPUID environment variable (entries
 * reused round-robin if fewer than STARPU_NMAXWORKERS), or a plain
 * round-robin over the nhwcpus hardware CPUs. */
static void _starpu_initialize_workers_bindid(struct starpu_machine_config_s *config)
{
	char *strval;
	unsigned i;
	struct starpu_machine_topology_s *topology = &config->topology;
	config->current_bindid = 0;
	/* what do we use, explicit value, env. variable, or round-robin ? */
	if (config->user_conf && config->user_conf->use_explicit_workers_bindid)
	{
		/* we use the explicit value from the user */
		memcpy(topology->workers_bindid,
			config->user_conf->workers_bindid,
			STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else if ((strval = getenv("STARPU_WORKERS_CPUID")))
	{
		/* STARPU_WORKERS_CPUID certainly contains less entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round robin
		 * fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;
		char *endptr;
		/* we use the content of the STARPU_WORKERS_CPUID env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap) {
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					/* clamp into the valid CPU range */
					topology->workers_bindid[i] = (unsigned)(val % topology->nhwcpus);
					strval = endptr;
				}
				else {
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;
					/* there is no more values in the string */
					wrap = 1;
					topology->workers_bindid[i] = topology->workers_bindid[0];
				}
			}
			else {
				/* recycle the parsed entries round-robin */
				topology->workers_bindid[i] = topology->workers_bindid[i % number_of_entries];
			}
		}
	}
	else
	{
		/* by default, we take a round robin policy */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
			topology->workers_bindid[i] = (unsigned)(i % topology->nhwcpus);
	}
}
/* This function gets the identifier of the next cpu on which to bind a
 * worker. In case a list of preferred cpus was specified, we look for an
 * available cpu among the list if possible, otherwise a round-robin policy is
 * used. */
static inline int _starpu_get_next_bindid(struct starpu_machine_config_s *config,
		int *preferred_binding, int npreferred)
{
	struct starpu_machine_topology_s *topology = &config->topology;
	unsigned found = 0;
	int current_preferred;
	for (current_preferred = 0; current_preferred < npreferred; current_preferred++)
	{
		if (found)
			break;
		unsigned requested_cpu = preferred_binding[current_preferred];
		/* can we bind the worker on the requested cpu ? */
		unsigned ind;
		/* only scan the not-yet-consumed part of the binding list */
		for (ind = config->current_bindid; ind < topology->nhwcpus; ind++)
		{
			if (topology->workers_bindid[ind] == requested_cpu)
			{
				/* the cpu is available, we use it ! In order
				 * to make sure that it will not be used again
				 * later on, we remove the entry from the list
				 * by swapping it to the front (the slot the
				 * cursor is about to consume) */
				topology->workers_bindid[ind] =
					topology->workers_bindid[config->current_bindid];
				topology->workers_bindid[config->current_bindid] = requested_cpu;
				found = 1;
				break;
			}
		}
	}
	/* NOTE(review): the cursor wraps on STARPU_NMAXWORKERS, not on
	 * topology->nhwcpus -- workers_bindid has STARPU_NMAXWORKERS entries
	 * so this is in-bounds, but confirm the intended wrap length. */
	unsigned i = ((config->current_bindid++) % STARPU_NMAXWORKERS);
	return (int)topology->workers_bindid[i];
}
/* Pin the calling thread onto the CPU designated by cpuid, using whichever
 * mechanism the platform offers: hwloc, pthread affinity, or the Windows
 * thread-affinity API. Aborts on binding failure; does nothing when no
 * binding support was detected at configure time. */
void _starpu_bind_thread_on_cpu(struct starpu_machine_config_s *config __attribute__((unused)), unsigned cpuid)
{
#ifdef STARPU_HAVE_HWLOC
	int ret;
	_starpu_init_topology(config);
	/* cpuid indexes the objects at the depth selected in
	 * _starpu_init_topology (cores, or PUs as fallback) */
	hwloc_obj_t obj = hwloc_get_obj_by_depth(config->topology.hwtopology, config->cpu_depth, cpuid);
	hwloc_cpuset_t set = obj->cpuset;
	/* NOTE(review): this singlifies obj->cpuset in place, i.e. it mutates
	 * the cpuset owned by the topology object rather than a private copy
	 * (hwloc_bitmap_dup) -- confirm this is intentional. */
	hwloc_bitmap_singlify(set);
	ret = hwloc_set_cpubind(config->topology.hwtopology, set, HWLOC_CPUBIND_THREAD);
	if (ret)
	{
		perror("binding thread");
		STARPU_ABORT();
	}
#elif defined(HAVE_PTHREAD_SETAFFINITY_NP)
	int ret;
	/* fix the thread on the correct cpu */
	cpu_set_t aff_mask;
	CPU_ZERO(&aff_mask);
	CPU_SET(cpuid, &aff_mask);
	pthread_t self = pthread_self();
	ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
	if (ret)
	{
		perror("binding thread");
		STARPU_ABORT();
	}
#elif defined(__MINGW32__) || defined(__CYGWIN__)
	DWORD mask = 1 << cpuid;
	if (!SetThreadAffinityMask(GetCurrentThread(), mask)) {
		fprintf(stderr,"SetThreadMaskAffinity(%lx) failed\n", mask);
		STARPU_ABORT();
	}
#else
#warning no CPU binding support
#endif
}
/* For every detected worker: pick the memory node its data lives on,
 * register the corresponding buses in the bus-id matrix, choose the CPU it
 * will be bound to, and record the binding in the worker's cpusets (and in
 * the hwloc object's userdata when hwloc is available). Workers flagged as
 * "a set of accelerators" (Gordon) all share one dedicated binding CPU. */
static void _starpu_init_workers_binding(struct starpu_machine_config_s *config)
{
	/* launch one thread per CPU */
	unsigned ram_memory_node;
	/* a single cpu is dedicated for the accelerators */
	int accelerator_bindid = -1;
	/* note that even if the CPU cpu are not used, we always have a RAM node */
	/* TODO : support NUMA ;) */
	ram_memory_node = _starpu_register_memory_node(STARPU_CPU_RAM);
	/* We will store all the busid of the different (src, dst) combinations
	 * in a matrix which we initialize here. */
	_starpu_initialize_busid_matrix();
	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
		unsigned memory_node = -1;
		unsigned is_a_set_of_accelerators = 0;
		struct starpu_worker_s *workerarg = &config->workers[worker];
		/* Perhaps the worker has some "favourite" bindings */
		int *preferred_binding = NULL;
		int npreferred = 0;
		/* select the memory node that contains worker's memory */
		switch (workerarg->arch) {
			case STARPU_CPU_WORKER:
			/* "dedicate" a cpu cpu to that worker */
				is_a_set_of_accelerators = 0;
				memory_node = ram_memory_node;
				break;
#ifdef STARPU_USE_GORDON
			case STARPU_GORDON_WORKER:
				/* all SPUs share a single binding CPU */
				is_a_set_of_accelerators = 1;
				memory_node = ram_memory_node;
				break;
#endif
#ifdef STARPU_USE_CUDA
			case STARPU_CUDA_WORKER:
				if (may_bind_automatically)
				{
					/* StarPU is allowed to bind threads automatically */
					preferred_binding = _starpu_get_cuda_affinity_vector(workerarg->devid);
					npreferred = config->topology.nhwcpus;
				}
				is_a_set_of_accelerators = 0;
				memory_node = _starpu_register_memory_node(STARPU_CUDA_RAM);
				/* buses in both directions between RAM (node 0) and the GPU */
				_starpu_register_bus(0, memory_node);
				_starpu_register_bus(memory_node, 0);
				break;
#endif
#ifdef STARPU_USE_OPENCL
			case STARPU_OPENCL_WORKER:
				if (may_bind_automatically)
				{
					/* StarPU is allowed to bind threads automatically */
					preferred_binding = _starpu_get_opencl_affinity_vector(workerarg->devid);
					npreferred = config->topology.nhwcpus;
				}
				is_a_set_of_accelerators = 0;
				memory_node = _starpu_register_memory_node(STARPU_OPENCL_RAM);
				/* buses in both directions between RAM (node 0) and the device */
				_starpu_register_bus(0, memory_node);
				_starpu_register_bus(memory_node, 0);
				break;
#endif
			default:
				STARPU_ABORT();
		}
		if (is_a_set_of_accelerators) {
			/* reuse one shared binding CPU for the whole set */
			if (accelerator_bindid == -1)
				accelerator_bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
			workerarg->bindid = accelerator_bindid;
		}
		else {
			workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
		}
		workerarg->memory_node = memory_node;
#ifndef STARPU_HAVE_WINDOWS
		/* Save the initial cpuset */
		CPU_ZERO(&workerarg->initial_cpu_set);
		CPU_SET(workerarg->bindid, &workerarg->initial_cpu_set);
		CPU_ZERO(&workerarg->current_cpu_set);
		CPU_SET(workerarg->bindid, &workerarg->current_cpu_set);
#endif /* STARPU_HAVE_WINDOWS */
#ifdef STARPU_HAVE_HWLOC
		/* Clear the cpu set and set the cpu */
		workerarg->initial_hwloc_cpu_set = hwloc_bitmap_alloc();
		hwloc_bitmap_only(workerarg->initial_hwloc_cpu_set, workerarg->bindid);
		workerarg->current_hwloc_cpu_set = hwloc_bitmap_alloc();
		hwloc_bitmap_only(workerarg->current_hwloc_cpu_set, workerarg->bindid);
		/* Put the worker descriptor in the userdata field of the hwloc object describing the CPU */
		hwloc_obj_t worker_obj;
		worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
					config->cpu_depth, workerarg->bindid);
		worker_obj->userdata = &config->workers[worker];
#endif
	}
}
  640. int _starpu_build_topology(struct starpu_machine_config_s *config)
  641. {
  642. int ret;
  643. struct starpu_conf *user_conf = config->user_conf;
  644. ret = _starpu_init_machine_config(config, user_conf);
  645. if (ret)
  646. return ret;
  647. /* for the data management library */
  648. _starpu_init_memory_nodes();
  649. _starpu_init_workers_binding(config);
  650. return 0;
  651. }
/* Tear down what _starpu_build_topology set up and reset the file-scope
 * state so a later re-initialization re-probes the machine. */
void _starpu_destroy_topology(struct starpu_machine_config_s *config __attribute__ ((unused)))
{
	/* cleanup StarPU internal data structures */
	_starpu_deinit_memory_nodes();
#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(config->topology.hwtopology);
#endif
	/* force _starpu_init_topology() to probe again next time */
	topology_is_initialized = 0;
#ifdef STARPU_USE_CUDA
	/* drop the CUDA-device registry (entries are not freed here) */
	devices_using_cuda = NULL;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	may_bind_automatically = 0;
#endif
}