topology.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009, 2010-2011 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include <stdlib.h>
  18. #include <stdio.h>
  19. #include <common/config.h>
  20. #include <core/workers.h>
  21. #include <core/debug.h>
  22. #include <core/topology.h>
  23. #include <drivers/cuda/driver_cuda.h>
  24. #include <common/hash.h>
  25. #include <profiling/profiling.h>
  26. #ifdef STARPU_HAVE_HWLOC
  27. #include <hwloc.h>
  28. #ifndef HWLOC_API_VERSION
  29. #define HWLOC_OBJ_PU HWLOC_OBJ_PROC
  30. #endif
  31. #endif
  32. #ifdef STARPU_HAVE_WINDOWS
  33. #include <windows.h>
  34. #endif
  35. #ifndef HWLOC_BITMAP_H
  36. /* hwloc <1.1 does not offer the bitmap API yet */
  37. #define hwloc_bitmap_alloc hwloc_cpuset_alloc
  38. #define hwloc_bitmap_only hwloc_cpuset_cpu
  39. #define hwloc_bitmap_singlify hwloc_cpuset_singlify
  40. #endif
  41. static unsigned topology_is_initialized = 0;
  42. static void _starpu_initialize_workers_bindid(struct _starpu_machine_config *config);
  43. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  44. # ifdef STARPU_USE_CUDA
  45. static void _starpu_initialize_workers_cuda_gpuid(struct _starpu_machine_config *config);
  46. static struct starpu_htbl32_node *devices_using_cuda = NULL;
  47. # endif
  48. # ifdef STARPU_USE_OPENCL
  49. static void _starpu_initialize_workers_opencl_gpuid(struct _starpu_machine_config *config);
  50. # endif
  51. static void _starpu_initialize_workers_gpuid(int use_explicit_workers_gpuid, int *explicit_workers_gpuid,
  52. int *current, int *workers_gpuid, const char *varname, unsigned nhwgpus);
  53. static unsigned may_bind_automatically = 0;
  54. #endif
  55. /*
  56. * Discover the topology of the machine
  57. */
  58. #ifdef STARPU_USE_CUDA
  59. static void _starpu_initialize_workers_cuda_gpuid(struct _starpu_machine_config *config)
  60. {
  61. struct starpu_machine_topology *topology = &config->topology;
  62. _starpu_initialize_workers_gpuid(config->user_conf==NULL?0:config->user_conf->use_explicit_workers_cuda_gpuid,
  63. config->user_conf==NULL?NULL:(int *)config->user_conf->workers_cuda_gpuid,
  64. &(config->current_cuda_gpuid), (int *)topology->workers_cuda_gpuid, "STARPU_WORKERS_CUDAID",
  65. topology->nhwcudagpus);
  66. }
  67. #endif
#ifdef STARPU_USE_OPENCL
/* Fill topology->workers_opencl_gpuid with the OpenCL device numbers the
 * workers should use, then (a) drop devices already claimed by CUDA and
 * (b) drop duplicate entries, compacting survivors to the front of the
 * array and padding the tail with -1 ("no device"). */
static void _starpu_initialize_workers_opencl_gpuid(struct _starpu_machine_config *config)
{
	struct starpu_machine_topology *topology = &config->topology;

	/* Priority: explicit list from starpu_conf, then the
	 * STARPU_WORKERS_OPENCLID environment variable, then round-robin
	 * over the detected devices. */
	_starpu_initialize_workers_gpuid(config->user_conf==NULL?0:config->user_conf->use_explicit_workers_opencl_gpuid,
		config->user_conf==NULL?NULL:(int *)config->user_conf->workers_opencl_gpuid,
		&(config->current_opencl_gpuid), (int *)topology->workers_opencl_gpuid, "STARPU_WORKERS_OPENCLID",
		topology->nhwopenclgpus);

#ifdef STARPU_USE_CUDA
	// Detect devices which are already used with CUDA
	{
		/* Keep only device ids whose CRC key is NOT in devices_using_cuda
		 * (populated by _starpu_init_machine_config when CUDA workers are
		 * created).
		 * NOTE(review): this assumes CUDA and OpenCL enumerate the same
		 * physical devices under the same ids — confirm on target
		 * platforms where both drivers are enabled. */
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;
		for(i=0 ; i<STARPU_NMAXWORKERS ; i++) {
			uint32_t key = _starpu_crc32_be(config->topology.workers_opencl_gpuid[i], 0);
			if (_starpu_htbl_search_32(devices_using_cuda, key) == NULL) {
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb++;
			}
		}
		/* Pad the tail with -1 (stored in an unsigned slot, i.e. UINT_MAX). */
		for(i=nb ; i<STARPU_NMAXWORKERS ; i++) tmp[i] = -1;
		memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
#endif /* STARPU_USE_CUDA */
	{
		// Detect identical devices
		/* Deduplicate: keep only the first occurrence of each device id,
		 * using a throwaway hash table keyed by the id's CRC. */
		struct starpu_htbl32_node *devices_already_used = NULL;
		unsigned tmp[STARPU_NMAXWORKERS];
		unsigned nb=0;
		int i;

		for(i=0 ; i<STARPU_NMAXWORKERS ; i++) {
			uint32_t key = _starpu_crc32_be(topology->workers_opencl_gpuid[i], 0);
			if (_starpu_htbl_search_32(devices_already_used, key) == NULL) {
				/* The stored value is irrelevant; only key presence matters. */
				_starpu_htbl_insert_32(&devices_already_used, key, config);
				tmp[nb] = topology->workers_opencl_gpuid[i];
				nb ++;
			}
		}
		for(i=nb ; i<STARPU_NMAXWORKERS ; i++) tmp[i] = -1;
		memcpy(topology->workers_opencl_gpuid, tmp, sizeof(unsigned)*STARPU_NMAXWORKERS);
	}
}
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
/* Fill workers_gpuid[0..STARPU_NMAXWORKERS-1] with GPU device ids and reset
 * *current, the rotating cursor consumed by _starpu_get_next_*_gpuid().
 *
 * Source of the ids, in order of priority:
 *   1. the caller-supplied explicit list (use_explicit_workers_gpuid != 0),
 *   2. the environment variable named by 'varname'
 *      (e.g. STARPU_WORKERS_CUDAID / STARPU_WORKERS_OPENCLID),
 *   3. round-robin over the nhwgpus detected devices. */
static void _starpu_initialize_workers_gpuid(int use_explicit_workers_gpuid, int *explicit_workers_gpuid,
				int *current, int *workers_gpuid, const char *varname, unsigned nhwgpus)
{
	char *strval;
	unsigned i;

	*current = 0;

	/* conf->workers_bindid indicates the successive cpu identifier that
	 * should be used to bind the workers. It should be either filled
	 * according to the user's explicit parameters (from starpu_conf) or
	 * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
	 * round-robin policy is used to distributed the workers over the
	 * cpus. */

	/* what do we use, explicit value, env. variable, or round-robin ? */
	if (use_explicit_workers_gpuid)
	{
		/* we use the explicit value from the user */
		memcpy(workers_gpuid,
			explicit_workers_gpuid,
			STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else if ((strval = getenv(varname)))
	{
		/* STARPU_WORKERS_CUDAID certainly contains less entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round robin
		 * fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;

		char *endptr;
		/* parse the space/anything-separated integers from the variable;
		 * strtol advances strval through the string */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap) {
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					workers_gpuid[i] = (unsigned)val;
					strval = endptr;
				}
				else {
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;

					/* there is no more values in the string */
					wrap = 1;

					workers_gpuid[i] = workers_gpuid[0];
				}
			}
			else {
				/* recycle the parsed prefix round-robin */
				workers_gpuid[i] = workers_gpuid[i % number_of_entries];
			}
		}
	}
	else
	{
		/* by default, we take a round robin policy */
		if (nhwgpus > 0)
		     for (i = 0; i < STARPU_NMAXWORKERS; i++)
			  workers_gpuid[i] = (unsigned)(i % nhwgpus);
		/* NOTE(review): when nhwgpus == 0 the array is left
		 * uninitialized here; presumably callers never read it when no
		 * device was detected — confirm. */

		/* StarPU can use sampling techniques to bind threads correctly */
		may_bind_automatically = 1;
	}
}
#endif
  177. #ifdef STARPU_USE_CUDA
  178. static inline int _starpu_get_next_cuda_gpuid(struct _starpu_machine_config *config)
  179. {
  180. unsigned i = ((config->current_cuda_gpuid++) % config->topology.ncudagpus);
  181. return (int)config->topology.workers_cuda_gpuid[i];
  182. }
  183. #endif
  184. #ifdef STARPU_USE_OPENCL
  185. static inline int _starpu_get_next_opencl_gpuid(struct _starpu_machine_config *config)
  186. {
  187. unsigned i = ((config->current_opencl_gpuid++) % config->topology.nopenclgpus);
  188. return (int)config->topology.workers_opencl_gpuid[i];
  189. }
  190. #endif
/* Discover the machine topology once (guarded by topology_is_initialized):
 * count hardware CPUs via hwloc when available (cores, falling back to
 * logical PUs), via the Windows API, or via sysconf; also count CUDA and
 * OpenCL devices when those drivers are compiled in. */
static void _starpu_init_topology(struct _starpu_machine_config *config)
{
	struct starpu_machine_topology *topology = &config->topology;

	if (!topology_is_initialized)
	{
		topology->nhwcpus = 0;

#ifdef STARPU_HAVE_HWLOC
		hwloc_topology_init(&topology->hwtopology);
		hwloc_topology_load(topology->hwtopology);

		/* Bind at core granularity when hwloc can tell cores apart. */
		config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_CORE);

		/* Would be very odd */
		STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);

		if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
			/* unknown, using logical processors as fallback */
			config->cpu_depth = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_PU);

		topology->nhwcpus = hwloc_get_nbobjs_by_depth(topology->hwtopology, config->cpu_depth);
#elif defined(__MINGW32__) || defined(__CYGWIN__)
		SYSTEM_INFO sysinfo;
		GetSystemInfo(&sysinfo);
		topology->nhwcpus += sysinfo.dwNumberOfProcessors;
#elif defined(HAVE_SYSCONF)
		topology->nhwcpus = sysconf(_SC_NPROCESSORS_ONLN);
#else
#warning no way to know number of cores, assuming 1
		topology->nhwcpus = 1;
#endif

#ifdef STARPU_USE_CUDA
		config->topology.nhwcudagpus = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
		config->topology.nhwopenclgpus = _starpu_opencl_get_device_count();
#endif

		topology_is_initialized = 1;
	}
}
  226. unsigned _starpu_topology_get_nhwcpu(struct _starpu_machine_config *config)
  227. {
  228. _starpu_init_topology(config);
  229. return config->topology.nhwcpus;
  230. }
  231. static int _starpu_init_machine_config(struct _starpu_machine_config *config,
  232. struct starpu_conf *user_conf)
  233. {
  234. int explicitval STARPU_ATTRIBUTE_UNUSED;
  235. unsigned use_accelerator = 0;
  236. int i;
  237. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  238. config->workers[i].workerid = i;
  239. struct starpu_machine_topology *topology = &config->topology;
  240. topology->nworkers = 0;
  241. topology->ncombinedworkers = 0;
  242. _starpu_init_topology(config);
  243. _starpu_initialize_workers_bindid(config);
  244. #ifdef STARPU_USE_CUDA
  245. if (user_conf && (user_conf->ncuda == 0))
  246. {
  247. /* the user explicitely disabled CUDA */
  248. topology->ncudagpus = 0;
  249. }
  250. else {
  251. /* we need to initialize CUDA early to count the number of devices */
  252. _starpu_init_cuda();
  253. if (user_conf && (user_conf->ncuda != -1))
  254. {
  255. explicitval = user_conf->ncuda;
  256. }
  257. else {
  258. explicitval = starpu_get_env_number("STARPU_NCUDA");
  259. }
  260. if (explicitval < 0) {
  261. config->topology.ncudagpus =
  262. STARPU_MIN(_starpu_get_cuda_device_count(), STARPU_MAXCUDADEVS);
  263. } else {
  264. /* use the specified value */
  265. if (explicitval > STARPU_MAXCUDADEVS) {
  266. fprintf(stderr,"# Warning: %d CUDA devices requested. Only %d enabled. Use configure option --enable-maxcudadev=xxx to update the maximum value of supported CUDA devices.\n", explicitval, STARPU_MAXCUDADEVS);
  267. explicitval = STARPU_MAXCUDADEVS;
  268. }
  269. topology->ncudagpus = (unsigned)explicitval;
  270. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  271. }
  272. STARPU_ASSERT(config->topology.ncudagpus + config->topology.nworkers <= STARPU_NMAXWORKERS);
  273. }
  274. if (topology->ncudagpus > 0)
  275. use_accelerator = 1;
  276. _starpu_initialize_workers_cuda_gpuid(config);
  277. unsigned cudagpu;
  278. for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
  279. {
  280. config->workers[topology->nworkers + cudagpu].arch = STARPU_CUDA_WORKER;
  281. int devid = _starpu_get_next_cuda_gpuid(config);
  282. enum starpu_perf_archtype arch = STARPU_CUDA_DEFAULT + devid;
  283. config->workers[topology->nworkers + cudagpu].devid = devid;
  284. config->workers[topology->nworkers + cudagpu].perf_arch = arch;
  285. config->workers[topology->nworkers + cudagpu].worker_mask = STARPU_CUDA;
  286. config->worker_mask |= STARPU_CUDA;
  287. uint32_t key = _starpu_crc32_be(devid, 0);
  288. _starpu_htbl_insert_32(&devices_using_cuda, key, config);
  289. }
  290. topology->nworkers += topology->ncudagpus;
  291. #endif
  292. #ifdef STARPU_USE_OPENCL
  293. if (user_conf && (user_conf->nopencl == 0))
  294. {
  295. /* the user explicitely disabled OpenCL */
  296. topology->nopenclgpus = 0;
  297. }
  298. else {
  299. /* we need to initialize OpenCL early to count the number of devices */
  300. int nb_devices;
  301. _starpu_opencl_init();
  302. nb_devices = STARPU_MIN(_starpu_opencl_get_device_count(), STARPU_MAXOPENCLDEVS);
  303. if (user_conf && (user_conf->nopencl != -1))
  304. {
  305. explicitval = user_conf->nopencl;
  306. }
  307. else {
  308. explicitval = starpu_get_env_number("STARPU_NOPENCL");
  309. }
  310. if (explicitval < 0) {
  311. topology->nopenclgpus = nb_devices;
  312. }
  313. else {
  314. if (explicitval > nb_devices) {
  315. /* The user requires more OpenCL devices than there is available */
  316. topology->nopenclgpus = nb_devices;
  317. }
  318. else {
  319. /* use the specified value */
  320. topology->nopenclgpus = (unsigned)explicitval;
  321. }
  322. STARPU_ASSERT(topology->nopenclgpus <= STARPU_MAXOPENCLDEVS);
  323. }
  324. STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
  325. }
  326. if (topology->nopenclgpus > 0)
  327. use_accelerator = 1;
  328. // TODO: use_accelerator pour les OpenCL?
  329. _starpu_initialize_workers_opencl_gpuid(config);
  330. unsigned openclgpu;
  331. for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
  332. {
  333. int devid = _starpu_get_next_opencl_gpuid(config);
  334. if (devid == -1) { // There is no more devices left
  335. topology->nopenclgpus = openclgpu;
  336. break;
  337. }
  338. config->workers[topology->nworkers + openclgpu].arch = STARPU_OPENCL_WORKER;
  339. enum starpu_perf_archtype arch = STARPU_OPENCL_DEFAULT + devid;
  340. config->workers[topology->nworkers + openclgpu].devid = devid;
  341. config->workers[topology->nworkers + openclgpu].perf_arch = arch;
  342. config->workers[topology->nworkers + openclgpu].worker_mask = STARPU_OPENCL;
  343. config->worker_mask |= STARPU_OPENCL;
  344. }
  345. topology->nworkers += topology->nopenclgpus;
  346. #endif
  347. #ifdef STARPU_USE_GORDON
  348. if (user_conf && (user_conf->ncuda != -1)) {
  349. explicitval = user_conf->ncuda;
  350. }
  351. else {
  352. explicitval = starpu_get_env_number("STARPU_NGORDON");
  353. }
  354. if (explicitval < 0) {
  355. topology->ngordon_spus = spe_cpu_info_get(SPE_COUNT_USABLE_SPES, -1);
  356. } else {
  357. /* use the specified value */
  358. topology->ngordon_spus = (unsigned)explicitval;
  359. STARPU_ASSERT(topology->ngordon_spus <= NMAXGORDONSPUS);
  360. }
  361. STARPU_ASSERT(topology->ngordon_spus + topology->nworkers <= STARPU_NMAXWORKERS);
  362. if (topology->ngordon_spus > 0)
  363. use_accelerator = 1;
  364. unsigned spu;
  365. for (spu = 0; spu < config->ngordon_spus; spu++)
  366. {
  367. config->workers[topology->nworkers + spu].arch = STARPU_GORDON_WORKER;
  368. config->workers[topology->nworkers + spu].perf_arch = STARPU_GORDON_DEFAULT;
  369. config->workers[topology->nworkers + spu].id = spu;
  370. config->workers[topology->nworkers + spu].worker_is_running = 0;
  371. config->workers[topology->nworkers + spu].worker_mask = STARPU_GORDON;
  372. config->worker_mask |= STARPU_GORDON;
  373. }
  374. topology->nworkers += topology->ngordon_spus;
  375. #endif
  376. /* we put the CPU section after the accelerator : in case there was an
  377. * accelerator found, we devote one cpu */
  378. #ifdef STARPU_USE_CPU
  379. if (user_conf && (user_conf->ncpus != -1)) {
  380. explicitval = user_conf->ncpus;
  381. }
  382. else {
  383. explicitval = starpu_get_env_number("STARPU_NCPUS");
  384. }
  385. if (explicitval < 0) {
  386. unsigned already_busy_cpus = (topology->ngordon_spus?1:0) + topology->ncudagpus + topology->nopenclgpus;
  387. long avail_cpus = topology->nhwcpus - (use_accelerator?already_busy_cpus:0);
  388. if (avail_cpus < 0)
  389. avail_cpus = 0;
  390. topology->ncpus = STARPU_MIN(avail_cpus, STARPU_MAXCPUS);
  391. } else {
  392. /* use the specified value */
  393. topology->ncpus = (unsigned)explicitval;
  394. STARPU_ASSERT(topology->ncpus <= STARPU_MAXCPUS);
  395. }
  396. STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
  397. unsigned cpu;
  398. for (cpu = 0; cpu < topology->ncpus; cpu++)
  399. {
  400. config->workers[topology->nworkers + cpu].arch = STARPU_CPU_WORKER;
  401. config->workers[topology->nworkers + cpu].perf_arch = STARPU_CPU_DEFAULT;
  402. config->workers[topology->nworkers + cpu].devid = cpu;
  403. config->workers[topology->nworkers + cpu].worker_mask = STARPU_CPU;
  404. config->worker_mask |= STARPU_CPU;
  405. }
  406. topology->nworkers += topology->ncpus;
  407. #endif
  408. if (topology->nworkers == 0)
  409. {
  410. _STARPU_DEBUG("No worker found, aborting ...\n");
  411. return -ENODEV;
  412. }
  413. return 0;
  414. }
/*
 * Bind workers on the different processors
 */

/* Fill topology->workers_bindid with the successive cpu identifiers workers
 * should be bound to, and reset the round-robin cursor consumed by
 * _starpu_get_next_bindid().  Sources, in order of priority: the explicit
 * list from starpu_conf, the STARPU_WORKERS_CPUID environment variable, or
 * round-robin over the detected hardware CPUs. */
static void _starpu_initialize_workers_bindid(struct _starpu_machine_config *config)
{
	char *strval;
	unsigned i;

	struct starpu_machine_topology *topology = &config->topology;

	config->current_bindid = 0;

	/* conf->workers_bindid indicates the successive cpu identifier that
	 * should be used to bind the workers. It should be either filled
	 * according to the user's explicit parameters (from starpu_conf) or
	 * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
	 * round-robin policy is used to distributed the workers over the
	 * cpus. */

	/* what do we use, explicit value, env. variable, or round-robin ? */
	if (config->user_conf && config->user_conf->use_explicit_workers_bindid)
	{
		/* we use the explicit value from the user */
		memcpy(topology->workers_bindid,
			config->user_conf->workers_bindid,
			STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else if ((strval = getenv("STARPU_WORKERS_CPUID")))
	{
		/* STARPU_WORKERS_CPUID certainly contains less entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round robin
		 * fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;

		char *endptr;
		/* we use the content of the STARPU_WORKERS_CPUID env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap) {
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					/* clamp into the valid cpu range */
					topology->workers_bindid[i] = (unsigned)(val % topology->nhwcpus);
					strval = endptr;
				}
				else {
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;

					/* there is no more values in the string */
					wrap = 1;

					topology->workers_bindid[i] = topology->workers_bindid[0];
				}
			}
			else {
				/* recycle the parsed prefix round-robin */
				topology->workers_bindid[i] = topology->workers_bindid[i % number_of_entries];
			}
		}
	}
	else
	{
		/* by default, we take a round robin policy */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
			topology->workers_bindid[i] = (unsigned)(i % topology->nhwcpus);
	}
}
  478. /* This function gets the identifier of the next cpu on which to bind a
  479. * worker. In case a list of preferred cpus was specified, we look for a an
  480. * available cpu among the list if possible, otherwise a round-robin policy is
  481. * used. */
  482. static inline int _starpu_get_next_bindid(struct _starpu_machine_config *config,
  483. int *preferred_binding, int npreferred)
  484. {
  485. struct starpu_machine_topology *topology = &config->topology;
  486. unsigned found = 0;
  487. int current_preferred;
  488. for (current_preferred = 0; current_preferred < npreferred; current_preferred++)
  489. {
  490. if (found)
  491. break;
  492. unsigned requested_cpu = preferred_binding[current_preferred];
  493. /* can we bind the worker on the requested cpu ? */
  494. unsigned ind;
  495. for (ind = config->current_bindid; ind < topology->nhwcpus; ind++)
  496. {
  497. if (topology->workers_bindid[ind] == requested_cpu)
  498. {
  499. /* the cpu is available, we use it ! In order
  500. * to make sure that it will not be used again
  501. * later on, we remove the entry from the list
  502. * */
  503. topology->workers_bindid[ind] =
  504. topology->workers_bindid[config->current_bindid];
  505. topology->workers_bindid[config->current_bindid] = requested_cpu;
  506. found = 1;
  507. break;
  508. }
  509. }
  510. }
  511. unsigned i = ((config->current_bindid++) % STARPU_NMAXWORKERS);
  512. return (int)topology->workers_bindid[i];
  513. }
/* Pin the calling thread onto cpu 'cpuid', using whichever mechanism is
 * available: hwloc, pthread_setaffinity_np, or the Windows thread-affinity
 * API.  Aborts the program if binding fails. */
void _starpu_bind_thread_on_cpu(struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED, unsigned cpuid)
{
#ifdef STARPU_HAVE_HWLOC
	int ret;
	_starpu_init_topology(config);

	hwloc_obj_t obj = hwloc_get_obj_by_depth(config->topology.hwtopology, config->cpu_depth, cpuid);
	hwloc_cpuset_t set = obj->cpuset;
	/* NOTE(review): this singlifies the topology object's own cpuset in
	 * place rather than a duplicate, permanently narrowing obj->cpuset
	 * for any later user — consider duplicating the set first (needs a
	 * hwloc_bitmap_dup/hwloc_cpuset_dup compat shim for hwloc < 1.1). */
	hwloc_bitmap_singlify(set);
	ret = hwloc_set_cpubind(config->topology.hwtopology, set, HWLOC_CPUBIND_THREAD);
	if (ret)
	{
		perror("binding thread");
		STARPU_ABORT();
	}
#elif defined(HAVE_PTHREAD_SETAFFINITY_NP)
	int ret;
	/* fix the thread on the correct cpu */
	cpu_set_t aff_mask;
	CPU_ZERO(&aff_mask);
	CPU_SET(cpuid, &aff_mask);

	pthread_t self = pthread_self();

	ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
	if (ret)
	{
		perror("binding thread");
		STARPU_ABORT();
	}
#elif defined(__MINGW32__) || defined(__CYGWIN__)
	/* NOTE(review): a DWORD mask limits this path to cpuid < 32 —
	 * presumably acceptable on these targets; confirm. */
	DWORD mask = 1 << cpuid;
	if (!SetThreadAffinityMask(GetCurrentThread(), mask)) {
		fprintf(stderr,"SetThreadMaskAffinity(%lx) failed\n", mask);
		STARPU_ABORT();
	}
#else
#warning no CPU binding support
#endif
}
  551. static void _starpu_init_workers_binding(struct _starpu_machine_config *config)
  552. {
  553. /* launch one thread per CPU */
  554. unsigned ram_memory_node;
  555. /* a single cpu is dedicated for the accelerators */
  556. int accelerator_bindid = -1;
  557. /* note that even if the CPU cpu are not used, we always have a RAM node */
  558. /* TODO : support NUMA ;) */
  559. ram_memory_node = _starpu_register_memory_node(STARPU_CPU_RAM, -1);
  560. /* We will store all the busid of the different (src, dst) combinations
  561. * in a matrix which we initialize here. */
  562. _starpu_initialize_busid_matrix();
  563. unsigned worker;
  564. for (worker = 0; worker < config->topology.nworkers; worker++)
  565. {
  566. unsigned memory_node = -1;
  567. unsigned is_a_set_of_accelerators = 0;
  568. struct _starpu_worker *workerarg = &config->workers[worker];
  569. /* Perhaps the worker has some "favourite" bindings */
  570. int *preferred_binding = NULL;
  571. int npreferred = 0;
  572. /* select the memory node that contains worker's memory */
  573. switch (workerarg->arch) {
  574. case STARPU_CPU_WORKER:
  575. /* "dedicate" a cpu cpu to that worker */
  576. is_a_set_of_accelerators = 0;
  577. memory_node = ram_memory_node;
  578. _starpu_memory_node_worker_add(ram_memory_node);
  579. break;
  580. #ifdef STARPU_USE_GORDON
  581. case STARPU_GORDON_WORKER:
  582. is_a_set_of_accelerators = 1;
  583. memory_node = ram_memory_node;
  584. _starpu_memory_node_worker_add(ram_memory_node);
  585. break;
  586. #endif
  587. #ifdef STARPU_USE_CUDA
  588. case STARPU_CUDA_WORKER:
  589. if (may_bind_automatically)
  590. {
  591. /* StarPU is allowed to bind threads automatically */
  592. preferred_binding = _starpu_get_cuda_affinity_vector(workerarg->devid);
  593. npreferred = config->topology.nhwcpus;
  594. }
  595. is_a_set_of_accelerators = 0;
  596. memory_node = _starpu_register_memory_node(STARPU_CUDA_RAM, workerarg->devid);
  597. _starpu_memory_node_worker_add(memory_node);
  598. _starpu_register_bus(0, memory_node);
  599. _starpu_register_bus(memory_node, 0);
  600. #ifdef HAVE_CUDA_MEMCPY_PEER
  601. unsigned worker2;
  602. for (worker2 = 0; worker2 < worker; worker2++)
  603. {
  604. struct _starpu_worker *workerarg = &config->workers[worker];
  605. if (workerarg->arch == STARPU_CUDA_WORKER) {
  606. unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
  607. _starpu_register_bus(memory_node2, memory_node);
  608. _starpu_register_bus(memory_node, memory_node2);
  609. }
  610. }
  611. #endif
  612. break;
  613. #endif
  614. #ifdef STARPU_USE_OPENCL
  615. case STARPU_OPENCL_WORKER:
  616. if (may_bind_automatically)
  617. {
  618. /* StarPU is allowed to bind threads automatically */
  619. preferred_binding = _starpu_get_opencl_affinity_vector(workerarg->devid);
  620. npreferred = config->topology.nhwcpus;
  621. }
  622. is_a_set_of_accelerators = 0;
  623. memory_node = _starpu_register_memory_node(STARPU_OPENCL_RAM, workerarg->devid);
  624. _starpu_memory_node_worker_add(memory_node);
  625. _starpu_register_bus(0, memory_node);
  626. _starpu_register_bus(memory_node, 0);
  627. break;
  628. #endif
  629. default:
  630. STARPU_ABORT();
  631. }
  632. if (is_a_set_of_accelerators) {
  633. if (accelerator_bindid == -1)
  634. accelerator_bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  635. workerarg->bindid = accelerator_bindid;
  636. }
  637. else {
  638. workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  639. }
  640. workerarg->memory_node = memory_node;
  641. #ifdef __GLIBC__
  642. /* Save the initial cpuset */
  643. CPU_ZERO(&workerarg->initial_cpu_set);
  644. CPU_SET(workerarg->bindid, &workerarg->initial_cpu_set);
  645. CPU_ZERO(&workerarg->current_cpu_set);
  646. CPU_SET(workerarg->bindid, &workerarg->current_cpu_set);
  647. #endif /* __GLIBC__ */
  648. #ifdef STARPU_HAVE_HWLOC
  649. /* Clear the cpu set and set the cpu */
  650. workerarg->initial_hwloc_cpu_set = hwloc_bitmap_alloc();
  651. hwloc_bitmap_only(workerarg->initial_hwloc_cpu_set, workerarg->bindid);
  652. workerarg->current_hwloc_cpu_set = hwloc_bitmap_alloc();
  653. hwloc_bitmap_only(workerarg->current_hwloc_cpu_set, workerarg->bindid);
  654. /* Put the worker descriptor in the userdata field of the hwloc object describing the CPU */
  655. hwloc_obj_t worker_obj;
  656. worker_obj = hwloc_get_obj_by_depth(config->topology.hwtopology,
  657. config->cpu_depth, workerarg->bindid);
  658. worker_obj->userdata = &config->workers[worker];
  659. #endif
  660. }
  661. }
  662. int _starpu_build_topology(struct _starpu_machine_config *config)
  663. {
  664. int ret;
  665. struct starpu_conf *user_conf = config->user_conf;
  666. ret = _starpu_init_machine_config(config, user_conf);
  667. if (ret)
  668. return ret;
  669. /* for the data management library */
  670. _starpu_init_memory_nodes();
  671. _starpu_init_workers_binding(config);
  672. return 0;
  673. }
/* Tear down everything _starpu_build_topology() set up and reset the
 * file-static state so a later starpu_init() re-discovers the topology. */
void _starpu_destroy_topology(struct _starpu_machine_config *config __attribute__ ((unused)))
{
	/* cleanup StarPU internal data structures */
	_starpu_deinit_memory_nodes();

	unsigned worker;
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
#ifdef STARPU_HAVE_HWLOC
		/* free the per-worker cpusets allocated by
		 * _starpu_init_workers_binding() */
		struct _starpu_worker *workerarg = &config->workers[worker];
		hwloc_bitmap_free(workerarg->initial_hwloc_cpu_set);
		hwloc_bitmap_free(workerarg->current_hwloc_cpu_set);
#endif
	}

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(config->topology.hwtopology);
#endif

	topology_is_initialized = 0;
#ifdef STARPU_USE_CUDA
	/* NOTE(review): the hash-table nodes are dropped without being freed
	 * (no destroy helper is called) — looks like a small one-shot leak;
	 * confirm whether an _starpu_htbl teardown function exists. */
	devices_using_cuda = NULL;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	may_bind_automatically = 0;
#endif
}