topology.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include <stdlib.h>
  18. #include <stdio.h>
  19. #include <common/config.h>
  20. #include <core/workers.h>
  21. #include <core/debug.h>
  22. #include <core/topology.h>
  23. #include <drivers/cuda/driver_cuda.h>
  24. #include <drivers/opencl/driver_opencl.h>
  25. #include <profiling/profiling.h>
  26. #include <common/uthash.h>
  27. #ifdef STARPU_HAVE_HWLOC
  28. #include <hwloc.h>
  29. #ifndef HWLOC_API_VERSION
  30. #define HWLOC_OBJ_PU HWLOC_OBJ_PROC
  31. #endif
  32. #endif
  33. #ifdef STARPU_HAVE_WINDOWS
  34. #include <windows.h>
  35. #endif
  36. #ifdef STARPU_SIMGRID
  37. #include <msg/msg.h>
  38. #include <core/simgrid.h>
  39. #endif
/* Non-zero once _starpu_init_topology() has run; reset to 0 by
 * _starpu_destroy_topology() so a later init re-discovers the machine. */
static unsigned topology_is_initialized = 0;

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)

/* Hash-table entry (uthash) recording that a given GPU device id is used. */
struct handle_entry
{
	UT_hash_handle hh;	/* makes this struct hashable by uthash */
	unsigned gpuid;		/* key: device identifier */
};

#  ifdef STARPU_USE_CUDA
/* Entry in the `devices_using_cuda' hash table. */
static struct handle_entry *devices_using_cuda;
#  endif

/* Set when no explicit GPU binding was supplied, in which case StarPU is
 * allowed to bind GPU worker threads automatically (see the affinity
 * vectors used in _starpu_init_workers_binding()). */
static unsigned may_bind_automatically = 0;

#endif // defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  53. /*
  54. * Discover the topology of the machine
  55. */
  56. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  57. static void
  58. _starpu_initialize_workers_gpuid (int *explicit_workers_gpuid,
  59. int *current, int *workers_gpuid,
  60. const char *varname, unsigned nhwgpus)
  61. {
  62. char *strval;
  63. unsigned i;
  64. *current = 0;
  65. /* conf->workers_bindid indicates the successive cpu identifier that
  66. * should be used to bind the workers. It should be either filled
  67. * according to the user's explicit parameters (from starpu_conf) or
  68. * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
  69. * round-robin policy is used to distributed the workers over the
  70. * cpus. */
  71. /* what do we use, explicit value, env. variable, or round-robin ? */
  72. if ((strval = getenv(varname)))
  73. {
  74. /* STARPU_WORKERS_CUDAID certainly contains less entries than
  75. * STARPU_NMAXWORKERS, so we reuse its entries in a round
  76. * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
  77. * 2". */
  78. unsigned wrap = 0;
  79. unsigned number_of_entries = 0;
  80. char *endptr;
  81. /* we use the content of the STARPU_WORKERS_CUDAID
  82. * env. variable */
  83. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  84. {
  85. if (!wrap)
  86. {
  87. long int val;
  88. val = strtol(strval, &endptr, 10);
  89. if (endptr != strval)
  90. {
  91. workers_gpuid[i] = (unsigned)val;
  92. strval = endptr;
  93. }
  94. else
  95. {
  96. /* there must be at least one entry */
  97. STARPU_ASSERT(i != 0);
  98. number_of_entries = i;
  99. /* there is no more values in the
  100. * string */
  101. wrap = 1;
  102. workers_gpuid[i] = workers_gpuid[0];
  103. }
  104. }
  105. else
  106. {
  107. workers_gpuid[i] =
  108. workers_gpuid[i % number_of_entries];
  109. }
  110. }
  111. }
  112. else if (explicit_workers_gpuid)
  113. {
  114. /* we use the explicit value from the user */
  115. memcpy(workers_gpuid,
  116. explicit_workers_gpuid,
  117. STARPU_NMAXWORKERS*sizeof(unsigned));
  118. }
  119. else
  120. {
  121. /* by default, we take a round robin policy */
  122. if (nhwgpus > 0)
  123. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  124. workers_gpuid[i] = (unsigned)(i % nhwgpus);
  125. /* StarPU can use sampling techniques to bind threads
  126. * correctly */
  127. may_bind_automatically = 1;
  128. }
  129. }
  130. #endif
  131. #ifdef STARPU_USE_CUDA
  132. static void
  133. _starpu_initialize_workers_cuda_gpuid (struct _starpu_machine_config *config)
  134. {
  135. struct starpu_machine_topology *topology = &config->topology;
  136. struct starpu_conf *uconf = config->conf;
  137. _starpu_initialize_workers_gpuid (
  138. uconf->use_explicit_workers_cuda_gpuid == 0
  139. ? NULL
  140. : (int *)uconf->workers_cuda_gpuid,
  141. &(config->current_cuda_gpuid),
  142. (int *)topology->workers_cuda_gpuid,
  143. "STARPU_WORKERS_CUDAID",
  144. topology->nhwcudagpus);
  145. }
  146. static inline int
  147. _starpu_get_next_cuda_gpuid (struct _starpu_machine_config *config)
  148. {
  149. unsigned i =
  150. ((config->current_cuda_gpuid++) % config->topology.ncudagpus);
  151. return (int)config->topology.workers_cuda_gpuid[i];
  152. }
  153. #endif
  154. #ifdef STARPU_USE_OPENCL
  155. static void
  156. _starpu_initialize_workers_opencl_gpuid (struct _starpu_machine_config*config)
  157. {
  158. struct starpu_machine_topology *topology = &config->topology;
  159. struct starpu_conf *uconf = config->conf;
  160. _starpu_initialize_workers_gpuid(
  161. uconf->use_explicit_workers_opencl_gpuid == 0
  162. ? NULL
  163. : (int *)uconf->workers_opencl_gpuid,
  164. &(config->current_opencl_gpuid),
  165. (int *)topology->workers_opencl_gpuid,
  166. "STARPU_WORKERS_OPENCLID",
  167. topology->nhwopenclgpus);
  168. #ifdef STARPU_USE_CUDA
  169. // Detect devices which are already used with CUDA
  170. {
  171. unsigned tmp[STARPU_NMAXWORKERS];
  172. unsigned nb=0;
  173. int i;
  174. for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
  175. {
  176. struct handle_entry *entry;
  177. int devid = config->topology.workers_opencl_gpuid[i];
  178. HASH_FIND_INT(devices_using_cuda, &devid, entry);
  179. if (entry == NULL)
  180. {
  181. tmp[nb] = topology->workers_opencl_gpuid[i];
  182. nb++;
  183. }
  184. }
  185. for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
  186. tmp[i] = -1;
  187. memcpy (topology->workers_opencl_gpuid, tmp,
  188. sizeof(unsigned)*STARPU_NMAXWORKERS);
  189. }
  190. #endif /* STARPU_USE_CUDA */
  191. {
  192. // Detect identical devices
  193. struct handle_entry *devices_already_used = NULL;
  194. unsigned tmp[STARPU_NMAXWORKERS];
  195. unsigned nb=0;
  196. int i;
  197. for(i=0 ; i<STARPU_NMAXWORKERS ; i++)
  198. {
  199. int devid = topology->workers_opencl_gpuid[i];
  200. struct handle_entry *entry;
  201. HASH_FIND_INT(devices_already_used, &devid, entry);
  202. if (entry == NULL)
  203. {
  204. struct handle_entry *entry2;
  205. entry2 = (struct handle_entry *) malloc(sizeof(*entry2));
  206. STARPU_ASSERT(entry2 != NULL);
  207. entry2->gpuid = devid;
  208. HASH_ADD_INT(devices_already_used, gpuid,
  209. entry2);
  210. tmp[nb] = devid;
  211. nb ++;
  212. }
  213. }
  214. for (i=nb ; i<STARPU_NMAXWORKERS ; i++)
  215. tmp[i] = -1;
  216. memcpy (topology->workers_opencl_gpuid, tmp,
  217. sizeof(unsigned)*STARPU_NMAXWORKERS);
  218. }
  219. }
  220. static inline int
  221. _starpu_get_next_opencl_gpuid (struct _starpu_machine_config *config)
  222. {
  223. unsigned i =
  224. ((config->current_opencl_gpuid++) % config->topology.nopenclgpus);
  225. return (int)config->topology.workers_opencl_gpuid[i];
  226. }
  227. #endif
static void
_starpu_init_topology (struct _starpu_machine_config *config)
{
	/* Discover the topology, meaning finding all the available PUs for
	   the compiled drivers. These drivers MUST have been initialized
	   before calling this function. The discovered topology is filled in
	   CONFIG. */

	struct starpu_machine_topology *topology = &config->topology;

	/* Discovery is process-global: do it only once. */
	if (topology_is_initialized)
		return;

#ifdef STARPU_SIMGRID
	/* In simulation mode the "hardware" is whatever the user's
	 * configuration requests (at least one CPU). */
	struct starpu_conf *conf = config->conf;
	topology->nhwcpus = conf->ncpus?conf->ncpus:1;
	topology->nhwcudagpus = conf->ncuda;
	topology->nhwopenclgpus = conf->nopencl;
#else
	topology->nhwcpus = 0;
#ifdef STARPU_HAVE_HWLOC
	/* Build the hwloc view of the machine; it is destroyed in
	 * _starpu_destroy_topology(). */
	hwloc_topology_init(&topology->hwtopology);
	hwloc_topology_load(topology->hwtopology);
#endif

	/* Let each compiled driver count its own devices. */
	_starpu_cpu_discover_devices(config);
	_starpu_cuda_discover_devices(config);
	_starpu_opencl_discover_devices(config);
#endif

	topology_is_initialized = 1;
}
  255. /*
  256. * Bind workers on the different processors
  257. */
/* Fill topology->workers_bindid with the cpu each worker should be bound
 * to, from the STARPU_WORKERS_CPUID env. variable, the explicit list in
 * starpu_conf, or round robin over the hardware cpus. */
static void
_starpu_initialize_workers_bindid (struct _starpu_machine_config *config)
{
	char *strval;
	unsigned i;

	struct starpu_machine_topology *topology = &config->topology;

	config->current_bindid = 0;

	/* conf->workers_bindid indicates the successive cpu identifier that
	 * should be used to bind the workers. It should be either filled
	 * according to the user's explicit parameters (from starpu_conf) or
	 * according to the STARPU_WORKERS_CPUID env. variable. Otherwise, a
	 * round-robin policy is used to distributed the workers over the
	 * cpus. */

	/* what do we use, explicit value, env. variable, or round-robin ? */
	if ((strval = getenv("STARPU_WORKERS_CPUID")))
	{
		/* STARPU_WORKERS_CPUID certainly contains less entries than
		 * STARPU_NMAXWORKERS, so we reuse its entries in a round
		 * robin fashion: "1 2" is equivalent to "1 2 1 2 1 2 .... 1
		 * 2". */
		unsigned wrap = 0;
		unsigned number_of_entries = 0;

		char *endptr;
		/* we use the content of the STARPU_WORKERS_CPUID
		 * env. variable */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
		{
			if (!wrap)
			{
				long int val;
				val = strtol(strval, &endptr, 10);
				if (endptr != strval)
				{
					/* Wrap cpu ids into the number of
					 * hardware cpus. */
					topology->workers_bindid[i] =
						(unsigned)(val % topology->nhwcpus);
					strval = endptr;
				}
				else
				{
					/* there must be at least one entry */
					STARPU_ASSERT(i != 0);
					number_of_entries = i;

					/* there is no more values in the
					 * string */
					wrap = 1;

					topology->workers_bindid[i] =
						topology->workers_bindid[0];
				}
			}
			else
			{
				topology->workers_bindid[i] =
					topology->workers_bindid[i % number_of_entries];
			}
		}
	}
	else if (config->conf->use_explicit_workers_bindid)
	{
		/* we use the explicit value from the user */
		memcpy(topology->workers_bindid,
			config->conf->workers_bindid,
			STARPU_NMAXWORKERS*sizeof(unsigned));
	}
	else
	{
		/* by default, we take a round robin policy */
		for (i = 0; i < STARPU_NMAXWORKERS; i++)
			topology->workers_bindid[i] =
				(unsigned)(i % topology->nhwcpus);
	}
}
  329. /* This function gets the identifier of the next cpu on which to bind a
  330. * worker. In case a list of preferred cpus was specified, we look for a an
  331. * available cpu among the list if possible, otherwise a round-robin policy is
  332. * used. */
/* This function gets the identifier of the next cpu on which to bind a
 * worker. In case a list of preferred cpus was specified, we look for an
 * available cpu among the list if possible, otherwise a round-robin policy
 * is used. */
static inline int
_starpu_get_next_bindid (struct _starpu_machine_config *config,
			 int *preferred_binding, int npreferred)
{
	struct starpu_machine_topology *topology = &config->topology;

	unsigned found = 0;
	int current_preferred;

	/* If one of the preferred cpus still appears at or after
	 * current_bindid in the binding list, swap it to the front so the
	 * round-robin pick below returns it. */
	for (current_preferred = 0;
	     current_preferred < npreferred;
	     current_preferred++)
	{
		if (found)
			break;

		unsigned requested_cpu = preferred_binding[current_preferred];

		/* can we bind the worker on the requested cpu ? */
		unsigned ind;
		for (ind = config->current_bindid;
		     ind < topology->nhwcpus;
		     ind++)
		{
			if (topology->workers_bindid[ind] == requested_cpu)
			{
				/* the cpu is available, we use it ! In order
				 * to make sure that it will not be used again
				 * later on, we remove the entry from the
				 * list */
				topology->workers_bindid[ind] =
					topology->workers_bindid[config->current_bindid];
				topology->workers_bindid[config->current_bindid] = requested_cpu;

				found = 1;

				break;
			}
		}
	}

	/* Round-robin pick (the preferred cpu, if found, is now at
	 * current_bindid). */
	unsigned i = ((config->current_bindid++) % STARPU_NMAXWORKERS);

	return (int)topology->workers_bindid[i];
}
/* Return the number of hardware cpus.  The GPU runtimes are initialized
 * first because _starpu_init_topology() requires the drivers to be up
 * before it can discover devices. */
unsigned
_starpu_topology_get_nhwcpu (struct _starpu_machine_config *config)
{
#ifdef STARPU_USE_OPENCL
	_starpu_opencl_init();
#endif
#ifdef STARPU_USE_CUDA
	_starpu_init_cuda();
#endif
	_starpu_init_topology(config);

	return config->topology.nhwcpus;
}
  382. static int
  383. _starpu_init_machine_config (struct _starpu_machine_config *config)
  384. {
  385. int i;
  386. for (i = 0; i < STARPU_NMAXWORKERS; i++)
  387. config->workers[i].workerid = i;
  388. struct starpu_machine_topology *topology = &config->topology;
  389. topology->nworkers = 0;
  390. topology->ncombinedworkers = 0;
  391. #ifdef STARPU_USE_OPENCL
  392. _starpu_opencl_init();
  393. #endif
  394. #ifdef STARPU_USE_CUDA
  395. _starpu_init_cuda();
  396. #endif
  397. _starpu_init_topology(config);
  398. _starpu_initialize_workers_bindid(config);
  399. #ifdef STARPU_USE_CUDA
  400. int ncuda = config->conf->ncuda;
  401. #ifndef STARPU_SIMGRID
  402. if (ncuda != 0)
  403. {
  404. /* The user did not disable CUDA. We need to initialize CUDA
  405. * early to count the number of devices */
  406. _starpu_init_cuda();
  407. int nb_devices = _starpu_get_cuda_device_count();
  408. if (ncuda == -1)
  409. {
  410. /* Nothing was specified, so let's choose ! */
  411. ncuda = nb_devices;
  412. }
  413. else
  414. {
  415. if (ncuda > nb_devices)
  416. {
  417. /* The user requires more CUDA devices than
  418. * there is available */
  419. _STARPU_DISP("Warning: %d CUDA devices requested. Only %d available.\n", ncuda, nb_devices);
  420. ncuda = nb_devices;
  421. }
  422. }
  423. }
  424. #endif
  425. /* Now we know how many CUDA devices will be used */
  426. topology->ncudagpus = ncuda;
  427. STARPU_ASSERT(topology->ncudagpus <= STARPU_MAXCUDADEVS);
  428. _starpu_initialize_workers_cuda_gpuid(config);
  429. unsigned cudagpu;
  430. for (cudagpu = 0; cudagpu < topology->ncudagpus; cudagpu++)
  431. {
  432. int worker_idx = topology->nworkers + cudagpu;
  433. config->workers[worker_idx].arch = STARPU_CUDA_WORKER;
  434. int devid = _starpu_get_next_cuda_gpuid(config);
  435. enum starpu_perf_archtype arch =
  436. (enum starpu_perf_archtype)((int)STARPU_CUDA_DEFAULT + devid);
  437. config->workers[worker_idx].devid = devid;
  438. config->workers[worker_idx].perf_arch = arch;
  439. config->workers[worker_idx].worker_mask = STARPU_CUDA;
  440. config->worker_mask |= STARPU_CUDA;
  441. struct handle_entry *entry;
  442. entry = (struct handle_entry *) malloc(sizeof(*entry));
  443. STARPU_ASSERT(entry != NULL);
  444. entry->gpuid = devid;
  445. HASH_ADD_INT(devices_using_cuda, gpuid, entry);
  446. }
  447. topology->nworkers += topology->ncudagpus;
  448. #endif
  449. #ifdef STARPU_USE_OPENCL
  450. int nopencl = config->conf->nopencl;
  451. #ifndef STARPU_SIMGRID
  452. if (nopencl != 0)
  453. {
  454. /* The user did not disable OPENCL. We need to initialize
  455. * OpenCL early to count the number of devices */
  456. _starpu_opencl_init();
  457. int nb_devices;
  458. nb_devices = _starpu_opencl_get_device_count();
  459. if (nopencl == -1)
  460. {
  461. /* Nothing was specified, so let's choose ! */
  462. nopencl = nb_devices;
  463. if (nopencl > STARPU_MAXOPENCLDEVS)
  464. {
  465. _STARPU_DISP("Warning: %d OpenCL devices available. Only %d enabled. Use configure option --enable-maxopencldadev=xxx to update the maximum value of supported OpenCL devices.\n", nb_devices, STARPU_MAXOPENCLDEVS);
  466. nopencl = STARPU_MAXOPENCLDEVS;
  467. }
  468. }
  469. else
  470. {
  471. /* Let's make sure this value is OK. */
  472. if (nopencl > nb_devices)
  473. {
  474. /* The user requires more OpenCL devices than
  475. * there is available */
  476. _STARPU_DISP("Warning: %d OpenCL devices requested. Only %d available.\n", nopencl, nb_devices);
  477. nopencl = nb_devices;
  478. }
  479. /* Let's make sure this value is OK. */
  480. if (nopencl > STARPU_MAXOPENCLDEVS)
  481. {
  482. _STARPU_DISP("Warning: %d OpenCL devices requested. Only %d enabled. Use configure option --enable-maxopencldev=xxx to update the maximum value of supported OpenCL devices.\n", nopencl, STARPU_MAXOPENCLDEVS);
  483. nopencl = STARPU_MAXOPENCLDEVS;
  484. }
  485. }
  486. }
  487. #endif
  488. topology->nopenclgpus = nopencl;
  489. STARPU_ASSERT(topology->nopenclgpus + topology->nworkers <= STARPU_NMAXWORKERS);
  490. _starpu_initialize_workers_opencl_gpuid(config);
  491. unsigned openclgpu;
  492. for (openclgpu = 0; openclgpu < topology->nopenclgpus; openclgpu++)
  493. {
  494. int worker_idx = topology->nworkers + openclgpu;
  495. int devid = _starpu_get_next_opencl_gpuid(config);
  496. if (devid == -1)
  497. { // There is no more devices left
  498. topology->nopenclgpus = openclgpu;
  499. break;
  500. }
  501. config->workers[worker_idx].arch = STARPU_OPENCL_WORKER;
  502. enum starpu_perf_archtype arch =
  503. (enum starpu_perf_archtype)((int)STARPU_OPENCL_DEFAULT + devid);
  504. config->workers[worker_idx].devid = devid;
  505. config->workers[worker_idx].perf_arch = arch;
  506. config->workers[worker_idx].worker_mask = STARPU_OPENCL;
  507. config->worker_mask |= STARPU_OPENCL;
  508. }
  509. topology->nworkers += topology->nopenclgpus;
  510. #endif
  511. #ifdef STARPU_USE_GORDON
  512. int ngordon = config->conf->ngordon;
  513. if (ngordon != 0)
  514. {
  515. if (ngordon == -1)
  516. {
  517. /* Nothing was specified, so let's choose ! */
  518. ngordon = spe_cpu_info_get(SPE_COUNT_USABLE_SPES, -1);
  519. }
  520. else
  521. {
  522. STARPU_ASSERT(ngordon <= NMAXGORDONSPUS);
  523. if (ngordon > STARPU_MAXGORDONSPUS);
  524. {
  525. _STARPU_DISP("Warning: %d Gordon CPUs devices requested. Only %d supported\n", ngordon, NMAXGORDONSPUS);
  526. ngordon = NMAXGORDONSPUS;
  527. }
  528. }
  529. }
  530. topology->ngordon_spus = ngordon;
  531. STARPU_ASSERT(topology->ngordon_spus + topology->nworkers <= STARPU_NMAXWORKERS);
  532. unsigned spu;
  533. for (spu = 0; spu < config->ngordon_spus; spu++)
  534. {
  535. int worker_idx = topology->nworkers + spu;
  536. config->workers[worker_idx].arch = STARPU_GORDON_WORKER;
  537. config->workers[worker_idx].perf_arch = STARPU_GORDON_DEFAULT;
  538. config->workers[worker_idx].id = spu;
  539. config->workers[worker_idx].worker_is_running = 0;
  540. config->workers[worker_idx].worker_mask = STARPU_GORDON;
  541. config->worker_mask |= STARPU_GORDON;
  542. }
  543. topology->nworkers += topology->ngordon_spus;
  544. #endif
  545. /* we put the CPU section after the accelerator : in case there was an
  546. * accelerator found, we devote one cpu */
  547. #ifdef STARPU_USE_CPU
  548. int ncpu = config->conf->ncpus;
  549. if (ncpu != 0)
  550. {
  551. if (ncpu == -1)
  552. {
  553. unsigned already_busy_cpus =
  554. (topology->ngordon_spus ? 1 : 0) + topology->ncudagpus + topology->nopenclgpus;
  555. long avail_cpus = topology->nhwcpus - already_busy_cpus;
  556. if (avail_cpus < 0)
  557. avail_cpus = 0;
  558. ncpu = STARPU_MIN(avail_cpus, STARPU_MAXCPUS);
  559. }
  560. else
  561. {
  562. if (ncpu > STARPU_MAXCPUS)
  563. {
  564. _STARPU_DISP("Warning: %d CPU devices requested. Only %d enabled. Use configure option --enable-maxcpus=xxx to update the maximum value of supported CPU devices.\n", ncpu, STARPU_MAXCPUS);
  565. ncpu = STARPU_MAXCPUS;
  566. }
  567. }
  568. }
  569. topology->ncpus = ncpu;
  570. STARPU_ASSERT(topology->ncpus + topology->nworkers <= STARPU_NMAXWORKERS);
  571. unsigned cpu;
  572. for (cpu = 0; cpu < topology->ncpus; cpu++)
  573. {
  574. int worker_idx = topology->nworkers + cpu;
  575. config->workers[worker_idx].arch = STARPU_CPU_WORKER;
  576. config->workers[worker_idx].perf_arch = STARPU_CPU_DEFAULT;
  577. config->workers[worker_idx].devid = cpu;
  578. config->workers[worker_idx].worker_mask = STARPU_CPU;
  579. config->worker_mask |= STARPU_CPU;
  580. }
  581. topology->nworkers += topology->ncpus;
  582. #endif
  583. if (topology->nworkers == 0)
  584. {
  585. _STARPU_DEBUG("No worker found, aborting ...\n");
  586. return -ENODEV;
  587. }
  588. return 0;
  589. }
  590. void
  591. _starpu_bind_thread_on_cpu (
  592. struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED,
  593. unsigned cpuid)
  594. {
  595. #ifdef STARPU_SIMGRID
  596. return;
  597. #endif
  598. if (starpu_get_env_number("STARPU_WORKERS_NOBIND") > 0)
  599. return;
  600. #ifdef STARPU_HAVE_HWLOC
  601. const struct hwloc_topology_support *support;
  602. #ifdef STARPU_USE_OPENCL
  603. _starpu_opencl_init();
  604. #endif
  605. #ifdef STARPU_USE_CUDA
  606. _starpu_init_cuda();
  607. #endif
  608. _starpu_init_topology(config);
  609. support = hwloc_topology_get_support (config->topology.hwtopology);
  610. if (support->cpubind->set_thisthread_cpubind)
  611. {
  612. hwloc_obj_t obj =
  613. hwloc_get_obj_by_depth (config->topology.hwtopology,
  614. config->cpu_depth, cpuid);
  615. hwloc_bitmap_t set = obj->cpuset;
  616. int ret;
  617. hwloc_bitmap_singlify(set);
  618. ret = hwloc_set_cpubind (config->topology.hwtopology, set,
  619. HWLOC_CPUBIND_THREAD);
  620. if (ret)
  621. {
  622. perror("binding thread");
  623. STARPU_ABORT();
  624. }
  625. }
  626. #elif defined(HAVE_PTHREAD_SETAFFINITY_NP) && defined(__linux__)
  627. int ret;
  628. /* fix the thread on the correct cpu */
  629. cpu_set_t aff_mask;
  630. CPU_ZERO(&aff_mask);
  631. CPU_SET(cpuid, &aff_mask);
  632. pthread_t self = pthread_self();
  633. ret = pthread_setaffinity_np(self, sizeof(aff_mask), &aff_mask);
  634. if (ret)
  635. {
  636. perror("binding thread");
  637. STARPU_ABORT();
  638. }
  639. #elif defined(__MINGW32__) || defined(__CYGWIN__)
  640. DWORD mask = 1 << cpuid;
  641. if (!SetThreadAffinityMask(GetCurrentThread(), mask))
  642. {
  643. _STARPU_ERROR("SetThreadMaskAffinity(%lx) failed\n", mask);
  644. }
  645. #else
  646. #warning no CPU binding support
  647. #endif
  648. }
  649. void
  650. _starpu_bind_thread_on_cpus (
  651. struct _starpu_machine_config *config STARPU_ATTRIBUTE_UNUSED,
  652. struct _starpu_combined_worker *combined_worker STARPU_ATTRIBUTE_UNUSED)
  653. {
  654. #ifdef STARPU_SIMGRID
  655. return;
  656. #endif
  657. #ifdef STARPU_HAVE_HWLOC
  658. const struct hwloc_topology_support *support;
  659. #ifdef STARPU_USE_OPENC
  660. _starpu_opencl_init();
  661. #endif
  662. #ifdef STARPU_USE_CUDA
  663. _starpu_init_cuda();
  664. #endif
  665. _starpu_init_topology(config);
  666. support = hwloc_topology_get_support(config->topology.hwtopology);
  667. if (support->cpubind->set_thisthread_cpubind)
  668. {
  669. hwloc_bitmap_t set = combined_worker->hwloc_cpu_set;
  670. int ret;
  671. ret = hwloc_set_cpubind (config->topology.hwtopology, set,
  672. HWLOC_CPUBIND_THREAD);
  673. if (ret)
  674. {
  675. perror("binding thread");
  676. STARPU_ABORT();
  677. }
  678. }
  679. #else
  680. #warning no parallel worker CPU binding support
  681. #endif
  682. }
  683. static void
  684. _starpu_init_workers_binding (struct _starpu_machine_config *config)
  685. {
  686. /* launch one thread per CPU */
  687. unsigned ram_memory_node;
  688. /* a single cpu is dedicated for the accelerators */
  689. int accelerator_bindid = -1;
  690. /* note that even if the CPU cpu are not used, we always have a RAM
  691. * node */
  692. /* TODO : support NUMA ;) */
  693. ram_memory_node = _starpu_register_memory_node(STARPU_CPU_RAM, -1);
  694. #ifdef STARPU_SIMGRID
  695. char name[16];
  696. xbt_dynar_t hosts = MSG_hosts_as_dynar();
  697. msg_host_t host = MSG_get_host_by_name("RAM");
  698. STARPU_ASSERT(host);
  699. _starpu_simgrid_memory_node_set_host(0, host);
  700. #endif
  701. /* We will store all the busid of the different (src, dst)
  702. * combinations in a matrix which we initialize here. */
  703. _starpu_initialize_busid_matrix();
  704. unsigned worker;
  705. for (worker = 0; worker < config->topology.nworkers; worker++)
  706. {
  707. unsigned memory_node = -1;
  708. unsigned is_a_set_of_accelerators = 0;
  709. struct _starpu_worker *workerarg = &config->workers[worker];
  710. /* Perhaps the worker has some "favourite" bindings */
  711. int *preferred_binding = NULL;
  712. int npreferred = 0;
  713. /* select the memory node that contains worker's memory */
  714. switch (workerarg->arch)
  715. {
  716. case STARPU_CPU_WORKER:
  717. /* "dedicate" a cpu cpu to that worker */
  718. is_a_set_of_accelerators = 0;
  719. memory_node = ram_memory_node;
  720. _starpu_memory_node_worker_add(ram_memory_node);
  721. break;
  722. #ifdef STARPU_USE_GORDON
  723. case STARPU_GORDON_WORKER:
  724. is_a_set_of_accelerators = 1;
  725. memory_node = ram_memory_node;
  726. _starpu_memory_node_worker_add(ram_memory_node);
  727. break;
  728. #endif
  729. #ifdef STARPU_USE_CUDA
  730. case STARPU_CUDA_WORKER:
  731. if (may_bind_automatically)
  732. {
  733. /* StarPU is allowed to bind threads automatically */
  734. preferred_binding = _starpu_get_cuda_affinity_vector(workerarg->devid);
  735. npreferred = config->topology.nhwcpus;
  736. }
  737. is_a_set_of_accelerators = 0;
  738. memory_node = _starpu_register_memory_node(STARPU_CUDA_RAM, workerarg->devid);
  739. #ifdef STARPU_SIMGRID
  740. snprintf(name, sizeof(name), "CUDA%d", workerarg->devid);
  741. host = MSG_get_host_by_name(name);
  742. STARPU_ASSERT(host);
  743. _starpu_simgrid_memory_node_set_host(memory_node, host);
  744. #endif
  745. _starpu_memory_node_worker_add(memory_node);
  746. _starpu_register_bus(0, memory_node);
  747. _starpu_register_bus(memory_node, 0);
  748. #ifdef HAVE_CUDA_MEMCPY_PEER
  749. unsigned worker2;
  750. for (worker2 = 0; worker2 < worker; worker2++)
  751. {
  752. struct _starpu_worker *workerarg = &config->workers[worker];
  753. if (workerarg->arch == STARPU_CUDA_WORKER)
  754. {
  755. unsigned memory_node2 = starpu_worker_get_memory_node(worker2);
  756. _starpu_register_bus(memory_node2, memory_node);
  757. _starpu_register_bus(memory_node, memory_node2);
  758. }
  759. }
  760. #endif
  761. break;
  762. #endif
  763. #ifdef STARPU_USE_OPENCL
  764. case STARPU_OPENCL_WORKER:
  765. if (may_bind_automatically)
  766. {
  767. /* StarPU is allowed to bind threads automatically */
  768. preferred_binding = _starpu_get_opencl_affinity_vector(workerarg->devid);
  769. npreferred = config->topology.nhwcpus;
  770. }
  771. is_a_set_of_accelerators = 0;
  772. memory_node = _starpu_register_memory_node(STARPU_OPENCL_RAM, workerarg->devid);
  773. #ifdef STARPU_SIMGRID
  774. snprintf(name, sizeof(name), "OpenCL%d", workerarg->devid);
  775. host = MSG_get_host_by_name(name);
  776. STARPU_ASSERT(host);
  777. _starpu_simgrid_memory_node_set_host(memory_node, host);
  778. #endif
  779. _starpu_memory_node_worker_add(memory_node);
  780. _starpu_register_bus(0, memory_node);
  781. _starpu_register_bus(memory_node, 0);
  782. break;
  783. #endif
  784. default:
  785. STARPU_ABORT();
  786. }
  787. if (is_a_set_of_accelerators)
  788. {
  789. if (accelerator_bindid == -1)
  790. accelerator_bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  791. workerarg->bindid = accelerator_bindid;
  792. }
  793. else
  794. {
  795. workerarg->bindid = _starpu_get_next_bindid(config, preferred_binding, npreferred);
  796. }
  797. workerarg->memory_node = memory_node;
  798. #ifdef __GLIBC__
  799. /* Save the initial cpuset */
  800. CPU_ZERO(&workerarg->initial_cpu_set);
  801. CPU_SET(workerarg->bindid, &workerarg->initial_cpu_set);
  802. CPU_ZERO(&workerarg->current_cpu_set);
  803. CPU_SET(workerarg->bindid, &workerarg->current_cpu_set);
  804. #endif /* __GLIBC__ */
  805. #ifdef STARPU_HAVE_HWLOC
  806. /* Put the worker descriptor in the userdata field of the
  807. * hwloc object describing the CPU */
  808. hwloc_obj_t worker_obj;
  809. worker_obj =
  810. hwloc_get_obj_by_depth (config->topology.hwtopology,
  811. config->cpu_depth,
  812. workerarg->bindid);
  813. worker_obj->userdata = &config->workers[worker];
  814. /* Clear the cpu set and set the cpu */
  815. workerarg->initial_hwloc_cpu_set =
  816. hwloc_bitmap_dup (worker_obj->cpuset);
  817. workerarg->current_hwloc_cpu_set =
  818. hwloc_bitmap_dup (worker_obj->cpuset);
  819. #endif
  820. }
  821. #ifdef STARPU_SIMGRID
  822. xbt_dynar_free(&hosts);
  823. #endif
  824. }
  825. int
  826. _starpu_build_topology (struct _starpu_machine_config *config)
  827. {
  828. int ret;
  829. ret = _starpu_init_machine_config(config);
  830. if (ret)
  831. return ret;
  832. /* for the data management library */
  833. _starpu_init_memory_nodes();
  834. _starpu_init_workers_binding(config);
  835. return 0;
  836. }
/* Tear down everything _starpu_build_topology() set up, so that StarPU
 * can be re-initialized afterwards. */
void
_starpu_destroy_topology (
	struct _starpu_machine_config *config __attribute__ ((unused)))
{
	/* cleanup StarPU internal data structures */
	_starpu_deinit_memory_nodes();

	unsigned worker;

	/* Release the per-worker cpusets duplicated at binding time. */
	for (worker = 0; worker < config->topology.nworkers; worker++)
	{
#ifdef STARPU_HAVE_HWLOC
		struct _starpu_worker *workerarg = &config->workers[worker];
		hwloc_bitmap_free(workerarg->initial_hwloc_cpu_set);
		hwloc_bitmap_free(workerarg->current_hwloc_cpu_set);
#endif
	}

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(config->topology.hwtopology);
#endif

	/* Allow a later _starpu_init_topology() to re-discover the machine. */
	topology_is_initialized = 0;
#ifdef STARPU_USE_CUDA
	/* Empty the `devices_using_cuda' hash table. */
	struct handle_entry *entry, *tmp;
	HASH_ITER(hh, devices_using_cuda, entry, tmp)
	{
		HASH_DEL(devices_using_cuda, entry);
		free(entry);
	}
	devices_using_cuda = NULL;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	may_bind_automatically = 0;
#endif
}
  869. void
  870. starpu_topology_print (FILE *output)
  871. {
  872. struct _starpu_machine_config *config = _starpu_get_machine_config();
  873. struct starpu_machine_topology *topology = &config->topology;
  874. unsigned core;
  875. unsigned worker;
  876. unsigned nworkers = starpu_worker_get_count();
  877. unsigned ncombinedworkers = topology->ncombinedworkers;
  878. for (core = 0; core < topology->nhwcpus; core++) {
  879. fprintf(output, "core %u\t", core);
  880. for (worker = 0;
  881. worker < nworkers + ncombinedworkers;
  882. worker++)
  883. {
  884. if (worker < nworkers)
  885. {
  886. if (topology->workers_bindid[worker] == core)
  887. {
  888. char name[256];
  889. starpu_worker_get_name (worker, name,
  890. sizeof(name));
  891. fprintf(output, "%s\t", name);
  892. }
  893. }
  894. else
  895. {
  896. int worker_size, i;
  897. int *combined_workerid;
  898. starpu_combined_worker_get_description(worker, &worker_size, &combined_workerid);
  899. for (i = 0; i < worker_size; i++)
  900. {
  901. if (topology->workers_bindid[combined_workerid[i]] == core)
  902. fprintf(output, "comb %u\t", worker-nworkers);
  903. }
  904. }
  905. }
  906. fprintf(output, "\n");
  907. }
  908. }