  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. * Copyright (C) 2016 Uppsala University
  5. * Copyright (C) 2020,2021 Federal University of Rio Grande do Sul (UFRGS)
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. /*! \page ExecutionConfigurationThroughEnvironmentVariables Execution Configuration Through Environment Variables
  19. The behavior of the StarPU library and tools may be tuned thanks to
  20. the following environment variables.
  21. \section EnvConfiguringWorkers Configuring Workers
  22. \subsection Basic General Configuration
  23. <dl>
  24. <dt>STARPU_WORKERS_NOBIND</dt>
  25. <dd>
  26. \anchor STARPU_WORKERS_NOBIND
  27. \addindex __env__STARPU_WORKERS_NOBIND
  28. Setting it to non-zero will prevent StarPU from binding its threads to
  29. CPUs. This is for instance useful when running the testsuite in parallel.
  30. </dd>
  31. <dt>STARPU_WORKERS_GETBIND</dt>
  32. <dd>
  33. \anchor STARPU_WORKERS_GETBIND
  34. \addindex __env__STARPU_WORKERS_GETBIND
  35. Setting it to non-zero makes StarPU use the OS-provided CPU binding to determine
  36. how many and which CPU cores it should use. This is notably useful when running
  37. several StarPU-MPI processes on the same host, to let the MPI launcher set the
  38. CPUs to be used.
  39. </dd>
  40. <dt>STARPU_WORKERS_CPUID</dt>
  41. <dd>
  42. \anchor STARPU_WORKERS_CPUID
  43. \addindex __env__STARPU_WORKERS_CPUID
  44. Passing an array of integers in \ref STARPU_WORKERS_CPUID
  45. specifies on which logical CPU the different workers should be
  46. bound. For instance, if <c>STARPU_WORKERS_CPUID = "0 1 4 5"</c>, the first
  47. worker will be bound to logical CPU #0, the second CPU worker will be bound to
  48. logical CPU #1 and so on. Note that the logical ordering of the CPUs is either
  49. determined by the OS, or provided by the library <c>hwloc</c> in case it is
  50. available. Ranges can be provided: for instance, <c>STARPU_WORKERS_CPUID = "1-3
  51. 5"</c> will bind the first three workers on logical CPUs #1, #2, and #3, and the
  52. fourth worker on logical CPU #5. Unbound ranges can also be provided:
  53. <c>STARPU_WORKERS_CPUID = "1-"</c> will bind the workers starting from logical
  54. CPU #1 up to last CPU.
  55. Note that the first workers correspond to the CUDA workers, then come the
  56. OpenCL workers, and finally the CPU workers. For example if
  57. we have <c>STARPU_NCUDA=1</c>, <c>STARPU_NOPENCL=1</c>, <c>STARPU_NCPU=2</c>
  58. and <c>STARPU_WORKERS_CPUID = "0 2 1 3"</c>, the CUDA device will be controlled
  59. by logical CPU #0, the OpenCL device will be controlled by logical CPU #2, and
  60. the logical CPUs #1 and #3 will be used by the CPU workers.
  61. If the number of workers is larger than the array given in
  62. \ref STARPU_WORKERS_CPUID, the workers are bound to the logical CPUs in a
  63. round-robin fashion: if <c>STARPU_WORKERS_CPUID = "0 1"</c>, the first
  64. and the third (resp. second and fourth) workers will be put on CPU #0
  65. (resp. CPU #1).
  66. This variable is ignored if the field
  67. starpu_conf::use_explicit_workers_bindid passed to starpu_init() is
  68. set.
  69. </dd>
  70. <dt>STARPU_WORKERS_COREID</dt>
  71. <dd>
  72. \anchor STARPU_WORKERS_COREID
  73. \addindex __env__STARPU_WORKERS_COREID
  74. Same as \ref STARPU_WORKERS_CPUID, but bind the workers to cores instead of PUs
  75. (hyperthreads).
  76. </dd>
  77. <dt>STARPU_MAIN_THREAD_BIND</dt>
  78. <dd>
  79. \anchor STARPU_MAIN_THREAD_BIND
  80. \addindex __env__STARPU_MAIN_THREAD_BIND
  81. When defined, this makes StarPU bind the thread that calls starpu_initialize() to
  82. a reserved CPU, subtracted from the CPU workers.
  83. </dd>
  84. <dt>STARPU_MAIN_THREAD_CPUID</dt>
  85. <dd>
  86. \anchor STARPU_MAIN_THREAD_CPUID
  87. \addindex __env__STARPU_MAIN_THREAD_CPUID
  88. When defined, this makes StarPU bind the thread that calls starpu_initialize() to
  89. the given CPU ID.
  90. </dd>
  91. <dt>STARPU_MAIN_THREAD_COREID</dt>
  92. <dd>
  93. \anchor STARPU_MAIN_THREAD_COREID
  94. \addindex __env__STARPU_MAIN_THREAD_COREID
  95. Same as \ref STARPU_MAIN_THREAD_CPUID, but bind the thread that calls
  96. starpu_initialize() to the given core, instead of the PU (hyperthread).
  97. </dd>
  98. <dt>STARPU_WORKER_TREE</dt>
  99. <dd>
  100. \anchor STARPU_WORKER_TREE
  101. \addindex __env__STARPU_WORKER_TREE
  102. Define to 1 to enable the tree iterator in schedulers.
  103. </dd>
  104. <dt>STARPU_SINGLE_COMBINED_WORKER</dt>
  105. <dd>
  106. \anchor STARPU_SINGLE_COMBINED_WORKER
  107. \addindex __env__STARPU_SINGLE_COMBINED_WORKER
  108. If set, StarPU will create several workers which won't be able to work
  109. concurrently. It will by default create combined workers whose size goes from 1
  110. to the total number of CPU workers in the system. \ref STARPU_MIN_WORKERSIZE
  111. and \ref STARPU_MAX_WORKERSIZE can be used to change this default.
  112. </dd>
  113. <dt>STARPU_MIN_WORKERSIZE</dt>
  114. <dd>
  115. \anchor STARPU_MIN_WORKERSIZE
  116. \addindex __env__STARPU_MIN_WORKERSIZE
  117. Specify the minimum size of the combined workers. Default value is 2.
  118. </dd>
  119. <dt>STARPU_MAX_WORKERSIZE</dt>
  120. <dd>
  121. \anchor STARPU_MAX_WORKERSIZE
  122. \addindex __env__STARPU_MAX_WORKERSIZE
  123. Specify the maximum size of the combined workers. Default value is the
  124. number of CPU workers in the system.
  125. </dd>
  126. <dt>STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER</dt>
  127. <dd>
  128. \anchor STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER
  129. \addindex __env__STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER
  130. Specify how many elements are allowed between combined workers
  131. created from \c hwloc information. For instance, in the case of sockets with 6
  132. cores without shared L2 caches, if \ref STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER is
  133. set to 6, no combined worker will be synthesized beyond one for the socket
  134. and one per core. If it is set to 3, 3 intermediate combined workers will be
  135. synthesized, to divide the socket cores into 3 chunks of 2 cores. If it is set to
  136. 2, 2 intermediate combined workers will be synthesized, to divide the socket
  137. cores into 2 chunks of 3 cores, and then 3 additional combined workers will be
  138. synthesized, to divide the former synthesized workers into a bunch of 2 cores,
  139. and the remaining core (for which no combined worker is synthesized since there
  140. is already a normal worker for it).
  141. The default, 2, thus makes StarPU tend to build binary trees of combined
  142. workers.
  143. </dd>
  144. <dt>STARPU_DISABLE_ASYNCHRONOUS_COPY</dt>
  145. <dd>
  146. \anchor STARPU_DISABLE_ASYNCHRONOUS_COPY
  147. \addindex __env__STARPU_DISABLE_ASYNCHRONOUS_COPY
  148. Disable asynchronous copies between CPU and GPU devices.
  149. The AMD implementation of OpenCL is known to
  150. fail when copying data asynchronously. When using this implementation,
  151. it is therefore necessary to disable asynchronous data transfers.
  152. See also \ref STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY and \ref
  153. STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY.
  154. </dd>
  155. <dt>STARPU_DISABLE_PINNING</dt>
  156. <dd>
  157. \anchor STARPU_DISABLE_PINNING
  158. \addindex __env__STARPU_DISABLE_PINNING
  159. Disable (1) or Enable (0) pinning host memory allocated through starpu_malloc(), starpu_memory_pin()
  160. and friends. The default is Enabled.
  161. This permits to test the performance effect of memory pinning.
  162. </dd>
  163. <dt>STARPU_BACKOFF_MIN</dt>
  164. <dd>
  165. \anchor STARPU_BACKOFF_MIN
  166. \addindex __env__STARPU_BACKOFF_MIN
  167. Set minimum exponential backoff of number of cycles to pause when spinning. Default value is 1.
  168. </dd>
  169. <dt>STARPU_BACKOFF_MAX</dt>
  170. <dd>
  171. \anchor STARPU_BACKOFF_MAX
  172. \addindex __env__STARPU_BACKOFF_MAX
  173. Set maximum exponential backoff of number of cycles to pause when spinning. Default value is 32.
  174. </dd>
  175. <dt>STARPU_SINK</dt>
  176. <dd>
  177. \anchor STARPU_SINK
  178. \addindex __env__STARPU_SINK
  179. Defined internally by StarPU when running in master slave mode.
  180. </dd>
  181. </dl>
  182. \subsection cpuWorkers CPU Workers
  183. <dl>
  184. <dt>STARPU_NCPU</dt>
  185. <dd>
  186. \anchor STARPU_NCPU
  187. \addindex __env__STARPU_NCPU
  188. Specify the number of CPU workers (thus not including workers
  189. dedicated to control accelerators). Note that by default, StarPU will
  190. not allocate more CPU workers than there are physical CPUs, and that
  191. some CPUs are used to control the accelerators.
  192. </dd>
  193. <dt>STARPU_RESERVE_NCPU</dt>
  194. <dd>
  195. \anchor STARPU_RESERVE_NCPU
  196. \addindex __env__STARPU_RESERVE_NCPU
  197. Specify the number of CPU cores that should not be used by StarPU, so the
  198. application can use starpu_get_next_bindid() and starpu_bind_thread_on() to bind
  199. its own threads.
  200. This option is ignored if \ref STARPU_NCPU or starpu_conf::ncpus is set.
  201. </dd>
  202. <dt>STARPU_NCPUS</dt>
  203. <dd>
  204. \anchor STARPU_NCPUS
  205. \addindex __env__STARPU_NCPUS
  206. This variable is deprecated. You should use \ref STARPU_NCPU.
  207. </dd>
  208. </dl>
  209. \subsection cudaWorkers CUDA Workers
  210. <dl>
  211. <dt>STARPU_NCUDA</dt>
  212. <dd>
  213. \anchor STARPU_NCUDA
  214. \addindex __env__STARPU_NCUDA
  215. Specify the number of CUDA devices that StarPU can use. If
  216. \ref STARPU_NCUDA is lower than the number of physical devices, it is
  217. possible to select which GPU devices should be used by the means of the
  218. environment variable \ref STARPU_WORKERS_CUDAID. By default, StarPU will
  219. create as many CUDA workers as there are GPU devices.
  220. </dd>
  221. <dt>STARPU_NWORKER_PER_CUDA</dt>
  222. <dd>
  223. \anchor STARPU_NWORKER_PER_CUDA
  224. \addindex __env__STARPU_NWORKER_PER_CUDA
  225. Specify the number of workers per CUDA device, and thus the number of kernels
  226. which will be concurrently running on the devices, i.e. the number of CUDA
  227. streams. The default value is 1.
  228. </dd>
  229. <dt>STARPU_CUDA_THREAD_PER_WORKER</dt>
  230. <dd>
  231. \anchor STARPU_CUDA_THREAD_PER_WORKER
  232. \addindex __env__STARPU_CUDA_THREAD_PER_WORKER
  233. Specify whether the cuda driver should use one thread per stream (1) or to use
  234. a single thread to drive all the streams of the device or all devices (0), and
  235. \ref STARPU_CUDA_THREAD_PER_DEV determines whether it is one thread per device or one
  236. thread for all devices. The default value is 0. Setting it to 1 is contradictory
  237. with setting \ref STARPU_CUDA_THREAD_PER_DEV.
  238. </dd>
  239. <dt>STARPU_CUDA_THREAD_PER_DEV</dt>
  240. <dd>
  241. \anchor STARPU_CUDA_THREAD_PER_DEV
  242. \addindex __env__STARPU_CUDA_THREAD_PER_DEV
  243. Specify whether the cuda driver should use one thread per device (1) or to use a
  244. single thread to drive all the devices (0). The default value is 1. It does not
  245. make sense to set this variable if \ref STARPU_CUDA_THREAD_PER_WORKER is set to 1
  246. (since \ref STARPU_CUDA_THREAD_PER_DEV is then meaningless).
  247. </dd>
  248. <dt>STARPU_CUDA_PIPELINE</dt>
  249. <dd>
  250. \anchor STARPU_CUDA_PIPELINE
  251. \addindex __env__STARPU_CUDA_PIPELINE
  252. Specify how many asynchronous tasks are submitted in advance on CUDA
  253. devices. This for instance permits to overlap task management with the execution
  254. of previous tasks, but it also allows concurrent execution on Fermi cards, which
  255. otherwise bring spurious synchronizations. The default is 2. Setting the value to 0 forces a synchronous
  256. execution of all tasks.
  257. </dd>
  258. <dt>STARPU_WORKERS_CUDAID</dt>
  259. <dd>
  260. \anchor STARPU_WORKERS_CUDAID
  261. \addindex __env__STARPU_WORKERS_CUDAID
  262. Similarly to the \ref STARPU_WORKERS_CPUID environment variable, it is
  263. possible to select which CUDA devices should be used by StarPU. On a machine
  264. equipped with 4 GPUs, setting <c>STARPU_WORKERS_CUDAID = "1 3"</c> and
  265. <c>STARPU_NCUDA=2</c> specifies that 2 CUDA workers should be created, and that
  266. they should use CUDA devices #1 and #3 (the logical ordering of the devices is
  267. the one reported by CUDA).
  268. This variable is ignored if the field
  269. starpu_conf::use_explicit_workers_cuda_gpuid passed to starpu_init()
  270. is set.
  271. </dd>
  272. <dt>STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY</dt>
  273. <dd>
  274. \anchor STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY
  275. \addindex __env__STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY
  276. Disable asynchronous copies between CPU and CUDA devices.
  277. See also \ref STARPU_DISABLE_ASYNCHRONOUS_COPY and \ref
  278. STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY.
  279. </dd>
  280. <dt>STARPU_ENABLE_CUDA_GPU_GPU_DIRECT</dt>
  281. <dd>
  282. \anchor STARPU_ENABLE_CUDA_GPU_GPU_DIRECT
  283. \addindex __env__STARPU_ENABLE_CUDA_GPU_GPU_DIRECT
  284. Enable (1) or Disable (0) direct CUDA transfers from GPU to GPU, without copying
  285. through RAM. The default is Enabled.
  286. This permits to test the performance effect of GPU-Direct.
  287. </dd>
  288. <dt>STARPU_CUDA_ONLY_FAST_ALLOC_OTHER_MEMNODES</dt>
  289. <dd>
  290. \anchor STARPU_CUDA_ONLY_FAST_ALLOC_OTHER_MEMNODES
  291. \addindex __env__STARPU_CUDA_ONLY_FAST_ALLOC_OTHER_MEMNODES
  292. Specify if CUDA workers should do only fast allocations
  293. when running the datawizard progress of
  294. other memory nodes. This will pass the internal value
  295. _STARPU_DATAWIZARD_ONLY_FAST_ALLOC to allocation methods.
  296. Default value is 0, allowing CUDA workers to do slow allocations.
  297. This can also be specified with starpu_conf::cuda_only_fast_alloc_other_memnodes.
  298. </dd>
  299. </dl>
  300. \subsection openclWorkers OpenCL Workers
  301. <dl>
  302. <dt>STARPU_NOPENCL</dt>
  303. <dd>
  304. \anchor STARPU_NOPENCL
  305. \addindex __env__STARPU_NOPENCL
  306. Specify the number of OpenCL devices that StarPU can use. If
  307. \ref STARPU_NOPENCL is lower than the number of physical devices, it is
  308. possible to select which GPU devices should be used by the means of the
  309. environment variable \ref STARPU_WORKERS_OPENCLID. By default, StarPU will
  310. create as many OpenCL workers as there are GPU devices.
  311. Note that by default StarPU will launch CUDA workers on GPU devices.
  312. You need to disable CUDA to allow the creation of OpenCL workers.
  313. </dd>
  314. <dt>STARPU_WORKERS_OPENCLID</dt>
  315. <dd>
  316. \anchor STARPU_WORKERS_OPENCLID
  317. \addindex __env__STARPU_WORKERS_OPENCLID
  318. Similarly to the \ref STARPU_WORKERS_CPUID environment variable, it is
  319. possible to select which GPU devices should be used by StarPU. On a machine
  320. equipped with 4 GPUs, setting <c>STARPU_WORKERS_OPENCLID = "1 3"</c> and
  321. <c>STARPU_NOPENCL=2</c> specifies that 2 OpenCL workers should be
  322. created, and that they should use GPU devices #1 and #3.
  323. This variable is ignored if the field
  324. starpu_conf::use_explicit_workers_opencl_gpuid passed to starpu_init()
  325. is set.
  326. </dd>
  327. <dt>STARPU_OPENCL_PIPELINE</dt>
  328. <dd>
  329. \anchor STARPU_OPENCL_PIPELINE
  330. \addindex __env__STARPU_OPENCL_PIPELINE
  331. Specify how many asynchronous tasks are submitted in advance on OpenCL
  332. devices. This for instance permits to overlap task management with the execution
  333. of previous tasks, but it also allows concurrent execution on Fermi cards, which
  334. otherwise bring spurious synchronizations. The default is 2. Setting the value to 0 forces a synchronous
  335. execution of all tasks.
  336. </dd>
  337. <dt>STARPU_OPENCL_ON_CPUS</dt>
  338. <dd>
  339. \anchor STARPU_OPENCL_ON_CPUS
  340. \addindex __env__STARPU_OPENCL_ON_CPUS
  341. By default, the OpenCL driver only enables GPU and accelerator
  342. devices. By setting the environment variable \ref STARPU_OPENCL_ON_CPUS
  343. to 1, the OpenCL driver will also enable CPU devices.
  344. </dd>
  345. <dt>STARPU_OPENCL_ONLY_ON_CPUS</dt>
  346. <dd>
  347. \anchor STARPU_OPENCL_ONLY_ON_CPUS
  348. \addindex __env__STARPU_OPENCL_ONLY_ON_CPUS
  349. By default, the OpenCL driver enables GPU and accelerator
  350. devices. By setting the environment variable \ref STARPU_OPENCL_ONLY_ON_CPUS
  351. to 1, the OpenCL driver will ONLY enable CPU devices.
  352. </dd>
  353. <dt>STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY</dt>
  354. <dd>
  355. \anchor STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY
  356. \addindex __env__STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY
  357. Disable asynchronous copies between CPU and OpenCL devices.
  358. The AMD implementation of OpenCL is known to
  359. fail when copying data asynchronously. When using this implementation,
  360. it is therefore necessary to disable asynchronous data transfers.
  361. See also \ref STARPU_DISABLE_ASYNCHRONOUS_COPY and \ref
  362. STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY.
  363. </dd>
  364. </dl>
  365. \subsection fpgaWorkers FPGA Workers
  366. <dl>
  367. <dt>STARPU_NFPGA</dt>
  368. <dd>
  369. \anchor STARPU_NFPGA
  370. \addindex __env__STARPU_NFPGA
  371. Specify the number of FPGA devices that StarPU can use. If
  372. \ref STARPU_NFPGA is lower than the number of physical devices, it is
  373. possible to select which FPGA devices should be used by the means of the
  374. environment variable \ref STARPU_WORKERS_FPGAID. By default, StarPU will
  375. create as many FPGA workers as there are FPGA devices.
  376. </dd>
  377. <dt>STARPU_WORKERS_FPGAID</dt>
  378. <dd>
  379. \anchor STARPU_WORKERS_FPGAID
  380. \addindex __env__STARPU_WORKERS_FPGAID
  381. Similarly to the \ref STARPU_WORKERS_CPUID environment variable, it is
  382. possible to select which FPGA devices should be used by StarPU. On a machine
  383. equipped with 4 FPGAs, setting <c>STARPU_WORKERS_FPGAID = "1 3"</c> and
  384. <c>STARPU_NFPGA=2</c> specifies that 2 FPGA workers should be created, and that
  385. they should use FPGA devices #1 and #3 (the logical ordering of the devices is
  386. the one reported by FPGA).
</dd>
</dl>
  387. \subsection mpimsWorkers MPI Master Slave Workers
  388. <dl>
  389. <dt>STARPU_NMPI_MS</dt>
  390. <dd>
  391. \anchor STARPU_NMPI_MS
  392. \addindex __env__STARPU_NMPI_MS
  393. Specify the number of MPI master slave devices that StarPU can use.
  394. </dd>
  395. <dt>STARPU_NMPIMSTHREADS</dt>
  396. <dd>
  397. \anchor STARPU_NMPIMSTHREADS
  398. \addindex __env__STARPU_NMPIMSTHREADS
  399. Number of threads to use on the MPI Slave devices.
  400. </dd>
  401. <dt>STARPU_MPI_MASTER_NODE</dt>
  402. <dd>
  403. \anchor STARPU_MPI_MASTER_NODE
  404. \addindex __env__STARPU_MPI_MASTER_NODE
  405. This variable allows to choose which MPI node (with the MPI ID) will be the master.
  406. </dd>
  407. <dt>STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY</dt>
  408. <dd>
  409. \anchor STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY
  410. \addindex __env__STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY
  411. Disable asynchronous copies between CPU and MPI Slave devices.
  412. </dd>
  413. </dl>
  414. \subsection mpiConf MPI Configuration
  415. <dl>
  416. <dt>STARPU_MPI_THREAD_CPUID</dt>
  417. <dd>
  418. \anchor STARPU_MPI_THREAD_CPUID
  419. \addindex __env__STARPU_MPI_THREAD_CPUID
  420. When defined, this makes StarPU bind its MPI thread to the given CPU ID. Setting
  421. it to -1 (the default value) will use a reserved CPU, subtracted from the CPU
  422. workers.
  423. </dd>
  424. <dt>STARPU_MPI_THREAD_COREID</dt>
  425. <dd>
  426. \anchor STARPU_MPI_THREAD_COREID
  427. \addindex __env__STARPU_MPI_THREAD_COREID
  428. Same as \ref STARPU_MPI_THREAD_CPUID, but bind the MPI thread to the given core
  429. ID, instead of the PU (hyperthread).
  430. </dd>
  431. <dt>STARPU_MPI_NOBIND</dt>
  432. <dd>
  433. \anchor STARPU_MPI_NOBIND
  434. \addindex __env__STARPU_MPI_NOBIND
  435. Setting it to non-zero will prevent StarPU from binding the MPI to
  436. a separate core. This is for instance useful when running the testsuite on a single system.
  437. </dd>
  438. </dl>
  439. \section ConfiguringTheSchedulingEngine Configuring The Scheduling Engine
  440. <dl>
  441. <dt>STARPU_SCHED</dt>
  442. <dd>
  443. \anchor STARPU_SCHED
  444. \addindex __env__STARPU_SCHED
  445. Choose between the different scheduling policies proposed by StarPU: work
  446. random, stealing, greedy, with performance models, etc.
  447. Use <c>STARPU_SCHED=help</c> to get the list of available schedulers.
  448. </dd>
  449. <dt>STARPU_MIN_PRIO</dt>
  450. <dd>
  451. \anchor STARPU_MIN_PRIO_env
  452. \addindex __env__STARPU_MIN_PRIO
  453. Set the minimum priority used by priorities-aware schedulers.
  454. The flag can also be set through the field starpu_conf::global_sched_ctx_min_priority.
  455. </dd>
  456. <dt>STARPU_MAX_PRIO</dt>
  457. <dd>
  458. \anchor STARPU_MAX_PRIO_env
  459. \addindex __env__STARPU_MAX_PRIO
  460. Set the maximum priority used by priorities-aware schedulers.
  461. The flag can also be set through the field starpu_conf::global_sched_ctx_max_priority.
  462. </dd>
  463. <dt>STARPU_CALIBRATE</dt>
  464. <dd>
  465. \anchor STARPU_CALIBRATE
  466. \addindex __env__STARPU_CALIBRATE
  467. If this variable is set to 1, the performance models are calibrated during
  468. the execution. If it is set to 2, the previous values are dropped to restart
  469. calibration from scratch. Setting this variable to 0 disables calibration, this
  470. is the default behaviour.
  471. Note: this currently only applies to <c>dm</c> and <c>dmda</c> scheduling policies.
  472. </dd>
  473. <dt>STARPU_CALIBRATE_MINIMUM</dt>
  474. <dd>
  475. \anchor STARPU_CALIBRATE_MINIMUM
  476. \addindex __env__STARPU_CALIBRATE_MINIMUM
  477. Define the minimum number of calibration measurements that will be made
  478. before considering that the performance model is calibrated. The default value is 10.
  479. </dd>
  480. <dt>STARPU_BUS_CALIBRATE</dt>
  481. <dd>
  482. \anchor STARPU_BUS_CALIBRATE
  483. \addindex __env__STARPU_BUS_CALIBRATE
  484. If this variable is set to 1, the bus is recalibrated during initialization.
  485. </dd>
  486. <dt>STARPU_PREFETCH</dt>
  487. <dd>
  488. \anchor STARPU_PREFETCH
  489. \addindex __env__STARPU_PREFETCH
  490. Indicate whether data prefetching should be enabled (0 means
  491. that it is disabled). If prefetching is enabled, when a task is scheduled to be
  492. executed e.g. on a GPU, StarPU will request an asynchronous transfer in
  493. advance, so that data is already present on the GPU when the task starts. As a
  494. result, computation and data transfers are overlapped.
  495. Note that prefetching is enabled by default in StarPU.
  496. </dd>
  497. <dt>STARPU_SCHED_ALPHA</dt>
  498. <dd>
  499. \anchor STARPU_SCHED_ALPHA
  500. \addindex __env__STARPU_SCHED_ALPHA
  501. To estimate the cost of a task StarPU takes into account the estimated
  502. computation time (obtained thanks to performance models). The alpha factor is
  503. the coefficient to be applied to it before adding it to the communication part.
  504. </dd>
  505. <dt>STARPU_SCHED_BETA</dt>
  506. <dd>
  507. \anchor STARPU_SCHED_BETA
  508. \addindex __env__STARPU_SCHED_BETA
  509. To estimate the cost of a task StarPU takes into account the estimated
  510. data transfer time (obtained thanks to performance models). The beta factor is
  511. the coefficient to be applied to it before adding it to the computation part.
  512. </dd>
  513. <dt>STARPU_SCHED_GAMMA</dt>
  514. <dd>
  515. \anchor STARPU_SCHED_GAMMA
  516. \addindex __env__STARPU_SCHED_GAMMA
  517. Define the execution time penalty of a joule (\ref Energy-basedScheduling).
  518. </dd>
  519. <dt>STARPU_SCHED_READY</dt>
  520. <dd>
  521. \anchor STARPU_SCHED_READY
  522. \addindex __env__STARPU_SCHED_READY
  523. For a modular scheduler with sorted queues below the decision component, workers
  524. pick up a task which has most of its data already available. Setting this to 0
  525. disables this.
  526. </dd>
  527. <dt>STARPU_SCHED_SORTED_ABOVE</dt>
  528. <dd>
  529. \anchor STARPU_SCHED_SORTED_ABOVE
  530. \addindex __env__STARPU_SCHED_SORTED_ABOVE
  531. For a modular scheduler with queues above the decision component, it is
  532. usually sorted by priority. Setting this to 0 disables this.
  533. </dd>
  534. <dt>STARPU_SCHED_SORTED_BELOW</dt>
  535. <dd>
  536. \anchor STARPU_SCHED_SORTED_BELOW
  537. \addindex __env__STARPU_SCHED_SORTED_BELOW
  538. For a modular scheduler with queues below the decision component, they are
  539. usually sorted by priority. Setting this to 0 disables this.
  540. </dd>
  541. <dt>STARPU_IDLE_POWER</dt>
  542. <dd>
  543. \anchor STARPU_IDLE_POWER
  544. \addindex __env__STARPU_IDLE_POWER
  545. Define the idle power of the machine (\ref Energy-basedScheduling).
  546. </dd>
  547. <dt>STARPU_PROFILING</dt>
  548. <dd>
  549. \anchor STARPU_PROFILING
  550. \addindex __env__STARPU_PROFILING
  551. Enable on-line performance monitoring (\ref EnablingOn-linePerformanceMonitoring).
  552. </dd>
  553. <dt>STARPU_PROF_PAPI_EVENTS</dt>
  554. <dd>
  555. \anchor STARPU_PROF_PAPI_EVENTS
  556. \addindex __env__STARPU_PROF_PAPI_EVENTS
  557. Specify which PAPI events should be recorded in the trace (\ref PapiCounters).
  558. </dd>
  559. </dl>
  560. \section ConfiguringHeteroprio Configuring The Heteroprio Scheduler
  561. \subsection ConfiguringLaHeteroprio Configuring LAHeteroprio
  562. <dl>
  563. <dt>STARPU_HETEROPRIO_USE_LA</dt>
  564. <dd>
  565. \anchor STARPU_HETEROPRIO_USE_LA
  566. \addindex __env__STARPU_HETEROPRIO_USE_LA
  567. Enable the locality aware mode of Heteroprio which guides the distribution of tasks to workers
  568. in order to reduce the data transfers between memory nodes.
  569. </dd>
  570. <dt>STARPU_LAHETEROPRIO_PUSH</dt>
  571. <dd>
  572. \anchor STARPU_LAHETEROPRIO_PUSH
  573. \addindex __env__STARPU_LAHETEROPRIO_PUSH
  574. Choose between the different push strategies for locality aware Heteroprio:
  575. WORKER, LcS, LS_SDH, LS_SDH2, LS_SDHB, LC_SMWB, AUTO (by default: AUTO). These are detailed in
  576. \ref LAHeteroprio
  577. </dd>
  578. <dt>STARPU_LAHETEROPRIO_S_[ARCH]</dt>
  579. <dd>
  580. \anchor STARPU_LAHETEROPRIO_S_[ARCH]
  581. \addindex __env__STARPU_LAHETEROPRIO_S_arch
  582. Specify the number of memory nodes contained in an affinity group. An affinity
  583. group will be composed of the closest memory nodes to a worker of a given architecture,
  584. and this worker will look for tasks available inside these memory nodes, before
  585. considering stealing tasks outside this group.
  586. ARCH can be CPU, CUDA, OPENCL, MICC, SCC, MPI_MS, etc.
  587. </dd>
  588. <dt>STARPU_LAHETEROPRIO_PRIO_STEP_[ARCH]</dt>
  589. <dd>
  590. \anchor STARPU_LAHETEROPRIO_PRIO_STEP_[ARCH]
  591. \addindex __env__STARPU_LAHETEROPRIO_PRIO_STEP_arch
  592. Specify the number of buckets in the local memory node in which a worker will look for
  593. available tasks, before this worker starts looking for tasks in other memory nodes' buckets.
  594. ARCH indicates that this number is specific to a given arch which can be:
  595. CPU, CUDA, OPENCL, MICC, SCC, MPI_MS, etc.
  596. </dd>
  597. </dl>
  598. \subsection ConfiguringAutoHeteroprio Configuring AutoHeteroprio
  599. <dl>
  600. <dt>STARPU_HETEROPRIO_USE_AUTO_CALIBRATION</dt>
  601. <dd>
  602. \anchor STARPU_HETEROPRIO_USE_AUTO_CALIBRATION
  603. \addindex __env__STARPU_HETEROPRIO_USE_AUTO_CALIBRATION
  604. Enable the auto calibration mode of Heteroprio which assigns priorities to tasks automatically.
  605. </dd>
  606. <dt>STARPU_HETEROPRIO_DATA_DIR</dt>
  607. <dd>
  608. \anchor STARPU_HETEROPRIO_DATA_DIR
  609. \addindex __env__STARPU_HETEROPRIO_DATA_DIR
  610. Specify the path of the directory where Heteroprio stores data about program executions.
  611. By default, these are stored in the same directory used by perfmodel.
  612. </dd>
  613. <dt>STARPU_HETEROPRIO_DATA_FILE</dt>
  614. <dd>
  615. \anchor STARPU_HETEROPRIO_DATA_FILE
  616. \addindex __env__STARPU_HETEROPRIO_DATA_FILE
  617. Specify the filename where Heteroprio will save data about the current program's execution.
  618. </dd>
  619. <dt>STARPU_HETEROPRIO_CODELET_GROUPING_STRATEGY</dt>
  620. <dd>
  621. \anchor STARPU_HETEROPRIO_CODELET_GROUPING_STRATEGY
  622. \addindex __env__STARPU_HETEROPRIO_CODELET_GROUPING_STRATEGY
  623. Choose how Heteroprio groups similar tasks. It can be <c>0</c> to group
  624. the tasks with the same perfmodel or the same codelet's name if no perfmodel was assigned.
  625. Or, it could be <c>1</c> to group the tasks only by codelet's name.
  626. </dd>
  627. <dt>STARPU_AUTOHETEROPRIO_PRINT_DATA_ON_UPDATE</dt>
  628. <dd>
  629. \anchor STARPU_AUTOHETEROPRIO_PRINT_DATA_ON_UPDATE
  630. \addindex __env__STARPU_AUTOHETEROPRIO_PRINT_DATA_ON_UPDATE
  631. Enable the printing of priorities' data every time they get updated.
  632. </dd>
  633. <dt>STARPU_AUTOHETEROPRIO_PRINT_AFTER_ORDERING</dt>
  634. <dd>
  635. \anchor STARPU_AUTOHETEROPRIO_PRINT_AFTER_ORDERING
  636. \addindex __env__STARPU_AUTOHETEROPRIO_PRINT_AFTER_ORDERING
  637. Enable the printing of priorities' order for each architecture every time there's a reordering.
  638. </dd>
  639. <dt>STARPU_AUTOHETEROPRIO_PRIORITY_ORDERING_POLICY</dt>
  640. <dd>
  641. \anchor STARPU_AUTOHETEROPRIO_PRIORITY_ORDERING_POLICY
  642. \addindex __env__STARPU_AUTOHETEROPRIO_PRIORITY_ORDERING_POLICY
  643. Specify the heuristic which will be used to assign priorities automatically.
  644. It should be an integer between 0 and 27.
  645. </dd>
  646. <dt>STARPU_AUTOHETEROPRIO_ORDERING_INTERVAL</dt>
  647. <dd>
  648. \anchor STARPU_AUTOHETEROPRIO_ORDERING_INTERVAL
  649. \addindex __env__STARPU_AUTOHETEROPRIO_ORDERING_INTERVAL
  650. Specify the period (in number of tasks pushed), between priorities reordering operations.
  651. </dd>
  652. <dt>STARPU_AUTOHETEROPRIO_FREEZE_GATHERING</dt>
  653. <dd>
  654. \anchor STARPU_AUTOHETEROPRIO_FREEZE_GATHERING
  655. \addindex __env__STARPU_AUTOHETEROPRIO_FREEZE_GATHERING
  656. Disable data gathering from task executions.
  657. </dd>
  658. </dl>
  659. \section Extensions Extensions
  660. <dl>
  661. <dt>SOCL_OCL_LIB_OPENCL</dt>
  662. <dd>
  663. \anchor SOCL_OCL_LIB_OPENCL
  664. \addindex __env__SOCL_OCL_LIB_OPENCL
  665. The SOCL test suite is only run when the environment variable
  666. \ref SOCL_OCL_LIB_OPENCL is defined. It should contain the location
  667. of the file <c>libOpenCL.so</c> of the OCL ICD implementation.
  668. </dd>
  669. <dt>OCL_ICD_VENDORS</dt>
  670. <dd>
  671. \anchor OCL_ICD_VENDORS
  672. \addindex __env__OCL_ICD_VENDORS
  673. When using SOCL with OpenCL ICD
  674. (https://forge.imag.fr/projects/ocl-icd/), this variable may be used
  675. to point to the directory where ICD files are installed. The default
  676. directory is <c>/etc/OpenCL/vendors</c>. StarPU installs ICD
  677. files in the directory <c>$prefix/share/starpu/opencl/vendors</c>.
  678. </dd>
  679. <dt>STARPU_COMM_STATS</dt>
  680. <dd>
  681. \anchor STARPU_COMM_STATS
  682. \addindex __env__STARPU_COMM_STATS
  683. Communication statistics for starpumpi (\ref MPIDebug)
  684. will be enabled when the environment variable \ref STARPU_COMM_STATS
  685. is defined to a value other than 0.
  686. </dd>
  687. <dt>STARPU_MPI_CACHE</dt>
  688. <dd>
  689. \anchor STARPU_MPI_CACHE
  690. \addindex __env__STARPU_MPI_CACHE
  691. Communication cache for starpumpi (\ref MPISupport) will be
  692. disabled when the environment variable \ref STARPU_MPI_CACHE is set
  693. to 0. It is enabled by default or for any other values of the variable
  694. \ref STARPU_MPI_CACHE.
  695. </dd>
  696. <dt>STARPU_MPI_COMM</dt>
  697. <dd>
  698. \anchor STARPU_MPI_COMM
  699. \addindex __env__STARPU_MPI_COMM
  700. Communication trace for starpumpi (\ref MPISupport) will be
  701. enabled when the environment variable \ref STARPU_MPI_COMM is set
  702. to 1, and StarPU has been configured with the option
  703. \ref enable-verbose "--enable-verbose".
  704. </dd>
  705. <dt>STARPU_MPI_CACHE_STATS</dt>
  706. <dd>
  707. \anchor STARPU_MPI_CACHE_STATS
  708. \addindex __env__STARPU_MPI_CACHE_STATS
  709. When set to 1, statistics are enabled for the communication cache (\ref MPISupport). For now,
  710. it prints messages on the standard output when data are added or removed from the received
  711. communication cache.
  712. </dd>
  713. <dt>STARPU_MPI_PRIORITIES</dt>
  714. <dd>
  715. \anchor STARPU_MPI_PRIORITIES
  716. \addindex __env__STARPU_MPI_PRIORITIES
  717. When set to 0, the use of priorities to order MPI communications is disabled
  718. (\ref MPISupport).
  719. </dd>
  720. <dt>STARPU_MPI_NDETACHED_SEND</dt>
  721. <dd>
  722. \anchor STARPU_MPI_NDETACHED_SEND
  723. \addindex __env__STARPU_MPI_NDETACHED_SEND
  724. This sets the number of send requests that StarPU-MPI will emit concurrently. The default is 10.
  725. </dd>
  726. <dt>STARPU_MPI_NREADY_PROCESS</dt>
  727. <dd>
  728. \anchor STARPU_MPI_NREADY_PROCESS
  729. \addindex __env__STARPU_MPI_NREADY_PROCESS
  730. This sets the number of requests that StarPU-MPI will submit to MPI before
  731. polling for termination of existing requests. The default is 10.
  732. </dd>
  733. <dt>STARPU_MPI_FAKE_SIZE</dt>
  734. <dd>
  735. \anchor STARPU_MPI_FAKE_SIZE
  736. \addindex __env__STARPU_MPI_FAKE_SIZE
  737. Setting to a number makes StarPU believe that there are as many MPI nodes, even
  738. if it was run on only one MPI node. This allows e.g. to simulate the execution
  739. of one of the nodes of a big cluster without actually running the rest.
  740. It of course does not provide computation results and timing.
  741. </dd>
  742. <dt>STARPU_MPI_FAKE_RANK</dt>
  743. <dd>
  744. \anchor STARPU_MPI_FAKE_RANK
  745. \addindex __env__STARPU_MPI_FAKE_RANK
  746. Setting to a number makes StarPU believe that it runs the given MPI node, even
  747. if it was run on only one MPI node. This allows e.g. to simulate the execution
  748. of one of the nodes of a big cluster without actually running the rest.
  749. It of course does not provide computation results and timing.
  750. </dd>
  751. <dt>STARPU_MPI_DRIVER_CALL_FREQUENCY</dt>
  752. <dd>
  753. \anchor STARPU_MPI_DRIVER_CALL_FREQUENCY
  754. \addindex __env__STARPU_MPI_DRIVER_CALL_FREQUENCY
  755. When set to a positive value, activates the interleaving of the execution of
  756. tasks with the progression of MPI communications (\ref MPISupport). The
  757. starpu_mpi_init_conf() function must have been called by the application
  758. for that environment variable to be used. When set to 0, the MPI progression
  759. thread does not use at all the driver given by the user, and only focuses on
  760. making MPI communications progress.
  761. </dd>
  762. <dt>STARPU_MPI_DRIVER_TASK_FREQUENCY</dt>
  763. <dd>
  764. \anchor STARPU_MPI_DRIVER_TASK_FREQUENCY
  765. \addindex __env__STARPU_MPI_DRIVER_TASK_FREQUENCY
  766. When set to a positive value, the interleaving of the execution of tasks with
  767. the progression of MPI communications mechanism will execute several tasks before
  768. checking communication requests again (\ref MPISupport). The
  769. starpu_mpi_init_conf() function must have been called by the application
  770. for that environment variable to be used, and the
  771. STARPU_MPI_DRIVER_CALL_FREQUENCY environment variable set to a positive value.
  772. </dd>
  773. <dt>STARPU_MPI_MEM_THROTTLE</dt>
  774. <dd>
  775. \anchor STARPU_MPI_MEM_THROTTLE
  776. \addindex __env__STARPU_MPI_MEM_THROTTLE
  777. When set to a positive value, this makes the starpu_mpi_*recv* functions
  778. block when the memory allocation required for network reception overflows the
  779. available main memory (as typically set by \ref STARPU_LIMIT_CPU_MEM)
  780. </dd>
  781. <dt>STARPU_MPI_EARLYDATA_ALLOCATE</dt>
  782. <dd>
  783. \anchor STARPU_MPI_EARLYDATA_ALLOCATE
  784. \addindex __env__STARPU_MPI_EARLYDATA_ALLOCATE
  785. When set to 1, the MPI Driver will immediately allocate the data for early
  786. requests instead of issuing a data request and blocking. The default value is 0,
  787. issuing a data request. Because it is an early request and we do not know its
  788. real priority, the data request will assume \ref STARPU_DEFAULT_PRIO. In cases
  789. where there are many data requests with priorities greater than
  790. \ref STARPU_DEFAULT_PRIO the MPI driver could be blocked for long periods.
  791. </dd>
  792. <dt>STARPU_SIMGRID</dt>
  793. <dd>
  794. \anchor STARPU_SIMGRID
  795. \addindex __env__STARPU_SIMGRID
  796. When set to 1 (the default is 0), this makes StarPU check that it was really
  797. built with simulation support. This is convenient in scripts to avoid using a
  798. native version, that would try to update performance models...
  799. </dd>
  800. <dt>STARPU_SIMGRID_TRANSFER_COST</dt>
  801. <dd>
  802. \anchor STARPU_SIMGRID_TRANSFER_COST
  803. \addindex __env__STARPU_SIMGRID_TRANSFER_COST
  804. When set to 1 (which is the default), data transfers (over PCI bus, typically) are taken into account
  805. in SimGrid mode.
  806. </dd>
  807. <dt>STARPU_SIMGRID_CUDA_MALLOC_COST</dt>
  808. <dd>
  809. \anchor STARPU_SIMGRID_CUDA_MALLOC_COST
  810. \addindex __env__STARPU_SIMGRID_CUDA_MALLOC_COST
  811. When set to 1 (which is the default), CUDA malloc costs are taken into account
  812. in SimGrid mode.
  813. </dd>
  814. <dt>STARPU_SIMGRID_CUDA_QUEUE_COST</dt>
  815. <dd>
  816. \anchor STARPU_SIMGRID_CUDA_QUEUE_COST
  817. \addindex __env__STARPU_SIMGRID_CUDA_QUEUE_COST
  818. When set to 1 (which is the default), CUDA task and transfer queueing costs are
  819. taken into account in SimGrid mode.
  820. </dd>
  821. <dt>STARPU_PCI_FLAT</dt>
  822. <dd>
  823. \anchor STARPU_PCI_FLAT
  824. \addindex __env__STARPU_PCI_FLAT
  825. When unset or set to 0, the platform file created for SimGrid will
  826. contain PCI bandwidths and routes.
  827. </dd>
  828. <dt>STARPU_SIMGRID_QUEUE_MALLOC_COST</dt>
  829. <dd>
  830. \anchor STARPU_SIMGRID_QUEUE_MALLOC_COST
  831. \addindex __env__STARPU_SIMGRID_QUEUE_MALLOC_COST
  832. When unset or set to 1, simulate within SimGrid the GPU transfer queueing.
  833. </dd>
  834. <dt>STARPU_MALLOC_SIMULATION_FOLD</dt>
  835. <dd>
  836. \anchor STARPU_MALLOC_SIMULATION_FOLD
  837. \addindex __env__STARPU_MALLOC_SIMULATION_FOLD
  838. Define the size of the file used for folding virtual allocation, in
  839. MiB. The default is 1, thus allowing 64GiB virtual memory when Linux's
  840. <c>sysctl vm.max_map_count</c> value is the default 65535.
  841. </dd>
  842. <dt>STARPU_SIMGRID_TASK_SUBMIT_COST</dt>
  843. <dd>
  844. \anchor STARPU_SIMGRID_TASK_SUBMIT_COST
  845. \addindex __env__STARPU_SIMGRID_TASK_SUBMIT_COST
  846. When set to 1 (which is the default), task submission costs are taken into
  847. account in SimGrid mode. This provides more accurate SimGrid predictions,
  848. especially for the beginning of the execution.
  849. </dd>
  850. <dt>STARPU_SIMGRID_FETCHING_INPUT_COST</dt>
  851. <dd>
  852. \anchor STARPU_SIMGRID_FETCHING_INPUT_COST
  853. \addindex __env__STARPU_SIMGRID_FETCHING_INPUT_COST
  854. When set to 1 (which is the default), fetching input costs are taken into
  855. account in SimGrid mode. This provides more accurate SimGrid predictions,
  856. especially regarding data transfers.
  857. </dd>
  858. <dt>STARPU_SIMGRID_SCHED_COST</dt>
  859. <dd>
  860. \anchor STARPU_SIMGRID_SCHED_COST
  861. \addindex __env__STARPU_SIMGRID_SCHED_COST
  862. When set to 1 (0 is the default), scheduling costs are taken into
  863. account in SimGrid mode. This provides more accurate SimGrid predictions,
  864. and allows studying scheduling overhead of the runtime system. However,
  865. it also makes simulation non-deterministic.
  866. </dd>
  867. </dl>
  868. \section MiscellaneousAndDebug Miscellaneous And Debug
  869. <dl>
  870. <dt>STARPU_HOME</dt>
  871. <dd>
  872. \anchor STARPU_HOME
  873. \addindex __env__STARPU_HOME
  874. Specify the main directory in which StarPU stores its
  875. configuration files. The default is <c>$HOME</c> on Unix environments,
  876. and <c>$USERPROFILE</c> on Windows environments.
  877. </dd>
  878. <dt>STARPU_PATH</dt>
  879. <dd>
  880. \anchor STARPU_PATH
  881. \addindex __env__STARPU_PATH
  882. Only used on Windows environments.
  883. Specify the main directory in which StarPU is installed
  884. (\ref RunningABasicStarPUApplicationOnMicrosoft)
  885. </dd>
  886. <dt>STARPU_PERF_MODEL_DIR</dt>
  887. <dd>
  888. \anchor STARPU_PERF_MODEL_DIR
  889. \addindex __env__STARPU_PERF_MODEL_DIR
  890. Specify the main directory in which StarPU stores its
  891. performance model files. The default is <c>$STARPU_HOME/.starpu/sampling</c>.
  892. </dd>
  893. <dt>STARPU_PERF_MODEL_HOMOGENEOUS_CPU</dt>
  894. <dd>
  895. \anchor STARPU_PERF_MODEL_HOMOGENEOUS_CPU
  896. \addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_CPU
  897. When set to 0, StarPU will assume that CPU devices do not have the same
  898. performance, and thus use different performance models for them, thus making
  899. kernel calibration much longer, since measurements have to be made for each CPU
  900. core.
  901. </dd>
  902. <dt>STARPU_PERF_MODEL_HOMOGENEOUS_CUDA</dt>
  903. <dd>
  904. \anchor STARPU_PERF_MODEL_HOMOGENEOUS_CUDA
  905. \addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_CUDA
  906. When set to 1, StarPU will assume that all CUDA devices have the same
  907. performance, and thus share performance models for them, thus allowing kernel
  908. calibration to be much faster, since measurements only have to be done once for all
  909. CUDA GPUs.
  910. </dd>
  911. <dt>STARPU_PERF_MODEL_HOMOGENEOUS_OPENCL</dt>
  912. <dd>
  913. \anchor STARPU_PERF_MODEL_HOMOGENEOUS_OPENCL
  914. \addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_OPENCL
  915. When set to 1, StarPU will assume that all OPENCL devices have the same
  916. performance, and thus share performance models for them, thus allowing kernel
  917. calibration to be much faster, since measurements only have to be done once for all
  918. OPENCL GPUs.
  919. </dd>
  920. <dt>STARPU_PERF_MODEL_HOMOGENEOUS_MPI_MS</dt>
  921. <dd>
  922. \anchor STARPU_PERF_MODEL_HOMOGENEOUS_MPI_MS
  923. \addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_MPI_MS
  924. When set to 1, StarPU will assume that all MPI Slave devices have the same
  925. performance, and thus share performance models for them, thus allowing kernel
  926. calibration to be much faster, since measurements only have to be done once for all
  927. MPI Slaves.
  928. </dd>
  929. <dt>STARPU_HOSTNAME</dt>
  930. <dd>
  931. \anchor STARPU_HOSTNAME
  932. \addindex __env__STARPU_HOSTNAME
  933. When set, force the hostname to be used when dealing with performance model
  934. files. Models are indexed by machine name. When running for example on
  935. a homogeneous cluster, it is possible to share the models between
  936. machines by setting <c>export STARPU_HOSTNAME=some_global_name</c>.
  937. </dd>
  938. <dt>STARPU_MPI_HOSTNAMES</dt>
  939. <dd>
  940. \anchor STARPU_MPI_HOSTNAMES
  941. \addindex __env__STARPU_MPI_HOSTNAMES
  942. Similar to \ref STARPU_HOSTNAME but to define multiple nodes on a
  943. heterogeneous cluster. The variable is a list of hostnames that will be assigned
  944. to each StarPU-MPI rank considering their position and the value of
  945. \ref starpu_mpi_world_rank on each rank. When running, for example, on a
  946. heterogeneous cluster, it is possible to set individual models for each machine
  947. by setting <c>export STARPU_MPI_HOSTNAMES="name0 name1 name2"</c>. Where rank 0
  948. will receive name0, rank 1 will receive name1, and so on.
  949. This variable has precedence over \ref STARPU_HOSTNAME.
  950. </dd>
  951. <dt>STARPU_OPENCL_PROGRAM_DIR</dt>
  952. <dd>
  953. \anchor STARPU_OPENCL_PROGRAM_DIR
  954. \addindex __env__STARPU_OPENCL_PROGRAM_DIR
  955. Specify the directory where the OpenCL codelet source files are
  956. located. The function starpu_opencl_load_program_source() looks
  957. for the codelet in the current directory, in the directory specified
  958. by the environment variable \ref STARPU_OPENCL_PROGRAM_DIR, in the
  959. directory <c>share/starpu/opencl</c> of the installation directory of
  960. StarPU, and finally in the source directory of StarPU.
  961. </dd>
  962. <dt>STARPU_SILENT</dt>
  963. <dd>
  964. \anchor STARPU_SILENT
  965. \addindex __env__STARPU_SILENT
  966. Allow to disable verbose mode at runtime when StarPU
  967. has been configured with the option \ref enable-verbose "--enable-verbose". Also
  968. disable the display of StarPU information and warning messages.
  969. </dd>
  970. <dt>STARPU_MPI_DEBUG_LEVEL_MIN</dt>
  971. <dd>
  972. \anchor STARPU_MPI_DEBUG_LEVEL_MIN
  973. \addindex __env__STARPU_MPI_DEBUG_LEVEL_MIN
  974. Set the minimum level of debug when StarPU
  975. has been configured with the option \ref enable-mpi-verbose "--enable-mpi-verbose".
  976. </dd>
  977. <dt>STARPU_MPI_DEBUG_LEVEL_MAX</dt>
  978. <dd>
  979. \anchor STARPU_MPI_DEBUG_LEVEL_MAX
  980. \addindex __env__STARPU_MPI_DEBUG_LEVEL_MAX
  981. Set the maximum level of debug when StarPU
  982. has been configured with the option \ref enable-mpi-verbose "--enable-mpi-verbose".
  983. </dd>
  984. <dt>STARPU_LOGFILENAME</dt>
  985. <dd>
  986. \anchor STARPU_LOGFILENAME
  987. \addindex __env__STARPU_LOGFILENAME
  988. Specify in which file the debugging output should be saved to.
  989. </dd>
  990. <dt>STARPU_FXT_PREFIX</dt>
  991. <dd>
  992. \anchor STARPU_FXT_PREFIX
  993. \addindex __env__STARPU_FXT_PREFIX
  994. Specify in which directory to save the generated trace if FxT is enabled.
  995. </dd>
  996. <dt>STARPU_FXT_SUFFIX</dt>
  997. <dd>
  998. \anchor STARPU_FXT_SUFFIX
  999. \addindex __env__STARPU_FXT_SUFFIX
  1000. Specify in which file to save the generated trace if FxT is enabled.
  1001. </dd>
  1002. <dt>STARPU_FXT_TRACE</dt>
  1003. <dd>
  1004. \anchor STARPU_FXT_TRACE
  1005. \addindex __env__STARPU_FXT_TRACE
  1006. Specify whether to generate (1) or not (0) the FxT trace in /tmp/prof_file_XXX_YYY (the directory and file name can be changed with \ref STARPU_FXT_PREFIX and \ref STARPU_FXT_SUFFIX). The default is 0 (do not generate it)
  1007. </dd>
  1008. <dt>STARPU_LIMIT_CUDA_devid_MEM</dt>
  1009. <dd>
  1010. \anchor STARPU_LIMIT_CUDA_devid_MEM
  1011. \addindex __env__STARPU_LIMIT_CUDA_devid_MEM
  1012. Specify the maximum number of megabytes that should be
  1013. available to the application on the CUDA device with the identifier
  1014. <c>devid</c>. This variable is intended to be used for experimental
  1015. purposes as it emulates devices that have a limited amount of memory.
  1016. When defined, the variable overwrites the value of the variable
  1017. \ref STARPU_LIMIT_CUDA_MEM.
  1018. </dd>
  1019. <dt>STARPU_LIMIT_CUDA_MEM</dt>
  1020. <dd>
  1021. \anchor STARPU_LIMIT_CUDA_MEM
  1022. \addindex __env__STARPU_LIMIT_CUDA_MEM
  1023. Specify the maximum number of megabytes that should be
  1024. available to the application on each CUDA devices. This variable is
  1025. intended to be used for experimental purposes as it emulates devices
  1026. that have a limited amount of memory.
  1027. </dd>
  1028. <dt>STARPU_LIMIT_OPENCL_devid_MEM</dt>
  1029. <dd>
  1030. \anchor STARPU_LIMIT_OPENCL_devid_MEM
  1031. \addindex __env__STARPU_LIMIT_OPENCL_devid_MEM
  1032. Specify the maximum number of megabytes that should be
  1033. available to the application on the OpenCL device with the identifier
  1034. <c>devid</c>. This variable is intended to be used for experimental
  1035. purposes as it emulates devices that have a limited amount of memory.
  1036. When defined, the variable overwrites the value of the variable
  1037. \ref STARPU_LIMIT_OPENCL_MEM.
  1038. </dd>
  1039. <dt>STARPU_LIMIT_OPENCL_MEM</dt>
  1040. <dd>
  1041. \anchor STARPU_LIMIT_OPENCL_MEM
  1042. \addindex __env__STARPU_LIMIT_OPENCL_MEM
  1043. Specify the maximum number of megabytes that should be
  1044. available to the application on each OpenCL devices. This variable is
  1045. intended to be used for experimental purposes as it emulates devices
  1046. that have a limited amount of memory.
  1047. </dd>
  1048. <dt>STARPU_LIMIT_CPU_MEM</dt>
  1049. <dd>
  1050. \anchor STARPU_LIMIT_CPU_MEM
  1051. \addindex __env__STARPU_LIMIT_CPU_MEM
  1052. Specify the maximum number of megabytes that should be
  1053. available to the application in the main CPU memory. Setting it enables allocation
  1054. cache in main memory. Setting it to zero lets StarPU overflow memory.
  1055. Note: for now not all StarPU allocations get throttled by this
  1056. parameter. Notably MPI receptions are not throttled unless \ref
  1057. STARPU_MPI_MEM_THROTTLE is set to 1.
  1058. </dd>
  1059. <dt>STARPU_LIMIT_CPU_NUMA_devid_MEM</dt>
  1060. <dd>
  1061. \anchor STARPU_LIMIT_CPU_NUMA_devid_MEM
  1062. \addindex __env__STARPU_LIMIT_CPU_NUMA_devid_MEM
  1063. Specify the maximum number of megabytes that should be available to the
  1064. application on the NUMA node with the OS identifier <c>devid</c>. Setting it
  1065. overrides the value of STARPU_LIMIT_CPU_MEM.
  1066. </dd>
  1067. <dt>STARPU_LIMIT_CPU_NUMA_MEM</dt>
  1068. <dd>
  1069. \anchor STARPU_LIMIT_CPU_NUMA_MEM
  1070. \addindex __env__STARPU_LIMIT_CPU_NUMA_MEM
  1071. Specify the maximum number of megabytes that should be available to the
  1072. application on each NUMA node. This is the same as specifying that same amount
  1073. with \ref STARPU_LIMIT_CPU_NUMA_devid_MEM for each NUMA node number. The total
  1074. memory available to StarPU will thus be this amount multiplied by the number of
  1075. NUMA nodes used by StarPU. Any \ref STARPU_LIMIT_CPU_NUMA_devid_MEM additionally
  1076. specified will take over STARPU_LIMIT_CPU_NUMA_MEM.
  1077. </dd>
  1078. <dt>STARPU_LIMIT_BANDWIDTH</dt>
  1079. <dd>
  1080. \anchor STARPU_LIMIT_BANDWIDTH
  1081. \addindex __env__STARPU_LIMIT_BANDWIDTH
  1082. Specify the maximum available PCI bandwidth of the system in MB/s. This can only
  1083. be effective with simgrid simulation. This allows to easily override the
  1084. bandwidths stored in the platform file generated from measurements on the native
  1085. system. This can be used e.g. for conveniently studying the behavior of
  1086. applications and schedulers under various amounts of available bandwidth.
  1092. </dd>
  1093. <dt>STARPU_MINIMUM_AVAILABLE_MEM</dt>
  1094. <dd>
  1095. \anchor STARPU_MINIMUM_AVAILABLE_MEM
  1096. \addindex __env__STARPU_MINIMUM_AVAILABLE_MEM
  1097. Specify the minimum percentage of memory that should be available in GPUs
  1098. (or in main memory, when using out of core), below which a reclaiming pass is
  1099. performed. The default is 0%.
  1100. </dd>
  1101. <dt>STARPU_TARGET_AVAILABLE_MEM</dt>
  1102. <dd>
  1103. \anchor STARPU_TARGET_AVAILABLE_MEM
  1104. \addindex __env__STARPU_TARGET_AVAILABLE_MEM
  1105. Specify the target percentage of memory that should be reached in
  1106. GPUs (or in main memory, when using out of core), when performing a periodic
  1107. reclaiming pass. The default is 0%.
  1108. </dd>
  1109. <dt>STARPU_MINIMUM_CLEAN_BUFFERS</dt>
  1110. <dd>
  1111. \anchor STARPU_MINIMUM_CLEAN_BUFFERS
  1112. \addindex __env__STARPU_MINIMUM_CLEAN_BUFFERS
  1113. Specify the minimum percentage of number of buffers that should be clean in GPUs
  1114. (or in main memory, when using out of core), below which asynchronous writebacks will be
  1115. issued. The default is 5%.
  1116. </dd>
  1117. <dt>STARPU_TARGET_CLEAN_BUFFERS</dt>
  1118. <dd>
  1119. \anchor STARPU_TARGET_CLEAN_BUFFERS
  1120. \addindex __env__STARPU_TARGET_CLEAN_BUFFERS
  1121. Specify the target percentage of number of buffers that should be reached in
  1122. GPUs (or in main memory, when using out of core), when performing an asynchronous
  1123. writeback pass. The default is 10%.
  1124. </dd>
  1125. <dt>STARPU_DISK_SWAP</dt>
  1126. <dd>
  1127. \anchor STARPU_DISK_SWAP
  1128. \addindex __env__STARPU_DISK_SWAP
  1129. Specify a path where StarPU can push data when the main memory is getting
  1130. full.
  1131. </dd>
  1132. <dt>STARPU_DISK_SWAP_BACKEND</dt>
  1133. <dd>
  1134. \anchor STARPU_DISK_SWAP_BACKEND
  1135. \addindex __env__STARPU_DISK_SWAP_BACKEND
  1136. Specify the backend to be used by StarPU to push data when the main
  1137. memory is getting full. The default is unistd (i.e. using read/write functions),
  1138. other values are stdio (i.e. using fread/fwrite), unistd_o_direct (i.e. using
  1139. read/write with O_DIRECT), leveldb (i.e. using a leveldb database), and hdf5
  1140. (i.e. using HDF5 library).
  1141. </dd>
  1142. <dt>STARPU_DISK_SWAP_SIZE</dt>
  1143. <dd>
  1144. \anchor STARPU_DISK_SWAP_SIZE
  1145. \addindex __env__STARPU_DISK_SWAP_SIZE
  1146. Specify the maximum size in MiB to be used by StarPU to push data when the main
  1147. memory is getting full. The default is unlimited.
  1148. </dd>
  1149. <dt>STARPU_LIMIT_MAX_SUBMITTED_TASKS</dt>
  1150. <dd>
  1151. \anchor STARPU_LIMIT_MAX_SUBMITTED_TASKS
  1152. \addindex __env__STARPU_LIMIT_MAX_SUBMITTED_TASKS
  1153. Allow users to control the task submission flow by specifying
  1154. to StarPU a maximum number of submitted tasks allowed at a given time, i.e. when
  1155. this limit is reached task submission becomes blocking until enough tasks have
  1156. completed, specified by \ref STARPU_LIMIT_MIN_SUBMITTED_TASKS.
  1157. Setting it enables allocation cache buffer reuse in main memory.
  1158. </dd>
  1159. <dt>STARPU_LIMIT_MIN_SUBMITTED_TASKS</dt>
  1160. <dd>
  1161. \anchor STARPU_LIMIT_MIN_SUBMITTED_TASKS
  1162. \addindex __env__STARPU_LIMIT_MIN_SUBMITTED_TASKS
  1163. Allow users to control the task submission flow by specifying
  1164. to StarPU a submitted task threshold to wait before unblocking task submission. This
  1165. variable has to be used in conjunction with \ref STARPU_LIMIT_MAX_SUBMITTED_TASKS
  1166. which puts the task submission thread to
  1167. sleep. Setting it enables allocation cache buffer reuse in main memory.
  1168. </dd>
  1169. <dt>STARPU_TRACE_BUFFER_SIZE</dt>
  1170. <dd>
  1171. \anchor STARPU_TRACE_BUFFER_SIZE
  1172. \addindex __env__STARPU_TRACE_BUFFER_SIZE
  1173. Set the buffer size for recording trace events in MiB. Setting it to a big
  1174. size allows to avoid pauses in the trace while it is recorded on the disk. This
  1175. however also consumes memory, of course. The default value is 64.
  1176. </dd>
  1177. <dt>STARPU_GENERATE_TRACE</dt>
  1178. <dd>
  1179. \anchor STARPU_GENERATE_TRACE
  1180. \addindex __env__STARPU_GENERATE_TRACE
  1181. When set to <c>1</c>, indicate that StarPU should automatically
  1182. generate a Paje trace when starpu_shutdown() is called.
  1183. </dd>
  1184. <dt>STARPU_GENERATE_TRACE_OPTIONS</dt>
  1185. <dd>
  1186. \anchor STARPU_GENERATE_TRACE_OPTIONS
  1187. \addindex __env__STARPU_GENERATE_TRACE_OPTIONS
  1188. When the variable \ref STARPU_GENERATE_TRACE is set to <c>1</c> to
  1189. generate a Paje trace, this variable can be set to specify options (see
  1190. <c>starpu_fxt_tool --help</c>).
  1191. </dd>
  1192. <dt>STARPU_ENABLE_STATS</dt>
  1193. <dd>
  1194. \anchor STARPU_ENABLE_STATS
  1195. \addindex __env__STARPU_ENABLE_STATS
  1196. When defined, enable gathering various data statistics (\ref DataStatistics).
  1197. </dd>
  1198. <dt>STARPU_MEMORY_STATS</dt>
  1199. <dd>
  1200. \anchor STARPU_MEMORY_STATS
  1201. \addindex __env__STARPU_MEMORY_STATS
  1202. When set to 0, disable the display of memory statistics on data which
  1203. have not been unregistered at the end of the execution (\ref MemoryFeedback).
  1204. </dd>
  1205. <dt>STARPU_MAX_MEMORY_USE</dt>
  1206. <dd>
  1207. \anchor STARPU_MAX_MEMORY_USE
  1208. \addindex __env__STARPU_MAX_MEMORY_USE
  1209. When set to 1, display at the end of the execution the maximum memory used by
  1210. StarPU for internal data structures during execution.
  1211. </dd>
  1212. <dt>STARPU_BUS_STATS</dt>
  1213. <dd>
  1214. \anchor STARPU_BUS_STATS
  1215. \addindex __env__STARPU_BUS_STATS
  1216. When defined, statistics about data transfers will be displayed when calling
  1217. starpu_shutdown() (\ref Profiling). By default, statistics are printed
  1218. on the standard error stream, use the environment variable \ref
  1219. STARPU_BUS_STATS_FILE to define another filename.
  1220. </dd>
  1221. <dt>STARPU_BUS_STATS_FILE</dt>
  1222. <dd>
  1223. \anchor STARPU_BUS_STATS_FILE
  1224. \addindex __env__STARPU_BUS_STATS_FILE
  1225. Define the name of the file where to display data transfers
  1226. statistics, see \ref STARPU_BUS_STATS.
  1227. </dd>
  1228. <dt>STARPU_WORKER_STATS</dt>
  1229. <dd>
  1230. \anchor STARPU_WORKER_STATS
  1231. \addindex __env__STARPU_WORKER_STATS
  1232. When defined, statistics about the workers will be displayed when calling
  1233. starpu_shutdown() (\ref Profiling). When combined with the
  1234. environment variable \ref STARPU_PROFILING, it displays the energy
  1235. consumption (\ref Energy-basedScheduling). By default, statistics are
  1236. printed on the standard error stream, use the environment variable
  1237. \ref STARPU_WORKER_STATS_FILE to define another filename.
  1238. </dd>
  1239. <dt>STARPU_WORKER_STATS_FILE</dt>
  1240. <dd>
  1241. \anchor STARPU_WORKER_STATS_FILE
  1242. \addindex __env__STARPU_WORKER_STATS_FILE
  1243. Define the name of the file where to display workers statistics, see
  1244. \ref STARPU_WORKER_STATS.
  1245. </dd>
  1246. <dt>STARPU_STATS</dt>
  1247. <dd>
  1248. \anchor STARPU_STATS
  1249. \addindex __env__STARPU_STATS
  1250. When set to 0, data statistics will not be displayed at the
  1251. end of the execution of an application (\ref DataStatistics).
  1252. </dd>
  1253. <dt>STARPU_WATCHDOG_TIMEOUT</dt>
  1254. <dd>
  1255. \anchor STARPU_WATCHDOG_TIMEOUT
  1256. \addindex __env__STARPU_WATCHDOG_TIMEOUT
  1257. When set to a value other than 0, makes StarPU print an error
  1258. message whenever StarPU does not terminate any task for the given time (in µs),
  1259. but lets the application continue normally. Should
  1260. be used in combination with \ref STARPU_WATCHDOG_CRASH
  1261. (see \ref DetectionStuckConditions).
  1262. </dd>
  1263. <dt>STARPU_WATCHDOG_CRASH</dt>
  1264. <dd>
  1265. \anchor STARPU_WATCHDOG_CRASH
  1266. \addindex __env__STARPU_WATCHDOG_CRASH
  1267. When set to a value other than 0, trigger a crash when the watchdog
  1268. timeout is reached, thus allowing to catch the situation in gdb, etc
  1269. (see \ref DetectionStuckConditions)
  1270. </dd>
  1271. <dt>STARPU_WATCHDOG_DELAY</dt>
  1272. <dd>
  1273. \anchor STARPU_WATCHDOG_DELAY
  1274. \addindex __env__STARPU_WATCHDOG_DELAY
  1275. Delay the activation of the watchdog by the given time (in µs). This can
  1276. be convenient for letting the application initialize data etc. before starting
  1277. to look for idle time.
  1278. </dd>
  1279. <dt>STARPU_TASK_PROGRESS</dt>
  1280. <dd>
  1281. \anchor STARPU_TASK_PROGRESS
  1282. \addindex __env__STARPU_TASK_PROGRESS
  1283. Print the progression of tasks. This is convenient to determine whether a
  1284. program is making progress in task execution, or is just stuck.
  1285. </dd>
  1286. <dt>STARPU_TASK_BREAK_ON_PUSH</dt>
  1287. <dd>
  1288. \anchor STARPU_TASK_BREAK_ON_PUSH
  1289. \addindex __env__STARPU_TASK_BREAK_ON_PUSH
  1290. When this variable contains a job id, StarPU will raise SIGTRAP when the task
  1291. with that job id is being pushed to the scheduler, which will be nicely caught by debuggers
  1292. (see \ref DebuggingScheduling)
  1293. </dd>
  1294. <dt>STARPU_TASK_BREAK_ON_SCHED</dt>
  1295. <dd>
  1296. \anchor STARPU_TASK_BREAK_ON_SCHED
  1297. \addindex __env__STARPU_TASK_BREAK_ON_SCHED
  1298. When this variable contains a job id, StarPU will raise SIGTRAP when the task
  1299. with that job id is being scheduled by the scheduler (at a scheduler-specific
  1300. point), which will be nicely caught by debuggers.
  1301. This only works for schedulers which have such a scheduling point defined
  1302. (see \ref DebuggingScheduling)
  1303. </dd>
  1304. <dt>STARPU_TASK_BREAK_ON_POP</dt>
  1305. <dd>
  1306. \anchor STARPU_TASK_BREAK_ON_POP
  1307. \addindex __env__STARPU_TASK_BREAK_ON_POP
  1308. When this variable contains a job id, StarPU will raise SIGTRAP when the task
  1309. with that job id is being popped from the scheduler, which will be nicely caught by debuggers
  1310. (see \ref DebuggingScheduling)
  1311. </dd>
  1312. <dt>STARPU_TASK_BREAK_ON_EXEC</dt>
  1313. <dd>
  1314. \anchor STARPU_TASK_BREAK_ON_EXEC
  1315. \addindex __env__STARPU_TASK_BREAK_ON_EXEC
  1316. When this variable contains a job id, StarPU will raise SIGTRAP when the task
  1317. with that job id is being executed, which will be nicely caught by debuggers
  1318. (see \ref DebuggingScheduling)
  1319. </dd>
  1320. <dt>STARPU_DISABLE_KERNELS</dt>
  1321. <dd>
  1322. \anchor STARPU_DISABLE_KERNELS
  1323. \addindex __env__STARPU_DISABLE_KERNELS
  1324. When set to a value other than 1, it disables actually calling the kernel
  1325. functions, thus allowing to quickly check that the task scheme is working
  1326. properly, without performing the actual application-provided computation.
  1327. </dd>
  1328. <dt>STARPU_HISTORY_MAX_ERROR</dt>
  1329. <dd>
  1330. \anchor STARPU_HISTORY_MAX_ERROR
  1331. \addindex __env__STARPU_HISTORY_MAX_ERROR
  1332. History-based performance models will drop measurements which are really far
  1333. from the measured average. This specifies the allowed variation. The default is
  1334. 50 (%), i.e. the measurement is allowed to be x1.5 faster or /1.5 slower than the
  1335. average.
  1336. </dd>
  1337. <dt>STARPU_RAND_SEED</dt>
  1338. <dd>
  1339. \anchor STARPU_RAND_SEED
  1340. \addindex __env__STARPU_RAND_SEED
  1341. The random scheduler and some examples use random numbers for their own
  1342. working. Depending on the examples, the seed is by default just always 0 or
  1343. the current time() (unless SimGrid mode is enabled, in which case it is always
  1344. 0). \ref STARPU_RAND_SEED allows to set the seed to a specific value.
  1345. </dd>
  1346. <dt>STARPU_GLOBAL_ARBITER</dt>
  1347. <dd>
  1348. \anchor STARPU_GLOBAL_ARBITER
  1349. \addindex __env__STARPU_GLOBAL_ARBITER
  1350. When set to a positive value, StarPU will create an arbiter, which
  1351. implements an advanced but centralized management of concurrent data
  1352. accesses (see \ref ConcurrentDataAccess).
  1353. </dd>
  1354. <dt>STARPU_USE_NUMA</dt>
  1355. <dd>
  1356. \anchor STARPU_USE_NUMA
  1357. \addindex __env__STARPU_USE_NUMA
  1358. When defined, NUMA nodes are taken into account by StarPU. Otherwise, memory
  1359. is considered as only one node. This is experimental for now.
  1360. When enabled, ::STARPU_MAIN_RAM is a pointer to the NUMA node associated to the
  1361. first CPU worker if it exists, the NUMA node associated to the first GPU discovered otherwise.
  1362. If StarPU doesn't find any NUMA node after these steps, ::STARPU_MAIN_RAM is the first NUMA node
  1363. discovered by StarPU.
  1364. </dd>
  1365. <dt>STARPU_IDLE_FILE</dt>
  1366. <dd>
  1367. \anchor STARPU_IDLE_FILE
  1368. \addindex __env__STARPU_IDLE_FILE
  1369. When defined, a file named after its contents will be created at the
  1370. end of the execution. This file will contain the sum of the idle times
  1371. of all the workers.
  1372. </dd>
  1373. <dt>STARPU_HWLOC_INPUT</dt>
  1374. <dd>
  1375. \anchor STARPU_HWLOC_INPUT
  1376. \addindex __env__STARPU_HWLOC_INPUT
  1377. When defined to the path of an XML file, \c hwloc will use this file
  1378. as input instead of detecting the current platform topology, which can
  1379. save significant initialization time.
  1380. To produce this XML file, use <c>lstopo file.xml</c>
  1381. </dd>
  1382. <dt>STARPU_CATCH_SIGNALS</dt>
  1383. <dd>
  1384. \anchor STARPU_CATCH_SIGNALS
  1385. \addindex __env__STARPU_CATCH_SIGNALS
  1386. By default, StarPU catches signals \c SIGINT, \c SIGSEGV and \c SIGTRAP to
  1387. perform final actions such as dumping FxT trace files even though the
  1388. application has crashed. Setting this variable to a value other than 1
  1389. will disable this behaviour. This should be done on JVM systems which
  1390. may use these signals for their own needs.
  1391. The flag can also be set through the field starpu_conf::catch_signals.
  1392. </dd>
  1393. <dt>STARPU_DISPLAY_BINDINGS</dt>
  1394. <dd>
  1395. \anchor STARPU_DISPLAY_BINDINGS
  1396. \addindex __env__STARPU_DISPLAY_BINDINGS
  1397. Display the binding of all processes and threads running on the machine. If MPI is enabled, display the binding of each node.<br>
  1398. Users can manually display the binding by calling starpu_display_bindings().
  1399. </dd>
  1400. </dl>
  1401. \section ConfiguringTheHypervisor Configuring The Hypervisor
  1402. <dl>
  1403. <dt>SC_HYPERVISOR_POLICY</dt>
  1404. <dd>
  1405. \anchor SC_HYPERVISOR_POLICY
  1406. \addindex __env__SC_HYPERVISOR_POLICY
  1407. Choose between the different resizing policies proposed by StarPU for the hypervisor:
  1408. idle, app_driven, feft_lp, teft_lp, ispeed_lp, throughput_lp, etc.
  1409. Use <c>SC_HYPERVISOR_POLICY=help</c> to get the list of available policies for the hypervisor
  1410. </dd>
  1411. <dt>SC_HYPERVISOR_TRIGGER_RESIZE</dt>
  1412. <dd>
  1413. \anchor SC_HYPERVISOR_TRIGGER_RESIZE
  1414. \addindex __env__SC_HYPERVISOR_TRIGGER_RESIZE
  1415. Choose how should the hypervisor be triggered: <c>speed</c> if the resizing algorithm should
  1416. be called whenever the speed of the context does not correspond to an optimal precomputed value,
  1417. <c>idle</c> if the resizing algorithm should be called whenever the workers are idle for a period
  1418. longer than the value indicated when configuring the hypervisor.
  1419. </dd>
  1420. <dt>SC_HYPERVISOR_START_RESIZE</dt>
  1421. <dd>
  1422. \anchor SC_HYPERVISOR_START_RESIZE
  1423. \addindex __env__SC_HYPERVISOR_START_RESIZE
  1424. Indicate the moment when the resizing should be available. The value corresponds to the percentage
  1425. of the total time of execution of the application. The default value is the resizing frame.
  1426. </dd>
  1427. <dt>SC_HYPERVISOR_MAX_SPEED_GAP</dt>
  1428. <dd>
  1429. \anchor SC_HYPERVISOR_MAX_SPEED_GAP
  1430. \addindex __env__SC_HYPERVISOR_MAX_SPEED_GAP
  1431. Indicate the ratio of speed difference between contexts that should trigger the hypervisor.
  1432. This situation may occur only when a theoretical speed could not be computed and the hypervisor
  1433. has no value to compare the speed to. Otherwise the resizing of a context is not influenced by
  1434. the speed of the other contexts, but only by the value that a context should have.
  1435. </dd>
  1436. <dt>SC_HYPERVISOR_STOP_PRINT</dt>
  1437. <dd>
  1438. \anchor SC_HYPERVISOR_STOP_PRINT
  1439. \addindex __env__SC_HYPERVISOR_STOP_PRINT
  1440. By default, the values of the speed of the workers are printed during the execution
  1441. of the application. If the value 1 is given to this environment variable, this printing
  1442. is not done.
  1443. </dd>
  1444. <dt>SC_HYPERVISOR_LAZY_RESIZE</dt>
  1445. <dd>
  1446. \anchor SC_HYPERVISOR_LAZY_RESIZE
  1447. \addindex __env__SC_HYPERVISOR_LAZY_RESIZE
  1448. By default the hypervisor resizes the contexts in a lazy way, that is, workers are first added to a new context
  1449. before removing them from the previous one. Once these workers are clearly taken into account
  1450. in the new context (a task was popped there), they are removed from the previous one. However, if the application
  1451. requires that the change in the distribution of workers take effect right away, this variable should be set to 0.
  1452. </dd>
  1453. <dt>SC_HYPERVISOR_SAMPLE_CRITERIA</dt>
  1454. <dd>
  1455. \anchor SC_HYPERVISOR_SAMPLE_CRITERIA
  1456. \addindex __env__SC_HYPERVISOR_SAMPLE_CRITERIA
  1457. By default the hypervisor uses a sample of flops when computing the speed of the contexts and of the workers.
  1458. If this variable is set to <c>time</c>, the hypervisor uses a sample of time (10% of an approximation of the total
  1459. execution time of the application).
  1460. </dd>
  1461. </dl>
  1462. */