/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2016 Uppsala University
 * Copyright (C) 2020 Federal University of Rio Grande do Sul (UFRGS)
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*! \page ExecutionConfigurationThroughEnvironmentVariables Execution Configuration Through Environment Variables
The behavior of the StarPU library and tools may be tuned thanks to
the following environment variables.
\section EnvConfiguringWorkers Configuring Workers
<dl>
<dt>STARPU_NCPU</dt>
<dd>
\anchor STARPU_NCPU
\addindex __env__STARPU_NCPU
Specify the number of CPU workers (thus not including workers
dedicated to control accelerators). Note that by default, StarPU will
not allocate more CPU workers than there are physical CPUs, and that
some CPUs are used to control the accelerators.
</dd>
<dt>STARPU_RESERVE_NCPU</dt>
<dd>
\anchor STARPU_RESERVE_NCPU
\addindex __env__STARPU_RESERVE_NCPU
Specify the number of CPU cores that should not be used by StarPU, so the
application can use starpu_get_next_bindid() and starpu_bind_thread_on() to bind
its own threads.
This option is ignored if \ref STARPU_NCPU or starpu_conf::ncpus is set.
</dd>
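As an illustration, here is a minimal sketch (assuming a POSIX system with at
least three available cores; the values 4 and 2 are arbitrary) that sets these
variables from the program itself, before StarPU is initialized:

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Must be done before starpu_init(), since the variables are read at initialization. */
	setenv("STARPU_NCPU", "4", 1);         /* at most 4 CPU workers */
	setenv("STARPU_RESERVE_NCPU", "2", 1); /* keep 2 cores free for application threads */

	if (starpu_init(NULL) != 0)
		return 1;

	/* The reserved cores can then be claimed with starpu_get_next_bindid()
	 * and bound to with starpu_bind_thread_on(), as described above. */

	starpu_shutdown();
	return 0;
}
\endcode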
<dt>STARPU_NCPUS</dt>
<dd>
\anchor STARPU_NCPUS
\addindex __env__STARPU_NCPUS
This variable is deprecated. You should use \ref STARPU_NCPU.
</dd>
<dt>STARPU_NCUDA</dt>
<dd>
\anchor STARPU_NCUDA
\addindex __env__STARPU_NCUDA
Specify the number of CUDA devices that StarPU can use. If
\ref STARPU_NCUDA is lower than the number of physical devices, it is
possible to select which CUDA devices should be used by the means of the
environment variable \ref STARPU_WORKERS_CUDAID. By default, StarPU will
create as many CUDA workers as there are CUDA devices.
</dd>
<dt>STARPU_NWORKER_PER_CUDA</dt>
<dd>
\anchor STARPU_NWORKER_PER_CUDA
\addindex __env__STARPU_NWORKER_PER_CUDA
Specify the number of workers per CUDA device, and thus the number of kernels
which will be concurrently running on the devices. The default value is 1.
</dd>
<dt>STARPU_CUDA_THREAD_PER_WORKER</dt>
<dd>
\anchor STARPU_CUDA_THREAD_PER_WORKER
\addindex __env__STARPU_CUDA_THREAD_PER_WORKER
Specify whether the CUDA driver should use one thread per stream (1) or a
single thread to drive all the streams of the device or of all devices (0);
\ref STARPU_CUDA_THREAD_PER_DEV then determines whether it is one thread per device or one
thread for all devices. The default value is 0. Setting it to 1 conflicts
with setting \ref STARPU_CUDA_THREAD_PER_DEV.
</dd>
<dt>STARPU_CUDA_THREAD_PER_DEV</dt>
<dd>
\anchor STARPU_CUDA_THREAD_PER_DEV
\addindex __env__STARPU_CUDA_THREAD_PER_DEV
Specify whether the CUDA driver should use one thread per device (1) or a
single thread to drive all the devices (0). The default value is 1. It does not
make sense to set this variable if \ref STARPU_CUDA_THREAD_PER_WORKER is set to 1
(since \ref STARPU_CUDA_THREAD_PER_DEV is then meaningless).
</dd>
<dt>STARPU_CUDA_PIPELINE</dt>
<dd>
\anchor STARPU_CUDA_PIPELINE
\addindex __env__STARPU_CUDA_PIPELINE
Specify how many asynchronous tasks are submitted in advance on CUDA
devices. This for instance permits overlapping task management with the execution
of previous tasks, but it also allows concurrent execution on Fermi cards, which
otherwise bring spurious synchronizations. The default is 2. Setting the value to 0 forces a synchronous
execution of all tasks.
</dd>
<dt>STARPU_NOPENCL</dt>
<dd>
\anchor STARPU_NOPENCL
\addindex __env__STARPU_NOPENCL
OpenCL equivalent of the environment variable \ref STARPU_NCUDA.
</dd>
<dt>STARPU_OPENCL_PIPELINE</dt>
<dd>
\anchor STARPU_OPENCL_PIPELINE
\addindex __env__STARPU_OPENCL_PIPELINE
Specify how many asynchronous tasks are submitted in advance on OpenCL
devices. This for instance permits overlapping task management with the execution
of previous tasks, but it also allows concurrent execution on Fermi cards, which
otherwise bring spurious synchronizations. The default is 2. Setting the value to 0 forces a synchronous
execution of all tasks.
</dd>
<dt>STARPU_OPENCL_ON_CPUS</dt>
<dd>
\anchor STARPU_OPENCL_ON_CPUS
\addindex __env__STARPU_OPENCL_ON_CPUS
By default, the OpenCL driver only enables GPU and accelerator
devices. By setting the environment variable \ref STARPU_OPENCL_ON_CPUS
to 1, the OpenCL driver will also enable CPU devices.
</dd>
<dt>STARPU_OPENCL_ONLY_ON_CPUS</dt>
<dd>
\anchor STARPU_OPENCL_ONLY_ON_CPUS
\addindex __env__STARPU_OPENCL_ONLY_ON_CPUS
By default, the OpenCL driver enables GPU and accelerator
devices. By setting the environment variable \ref STARPU_OPENCL_ONLY_ON_CPUS
to 1, the OpenCL driver will ONLY enable CPU devices.
</dd>
<dt>STARPU_NMIC</dt>
<dd>
\anchor STARPU_NMIC
\addindex __env__STARPU_NMIC
MIC equivalent of the environment variable \ref STARPU_NCUDA, i.e. the number of
MIC devices to use.
</dd>
<dt>STARPU_NMICTHREADS</dt>
<dd>
\anchor STARPU_NMICTHREADS
\addindex __env__STARPU_NMICTHREADS
Number of threads to use on the MIC devices.
</dd>
<dt>STARPU_NMPI_MS</dt>
<dd>
\anchor STARPU_NMPI_MS
\addindex __env__STARPU_NMPI_MS
MPI Master Slave equivalent of the environment variable \ref STARPU_NCUDA, i.e. the number of
MPI Master Slave devices to use.
</dd>
<dt>STARPU_NMPIMSTHREADS</dt>
<dd>
\anchor STARPU_NMPIMSTHREADS
\addindex __env__STARPU_NMPIMSTHREADS
Number of threads to use on the MPI Slave devices.
</dd>
<dt>STARPU_MPI_MASTER_NODE</dt>
<dd>
\anchor STARPU_MPI_MASTER_NODE
\addindex __env__STARPU_MPI_MASTER_NODE
This variable allows choosing which MPI node (identified by its MPI ID) will be the master.
</dd>
<dt>STARPU_WORKERS_NOBIND</dt>
<dd>
\anchor STARPU_WORKERS_NOBIND
\addindex __env__STARPU_WORKERS_NOBIND
Setting it to non-zero will prevent StarPU from binding its threads to
CPUs. This is for instance useful when running the testsuite in parallel.
</dd>
<dt>STARPU_WORKERS_GETBIND</dt>
<dd>
\anchor STARPU_WORKERS_GETBIND
\addindex __env__STARPU_WORKERS_GETBIND
Setting it to non-zero makes StarPU use the OS-provided CPU binding to determine
how many and which CPU cores it should use. This is notably useful when running
several StarPU-MPI processes on the same host, to let the MPI launcher set the
CPUs to be used.
</dd>
<dt>STARPU_WORKERS_CPUID</dt>
<dd>
\anchor STARPU_WORKERS_CPUID
\addindex __env__STARPU_WORKERS_CPUID
Passing an array of integers in \ref STARPU_WORKERS_CPUID
specifies on which logical CPU the different workers should be
bound. For instance, if <c>STARPU_WORKERS_CPUID = "0 1 4 5"</c>, the first
worker will be bound to logical CPU #0, the second CPU worker will be bound to
logical CPU #1 and so on. Note that the logical ordering of the CPUs is either
determined by the OS, or provided by the library <c>hwloc</c> in case it is
available. Ranges can be provided: for instance, <c>STARPU_WORKERS_CPUID = "1-3
5"</c> will bind the first three workers on logical CPUs #1, #2, and #3, and the
fourth worker on logical CPU #5. Unbound ranges can also be provided:
<c>STARPU_WORKERS_CPUID = "1-"</c> will bind the workers starting from logical
CPU #1 up to the last CPU.
Note that the first workers correspond to the CUDA workers, then come the
OpenCL workers, and finally the CPU workers. For example if
we have <c>STARPU_NCUDA=1</c>, <c>STARPU_NOPENCL=1</c>, <c>STARPU_NCPU=2</c>
and <c>STARPU_WORKERS_CPUID = "0 2 1 3"</c>, the CUDA device will be controlled
by logical CPU #0, the OpenCL device will be controlled by logical CPU #2, and
the logical CPUs #1 and #3 will be used by the CPU workers.
If the number of workers is larger than the array given in
\ref STARPU_WORKERS_CPUID, the workers are bound to the logical CPUs in a
round-robin fashion: if <c>STARPU_WORKERS_CPUID = "0 1"</c>, the first
and the third (resp. second and fourth) workers will be put on CPU #0
(resp. CPU #1).
This variable is ignored if the field
starpu_conf::use_explicit_workers_bindid passed to starpu_init() is
set.
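The same binding as in the <c>"0 1 4 5"</c> example above can be requested
programmatically. The sketch below is only illustrative: it assumes that
struct starpu_conf also provides a companion array field <c>workers_bindid</c>
next to starpu_conf::use_explicit_workers_bindid, and that the machine has at
least six logical CPUs.

\code{.c}
#include <starpu.h>

int main(void)
{
	struct starpu_conf conf;
	starpu_conf_init(&conf);

	/* Programmatic equivalent of STARPU_WORKERS_CPUID = "0 1 4 5"
	 * (workers_bindid is an assumed field name). */
	unsigned bindid[] = { 0, 1, 4, 5 };
	conf.use_explicit_workers_bindid = 1;
	for (unsigned i = 0; i < 4; i++)
		conf.workers_bindid[i] = bindid[i];

	if (starpu_init(&conf) != 0)
		return 1;
	starpu_shutdown();
	return 0;
}
\endcode

When starpu_conf::use_explicit_workers_bindid is set like this, the
\ref STARPU_WORKERS_CPUID environment variable is ignored, as noted above.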
</dd>
<dt>STARPU_MAIN_THREAD_BIND</dt>
<dd>
\anchor STARPU_MAIN_THREAD_BIND
\addindex __env__STARPU_MAIN_THREAD_BIND
When defined, this makes StarPU bind the thread that calls starpu_initialize() to
a reserved CPU, subtracted from the CPU workers.
</dd>
<dt>STARPU_MAIN_THREAD_CPUID</dt>
<dd>
\anchor STARPU_MAIN_THREAD_CPUID
\addindex __env__STARPU_MAIN_THREAD_CPUID
When defined, this makes StarPU bind the thread that calls starpu_initialize() to
the given CPU ID.
</dd>
<dt>STARPU_MPI_THREAD_CPUID</dt>
<dd>
\anchor STARPU_MPI_THREAD_CPUID
\addindex __env__STARPU_MPI_THREAD_CPUID
When defined, this makes StarPU bind its MPI thread to the given CPU ID. Setting
it to -1 (the default value) will use a reserved CPU, subtracted from the CPU
workers.
</dd>
<dt>STARPU_MPI_NOBIND</dt>
<dd>
\anchor STARPU_MPI_NOBIND
\addindex __env__STARPU_MPI_NOBIND
Setting it to non-zero will prevent StarPU from binding the MPI thread to
a separate core. This is for instance useful when running the testsuite on a single system.
</dd>
<dt>STARPU_WORKERS_CUDAID</dt>
<dd>
\anchor STARPU_WORKERS_CUDAID
\addindex __env__STARPU_WORKERS_CUDAID
Similarly to the \ref STARPU_WORKERS_CPUID environment variable, it is
possible to select which CUDA devices should be used by StarPU. On a machine
equipped with 4 GPUs, setting <c>STARPU_WORKERS_CUDAID = "1 3"</c> and
<c>STARPU_NCUDA=2</c> specifies that 2 CUDA workers should be created, and that
they should use CUDA devices #1 and #3 (the logical ordering of the devices is
the one reported by CUDA).
This variable is ignored if the field
starpu_conf::use_explicit_workers_cuda_gpuid passed to starpu_init()
is set.
</dd>
<dt>STARPU_WORKERS_OPENCLID</dt>
<dd>
\anchor STARPU_WORKERS_OPENCLID
\addindex __env__STARPU_WORKERS_OPENCLID
OpenCL equivalent of the \ref STARPU_WORKERS_CUDAID environment variable.
This variable is ignored if the field
starpu_conf::use_explicit_workers_opencl_gpuid passed to starpu_init()
is set.
</dd>
<dt>STARPU_WORKERS_MICID</dt>
<dd>
\anchor STARPU_WORKERS_MICID
\addindex __env__STARPU_WORKERS_MICID
MIC equivalent of the \ref STARPU_WORKERS_CUDAID environment variable.
This variable is ignored if the field
starpu_conf::use_explicit_workers_mic_deviceid passed to starpu_init()
is set.
</dd>
<dt>STARPU_WORKER_TREE</dt>
<dd>
\anchor STARPU_WORKER_TREE
\addindex __env__STARPU_WORKER_TREE
Define to 1 to enable the tree iterator in schedulers.
</dd>
<dt>STARPU_SINGLE_COMBINED_WORKER</dt>
<dd>
\anchor STARPU_SINGLE_COMBINED_WORKER
\addindex __env__STARPU_SINGLE_COMBINED_WORKER
If set, StarPU will create several workers which won't be able to work
concurrently. It will by default create combined workers whose size goes from 1
to the total number of CPU workers in the system. \ref STARPU_MIN_WORKERSIZE
and \ref STARPU_MAX_WORKERSIZE can be used to change this default.
</dd>
<dt>STARPU_MIN_WORKERSIZE</dt>
<dd>
\anchor STARPU_MIN_WORKERSIZE
\addindex __env__STARPU_MIN_WORKERSIZE
\ref STARPU_MIN_WORKERSIZE
permits to specify the minimum size of the combined workers (instead of the default 2).
</dd>
<dt>STARPU_MAX_WORKERSIZE</dt>
<dd>
\anchor STARPU_MAX_WORKERSIZE
\addindex __env__STARPU_MAX_WORKERSIZE
\ref STARPU_MAX_WORKERSIZE
permits to specify the maximum size of the combined workers (instead of the
total number of CPU workers in the system).
</dd>
<dt>STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER</dt>
<dd>
\anchor STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER
\addindex __env__STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER
Let the user decide how many elements are allowed between combined workers
created from hwloc information. For instance, in the case of sockets with 6
cores without shared L2 caches, if \ref STARPU_SYNTHESIZE_ARITY_COMBINED_WORKER is
set to 6, no combined worker will be synthesized beyond one for the socket
and one per core. If it is set to 3, 3 intermediate combined workers will be
synthesized, to divide the socket cores into 3 chunks of 2 cores. If it is set to
2, 2 intermediate combined workers will be synthesized, to divide the socket
cores into 2 chunks of 3 cores, and then 3 additional combined workers will be
synthesized, to divide the former synthesized workers into a bunch of 2 cores,
and the remaining core (for which no combined worker is synthesized since there
is already a normal worker for it).
The default, 2, thus makes StarPU tend to build binary trees of combined
workers.
</dd>
<dt>STARPU_DISABLE_ASYNCHRONOUS_COPY</dt>
<dd>
\anchor STARPU_DISABLE_ASYNCHRONOUS_COPY
\addindex __env__STARPU_DISABLE_ASYNCHRONOUS_COPY
Disable asynchronous copies between CPU and GPU devices.
The AMD implementation of OpenCL is known to
fail when copying data asynchronously. When using this implementation,
it is therefore necessary to disable asynchronous data transfers.
</dd>
<dt>STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY</dt>
<dd>
\anchor STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY
\addindex __env__STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY
Disable asynchronous copies between CPU and CUDA devices.
</dd>
<dt>STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY</dt>
<dd>
\anchor STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY
\addindex __env__STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY
Disable asynchronous copies between CPU and OpenCL devices.
The AMD implementation of OpenCL is known to
fail when copying data asynchronously. When using this implementation,
it is therefore necessary to disable asynchronous data transfers.
</dd>
<dt>STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY</dt>
<dd>
\anchor STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY
\addindex __env__STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY
Disable asynchronous copies between CPU and MIC devices.
</dd>
<dt>STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY</dt>
<dd>
\anchor STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY
\addindex __env__STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY
Disable asynchronous copies between CPU and MPI Slave devices.
</dd>
<dt>STARPU_ENABLE_CUDA_GPU_GPU_DIRECT</dt>
<dd>
\anchor STARPU_ENABLE_CUDA_GPU_GPU_DIRECT
\addindex __env__STARPU_ENABLE_CUDA_GPU_GPU_DIRECT
Enable (1) or Disable (0) direct CUDA transfers from GPU to GPU, without copying
through RAM. The default is Enabled.
This permits testing the performance effect of GPU-Direct.
</dd>
<dt>STARPU_DISABLE_PINNING</dt>
<dd>
\anchor STARPU_DISABLE_PINNING
\addindex __env__STARPU_DISABLE_PINNING
Disable (1) or Enable (0) pinning host memory allocated through starpu_malloc(), starpu_memory_pin()
and friends. The default is Enabled.
This permits testing the performance effect of memory pinning.
</dd>
<dt>STARPU_BACKOFF_MIN</dt>
<dd>
\anchor STARPU_BACKOFF_MIN
\addindex __env__STARPU_BACKOFF_MIN
Set the minimum exponential backoff, in number of cycles to pause when spinning. The default value is 1.
</dd>
<dt>STARPU_BACKOFF_MAX</dt>
<dd>
\anchor STARPU_BACKOFF_MAX
\addindex __env__STARPU_BACKOFF_MAX
Set the maximum exponential backoff, in number of cycles to pause when spinning. The default value is 32.
</dd>
<dt>STARPU_MIC_SINK_PROGRAM_NAME</dt>
<dd>
\anchor STARPU_MIC_SINK_PROGRAM_NAME
\addindex __env__STARPU_MIC_SINK_PROGRAM_NAME
todo
</dd>
<dt>STARPU_MIC_SINK_PROGRAM_PATH</dt>
<dd>
\anchor STARPU_MIC_SINK_PROGRAM_PATH
\addindex __env__STARPU_MIC_SINK_PROGRAM_PATH
todo
</dd>
<dt>STARPU_MIC_PROGRAM_PATH</dt>
<dd>
\anchor STARPU_MIC_PROGRAM_PATH
\addindex __env__STARPU_MIC_PROGRAM_PATH
todo
</dd>
</dl>
\section ConfiguringTheSchedulingEngine Configuring The Scheduling Engine
<dl>
<dt>STARPU_SCHED</dt>
<dd>
\anchor STARPU_SCHED
\addindex __env__STARPU_SCHED
Choose between the different scheduling policies proposed by StarPU: random,
work stealing, greedy, with performance models, etc.
Use <c>STARPU_SCHED=help</c> to get the list of available schedulers.
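For instance, a program can select a policy for itself by setting the variable
before initialization; the <c>dmda</c> policy used here is mentioned further
below and is chosen purely as an example:

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Select the scheduler before starpu_init() reads the environment. */
	setenv("STARPU_SCHED", "dmda", 1);

	if (starpu_init(NULL) != 0)
		return 1;
	/* ... submit tasks ... */
	starpu_shutdown();
	return 0;
}
\endcode

Running the program with <c>STARPU_SCHED=help</c> set in the environment prints
the full list of policies instead.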
</dd>
<dt>STARPU_MIN_PRIO</dt>
<dd>
\anchor STARPU_MIN_PRIO_env
\addindex __env__STARPU_MIN_PRIO
Set the minimum priority used by priorities-aware schedulers.
</dd>
<dt>STARPU_MAX_PRIO</dt>
<dd>
\anchor STARPU_MAX_PRIO_env
\addindex __env__STARPU_MAX_PRIO
Set the maximum priority used by priorities-aware schedulers.
</dd>
<dt>STARPU_CALIBRATE</dt>
<dd>
\anchor STARPU_CALIBRATE
\addindex __env__STARPU_CALIBRATE
If this variable is set to 1, the performance models are calibrated during
the execution. If it is set to 2, the previous values are dropped to restart
calibration from scratch. Setting this variable to 0 disables calibration; this
is the default behaviour.
Note: this currently only applies to <c>dm</c> and <c>dmda</c> scheduling policies.
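A typical calibration run can thus be sketched as follows (a hypothetical
workflow combining variables described in this section):

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Use a performance-model-based scheduler and (re)calibrate its models. */
	setenv("STARPU_SCHED", "dmda", 1);
	setenv("STARPU_CALIBRATE", "1", 1);

	if (starpu_init(NULL) != 0)
		return 1;
	/* ... run a representative workload so that measurements are recorded ... */
	starpu_shutdown();
	return 0;
}
\endcode

Once enough measurements have been gathered (see \ref STARPU_CALIBRATE_MINIMUM
below), subsequent runs can leave \ref STARPU_CALIBRATE unset to reuse the
recorded models.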
</dd>
<dt>STARPU_CALIBRATE_MINIMUM</dt>
<dd>
\anchor STARPU_CALIBRATE_MINIMUM
\addindex __env__STARPU_CALIBRATE_MINIMUM
Define the minimum number of calibration measurements that will be made
before considering that the performance model is calibrated. The default value is 10.
</dd>
<dt>STARPU_BUS_CALIBRATE</dt>
<dd>
\anchor STARPU_BUS_CALIBRATE
\addindex __env__STARPU_BUS_CALIBRATE
If this variable is set to 1, the bus is recalibrated during initialization.
</dd>
<dt>STARPU_PREFETCH</dt>
<dd>
\anchor STARPU_PREFETCH
\addindex __env__STARPU_PREFETCH
Indicate whether data prefetching should be enabled (0 means
that it is disabled). If prefetching is enabled, when a task is scheduled to be
executed e.g. on a GPU, StarPU will request an asynchronous transfer in
advance, so that data is already present on the GPU when the task starts. As a
result, computation and data transfers are overlapped.
Note that prefetching is enabled by default in StarPU.
</dd>
<dt>STARPU_SCHED_ALPHA</dt>
<dd>
\anchor STARPU_SCHED_ALPHA
\addindex __env__STARPU_SCHED_ALPHA
To estimate the cost of a task StarPU takes into account the estimated
computation time (obtained thanks to performance models). The alpha factor is
the coefficient to be applied to it before adding it to the communication part.
</dd>
<dt>STARPU_SCHED_BETA</dt>
<dd>
\anchor STARPU_SCHED_BETA
\addindex __env__STARPU_SCHED_BETA
To estimate the cost of a task StarPU takes into account the estimated
data transfer time (obtained thanks to performance models). The beta factor is
the coefficient to be applied to it before adding it to the computation part.
</dd>
<dt>STARPU_SCHED_GAMMA</dt>
<dd>
\anchor STARPU_SCHED_GAMMA
\addindex __env__STARPU_SCHED_GAMMA
Define the execution time penalty of a joule (\ref Energy-basedScheduling).
</dd>
<dt>STARPU_SCHED_READY</dt>
<dd>
\anchor STARPU_SCHED_READY
\addindex __env__STARPU_SCHED_READY
For a modular scheduler with sorted queues below the decision component, workers
pick up a task which has most of its data already available. Setting this to 0
disables this.
</dd>
<dt>STARPU_IDLE_POWER</dt>
<dd>
\anchor STARPU_IDLE_POWER
\addindex __env__STARPU_IDLE_POWER
Define the idle power of the machine (\ref Energy-basedScheduling).
</dd>
<dt>STARPU_PROFILING</dt>
<dd>
\anchor STARPU_PROFILING
\addindex __env__STARPU_PROFILING
Enable on-line performance monitoring (\ref EnablingOn-linePerformanceMonitoring).
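For a quick look at what profiling yields, one can for instance combine this
variable with the statistics variables described further below (\ref
STARPU_WORKER_STATS and \ref STARPU_BUS_STATS). This is only a sketch of one
possible way to enable them from the program itself:

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Enable on-line profiling and ask for a summary at shutdown. */
	setenv("STARPU_PROFILING", "1", 1);
	setenv("STARPU_WORKER_STATS", "1", 1);
	setenv("STARPU_BUS_STATS", "1", 1);

	if (starpu_init(NULL) != 0)
		return 1;
	/* ... submit and wait for tasks ... */
	starpu_shutdown(); /* worker and bus statistics are printed here */
	return 0;
}
\endcode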
</dd>
<dt>STARPU_PROF_PAPI_EVENTS</dt>
<dd>
\anchor STARPU_PROF_PAPI_EVENTS
\addindex __env__STARPU_PROF_PAPI_EVENTS
Specify which PAPI events should be recorded in the trace (\ref PapiCounters).
</dd>
</dl>
\section Extensions Extensions
<dl>
<dt>SOCL_OCL_LIB_OPENCL</dt>
<dd>
\anchor SOCL_OCL_LIB_OPENCL
\addindex __env__SOCL_OCL_LIB_OPENCL
The SOCL test suite is only run when the environment variable
\ref SOCL_OCL_LIB_OPENCL is defined. It should contain the location
of the file <c>libOpenCL.so</c> of the OCL ICD implementation.
</dd>
<dt>OCL_ICD_VENDORS</dt>
<dd>
\anchor OCL_ICD_VENDORS
\addindex __env__OCL_ICD_VENDORS
When using SOCL with OpenCL ICD
(https://forge.imag.fr/projects/ocl-icd/), this variable may be used
to point to the directory where ICD files are installed. The default
directory is <c>/etc/OpenCL/vendors</c>. StarPU installs ICD
files in the directory <c>$prefix/share/starpu/opencl/vendors</c>.
</dd>
<dt>STARPU_COMM_STATS</dt>
<dd>
\anchor STARPU_COMM_STATS
\addindex __env__STARPU_COMM_STATS
Communication statistics for starpumpi (\ref MPIDebug)
will be enabled when the environment variable \ref STARPU_COMM_STATS
is defined to a value other than 0.
</dd>
<dt>STARPU_MPI_CACHE</dt>
<dd>
\anchor STARPU_MPI_CACHE
\addindex __env__STARPU_MPI_CACHE
Communication cache for starpumpi (\ref MPISupport) will be
disabled when the environment variable \ref STARPU_MPI_CACHE is set
to 0. It is enabled by default or for any other values of the variable
\ref STARPU_MPI_CACHE.
</dd>
<dt>STARPU_MPI_COMM</dt>
<dd>
\anchor STARPU_MPI_COMM
\addindex __env__STARPU_MPI_COMM
Communication trace for starpumpi (\ref MPISupport) will be
enabled when the environment variable \ref STARPU_MPI_COMM is set
to 1, and StarPU has been configured with the option
\ref enable-verbose "--enable-verbose".
</dd>
<dt>STARPU_MPI_CACHE_STATS</dt>
<dd>
\anchor STARPU_MPI_CACHE_STATS
\addindex __env__STARPU_MPI_CACHE_STATS
When set to 1, statistics are enabled for the communication cache (\ref MPISupport). For now,
it prints messages on the standard output when data are added or removed from the received
communication cache.
</dd>
<dt>STARPU_MPI_PRIORITIES</dt>
<dd>
\anchor STARPU_MPI_PRIORITIES
\addindex __env__STARPU_MPI_PRIORITIES
When set to 0, the use of priorities to order MPI communications is disabled
(\ref MPISupport).
</dd>
<dt>STARPU_MPI_NDETACHED_SEND</dt>
<dd>
\anchor STARPU_MPI_NDETACHED_SEND
\addindex __env__STARPU_MPI_NDETACHED_SEND
This sets the number of send requests that StarPU-MPI will emit concurrently. The default is 10.
</dd>
<dt>STARPU_MPI_NREADY_PROCESS</dt>
<dd>
\anchor STARPU_MPI_NREADY_PROCESS
\addindex __env__STARPU_MPI_NREADY_PROCESS
This sets the number of requests that StarPU-MPI will submit to MPI before
polling for termination of existing requests. The default is 10.
</dd>
<dt>STARPU_MPI_FAKE_SIZE</dt>
<dd>
\anchor STARPU_MPI_FAKE_SIZE
\addindex __env__STARPU_MPI_FAKE_SIZE
Setting this to a number makes StarPU believe that there are that many MPI nodes, even
if the program was run on only one MPI node. This allows e.g. to simulate the execution
of one of the nodes of a big cluster without actually running the rest.
It of course does not provide computation results or timings.
</dd>
<dt>STARPU_MPI_FAKE_RANK</dt>
<dd>
\anchor STARPU_MPI_FAKE_RANK
\addindex __env__STARPU_MPI_FAKE_RANK
Setting this to a number makes StarPU believe that it runs the given MPI node, even
if the program was run on only one MPI node. This allows e.g. to simulate the execution
of one of the nodes of a big cluster without actually running the rest.
It of course does not provide computation results or timings.
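For instance, one may pretend that a single process is rank 2 of a 16-node
cluster (both numbers being chosen arbitrarily for the sake of the example) by
setting the two variables before initialization:

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Pretend this single process is node 2 out of 16.
	 * In a real StarPU-MPI application, starpu_mpi_init_conf() would be
	 * called as usual after this point. */
	setenv("STARPU_MPI_FAKE_SIZE", "16", 1);
	setenv("STARPU_MPI_FAKE_RANK", "2", 1);

	if (starpu_init(NULL) != 0)
		return 1;
	/* ... submit the tasks that this rank would normally run ... */
	starpu_shutdown();
	return 0;
}
\endcode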
</dd>
<dt>STARPU_MPI_DRIVER_CALL_FREQUENCY</dt>
<dd>
\anchor STARPU_MPI_DRIVER_CALL_FREQUENCY
\addindex __env__STARPU_MPI_DRIVER_CALL_FREQUENCY
When set to a positive value, activates the interleaving of the execution of
tasks with the progression of MPI communications (\ref MPISupport). The
starpu_mpi_init_conf() function must have been called by the application
for that environment variable to be used. When set to 0, the MPI progression
thread does not use at all the driver given by the user, and only focuses on
making MPI communications progress.
</dd>
<dt>STARPU_MPI_DRIVER_TASK_FREQUENCY</dt>
<dd>
\anchor STARPU_MPI_DRIVER_TASK_FREQUENCY
\addindex __env__STARPU_MPI_DRIVER_TASK_FREQUENCY
When set to a positive value, the mechanism which interleaves the execution of
tasks with the progression of MPI communications will execute that many tasks
before checking communication requests again (\ref MPISupport). The
starpu_mpi_init_conf() function must have been called by the application
for that environment variable to be used, and the
\ref STARPU_MPI_DRIVER_CALL_FREQUENCY environment variable must be set to a positive value.
</dd>
<dt>STARPU_MPI_MEM_THROTTLE</dt>
<dd>
\anchor STARPU_MPI_MEM_THROTTLE
\addindex __env__STARPU_MPI_MEM_THROTTLE
When set to a positive value, this makes the starpu_mpi_*recv* functions
block when the memory allocation required for network reception overflows the
available main memory (as typically set by \ref STARPU_LIMIT_CPU_MEM).
</dd>
<dt>STARPU_SIMGRID_TRANSFER_COST</dt>
<dd>
\anchor STARPU_SIMGRID_TRANSFER_COST
\addindex __env__STARPU_SIMGRID_TRANSFER_COST
When set to 1 (which is the default), data transfers (over PCI bus, typically) are taken into account
in SimGrid mode.
</dd>
<dt>STARPU_SIMGRID_CUDA_MALLOC_COST</dt>
<dd>
\anchor STARPU_SIMGRID_CUDA_MALLOC_COST
\addindex __env__STARPU_SIMGRID_CUDA_MALLOC_COST
When set to 1 (which is the default), CUDA malloc costs are taken into account
in SimGrid mode.
</dd>
<dt>STARPU_SIMGRID_CUDA_QUEUE_COST</dt>
<dd>
\anchor STARPU_SIMGRID_CUDA_QUEUE_COST
\addindex __env__STARPU_SIMGRID_CUDA_QUEUE_COST
When set to 1 (which is the default), CUDA task and transfer queueing costs are
taken into account in SimGrid mode.
</dd>
<dt>STARPU_PCI_FLAT</dt>
<dd>
\anchor STARPU_PCI_FLAT
\addindex __env__STARPU_PCI_FLAT
When unset or set to 0, the platform file created for SimGrid will
contain PCI bandwidths and routes.
</dd>
<dt>STARPU_SIMGRID_QUEUE_MALLOC_COST</dt>
<dd>
\anchor STARPU_SIMGRID_QUEUE_MALLOC_COST
\addindex __env__STARPU_SIMGRID_QUEUE_MALLOC_COST
When unset or set to 1, simulate within SimGrid the GPU transfer queueing.
</dd>
<dt>STARPU_MALLOC_SIMULATION_FOLD</dt>
<dd>
\anchor STARPU_MALLOC_SIMULATION_FOLD
\addindex __env__STARPU_MALLOC_SIMULATION_FOLD
Define the size of the file used for folding virtual allocation, in
MiB. The default is 1, thus allowing 64GiB virtual memory when Linux's
<c>sysctl vm.max_map_count</c> value is the default 65535.
</dd>
<dt>STARPU_SIMGRID_TASK_SUBMIT_COST</dt>
<dd>
\anchor STARPU_SIMGRID_TASK_SUBMIT_COST
\addindex __env__STARPU_SIMGRID_TASK_SUBMIT_COST
When set to 1 (which is the default), task submission costs are taken into
account in SimGrid mode. This provides more accurate SimGrid predictions,
especially for the beginning of the execution.
</dd>
<dt>STARPU_SIMGRID_FETCHING_INPUT_COST</dt>
<dd>
\anchor STARPU_SIMGRID_FETCHING_INPUT_COST
\addindex __env__STARPU_SIMGRID_FETCHING_INPUT_COST
When set to 1 (which is the default), fetching input costs are taken into
account in SimGrid mode. This provides more accurate SimGrid predictions,
especially regarding data transfers.
</dd>
<dt>STARPU_SIMGRID_SCHED_COST</dt>
<dd>
\anchor STARPU_SIMGRID_SCHED_COST
\addindex __env__STARPU_SIMGRID_SCHED_COST
When set to 1 (0 is the default), scheduling costs are taken into
account in SimGrid mode. This provides more accurate SimGrid predictions,
and allows studying scheduling overhead of the runtime system. However,
it also makes simulation non-deterministic.
</dd>
<dt>STARPU_SINK</dt>
<dd>
\anchor STARPU_SINK
\addindex __env__STARPU_SINK
Variable defined by StarPU when running MPI Xeon PHI on the sink.
</dd>
</dl>
\section MiscellaneousAndDebug Miscellaneous And Debug
<dl>
<dt>STARPU_HOME</dt>
<dd>
\anchor STARPU_HOME
\addindex __env__STARPU_HOME
Specify the main directory in which StarPU stores its
configuration files. The default is <c>$HOME</c> on Unix environments,
and <c>$USERPROFILE</c> on Windows environments.
</dd>
<dt>STARPU_PATH</dt>
<dd>
\anchor STARPU_PATH
\addindex __env__STARPU_PATH
Only used on Windows environments.
Specify the main directory in which StarPU is installed
(\ref RunningABasicStarPUApplicationOnMicrosoft)
</dd>
<dt>STARPU_PERF_MODEL_DIR</dt>
<dd>
\anchor STARPU_PERF_MODEL_DIR
\addindex __env__STARPU_PERF_MODEL_DIR
Specify the main directory in which StarPU stores its
performance model files. The default is <c>$STARPU_HOME/.starpu/sampling</c>.
</dd>
<dt>STARPU_PERF_MODEL_HOMOGENEOUS_CPU</dt>
<dd>
\anchor STARPU_PERF_MODEL_HOMOGENEOUS_CPU
\addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_CPU
When this is set to 0, StarPU will assume that CPU devices do not have the same
performance, and thus use a separate performance model for each of them, which
makes kernel calibration much longer, since measurements have to be made for
each CPU core.
</dd>
<dt>STARPU_PERF_MODEL_HOMOGENEOUS_CUDA</dt>
<dd>
\anchor STARPU_PERF_MODEL_HOMOGENEOUS_CUDA
\addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_CUDA
When this is set to 1, StarPU will assume that all CUDA devices have the same
performance, and thus share a single performance model between them, which allows
kernel calibration to be much faster, since measurements only have to be made once
for all CUDA GPUs.
</dd>
<dt>STARPU_PERF_MODEL_HOMOGENEOUS_OPENCL</dt>
<dd>
\anchor STARPU_PERF_MODEL_HOMOGENEOUS_OPENCL
\addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_OPENCL
When this is set to 1, StarPU will assume that all OpenCL devices have the same
performance, and thus share a single performance model between them, which allows
kernel calibration to be much faster, since measurements only have to be made once
for all OpenCL devices.
</dd>
<dt>STARPU_PERF_MODEL_HOMOGENEOUS_MIC</dt>
<dd>
\anchor STARPU_PERF_MODEL_HOMOGENEOUS_MIC
\addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_MIC
When this is set to 1, StarPU will assume that all MIC devices have the same
performance, and thus share a single performance model between them, which allows
kernel calibration to be much faster, since measurements only have to be made once
for all MIC devices.
</dd>
<dt>STARPU_PERF_MODEL_HOMOGENEOUS_MPI_MS</dt>
<dd>
\anchor STARPU_PERF_MODEL_HOMOGENEOUS_MPI_MS
\addindex __env__STARPU_PERF_MODEL_HOMOGENEOUS_MPI_MS
When this is set to 1, StarPU will assume that all MPI Slave devices have the same
performance, and thus share a single performance model between them, which allows
kernel calibration to be much faster, since measurements only have to be made once
for all MPI Slaves.
</dd>
<dt>STARPU_HOSTNAME</dt>
<dd>
\anchor STARPU_HOSTNAME
\addindex __env__STARPU_HOSTNAME
When set, force the hostname to be used when dealing with performance model
files. Models are indexed by machine name. When running for example on
a homogeneous cluster, it is possible to share the models between
machines by setting <c>export STARPU_HOSTNAME=some_global_name</c>.
</dd>
<dt>STARPU_MPI_HOSTNAMES</dt>
<dd>
\anchor STARPU_MPI_HOSTNAMES
\addindex __env__STARPU_MPI_HOSTNAMES
Similar to \ref STARPU_HOSTNAME but to define multiple nodes on a
heterogeneous cluster. The variable is a list of hostnames that will be assigned
to each StarPU-MPI rank considering their position and the value of
\ref starpu_mpi_world_rank on each rank. When running, for example, on a
heterogeneous cluster, it is possible to set individual models for each machine
by setting <c>export STARPU_MPI_HOSTNAMES="name0 name1 name2"</c>: rank 0
will receive name0, rank 1 will receive name1, and so on.
This variable has precedence over \ref STARPU_HOSTNAME.
</dd>
<dt>STARPU_OPENCL_PROGRAM_DIR</dt>
<dd>
\anchor STARPU_OPENCL_PROGRAM_DIR
\addindex __env__STARPU_OPENCL_PROGRAM_DIR
Specify the directory where the OpenCL codelet source files are
located. The function starpu_opencl_load_program_source() looks
for the codelet in the current directory, in the directory specified
by the environment variable \ref STARPU_OPENCL_PROGRAM_DIR, in the
directory <c>share/starpu/opencl</c> of the installation directory of
StarPU, and finally in the source directory of StarPU.
</dd>
<dt>STARPU_SILENT</dt>
<dd>
\anchor STARPU_SILENT
\addindex __env__STARPU_SILENT
Allow disabling verbose mode at runtime when StarPU
has been configured with the option \ref enable-verbose "--enable-verbose". It also
disables the display of StarPU information and warning messages.
</dd>
<dt>STARPU_MPI_DEBUG_LEVEL_MIN</dt>
<dd>
\anchor STARPU_MPI_DEBUG_LEVEL_MIN
\addindex __env__STARPU_MPI_DEBUG_LEVEL_MIN
Set the minimum level of debug when StarPU
has been configured with the option \ref enable-mpi-verbose "--enable-mpi-verbose".
</dd>
<dt>STARPU_MPI_DEBUG_LEVEL_MAX</dt>
<dd>
\anchor STARPU_MPI_DEBUG_LEVEL_MAX
\addindex __env__STARPU_MPI_DEBUG_LEVEL_MAX
Set the maximum level of debug when StarPU
has been configured with the option \ref enable-mpi-verbose "--enable-mpi-verbose".
</dd>
<dt>STARPU_LOGFILENAME</dt>
<dd>
\anchor STARPU_LOGFILENAME
\addindex __env__STARPU_LOGFILENAME
Specify the file in which the debugging output should be saved.
</dd>
<dt>STARPU_FXT_PREFIX</dt>
<dd>
\anchor STARPU_FXT_PREFIX
\addindex __env__STARPU_FXT_PREFIX
Specify in which directory to save the generated trace if FxT is enabled.
</dd>
<dt>STARPU_FXT_SUFFIX</dt>
<dd>
\anchor STARPU_FXT_SUFFIX
\addindex __env__STARPU_FXT_SUFFIX
Specify in which file to save the generated trace if FxT is enabled.
</dd>
<dt>STARPU_FXT_TRACE</dt>
<dd>
\anchor STARPU_FXT_TRACE
\addindex __env__STARPU_FXT_TRACE
Specify whether to generate (1) or not (0) the FxT trace in <c>/tmp/prof_file_XXX_YYY</c> (the directory and file name can be changed with \ref STARPU_FXT_PREFIX and \ref STARPU_FXT_SUFFIX). The default is 1 (generate it).
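As an illustration, the trace location can be redirected from the program
itself; the <c>/tmp/my_traces/</c> directory used here is purely hypothetical
and must already exist and be writable:

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Keep FxT tracing enabled and choose the directory for the trace file. */
	setenv("STARPU_FXT_TRACE", "1", 1);
	setenv("STARPU_FXT_PREFIX", "/tmp/my_traces/", 1);

	if (starpu_init(NULL) != 0)
		return 1;
	/* ... run the application ... */
	starpu_shutdown(); /* the trace file is written at shutdown */
	return 0;
}
\endcode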
</dd>
<dt>STARPU_LIMIT_CUDA_devid_MEM</dt>
<dd>
\anchor STARPU_LIMIT_CUDA_devid_MEM
\addindex __env__STARPU_LIMIT_CUDA_devid_MEM
Specify the maximum number of megabytes that should be
available to the application on the CUDA device with the identifier
<c>devid</c>. This variable is intended to be used for experimental
purposes as it emulates devices that have a limited amount of memory.
When defined, the variable overwrites the value of the variable
\ref STARPU_LIMIT_CUDA_MEM.
</dd>
<dt>STARPU_LIMIT_CUDA_MEM</dt>
<dd>
\anchor STARPU_LIMIT_CUDA_MEM
\addindex __env__STARPU_LIMIT_CUDA_MEM
Specify the maximum number of megabytes that should be
available to the application on each CUDA device. This variable is
intended to be used for experimental purposes as it emulates devices
that have a limited amount of memory.
</dd>
<dt>STARPU_LIMIT_OPENCL_devid_MEM</dt>
<dd>
\anchor STARPU_LIMIT_OPENCL_devid_MEM
\addindex __env__STARPU_LIMIT_OPENCL_devid_MEM
Specify the maximum number of megabytes that should be
available to the application on the OpenCL device with the identifier
<c>devid</c>. This variable is intended to be used for experimental
purposes as it emulates devices that have a limited amount of memory.
When defined, the variable overwrites the value of the variable
\ref STARPU_LIMIT_OPENCL_MEM.
</dd>
<dt>STARPU_LIMIT_OPENCL_MEM</dt>
<dd>
\anchor STARPU_LIMIT_OPENCL_MEM
\addindex __env__STARPU_LIMIT_OPENCL_MEM
Specify the maximum number of megabytes that should be
available to the application on each OpenCL device. This variable is
intended to be used for experimental purposes as it emulates devices
that have a limited amount of memory.
</dd>
<dt>STARPU_LIMIT_CPU_MEM</dt>
<dd>
\anchor STARPU_LIMIT_CPU_MEM
\addindex __env__STARPU_LIMIT_CPU_MEM
Specify the maximum number of megabytes that should be
available to the application in the main CPU memory. Setting it enables allocation
cache in main memory. Setting it to zero lets StarPU overflow memory.
Note: for now not all StarPU allocations get throttled by this
parameter. Notably, MPI receptions are not throttled unless \ref
STARPU_MPI_MEM_THROTTLE is set to 1.
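For instance, to emulate small memories for experiments, one may limit the main
memory to 1024 MB and CUDA device 0 to 512 MB (arbitrary example values; the
device index simply replaces <c>devid</c> in the variable name):

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	setenv("STARPU_LIMIT_CPU_MEM", "1024", 1);   /* 1024 MB of main memory */
	setenv("STARPU_LIMIT_CUDA_0_MEM", "512", 1); /* 512 MB on CUDA device 0 */

	if (starpu_init(NULL) != 0)
		return 1;
	/* ... StarPU evicts or writes back data to stay within these limits ... */
	starpu_shutdown();
	return 0;
}
\endcode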
</dd>
<dt>STARPU_LIMIT_CPU_NUMA_devid_MEM</dt>
<dd>
\anchor STARPU_LIMIT_CPU_NUMA_devid_MEM
\addindex __env__STARPU_LIMIT_CPU_NUMA_devid_MEM
Specify the maximum number of megabytes that should be available to the
application on the NUMA node with the OS identifier <c>devid</c>. Setting it
overrides the value of \ref STARPU_LIMIT_CPU_MEM.
</dd>
<dt>STARPU_LIMIT_CPU_NUMA_MEM</dt>
<dd>
\anchor STARPU_LIMIT_CPU_NUMA_MEM
\addindex __env__STARPU_LIMIT_CPU_NUMA_MEM
Specify the maximum number of megabytes that should be available to the
application on each NUMA node. This is the same as specifying that same amount
with \ref STARPU_LIMIT_CPU_NUMA_devid_MEM for each NUMA node number. The total
memory available to StarPU will thus be this amount multiplied by the number of
NUMA nodes used by StarPU. Any \ref STARPU_LIMIT_CPU_NUMA_devid_MEM additionally
specified will take precedence over \ref STARPU_LIMIT_CPU_NUMA_MEM.
</dd>
<dt>STARPU_LIMIT_BANDWIDTH</dt>
<dd>
\anchor STARPU_LIMIT_BANDWIDTH
\addindex __env__STARPU_LIMIT_BANDWIDTH
Specify the maximum available PCI bandwidth of the system in MB/s. This can only
be effective with SimGrid simulation. This allows to easily override the
bandwidths stored in the platform file generated from measurements on the native
system.
</dd>
<dt>STARPU_MINIMUM_AVAILABLE_MEM</dt>
<dd>
\anchor STARPU_MINIMUM_AVAILABLE_MEM
\addindex __env__STARPU_MINIMUM_AVAILABLE_MEM
Specify the minimum percentage of memory that should be available in GPUs
(or in main memory, when using out of core), below which a reclaiming pass is
performed. The default is 0%.
</dd>
<dt>STARPU_TARGET_AVAILABLE_MEM</dt>
<dd>
\anchor STARPU_TARGET_AVAILABLE_MEM
\addindex __env__STARPU_TARGET_AVAILABLE_MEM
Specify the target percentage of memory that should be reached in
GPUs (or in main memory, when using out of core), when performing a periodic
reclaiming pass. The default is 0%.
</dd>
<dt>STARPU_MINIMUM_CLEAN_BUFFERS</dt>
<dd>
\anchor STARPU_MINIMUM_CLEAN_BUFFERS
\addindex __env__STARPU_MINIMUM_CLEAN_BUFFERS
Specify the minimum percentage of buffers that should be clean in GPUs
(or in main memory, when using out of core), below which asynchronous writebacks will be
issued. The default is 5%.
</dd>
<dt>STARPU_TARGET_CLEAN_BUFFERS</dt>
<dd>
\anchor STARPU_TARGET_CLEAN_BUFFERS
\addindex __env__STARPU_TARGET_CLEAN_BUFFERS
Specify the target percentage of clean buffers that should be reached in
GPUs (or in main memory, when using out of core), when performing an asynchronous
writeback pass. The default is 10%.
</dd>
<dt>STARPU_DIDUSE_BARRIER</dt>
<dd>
\anchor STARPU_DIDUSE_BARRIER
\addindex __env__STARPU_DIDUSE_BARRIER
When set to 1, StarPU will never evict a piece of data if it has not been used
by at least one task. This avoids odd behaviors under high memory pressure, but
can lead to deadlocks, so it is to be considered experimental only.
</dd>
<dt>STARPU_DISK_SWAP</dt>
<dd>
\anchor STARPU_DISK_SWAP
\addindex __env__STARPU_DISK_SWAP
Specify a path where StarPU can push data when the main memory is getting
full.
</dd>
<dt>STARPU_DISK_SWAP_BACKEND</dt>
<dd>
\anchor STARPU_DISK_SWAP_BACKEND
\addindex __env__STARPU_DISK_SWAP_BACKEND
Specify the backend to be used by StarPU to push data when the main
memory is getting full. The default is unistd (i.e. using read/write functions),
other values are stdio (i.e. using fread/fwrite), unistd_o_direct (i.e. using
read/write with O_DIRECT), leveldb (i.e. using a leveldb database), and hdf5
(i.e. using the HDF5 library).
</dd>
<dt>STARPU_DISK_SWAP_SIZE</dt>
<dd>
\anchor STARPU_DISK_SWAP_SIZE
\addindex __env__STARPU_DISK_SWAP_SIZE
Specify the maximum size in MiB to be used by StarPU to push data when the main
memory is getting full. The default is unlimited.
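A minimal out-of-core setup might thus look as follows (the
<c>/tmp/starpu_swap</c> directory is a hypothetical, writable location, and the
100 MB main-memory limit is deliberately small so that eviction to disk is
actually exercised):

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	setenv("STARPU_LIMIT_CPU_MEM", "100", 1);          /* keep main memory small */
	setenv("STARPU_DISK_SWAP", "/tmp/starpu_swap", 1); /* where evicted data goes */
	setenv("STARPU_DISK_SWAP_BACKEND", "unistd", 1);   /* default read/write backend */
	setenv("STARPU_DISK_SWAP_SIZE", "1024", 1);        /* at most 1 GiB on disk */

	if (starpu_init(NULL) != 0)
		return 1;
	/* ... register and use data sets larger than the memory limit ... */
	starpu_shutdown();
	return 0;
}
\endcode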
</dd>
<dt>STARPU_LIMIT_MAX_SUBMITTED_TASKS</dt>
<dd>
\anchor STARPU_LIMIT_MAX_SUBMITTED_TASKS
\addindex __env__STARPU_LIMIT_MAX_SUBMITTED_TASKS
Allow users to control the task submission flow by specifying
to StarPU a maximum number of submitted tasks allowed at a given time, i.e. when
this limit is reached task submission becomes blocking until enough tasks have
completed, specified by \ref STARPU_LIMIT_MIN_SUBMITTED_TASKS.
Setting it enables allocation cache buffer reuse in main memory.
</dd>
<dt>STARPU_LIMIT_MIN_SUBMITTED_TASKS</dt>
<dd>
\anchor STARPU_LIMIT_MIN_SUBMITTED_TASKS
\addindex __env__STARPU_LIMIT_MIN_SUBMITTED_TASKS
Allow users to control the task submission flow by specifying
to StarPU a submitted task threshold to wait before unblocking task submission. This
variable has to be used in conjunction with \ref STARPU_LIMIT_MAX_SUBMITTED_TASKS
which puts the task submission thread to
sleep. Setting it enables allocation cache buffer reuse in main memory.
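The two variables are meant to be used together. A sketch with arbitrary
thresholds, where submission blocks once 10000 tasks are in flight and resumes
when the count drops back to 9000:

\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	setenv("STARPU_LIMIT_MAX_SUBMITTED_TASKS", "10000", 1);
	setenv("STARPU_LIMIT_MIN_SUBMITTED_TASKS", "9000", 1);

	if (starpu_init(NULL) != 0)
		return 1;
	/* starpu_task_submit() now blocks whenever 10000 tasks are pending,
	 * and submission resumes once the count drops to 9000. */
	starpu_shutdown();
	return 0;
}
\endcode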
</dd>
<dt>STARPU_TRACE_BUFFER_SIZE</dt>
<dd>
\anchor STARPU_TRACE_BUFFER_SIZE
\addindex __env__STARPU_TRACE_BUFFER_SIZE
Set the buffer size for recording trace events, in MiB. Setting it to a big
size allows avoiding pauses in the trace while it is recorded on the disk. This
however also consumes memory, of course. The default value is 64.
</dd>
<dt>STARPU_GENERATE_TRACE</dt>
<dd>
\anchor STARPU_GENERATE_TRACE
\addindex __env__STARPU_GENERATE_TRACE
When set to <c>1</c>, indicate that StarPU should automatically
generate a Paje trace when starpu_shutdown() is called.
</dd>
<dt>STARPU_GENERATE_TRACE_OPTIONS</dt>
<dd>
\anchor STARPU_GENERATE_TRACE_OPTIONS
\addindex __env__STARPU_GENERATE_TRACE_OPTIONS
When the variable \ref STARPU_GENERATE_TRACE is set to <c>1</c> to
generate a Paje trace, this variable can be set to specify options (see
<c>starpu_fxt_tool --help</c>).
</dd>
<dt>STARPU_ENABLE_STATS</dt>
<dd>
\anchor STARPU_ENABLE_STATS
\addindex __env__STARPU_ENABLE_STATS
When defined, enable gathering various data statistics (\ref DataStatistics).
</dd>
<dt>STARPU_MEMORY_STATS</dt>
<dd>
\anchor STARPU_MEMORY_STATS
\addindex __env__STARPU_MEMORY_STATS
When set to 0, disable the display of memory statistics on data which
have not been unregistered at the end of the execution (\ref MemoryFeedback).
</dd>
<dt>STARPU_MAX_MEMORY_USE</dt>
<dd>
\anchor STARPU_MAX_MEMORY_USE
\addindex __env__STARPU_MAX_MEMORY_USE
When set to 1, display at the end of the execution the maximum memory used by
StarPU for internal data structures during execution.
</dd>
<dt>STARPU_BUS_STATS</dt>
<dd>
\anchor STARPU_BUS_STATS
\addindex __env__STARPU_BUS_STATS
When defined, statistics about data transfers will be displayed when calling
starpu_shutdown() (\ref Profiling). By default, statistics are printed
on the standard error stream, use the environment variable \ref
STARPU_BUS_STATS_FILE to define another filename.
</dd>
<dt>STARPU_BUS_STATS_FILE</dt>
<dd>
\anchor STARPU_BUS_STATS_FILE
\addindex __env__STARPU_BUS_STATS_FILE
Define the name of the file in which to display data transfer
statistics, see \ref STARPU_BUS_STATS.
</dd>
<dt>STARPU_WORKER_STATS</dt>
<dd>
\anchor STARPU_WORKER_STATS
\addindex __env__STARPU_WORKER_STATS
When defined, statistics about the workers will be displayed when calling
starpu_shutdown() (\ref Profiling). When combined with the
environment variable \ref STARPU_PROFILING, it displays the energy
consumption (\ref Energy-basedScheduling). By default, statistics are
printed on the standard error stream, use the environment variable
\ref STARPU_WORKER_STATS_FILE to define another filename.
</dd>
<dt>STARPU_WORKER_STATS_FILE</dt>
<dd>
\anchor STARPU_WORKER_STATS_FILE
\addindex __env__STARPU_WORKER_STATS_FILE
Define the name of the file in which to display workers statistics, see
\ref STARPU_WORKER_STATS.
</dd>
<dt>STARPU_STATS</dt>
<dd>
\anchor STARPU_STATS
\addindex __env__STARPU_STATS
When set to 0, data statistics will not be displayed at the
end of the execution of an application (\ref DataStatistics).
</dd>
<dt>STARPU_WATCHDOG_TIMEOUT</dt>
<dd>
\anchor STARPU_WATCHDOG_TIMEOUT
\addindex __env__STARPU_WATCHDOG_TIMEOUT
When set to a value other than 0, makes StarPU print an error
message whenever StarPU does not terminate any task for the given time (in µs),
but lets the application continue normally. Should
be used in combination with \ref STARPU_WATCHDOG_CRASH
(see \ref DetectionStuckConditions).
</dd>
<dt>STARPU_WATCHDOG_CRASH</dt>
<dd>
\anchor STARPU_WATCHDOG_CRASH
\addindex __env__STARPU_WATCHDOG_CRASH
When set to a value other than 0, trigger a crash when the watchdog
timeout is reached, thus allowing to catch the situation in gdb, etc.
(see \ref DetectionStuckConditions)
</dd>
<dt>STARPU_WATCHDOG_DELAY</dt>
<dd>
\anchor STARPU_WATCHDOG_DELAY
\addindex __env__STARPU_WATCHDOG_DELAY
Delay the activation of the watchdog by the given time (in µs). This can
be convenient for letting the application initialize data etc. before starting
to look for idle time.
  1123. </dd>
<dt>STARPU_TASK_BREAK_ON_PUSH</dt>
<dd>
\anchor STARPU_TASK_BREAK_ON_PUSH
\addindex __env__STARPU_TASK_BREAK_ON_PUSH
When this variable contains a job id, StarPU will raise SIGTRAP when the task
with that job id is being pushed to the scheduler, which is conveniently caught by debuggers
(see \ref DebuggingScheduling).
</dd>
<dt>STARPU_TASK_BREAK_ON_SCHED</dt>
<dd>
\anchor STARPU_TASK_BREAK_ON_SCHED
\addindex __env__STARPU_TASK_BREAK_ON_SCHED
When this variable contains a job id, StarPU will raise SIGTRAP when the task
with that job id is being scheduled by the scheduler (at a scheduler-specific
point), which is conveniently caught by debuggers.
This only works for schedulers which have such a scheduling point defined
(see \ref DebuggingScheduling).
</dd>
<dt>STARPU_TASK_BREAK_ON_POP</dt>
<dd>
\anchor STARPU_TASK_BREAK_ON_POP
\addindex __env__STARPU_TASK_BREAK_ON_POP
When this variable contains a job id, StarPU will raise SIGTRAP when the task
with that job id is being popped from the scheduler, which is conveniently caught by debuggers
(see \ref DebuggingScheduling).
</dd>
<dt>STARPU_TASK_BREAK_ON_EXEC</dt>
<dd>
\anchor STARPU_TASK_BREAK_ON_EXEC
\addindex __env__STARPU_TASK_BREAK_ON_EXEC
When this variable contains a job id, StarPU will raise SIGTRAP when the task
with that job id is being executed, which is conveniently caught by debuggers
(see \ref DebuggingScheduling).
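A minimal sketch of how this is typically used (the job id <c>42</c> is purely
illustrative and would normally be taken from a previous run, e.g. from a
trace; the same pattern applies to the other STARPU_TASK_BREAK_ON_* variables):
\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Illustrative: raise SIGTRAP when the task with job id 42 starts
	 * executing; running the program under gdb then stops right there. */
	setenv("STARPU_TASK_BREAK_ON_EXEC", "42", 1);

	if (starpu_init(NULL) != 0)
		return 1;

	/* ... submit tasks ... */

	starpu_shutdown();
	return 0;
}
\endcode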
</dd>
<dt>STARPU_DISABLE_KERNELS</dt>
<dd>
\anchor STARPU_DISABLE_KERNELS
\addindex __env__STARPU_DISABLE_KERNELS
When set to a value other than 1, disable actually calling the kernel
functions, thus making it possible to quickly check that the task scheme is
working properly, without performing the actual application-provided computation.
</dd>
<dt>STARPU_HISTORY_MAX_ERROR</dt>
<dd>
\anchor STARPU_HISTORY_MAX_ERROR
\addindex __env__STARPU_HISTORY_MAX_ERROR
History-based performance models will drop measurements which are really far
from the measured average. This variable specifies the allowed variation. The default is
50 (%), i.e. a measurement is allowed to be 1.5 times faster or 1.5 times slower than the
average.
</dd>
<dt>STARPU_RAND_SEED</dt>
<dd>
\anchor STARPU_RAND_SEED
\addindex __env__STARPU_RAND_SEED
The random scheduler and some examples use random numbers for their own
purposes. Depending on the example, the seed is by default either always 0 or
the current time() (unless SimGrid mode is enabled, in which case it is always
0). \ref STARPU_RAND_SEED allows setting the seed to a specific value.
</dd>
<dt>STARPU_GLOBAL_ARBITER</dt>
<dd>
\anchor STARPU_GLOBAL_ARBITER
\addindex __env__STARPU_GLOBAL_ARBITER
When set to a positive value, StarPU will create an arbiter, which
implements an advanced but centralized management of concurrent data
accesses (see \ref ConcurrentDataAccess).
</dd>
<dt>STARPU_USE_NUMA</dt>
<dd>
\anchor STARPU_USE_NUMA
\addindex __env__STARPU_USE_NUMA
When defined, NUMA nodes are taken into account by StarPU. Otherwise, memory
is considered to be a single node. This is experimental for now.
When enabled, STARPU_MAIN_MEMORY is a pointer to the NUMA node associated with the
first CPU worker if it exists, and otherwise to the NUMA node associated with the first discovered GPU.
If StarPU does not find any NUMA node after these steps, STARPU_MAIN_MEMORY is the first NUMA node
discovered by StarPU.
</dd>
<dt>STARPU_IDLE_FILE</dt>
<dd>
\anchor STARPU_IDLE_FILE
\addindex __env__STARPU_IDLE_FILE
If the environment variable STARPU_IDLE_FILE is defined, a file named after its contents will be created at the end of the execution.
The file will contain the sum of the idle times of all the workers.
</dd>
<dt>STARPU_HWLOC_INPUT</dt>
<dd>
\anchor STARPU_HWLOC_INPUT
\addindex __env__STARPU_HWLOC_INPUT
If the environment variable STARPU_HWLOC_INPUT is defined to the path of an XML file, hwloc will be made to use it as input instead of detecting the current platform topology, which can save significant initialization time.
To produce this XML file, use <c>lstopo file.xml</c>.
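For instance, the topology can be dumped once with <c>lstopo</c> and reused in
later runs; a minimal sketch, assuming a hypothetical <c>topology.xml</c> file
produced beforehand:
\code{.c}
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Illustrative: reuse a topology previously dumped with
	 * "lstopo topology.xml" instead of re-detecting it, which can
	 * shorten initialization. */
	setenv("STARPU_HWLOC_INPUT", "topology.xml", 1);

	if (starpu_init(NULL) != 0)
		return 1;

	/* ... */

	starpu_shutdown();
	return 0;
}
\endcode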
</dd>
<dt>STARPU_CATCH_SIGNALS</dt>
<dd>
\anchor STARPU_CATCH_SIGNALS
\addindex __env__STARPU_CATCH_SIGNALS
By default, StarPU catches the signals SIGINT, SIGSEGV and SIGTRAP to
perform final actions such as dumping FxT trace files even though the
application has crashed. Setting this variable to a value other than 1
will disable this behaviour. This should be done on JVM systems which
may use these signals for their own needs.
The flag can also be set through the field starpu_conf::catch_signals.
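The programmatic alternative could look like the following sketch:
\code{.c}
#include <starpu.h>

int main(void)
{
	struct starpu_conf conf;
	starpu_conf_init(&conf);
	/* Leave SIGINT, SIGSEGV and SIGTRAP to the application (e.g. a JVM)
	 * instead of letting StarPU catch them. */
	conf.catch_signals = 0;

	if (starpu_init(&conf) != 0)
		return 1;

	/* ... */

	starpu_shutdown();
	return 0;
}
\endcode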
</dd>
<dt>STARPU_DISPLAY_BINDINGS</dt>
<dd>
\anchor STARPU_DISPLAY_BINDINGS
\addindex __env__STARPU_DISPLAY_BINDINGS
Display the binding of all processes and threads running on the machine. If MPI is enabled, display the binding of each node.<br>
Users can manually display the binding by calling starpu_display_bindings().
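A minimal sketch of the manual call mentioned above:
\code{.c}
#include <starpu.h>

int main(void)
{
	if (starpu_init(NULL) != 0)
		return 1;

	/* Print the binding of StarPU's processes and threads, as setting
	 * STARPU_DISPLAY_BINDINGS would do. */
	starpu_display_bindings();

	starpu_shutdown();
	return 0;
}
\endcode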
</dd>
</dl>
\section ConfiguringTheHypervisor Configuring The Hypervisor
<dl>
<dt>SC_HYPERVISOR_POLICY</dt>
<dd>
\anchor SC_HYPERVISOR_POLICY
\addindex __env__SC_HYPERVISOR_POLICY
Choose between the different resizing policies proposed by StarPU for the hypervisor:
idle, app_driven, feft_lp, teft_lp, ispeed_lp, throughput_lp, etc.
Use <c>SC_HYPERVISOR_POLICY=help</c> to get the list of available policies for the hypervisor.
</dd>
<dt>SC_HYPERVISOR_TRIGGER_RESIZE</dt>
<dd>
\anchor SC_HYPERVISOR_TRIGGER_RESIZE
\addindex __env__SC_HYPERVISOR_TRIGGER_RESIZE
Choose how the hypervisor should be triggered: <c>speed</c> if the resizing algorithm should
be called whenever the speed of the context does not correspond to an optimal precomputed value,
<c>idle</c> if the resizing algorithm should be called whenever the workers are idle for a period
longer than the value indicated when configuring the hypervisor.
</dd>
<dt>SC_HYPERVISOR_START_RESIZE</dt>
<dd>
\anchor SC_HYPERVISOR_START_RESIZE
\addindex __env__SC_HYPERVISOR_START_RESIZE
Indicate the moment when the resizing should become available. The value corresponds to the percentage
of the total execution time of the application. The default value is the resizing frame.
</dd>
<dt>SC_HYPERVISOR_MAX_SPEED_GAP</dt>
<dd>
\anchor SC_HYPERVISOR_MAX_SPEED_GAP
\addindex __env__SC_HYPERVISOR_MAX_SPEED_GAP
Indicate the ratio of speed difference between contexts that should trigger the hypervisor.
This situation may occur only when a theoretical speed could not be computed and the hypervisor
has no value to compare the speed to. Otherwise, the resizing of a context is not influenced by
the speed of the other contexts, but only by the value that a context should have.
</dd>
<dt>SC_HYPERVISOR_STOP_PRINT</dt>
<dd>
\anchor SC_HYPERVISOR_STOP_PRINT
\addindex __env__SC_HYPERVISOR_STOP_PRINT
By default, the speed of the workers is printed during the execution
of the application. Setting this variable to 1 disables this printing.
</dd>
<dt>SC_HYPERVISOR_LAZY_RESIZE</dt>
<dd>
\anchor SC_HYPERVISOR_LAZY_RESIZE
\addindex __env__SC_HYPERVISOR_LAZY_RESIZE
By default, the hypervisor resizes the contexts in a lazy way, that is, workers are first added to a new context
before being removed from the previous one. Once these workers are effectively taken into account
in the new context (a task was popped there), they are removed from the previous one. If the application
wants the change in the distribution of workers to take effect right away, this variable should be set to 0.
</dd>
<dt>SC_HYPERVISOR_SAMPLE_CRITERIA</dt>
<dd>
\anchor SC_HYPERVISOR_SAMPLE_CRITERIA
\addindex __env__SC_HYPERVISOR_SAMPLE_CRITERIA
By default, the hypervisor uses a sample of flops when computing the speed of the contexts and of the workers.
If this variable is set to <c>time</c>, the hypervisor uses a sample of time (10% of an approximation of the total
execution time of the application).
</dd>
</dl>
*/