\input texinfo @c -*-texinfo-*-
@c %**start of header
@setfilename starpu.info
@settitle StarPU Handbook
@c %**end of header
@include version.texi
@setchapternewpage odd
@titlepage
@title StarPU Handbook
@subtitle for StarPU @value{VERSION}
@page
@vskip 0pt plus 1fill
@comment For the @value{version-GCC} Version*
@end titlepage
@c @summarycontents
@contents
@page
@node Top
@top Preface
@cindex Preface
This manual documents the usage of StarPU version @value{VERSION}. It
was last updated on @value{UPDATED}.
@comment
@comment  When you add a new menu item, please keep the right hand
@comment  aligned to the same column. Do not use tabs.  This provides
@comment  better formatting.
@comment
@menu
* Introduction::                A basic introduction to using StarPU
* Installing StarPU::           How to configure, build and install StarPU
* Using StarPU::                How to run StarPU applications
* Basic Examples::              Basic examples of the use of StarPU
* Performance optimization::    How to optimize performance with StarPU
* Performance feedback::        Performance debugging tools
* StarPU MPI support::          How to combine StarPU with MPI
* Configuring StarPU::          How to configure StarPU
* StarPU API::                  The API to use StarPU
* Advanced Topics::             Advanced use of StarPU
* Full source code for the 'Scaling a Vector' example::
* Function Index::              Index of C functions.
@end menu
@c ---------------------------------------------------------------------
@c Introduction to StarPU
@c ---------------------------------------------------------------------
@node Introduction
@chapter Introduction to StarPU
@menu
* Motivation::                  Why StarPU ?
* StarPU in a Nutshell::        The Fundamentals of StarPU
@end menu
@node Motivation
@section Motivation
@c complex machines with heterogeneous cores/devices
The use of specialized hardware such as accelerators or coprocessors offers an
interesting approach to overcome the physical limits encountered by processor
architects. As a result, many machines are now equipped with one or several
accelerators (e.g. a GPU), in addition to the usual processor(s). While a lot of
effort has been devoted to offloading computation onto such accelerators, very
little attention has been paid to portability concerns on the one hand, and to the
possibility of having heterogeneous accelerators and processors interact on the other hand.
StarPU is a runtime system that offers support for heterogeneous multicore
architectures. It not only offers a unified view of the computational resources
(i.e. CPUs and accelerators at the same time), but also takes care of
efficiently mapping and executing tasks onto a heterogeneous machine while
transparently handling low-level issues such as data transfers in a portable
fashion.
@c this leads to a complicated distributed memory design
@c which is not (easily) manageable by hand
@c added value/benefits of StarPU
@c   - portability
@c   - scheduling, perf. portability
@node StarPU in a Nutshell
@section StarPU in a Nutshell
@menu
* Codelet and Tasks::
* StarPU Data Management Library::
* Research Papers::
@end menu
From a programming point of view, StarPU is not a new language but a library
that executes tasks explicitly submitted by the application. The data that a
task manipulates are automatically transferred onto the accelerator so that the
programmer does not have to take care of complex data movements. StarPU also
takes particular care of scheduling those tasks efficiently and allows
scheduling experts to implement custom scheduling policies in a portable
fashion.
@c explain the notion of codelet and task (i.e. g(A, B)
@node Codelet and Tasks
@subsection Codelet and Tasks
One of StarPU's primary data structures is the @b{codelet}. A codelet describes a
computational kernel that can possibly be implemented on multiple architectures
such as a CPU, a CUDA device or a Cell's SPU.
@c TODO insert illustration f : f_spu, f_cpu, ...
Another important data structure is the @b{task}. Executing a StarPU task
consists in applying a codelet to a data set, on one of the architectures on
which the codelet is implemented. In addition to the codelet that a task
uses, it also describes which data are accessed, and how they are
accessed during the computation (read and/or write).
StarPU tasks are asynchronous: submitting a task to StarPU is a non-blocking
operation. The task structure can also specify a @b{callback} function that is
called once StarPU has properly executed the task. It also contains optional
fields that the application may use to give hints to the scheduler (such as
priority levels).
A task may be identified by a unique 64-bit number chosen by the application,
which we refer to as a @b{tag}.
Task dependencies can be enforced either by the means of callback functions, by
expressing dependencies between explicit tasks or by expressing dependencies
between tags (which can thus correspond to tasks that have not been submitted
yet).
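As a sketch (assuming the @code{use_tag} and @code{tag_id} task fields and
the @code{starpu_tag_declare_deps} function, both described later in the API
chapter, and a codelet @code{cl} defined elsewhere), a dependency between two
tags may be declared before the corresponding tasks have even been submitted:
@cartouche
@smallexample
struct starpu_task *taskA = starpu_task_create();
taskA->cl = &cl;
taskA->use_tag = 1;
taskA->tag_id = 0x1;    /* application-chosen 64-bit tag */
struct starpu_task *taskB = starpu_task_create();
taskB->cl = &cl;
taskB->use_tag = 1;
taskB->tag_id = 0x2;
/* Tag 0x2 (taskB) depends on one tag: 0x1 (taskA) */
starpu_tag_declare_deps((starpu_tag_t)0x2, 1, (starpu_tag_t)0x1);
starpu_task_submit(taskB);  /* may be submitted first: it will wait */
starpu_task_submit(taskA);
@end smallexample
@end cartouche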
@c TODO insert illustration f(Ar, Brw, Cr) + ..
@c DSM
@node StarPU Data Management Library
@subsection StarPU Data Management Library
Because StarPU schedules tasks at runtime, data transfers have to be
done automatically and ``just-in-time'' between processing units,
relieving the application programmer from explicit data transfers.
Moreover, to avoid unnecessary transfers, StarPU keeps data
where it was last needed, even if it was modified there, and it
allows multiple copies of the same data to reside at the same time on
several processing units as long as it is not modified.
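When the application itself needs to read or modify a piece of registered
data, it must synchronize with StarPU. As a minimal sketch (assuming a
@code{vector_handle} registered as in the examples of the following chapters),
this is done with an acquire/release pair:
@cartouche
@smallexample
/* Wait for the tasks working on the data and fetch it back in RAM */
starpu_data_acquire(vector_handle, STARPU_R);
/* ... the application can now safely read the data ... */
starpu_data_release(vector_handle);
@end smallexample
@end cartouche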
@node Research Papers
@subsection Research Papers
Research papers about StarPU can be found at
@indicateurl{http://runtime.bordeaux.inria.fr/Publis/Keyword/STARPU.html}
Notably a good overview is given in the research report
@indicateurl{http://hal.archives-ouvertes.fr/inria-00467677}
@c ---------------------------------------------------------------------
@c Installing StarPU
@c ---------------------------------------------------------------------
@node Installing StarPU
@chapter Installing StarPU
@menu
* Downloading StarPU::
* Configuration of StarPU::
* Building and Installing StarPU::
@end menu
StarPU can be built and installed by the standard means of the GNU
autotools. The following chapter is intended as a brief reminder of how these
tools can be used to install StarPU.
@node Downloading StarPU
@section Downloading StarPU
@menu
* Getting Sources::
* Optional dependencies::
@end menu
@node Getting Sources
@subsection Getting Sources
The simplest way to get the StarPU sources is to download the latest official
release tarball from @indicateurl{https://gforge.inria.fr/frs/?group_id=1570},
or the latest nightly snapshot from
@indicateurl{http://starpu.gforge.inria.fr/testing/}. The following documents
how to get the very latest version from the Subversion repository itself; this
should only be needed if you require the very latest changes (i.e. less than a
day old).
The source code is managed by a Subversion server hosted by
InriaGforge. To get the source code, you need:
@itemize
@item
To install the Subversion client if it is
not already available on your system. The software can be obtained from
@indicateurl{http://subversion.tigris.org}. If you are running
on Windows, you will probably prefer to use TortoiseSVN from
@indicateurl{http://tortoisesvn.tigris.org/}.
@item
You can check out the project's SVN repository through anonymous
access. This will provide you with read access to the
repository.
If you need write access on the StarPU project, you can also choose to
become a member of the project @code{starpu}. For this, you first need to get
an account on the gForge server. You can then send a request to join the project
(@indicateurl{https://gforge.inria.fr/project/request.php?group_id=1570}).
@item
More information on how to get a gForge account, to become a member of
a project, or on any other related task can be obtained from the
InriaGforge at @indicateurl{https://gforge.inria.fr/}. The most important
thing is to upload your public SSH key on the gForge server (see the
FAQ at @indicateurl{http://siteadmin.gforge.inria.fr/FAQ.html#Q6} for
instructions).
@end itemize
You can now check out the latest version from the Subversion server:
@itemize
@item
using the anonymous access via svn:
@example
% svn checkout svn://scm.gforge.inria.fr/svn/starpu/trunk
@end example
@item
using the anonymous access via https:
@example
% svn checkout --username anonsvn https://scm.gforge.inria.fr/svn/starpu/trunk
@end example
The password is @code{anonsvn}.
@item
using your gForge account
@example
% svn checkout svn+ssh://<login>@@scm.gforge.inria.fr/svn/starpu/trunk
@end example
@end itemize
The following step requires the availability of @code{autoconf} and
@code{automake} to generate the @code{./configure} script. This is
done by calling @code{./autogen.sh}. The required version of
@code{autoconf} is 2.60 or higher. You will also need @code{makeinfo}.
@example
% ./autogen.sh
@end example
If the autotools are not available on your machine or not recent
enough, you can choose to download the latest nightly tarball, which
is provided with a @code{configure} script.
@example
% wget http://starpu.gforge.inria.fr/testing/starpu-nightly-latest.tar.gz
@end example
@node Optional dependencies
@subsection Optional dependencies
The topology discovery library @code{hwloc} is not mandatory to use StarPU
but strongly recommended. It allows StarPU to increase performance and to
perform some topology-aware scheduling.
@code{hwloc} is available in major distributions and for most OSes and can be
downloaded from @indicateurl{http://www.open-mpi.org/software/hwloc}.
@node Configuration of StarPU
@section Configuration of StarPU
@menu
* Generating Makefiles and configuration scripts::
* Running the configuration::
@end menu
@node Generating Makefiles and configuration scripts
@subsection Generating Makefiles and configuration scripts
This step is not necessary when using the tarball releases of StarPU. If you
are using the source code from the svn repository, you first need to generate
the configure scripts and the Makefiles.
@example
% ./autogen.sh
@end example
@node Running the configuration
@subsection Running the configuration
@example
% ./configure
@end example
Details about options that are useful to give to @code{./configure} are given in
@ref{Compilation configuration}.
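For instance, following the standard GNU autotools conventions (this is
generic @code{configure} usage, not specific to StarPU), the installation
prefix can be chosen at configuration time:
@example
% ./configure --prefix=$HOME/starpu-install
@end example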
@node Building and Installing StarPU
@section Building and Installing StarPU
@menu
* Building::
* Sanity Checks::
* Installing::
@end menu
@node Building
@subsection Building
@example
% make
@end example
@node Sanity Checks
@subsection Sanity Checks
In order to make sure that StarPU is working properly on the system, it is also
possible to run a test suite.
@example
% make check
@end example
@node Installing
@subsection Installing
In order to install StarPU at the location that was specified during
configuration:
@example
% make install
@end example
@c ---------------------------------------------------------------------
@c Using StarPU
@c ---------------------------------------------------------------------
@node Using StarPU
@chapter Using StarPU
@menu
* Setting flags for compiling and linking applications::
* Running a basic StarPU application::
* Kernel threads started by StarPU::
* Using accelerators::
@end menu
@node Setting flags for compiling and linking applications
@section Setting flags for compiling and linking applications
Compiling and linking an application against StarPU may require specific
flags or libraries (for instance @code{CUDA} or @code{libspe2}).
To this end, it is possible to use the @code{pkg-config} tool.
If StarPU was not installed at some standard location, the path of StarPU's
library must be specified in the @code{PKG_CONFIG_PATH} environment variable so
that @code{pkg-config} can find it. For example, if StarPU was installed in
@code{$prefix_dir}:
@example
% PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$prefix_dir/lib/pkgconfig
@end example
The flags required to compile or link against StarPU are then
accessible with the following commands:
@example
% pkg-config --cflags libstarpu  # options for the compiler
% pkg-config --libs libstarpu    # options for the linker
@end example
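For instance, a single-file application (e.g. the @code{hello_world.c}
example shown in the next chapter) can be compiled and linked in one command:
@example
% cc hello_world.c -o hello_world $(pkg-config --cflags libstarpu) \
      $(pkg-config --libs libstarpu)
@end example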
@node Running a basic StarPU application
@section Running a basic StarPU application
Basic examples using StarPU have been built in the directory
@code{$prefix_dir/lib/starpu/examples/}. You can for example run the
example @code{vector_scal}.
@example
% $prefix_dir/lib/starpu/examples/vector_scal
BEFORE : First element was 1.000000
AFTER First element is 3.140000
%
@end example
When StarPU is used for the first time, the directory
@code{$HOME/.starpu/} is created; performance models will be stored in
that directory.
Please note that buses are benchmarked when StarPU is launched for the
first time. This may take a few minutes, or less if @code{hwloc} is
installed. This step is done only once per user and per machine.
@node Kernel threads started by StarPU
@section Kernel threads started by StarPU
TODO: StarPU starts one thread per CPU core and binds them there, uses one of
them per GPU. The application is not supposed to do computations in its own
threads. TODO: add a StarPU function to bind an application thread (e.g. the
main thread) to a dedicated core (and thus disable the corresponding StarPU CPU
worker).
@node Using accelerators
@section Using accelerators
When both CUDA and OpenCL drivers are enabled, StarPU will launch an
OpenCL worker for NVIDIA GPUs only if CUDA is not already running on them.
This design choice was necessary as OpenCL and CUDA cannot run at the
same time on the same NVIDIA GPU, as there is currently no interoperability
between them.
Details on how to specify devices running OpenCL and the ones running
CUDA are given in @ref{Enabling OpenCL}.
@c ---------------------------------------------------------------------
@c Basic Examples
@c ---------------------------------------------------------------------
@node Basic Examples
@chapter Basic Examples
@menu
* Compiling and linking options::
* Hello World::                 Submitting Tasks
* Scaling a Vector::            Manipulating Data
* Vector Scaling on an Hybrid CPU/GPU Machine::  Handling Heterogeneous Architectures
* Task and Worker Profiling::
* Partitioning Data::           Partitioning Data
* Performance model example::
* Theoretical lower bound on execution time::
* Insert Task Utility::
* More examples::               More examples shipped with StarPU
@end menu
@node Compiling and linking options
@section Compiling and linking options
Let's suppose StarPU has been installed in the directory
@code{$STARPU_DIR}. As explained in @ref{Setting flags for compiling and linking applications},
the variable @code{PKG_CONFIG_PATH} needs to be set. It is also
necessary to set the variable @code{LD_LIBRARY_PATH} to locate dynamic
libraries at runtime.
@example
% PKG_CONFIG_PATH=$STARPU_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
% LD_LIBRARY_PATH=$STARPU_DIR/lib:$LD_LIBRARY_PATH
@end example
The Makefile could for instance contain the following lines to define which
options must be given to the compiler and to the linker:
@cartouche
@example
CFLAGS  += $$(pkg-config --cflags libstarpu)
LDFLAGS += $$(pkg-config --libs libstarpu)
@end example
@end cartouche
@node Hello World
@section Hello World
@menu
* Required Headers::
* Defining a Codelet::
* Submitting a Task::
* Execution of Hello World::
@end menu
In this section, we show how to implement a simple program that submits a task to StarPU.
@node Required Headers
@subsection Required Headers
The @code{starpu.h} header should be included in any code using StarPU.
@cartouche
@smallexample
#include <starpu.h>
@end smallexample
@end cartouche
@node Defining a Codelet
@subsection Defining a Codelet
@cartouche
@smallexample
struct params @{
    int i;
    float f;
@};
void cpu_func(void *buffers[], void *cl_arg)
@{
    struct params *params = cl_arg;
    printf("Hello world (params = @{%i, %f@} )\n", params->i, params->f);
@}
starpu_codelet cl =
@{
    .where = STARPU_CPU,
    .cpu_func = cpu_func,
    .nbuffers = 0
@};
@end smallexample
@end cartouche
A codelet is a structure that represents a computational kernel. Such a codelet
may contain an implementation of the same kernel on different architectures
(e.g. CUDA, Cell's SPU, x86, ...).
The @code{nbuffers} field specifies the number of data buffers that are
manipulated by the codelet: here the codelet does not access or modify any data
that is controlled by our data management library. Note that the argument
passed to the codelet (the @code{cl_arg} field of the @code{starpu_task}
structure) does not count as a buffer since it is not managed by our data
management library, but just contains trivial parameters.
@c TODO need a crossref to the proper description of "where" see bla for more ...
We create a codelet which may only be executed on the CPUs. The @code{where}
field is a bitmask that defines where the codelet may be executed. Here, the
@code{STARPU_CPU} value means that only CPUs can execute this codelet
(@pxref{Codelets and Tasks} for more details on this field).
When a CPU core executes a codelet, it calls the @code{cpu_func} function,
which @emph{must} have the following prototype:
@code{void (*cpu_func)(void *buffers[], void *cl_arg);}
In this example, we can ignore the first argument of this function, which gives a
description of the input and output buffers (e.g. the size and the location of
the matrices), since there is none.
The second argument is a pointer to a buffer passed as an
argument to the codelet by the means of the @code{cl_arg} field of the
@code{starpu_task} structure.
@c TODO rewrite so that it is a little clearer ?
Be aware that this may be a pointer to a
@emph{copy} of the actual buffer, and not the pointer given by the programmer:
if the codelet modifies this buffer, there is no guarantee that the initial
buffer will be modified as well: this for instance implies that the buffer
cannot be used as a synchronization medium. If synchronization is needed, data
has to be registered to StarPU, see @ref{Scaling a Vector}.
@node Submitting a Task
@subsection Submitting a Task
@cartouche
@smallexample
void callback_func(void *callback_arg)
@{
    printf("Callback function (arg %x)\n", callback_arg);
@}
int main(int argc, char **argv)
@{
    /* @b{initialize StarPU} */
    starpu_init(NULL);
    struct starpu_task *task = starpu_task_create();
    task->cl = &cl; /* @b{Pointer to the codelet defined above} */
    struct params params = @{ 1, 2.0f @};
    task->cl_arg = &params;
    task->cl_arg_size = sizeof(params);
    task->callback_func = callback_func;
    task->callback_arg = (void *)0x42;
    /* @b{starpu_task_submit will be a blocking call} */
    task->synchronous = 1;
    /* @b{submit the task to StarPU} */
    starpu_task_submit(task);
    /* @b{terminate StarPU} */
    starpu_shutdown();
    return 0;
@}
@end smallexample
@end cartouche
Before submitting any tasks to StarPU, @code{starpu_init} must be called. The
@code{NULL} argument specifies that we use the default configuration. Tasks cannot
be submitted after the termination of StarPU by a call to
@code{starpu_shutdown}.
In the example above, a task structure is allocated by a call to
@code{starpu_task_create}. This function only allocates and fills the
corresponding structure with the default settings (@pxref{starpu_task_create}),
but it does not submit the task to StarPU.
@c not really clear ;)
The @code{cl} field is a pointer to the codelet which the task will
execute: in other words, the codelet structure describes which computational
kernel should be offloaded on the different architectures, and the task
structure is a wrapper containing a codelet and the piece of data on which the
codelet should operate.
The optional @code{cl_arg} field is a pointer to a buffer (of size
@code{cl_arg_size}) with some parameters for the kernel
described by the codelet. For instance, if a codelet implements a computational
kernel that multiplies its input vector by a constant, the constant could be
specified by the means of this buffer, instead of registering it as a StarPU
data. It must however be noted that StarPU avoids making copies whenever possible
and rather passes the pointer as such, so the buffer which is pointed at must be
kept allocated until the task terminates, and if several tasks are submitted
with various parameters, each of them must be given a pointer to their own
buffer.
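As a sketch of this last point (not taken from the StarPU sources; the
@code{NTASKS} constant and the heap allocation are illustrative assumptions),
each asynchronous task can be given its own heap-allocated parameter buffer:
@cartouche
@smallexample
int j;
for (j = 0; j < NTASKS; j++) @{
    struct params *p = malloc(sizeof(*p));  /* @b{one buffer per task} */
    p->i = j;
    p->f = 2.0f * j;
    struct starpu_task *task = starpu_task_create();
    task->cl = &cl;
    task->cl_arg = p;
    task->cl_arg_size = sizeof(*p);
    starpu_task_submit(task);
@}
/* The p buffers must stay allocated until the tasks complete; they can
 * for instance be freed from the callback, or after
 * starpu_task_wait_for_all() returns. */
@end smallexample
@end cartouche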
Once a task has been executed, an optional callback function is called.
While the computational kernel could be offloaded on various architectures, the
callback function is always executed on a CPU. The @code{callback_arg}
pointer is passed as an argument of the callback. The prototype of a callback
function must be:
@code{void (*callback_function)(void *);}
If the @code{synchronous} field is non-zero, task submission will be
synchronous: the @code{starpu_task_submit} function will not return until the
task has been executed. Note that the @code{starpu_shutdown} method does not
guarantee that asynchronous tasks have been executed before it returns;
@code{starpu_task_wait_for_all} can be used to that effect, or data can be
acquired (@code{starpu_data_acquire(vector_handle, STARPU_R);}), which will
implicitly wait for all the tasks scheduled to work on it, unless explicitly
disabled thanks to @code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
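As a minimal sketch of the asynchronous pattern (reusing the codelet
@code{cl} defined above):
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->synchronous = 0;      /* @b{submission returns immediately} */
starpu_task_submit(task);
/* ... possibly submit more tasks ... */
starpu_task_wait_for_all(); /* @b{wait for every submitted task} */
starpu_shutdown();
@end smallexample
@end cartouche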
@node Execution of Hello World
@subsection Execution of Hello World
@smallexample
% make hello_world
cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) hello_world.c -o hello_world
% ./hello_world
Hello world (params = @{1, 2.000000@} )
Callback function (arg 42)
@end smallexample
@node Scaling a Vector
@section Manipulating Data: Scaling a Vector
The previous example has shown how to submit tasks. In this section,
we show how StarPU tasks can manipulate data. The full source code for
this example is given in @ref{Full source code for the 'Scaling a Vector' example}.
@menu
* Source code of Vector Scaling::
* Execution of Vector Scaling::
@end menu
@node Source code of Vector Scaling
@subsection Source code of Vector Scaling
Programmers can describe the data layout of their application so that StarPU is
responsible for enforcing data coherency and availability across the machine.
Instead of handling complex (and non-portable) mechanisms to perform data
movements, programmers only declare which piece of data is accessed and/or
modified by a task, and StarPU makes sure that when a computational kernel
starts somewhere (e.g. on a GPU), its data are available locally.
Before submitting those tasks, the programmer first needs to declare the
different pieces of data to StarPU using the @code{starpu_*_data_register}
functions. To ease the development of applications for StarPU, it is possible
to describe multiple types of data layout. A type of data layout is called an
@b{interface}. There are different predefined interfaces available in StarPU:
here we will consider the @b{vector interface}.
The following lines show how to declare an array of @code{NX} elements of type
@code{float} using the vector interface:
@cartouche
@smallexample
float vector[NX];
starpu_data_handle vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end smallexample
@end cartouche
The first argument, called the @b{data handle}, is an opaque pointer which
designates the array in StarPU. This is also the structure which is used to
describe which data is used by a task. The second argument is the node number
where the data originally resides. Here it is 0 since the @code{vector} array is in
the main memory. Then comes the pointer @code{vector} where the data can be found in main memory,
the number of elements in the vector and the size of each element.
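Conversely, when the application no longer needs StarPU to manage a piece of
data, the handle can be unregistered (a sketch; @code{starpu_data_unregister}
is detailed in the API chapter, and brings the data back to main memory
before forgetting the handle):
@cartouche
@smallexample
starpu_data_unregister(vector_handle);
@end smallexample
@end cartouche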
The following shows how to construct a StarPU task that will manipulate the
vector and a constant factor.
@cartouche
@smallexample
float factor = 3.14;
struct starpu_task *task = starpu_task_create();
task->cl = &cl;                          /* @b{Pointer to the codelet defined below} */
task->buffers[0].handle = vector_handle; /* @b{First parameter of the codelet} */
task->buffers[0].mode = STARPU_RW;
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->synchronous = 1;
starpu_task_submit(task);
@end smallexample
@end cartouche
Since the factor is a mere constant float value parameter,
it does not need a preliminary registration, and
can just be passed through the @code{cl_arg} pointer like in the previous
example. The vector parameter is described by its handle.
There are two fields in each element of the @code{buffers} array.
@code{handle} is the handle of the data, and @code{mode} specifies how the
kernel will access the data (@code{STARPU_R} for read-only, @code{STARPU_W} for
write-only and @code{STARPU_RW} for read and write access).
The definition of the codelet can be written as follows:
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned i;
    float *factor = cl_arg;
    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* local copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    for (i = 0; i < n; i++)
        val[i] *= *factor;
@}
starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = scal_cpu_func,
    .nbuffers = 1
@};
@end smallexample
@end cartouche
The first argument is an array that gives
a description of all the buffers passed in the @code{task->buffers} array. The
size of this array is given by the @code{nbuffers} field of the codelet
structure. For the sake of genericity, this array contains pointers to the
different interfaces describing each buffer. In the case of the @b{vector
interface}, the location of the vector (resp. its length) is accessible in the
@code{ptr} (resp. @code{nx}) field of this interface. Since the vector is accessed in a
read-write fashion, any modification will automatically affect future accesses
to this vector made by other tasks.
The second argument of the @code{scal_cpu_func} function contains a pointer to the
parameters of the codelet (given in @code{task->cl_arg}), so that we read the
constant factor from this pointer.
@node Execution of Vector Scaling
@subsection Execution of Vector Scaling
@smallexample
% make vector_scal
cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) vector_scal.c -o vector_scal
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
@node Vector Scaling on an Hybrid CPU/GPU Machine
@section Vector Scaling on an Hybrid CPU/GPU Machine
Contrary to the previous examples, the task submitted in this example may not
only be executed by the CPUs, but also by a CUDA device.
@menu
* Definition of the CUDA Kernel::
* Definition of the OpenCL Kernel::
* Definition of the Main Code::
* Execution of Hybrid Vector Scaling::
@end menu
@node Definition of the CUDA Kernel
@subsection Definition of the CUDA Kernel
The CUDA implementation can be written as follows. It needs to be compiled with
a CUDA compiler such as nvcc, the NVIDIA CUDA compiler driver. It must be noted
that the vector pointer returned by @code{STARPU_VECTOR_GET_PTR} is here a pointer in GPU
memory, so that it can be passed as such to the @code{vector_mult_cuda} kernel
call.
@cartouche
@smallexample
#include <starpu.h>
static __global__ void vector_mult_cuda(float *val, unsigned n,
                                        float factor)
@{
    unsigned i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        val[i] *= factor;
@}
extern "C" void scal_cuda_func(void *buffers[], void *_args)
@{
    float *factor = (float *)_args;
    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* local copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned threads_per_block = 64;
    unsigned nblocks = (n + threads_per_block-1) / threads_per_block;
@i{    vector_mult_cuda<<<nblocks,threads_per_block, 0, starpu_cuda_get_local_stream()>>>(val, n, *factor);}
@i{    cudaStreamSynchronize(starpu_cuda_get_local_stream());}
@}
@end smallexample
@end cartouche
@node Definition of the OpenCL Kernel
@subsection Definition of the OpenCL Kernel
The OpenCL implementation can be written as follows. StarPU provides
tools to compile an OpenCL kernel stored in a file.
@cartouche
@smallexample
__kernel void vector_mult_opencl(__global float* val, int nx, float factor)
@{
    const int i = get_global_id(0);
    if (i < nx) @{
        val[i] *= factor;
    @}
@}
@end smallexample
@end cartouche
Similarly to CUDA, the pointer returned by @code{STARPU_VECTOR_GET_PTR} is here
a device pointer, so that it is passed as such to the OpenCL kernel.
@cartouche
@smallexample
#include <starpu.h>
@i{#include <starpu_opencl.h>}
@i{extern struct starpu_opencl_program programs;}
void scal_opencl_func(void *buffers[], void *_args)
@{
    float *factor = _args;
@i{    int id, devid, err;}
@i{    cl_kernel kernel;}
@i{    cl_command_queue queue;}
@i{    cl_event event;}
    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* local copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
@i{    id = starpu_worker_get_id();}
@i{    devid = starpu_worker_get_devid(id);}
@i{    err = starpu_opencl_load_kernel(&kernel, &queue, &programs,}
@i{                "vector_mult_opencl", devid);   /* @b{Name of the codelet defined above} */}
@i{    if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}
@i{    err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &val);}
@i{    err |= clSetKernelArg(kernel, 1, sizeof(n), &n);}
@i{    err |= clSetKernelArg(kernel, 2, sizeof(*factor), factor);}
@i{    if (err) STARPU_OPENCL_REPORT_ERROR(err);}
@i{    @{}
@i{        size_t global=1;}
@i{        size_t local=1;}
@i{        err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 0, NULL, &event);}
@i{        if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}
@i{    @}}
@i{    clFinish(queue);}
@i{    starpu_opencl_collect_stats(event);}
@i{    clReleaseEvent(event);}
@i{    starpu_opencl_release_kernel(kernel);}
@}
@end smallexample
@end cartouche
@node Definition of the Main Code
@subsection Definition of the Main Code
The CPU implementation is the same as in the previous section.
Here is the source of the main application. You can notice the value of the
field @code{where} for the codelet. We specify
@code{STARPU_CPU|STARPU_CUDA|STARPU_OPENCL} to indicate to StarPU that the codelet
can be executed either on a CPU or on a CUDA or an OpenCL device.
@cartouche
@smallexample
#include <starpu.h>
#define NX 2048
extern void scal_cuda_func(void *buffers[], void *_args);
extern void scal_cpu_func(void *buffers[], void *_args);
extern void scal_opencl_func(void *buffers[], void *_args);
/* @b{Definition of the codelet} */
static starpu_codelet cl = @{
    .where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL, /* @b{It can be executed on a CPU,} */
                                   /* @b{on a CUDA device, or on an OpenCL device} */
    .cuda_func = scal_cuda_func,
    .cpu_func = scal_cpu_func,
    .opencl_func = scal_opencl_func,
    .nbuffers = 1
@};
#ifdef STARPU_USE_OPENCL
/* @b{The compiled version of the OpenCL program} */
struct starpu_opencl_program programs;
#endif
int main(int argc, char **argv)
@{
    float *vector;
    int i, ret;
    float factor=3.0;
    struct starpu_task *task;
    starpu_data_handle vector_handle;
    starpu_init(NULL);    /* @b{Initialising StarPU} */
#ifdef STARPU_USE_OPENCL
    starpu_opencl_load_opencl_from_file(
            "examples/basic_examples/vector_scal_opencl_codelet.cl",
            &programs, NULL);
#endif
    vector = malloc(NX*sizeof(vector[0]));
    assert(vector);
    for(i=0 ; i<NX ; i++) vector[i] = i;
@end smallexample
@end cartouche
@cartouche
@smallexample
    /* @b{Registering data within StarPU} */
    starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector,
                                NX, sizeof(vector[0]));
    /* @b{Definition of the task} */
    task = starpu_task_create();
    task->cl = &cl;
    task->buffers[0].handle = vector_handle;
    task->buffers[0].mode = STARPU_RW;
    task->cl_arg = &factor;
    task->cl_arg_size = sizeof(factor);
@end smallexample
@end cartouche
@cartouche
@smallexample
    /* @b{Submitting the task} */
    ret = starpu_task_submit(task);
    if (ret == -ENODEV) @{
        fprintf(stderr, "No worker may execute this task\n");
        return 1;
    @}
@c TODO: Mmm, should rather be an unregistration with an implicit dependency, no?
    /* @b{Waiting for its termination} */
    starpu_task_wait_for_all();
    /* @b{Update the vector in RAM} */
    starpu_data_acquire(vector_handle, STARPU_R);
@end smallexample
@end cartouche
@cartouche
@smallexample
    /* @b{Access the data} */
    for(i=0 ; i<NX; i++) @{
        fprintf(stderr, "%f ", vector[i]);
    @}
    fprintf(stderr, "\n");
    /* @b{Release the data and shutdown StarPU} */
    starpu_data_release(vector_handle);
    starpu_shutdown();
    return 0;
@}
@end smallexample
@end cartouche
@node Execution of Hybrid Vector Scaling
@subsection Execution of Hybrid Vector Scaling
The Makefile given at the beginning of the section must be extended to
give the rules to compile the CUDA source code. Note that the source
file of the OpenCL kernel does not need to be compiled now; it will
be compiled at run-time when calling the function
@code{starpu_opencl_load_opencl_from_file()} (@pxref{starpu_opencl_load_opencl_from_file}).
@cartouche
@smallexample
CFLAGS  += $(shell pkg-config --cflags libstarpu)
LDFLAGS += $(shell pkg-config --libs libstarpu)
CC       = gcc
vector_scal: vector_scal.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o
%.o: %.cu
	nvcc $(CFLAGS) $< -c -o $@
clean:
	rm -f vector_scal *.o
@end smallexample
@end cartouche
@smallexample
% make
@end smallexample
and to execute it, with the default configuration:
@smallexample
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
or for example, by disabling CPU devices:
@smallexample
% STARPU_NCPUS=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
or by disabling CUDA devices (which may enable the use of OpenCL,
see @ref{Using accelerators}):
@smallexample
% STARPU_NCUDA=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
@node Task and Worker Profiling
@section Task and Worker Profiling

A full example showing how to use the profiling API is available in
the StarPU sources in the directory @code{examples/profiling/}.
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->synchronous = 1;
/* We will destroy the task structure by hand so that we can
 * query the profiling info before the task is destroyed. */
task->destroy = 0;

/* Submit and wait for completion (since synchronous was set to 1) */
starpu_task_submit(task);

/* The task is finished, get profiling information */
struct starpu_task_profiling_info *info = task->profiling_info;

/* How much time did it take before the task started? */
double delay = starpu_timing_timespec_delay_us(&info->submit_time, &info->start_time);

/* How long was the task execution? */
double length = starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);

/* We don't need the task structure anymore */
starpu_task_destroy(task);
@end smallexample
@end cartouche
@cartouche
@smallexample
/* Display the occupancy of all workers during the test */
int worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
@{
    struct starpu_worker_profiling_info worker_info;
    int ret = starpu_worker_get_profiling_info(worker, &worker_info);
    STARPU_ASSERT(!ret);

    double total_time     = starpu_timing_timespec_to_us(&worker_info.total_time);
    double executing_time = starpu_timing_timespec_to_us(&worker_info.executing_time);
    double sleeping_time  = starpu_timing_timespec_to_us(&worker_info.sleeping_time);

    float executing_ratio = 100.0*executing_time/total_time;
    float sleeping_ratio  = 100.0*sleeping_time/total_time;

    char workername[128];
    starpu_worker_get_name(worker, workername, 128);
    fprintf(stderr, "Worker %s:\n", workername);
    fprintf(stderr, "\ttotal time : %.2lf ms\n", total_time*1e-3);
    fprintf(stderr, "\texec time  : %.2lf ms (%.2f %%)\n", executing_time*1e-3,
            executing_ratio);
    fprintf(stderr, "\tblocked time : %.2lf ms (%.2f %%)\n", sleeping_time*1e-3,
            sleeping_ratio);
@}
@end smallexample
@end cartouche
@node Partitioning Data
@section Partitioning Data

An existing piece of data can be partitioned into sub-parts to be used by different tasks, for instance:
@cartouche
@smallexample
int vector[NX];
starpu_data_handle handle;

/* Declare data to StarPU */
starpu_vector_data_register(&handle, 0, (uintptr_t)vector, NX, sizeof(vector[0]));

/* Partition the vector in PARTS sub-vectors */
starpu_filter f =
@{
    .filter_func = starpu_block_filter_func_vector,
    .nchildren = PARTS,
    .get_nchildren = NULL,
    .get_child_ops = NULL
@};
starpu_data_partition(handle, &f);
@end smallexample
@end cartouche
@cartouche
@smallexample
/* Submit a task on each sub-vector */
for (i=0; i<starpu_data_get_nb_children(handle); i++) @{
    /* Get subdata number i (there is only 1 dimension) */
    starpu_data_handle sub_handle = starpu_data_get_sub_data(handle, 1, i);
    struct starpu_task *task = starpu_task_create();

    task->buffers[0].handle = sub_handle;
    task->buffers[0].mode = STARPU_RW;
    task->cl = &cl;
    task->synchronous = 1;
    task->cl_arg = &factor;
    task->cl_arg_size = sizeof(factor);

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche
Partitioning can be applied several times, see
@code{examples/basic_examples/mult.c} and @code{examples/filters/}.

@node Performance model example
@section Performance model example

To achieve good scheduling, StarPU scheduling policies need to be able to
estimate in advance the duration of a task. This is done by giving to codelets
a performance model. There are several kinds of performance models.

@itemize
@item
Providing an estimation from the application itself (@code{STARPU_COMMON} model type and @code{cost_model} field),
see for instance
@code{examples/common/blas_model.h} and @code{examples/common/blas_model.c}. The estimation can also be provided for each architecture (@code{STARPU_PER_ARCH} model type and @code{per_arch} field).

@item
Measured at runtime (@code{STARPU_HISTORY_BASED} model type). This assumes that for a
given set of data input/output sizes, the performance will always be about the
same. This is very true for regular kernels on GPUs for instance (<0.1% error),
and just a bit less true on CPUs (~=1% error). This also assumes that there are
few different sets of data input/output sizes. StarPU will then keep record of
the average time of previous executions on the various processing units, and use
it as an estimation. History is done per task size, by using a hash of the input
and output sizes as an index.
It will also save it in @code{~/.starpu/sampling/codelets}
for further executions, and can be observed by using the
@code{starpu_perfmodel_display} command. The following is a small code example.
@cartouche
@smallexample
static struct starpu_perfmodel_t mult_perf_model = @{
    .type = STARPU_HISTORY_BASED,
    .symbol = "mult_perf_model"
@};

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = cpu_mult,
    .nbuffers = 3,
    /* for the scheduling policy to be able to use performance models */
    .model = &mult_perf_model
@};
@end smallexample
@end cartouche
@item
Measured at runtime and refined by regression (@code{STARPU_REGRESSION_BASED} model
type). This still assumes performance regularity, but can work with various data
input sizes, by applying an @code{a*n^b+c} regression over observed execution
times; a sketch of declaring such a model is shown after this list.
@end itemize
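Declaring a regression-based model only requires changing the model type; for
instance (a minimal sketch, with an illustrative symbol name, kernel and
codelet):

@cartouche
@smallexample
static struct starpu_perfmodel_t vector_perf_model = @{
    .type = STARPU_REGRESSION_BASED,
    .symbol = "vector_perf_model"
@};

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = cpu_vector_kernel,
    .nbuffers = 1,
    .model = &vector_perf_model
@};
@end smallexample
@end cartouche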
How to use schedulers which can benefit from such performance models is explained
in @ref{Task scheduling policy}.

The same can be done for task power consumption estimation, by setting the
@code{power_model} field the same way as the @code{model} field. Note: for
now, the application has to give the power consumption performance model
a name which is different from that of the execution time performance model.
@node Theoretical lower bound on execution time
@section Theoretical lower bound on execution time

For kernels with history-based performance models, StarPU can very easily provide a theoretical lower
bound for the execution time of a whole set of tasks. See for
instance @code{examples/lu/lu_example.c}: before submitting tasks,
call @code{starpu_bound_start}, and after complete execution, call
@code{starpu_bound_stop}. @code{starpu_bound_print_lp} or
@code{starpu_bound_print_mps} can then be used to output a Linear Programming
problem corresponding to the schedule of your tasks. Run it through
@code{lp_solve} or any other linear programming solver, and that will give you a
lower bound for the total execution time of your tasks. If StarPU was compiled
with the glpk library installed, @code{starpu_bound_compute} can be used to
solve it immediately and get the optimized minimum. Its @code{integer}
parameter allows to decide whether integer resolution should be computed
and returned.
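For instance, a minimal sketch (the @code{deps} and @code{prio} parameters are
discussed below; the output file name is illustrative):

@cartouche
@smallexample
starpu_bound_start(deps, prio);
/* ... submit the tasks ... */
starpu_task_wait_for_all();
starpu_bound_stop();

/* Output the Linear Programming problem for an external solver */
FILE *output = fopen("tasks.lp", "w");
starpu_bound_print_lp(output);
fclose(output);
@end smallexample
@end cartouche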
The @code{deps} parameter tells StarPU whether to take task and implicit data
dependencies into account. It must be understood that the linear programming
problem size is quadratic with the number of tasks, so the time to solve it
will be very long; it could be minutes for just a few dozen tasks. You should
probably use @code{lp_solve -timeout 1 test.lp -wmps test.mps} to convert the
problem to MPS format and then use a better solver: @code{glpsol} might be
better than @code{lp_solve} for instance (the @code{--pcost} option may be
useful), but sometimes doesn't manage to converge. @code{cbc} might look
slower, but it is parallel. Be sure to try at least all the @code{-B} options
of @code{lp_solve}. For instance, we often just use
@code{lp_solve -cc -B1 -Bb -Bg -Bp -Bf -Br -BG -Bd -Bs -BB -Bo -Bc -Bi}, and
the @code{-gr} option can also be quite useful.

Setting @code{deps} to 0 will only take into account the actual computations
on processing units. It however still properly takes into account the varying
performances of kernels and processing units, which is quite more accurate than
just comparing StarPU performances with the fastest of the kernels being used.

The @code{prio} parameter tells StarPU whether to simulate taking into account
the priorities as the StarPU scheduler would, i.e. schedule prioritized
tasks before less prioritized tasks, to check to what extent this leads
to a less optimal solution. This increases even more computation time.

Note that for simplicity, all this however doesn't take into account data
transfers, which are assumed to be completely overlapped.
@node Insert Task Utility
@section Insert Task Utility

StarPU provides the wrapper function @code{starpu_insert_task} to ease
the creation and submission of tasks.

@deftypefun int starpu_insert_task (starpu_codelet *cl, ...)
Create and submit a task corresponding to @var{cl} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet can be of the following types:

@itemize
@item
@code{STARPU_R}, @code{STARPU_W}, @code{STARPU_RW}, @code{STARPU_SCRATCH}: an access mode followed by a data handle;
@item
@code{STARPU_VALUE} followed by a pointer to a constant value and
the size of the constant;
@item
@code{STARPU_CALLBACK} followed by a pointer to a callback function;
@item
@code{STARPU_CALLBACK_ARG} followed by a pointer to be given as an
argument to the callback function;
@item
@code{STARPU_PRIORITY} followed by an integer defining a priority level.
@end itemize

Parameters to be passed to the codelet implementation are defined
through the type @code{STARPU_VALUE}. The function
@code{starpu_unpack_cl_args} must be called within the codelet
implementation to retrieve them.
@end deftypefun
Here is the implementation of the codelet:

@smallexample
void func_cpu(void *descr[], void *_args)
@{
    int *x0 = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
    float *x1 = (float *)STARPU_VARIABLE_GET_PTR(descr[1]);
    int ifactor;
    float ffactor;

    starpu_unpack_cl_args(_args, &ifactor, &ffactor);
    *x0 = *x0 * ifactor;
    *x1 = *x1 * ffactor;
@}

starpu_codelet mycodelet = @{
    .where = STARPU_CPU,
    .cpu_func = func_cpu,
    .nbuffers = 2
@};
@end smallexample
And the call to the @code{starpu_insert_task} wrapper:

@smallexample
starpu_insert_task(&mycodelet,
                   STARPU_VALUE, &ifactor, sizeof(ifactor),
                   STARPU_VALUE, &ffactor, sizeof(ffactor),
                   STARPU_RW, data_handles[0], STARPU_RW, data_handles[1],
                   0);
@end smallexample
The call to @code{starpu_insert_task} is equivalent to the following
code:

@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &mycodelet;
task->buffers[0].handle = data_handles[0];
task->buffers[0].mode = STARPU_RW;
task->buffers[1].handle = data_handles[1];
task->buffers[1].mode = STARPU_RW;

char *arg_buffer;
size_t arg_buffer_size;
starpu_pack_cl_args(&arg_buffer, &arg_buffer_size,
                    STARPU_VALUE, &ifactor, sizeof(ifactor),
                    STARPU_VALUE, &ffactor, sizeof(ffactor),
                    0);

task->cl_arg = arg_buffer;
task->cl_arg_size = arg_buffer_size;

int ret = starpu_task_submit(task);
@end smallexample
@node More examples
@section More examples

More examples are available in the StarPU sources in the @code{examples/}
directory. Simple examples include:

@table @asis
@item @code{incrementer/}:
Trivial incrementation test.
@item @code{basic_examples/}:
Simple documented Hello world (as shown in @ref{Hello World}), vector/scalar product (as shown
in @ref{Vector Scaling on an Hybrid CPU/GPU Machine}), matrix
product examples (as shown in @ref{Performance model example}), an example using the blocked matrix data
interface, and an example using the variable data interface.
@item @code{matvecmult/}:
OpenCL example from NVidia, adapted to StarPU.
@item @code{axpy/}:
AXPY CUBLAS operation adapted to StarPU.
@item @code{fortran/}:
Example of Fortran bindings.
@end table

More advanced examples include:

@table @asis
@item @code{filters/}:
Examples using filters, as shown in @ref{Partitioning Data}.
@item @code{lu/}:
LU matrix factorization, see for instance @code{xlu_implicit.c}.
@item @code{cholesky/}:
Cholesky matrix factorization, see for instance @code{cholesky_implicit.c}.
@end table
@c ---------------------------------------------------------------------
@c Performance options
@c ---------------------------------------------------------------------

@node Performance optimization
@chapter How to optimize performance with StarPU

TODO: improve!

@menu
* Data management::
* Task submission::
* Task priorities::
* Task scheduling policy::
* Task distribution vs Data transfer::
* Power-based scheduling::
* Profiling::
* CUDA-specific optimizations::
@end menu

Simply encapsulating application kernels into tasks already permits
seamless support of CPUs and GPUs at the same time. To achieve good
performance, a few additional changes are needed.
@node Data management
@section Data management

@c By default, StarPU does not enable data prefetching, because CUDA does
@c not announce when too many data transfers were scheduled and can thus block
@c unexpectedly... To enable data prefetching, use @code{export STARPU_PREFETCH=1}
@c .

By default, StarPU leaves replicates of data wherever they were used, in case they
will be re-used by other tasks, thus saving the data transfer time. When some
task modifies some data, all the other replicates are invalidated, and only the
processing unit which ran that task will have a valid replicate of the data. If the
application knows that this data will not be re-used by further tasks, it should
advise StarPU to immediately replicate it to a desired list of memory nodes (given
through a bitmask). This can be understood like the write-through mode of CPU caches.

@example
starpu_data_set_wt_mask(img_handle, 1<<0);
@end example

will for instance request to always transfer a replicate into the main memory (node
0), as bit 0 of the write-through bitmask is being set.
When the application allocates data, whenever possible it should use the
@code{starpu_malloc} function, which will ask CUDA or
OpenCL to make the allocation itself and pin the corresponding allocated
memory. This is needed to permit asynchronous data transfers, i.e. to allow
data transfers to overlap with computations.
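For instance, a buffer meant to be transferred asynchronously would be
allocated as follows (a minimal sketch, reusing the @code{NX} size from the
examples above):

@cartouche
@smallexample
float *vector;
/* Ask StarPU (and thus CUDA/OpenCL) to allocate pinned memory */
starpu_malloc((void **)&vector, NX * sizeof(vector[0]));
@end smallexample
@end cartouche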
@node Task submission
@section Task submission

To let StarPU make online optimizations, tasks should be submitted
asynchronously as much as possible. Ideally, all the tasks should be
submitted, and mere calls to @code{starpu_task_wait_for_all} or
@code{starpu_data_acquire} be done to wait for
termination. StarPU will then be able to rework the whole schedule, overlap
computation with communication, manage accelerator local memory usage, etc.
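A minimal sketch of this pattern (assuming a codelet @code{cl} and an array of
data handles are already set up):

@cartouche
@smallexample
for (i = 0; i < ntasks; i++) @{
    struct starpu_task *task = starpu_task_create();
    task->cl = &cl;
    task->buffers[0].handle = handles[i];
    task->buffers[0].mode = STARPU_RW;
    /* Asynchronous submission: do not wait here */
    starpu_task_submit(task);
@}
/* Wait only once, after everything has been submitted */
starpu_task_wait_for_all();
@end smallexample
@end cartouche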
@node Task priorities
@section Task priorities

By default, StarPU will consider the tasks in the order they are submitted by
the application. If the application programmer knows that some tasks should
be performed in priority (for instance because their output is needed by many
other tasks and may thus be a bottleneck if not executed early enough), the
@code{priority} field of the task structure should be set to transmit the
priority information to StarPU, as sketched below.
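A minimal sketch (assuming priority values between @code{STARPU_MIN_PRIO} and
@code{STARPU_MAX_PRIO}):

@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
/* Mark this task as urgent */
task->priority = STARPU_MAX_PRIO;
starpu_task_submit(task);
@end smallexample
@end cartouche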
@node Task scheduling policy
@section Task scheduling policy

By default, StarPU uses the @code{eager} simple greedy scheduler. This is
because it provides correct load balance even if the application codelets do not
have performance models. If your application codelets have performance models
(@pxref{Performance model example} for examples showing how to do it),
you should change the scheduler thanks to the @code{STARPU_SCHED} environment
variable. For instance @code{export STARPU_SCHED=dmda}. Use @code{export
STARPU_SCHED=help} to get the list of available schedulers.

@c TODO: give some details about each scheduler.

Most schedulers are based on an estimation of codelet duration on each kind
of processing unit. For this to be possible, the application programmer needs
to configure a performance model for the codelets of the application (see
@ref{Performance model example} for instance). History-based performance models
use on-line calibration. StarPU will automatically calibrate codelets
which have never been calibrated yet. To force continuing calibration, use
@code{export STARPU_CALIBRATE=1}. To drop existing calibration information
completely and re-calibrate from start, use @code{export STARPU_CALIBRATE=2}.
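For instance, with the vector scaling example shown earlier:

@smallexample
% STARPU_SCHED=dmda STARPU_CALIBRATE=1 ./vector_scal
@end smallexample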
Note: due to CUDA limitations, to be able to measure kernel duration,
calibration mode needs to disable asynchronous data transfers. Calibration thus
disables data transfer / computation overlapping, and should thus not be used
for final benchmarks. Note 2: history-based performance models get calibrated
only if a performance-model-based scheduler is chosen.
@node Task distribution vs Data transfer
@section Task distribution vs Data transfer

Distributing tasks to balance the load induces data transfer penalty. StarPU
thus needs to find a balance between both. The target function that the
@code{dmda} scheduler of StarPU
tries to minimize is @code{alpha * T_execution + beta * T_data_transfer}, where
@code{T_execution} is the estimated execution time of the codelet (usually
accurate), and @code{T_data_transfer} is the estimated data transfer time. The
latter is however estimated based on bus calibration before execution start,
i.e. with an idle machine. You can force bus re-calibration by running
@code{starpu_calibrate_bus}. The beta parameter defaults to 1, but it can be
worth trying to tweak it by using @code{export STARPU_BETA=2} for instance.
This is of course imprecise, but in practice, a rough estimation already gives
results as good as a precise estimation would.
@node Power-based scheduling
@section Power-based scheduling

If the application can provide some power performance model (through
the @code{power_model} field of the codelet structure), StarPU will
take it into account when distributing tasks. The target function that
the @code{dmda} scheduler minimizes becomes @code{alpha * T_execution +
beta * T_data_transfer + gamma * Consumption}, where @code{Consumption}
is the estimated task consumption in Joules. To tune this parameter, use
@code{export STARPU_GAMMA=3000} for instance, to express that each Joule
(i.e. 1 kW during 1000 us) is worth 3000 us of execution time penalty. Setting
alpha and beta to zero permits taking only power consumption into account.

This is however not sufficient to correctly optimize power: the scheduler would
simply tend to run all computations on the most energy-conservative processing
unit. To account for the consumption of the whole machine (including idle
processing units), the idle power of the machine should be given by setting
@code{export STARPU_IDLE_POWER=200} for 200W, for instance. This value can often
be obtained from the machine power supplier.

The power actually consumed by the total execution can be displayed by setting
@code{export STARPU_PROFILING=1 STARPU_WORKER_STATS=1}.
@node Profiling
@section Profiling

A quick view of how many tasks each worker has executed can be obtained by setting
@code{export STARPU_WORKER_STATS=1}. This is a convenient way to check that
execution did happen on accelerators without penalizing performance with
the profiling overhead.

More detailed profiling information can be enabled by using @code{export STARPU_PROFILING=1} or by
calling @code{starpu_profiling_status_set} from the source code.
Statistics on the execution can then be obtained by using @code{export
STARPU_BUS_STATS=1} and @code{export STARPU_WORKER_STATS=1}.
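For instance, with the vector scaling example shown earlier:

@smallexample
% STARPU_PROFILING=1 STARPU_BUS_STATS=1 STARPU_WORKER_STATS=1 ./vector_scal
@end smallexample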
More details on performance feedback are provided by the next chapter.
@node CUDA-specific optimizations
@section CUDA-specific optimizations

Due to CUDA limitations, StarPU will have a hard time overlapping its own
communications and the codelet computations if the application does not use a
dedicated CUDA stream for its computations. StarPU provides one by the use of
@code{starpu_cuda_get_local_stream()} which should be used by all CUDA codelet
operations. For instance:

@example
func <<<grid,block,0,starpu_cuda_get_local_stream()>>> (foo, bar);
cudaStreamSynchronize(starpu_cuda_get_local_stream());
@end example

Unfortunately, a lot of CUDA libraries do not have stream variants of
kernels. That will lower the potential for overlapping.
@c ---------------------------------------------------------------------
@c Performance feedback
@c ---------------------------------------------------------------------

@node Performance feedback
@chapter Performance feedback

@menu
* On-line:: On-line performance feedback
* Off-line:: Off-line performance feedback
* Codelet performance:: Performance of codelets
@end menu

@node On-line
@section On-line performance feedback

@menu
* Enabling monitoring:: Enabling on-line performance monitoring
* Task feedback:: Per-task feedback
* Codelet feedback:: Per-codelet feedback
* Worker feedback:: Per-worker feedback
* Bus feedback:: Bus-related feedback
@end menu
@node Enabling monitoring
@subsection Enabling on-line performance monitoring

In order to enable online performance monitoring, the application can call
@code{starpu_profiling_status_set(STARPU_PROFILING_ENABLE)}. It is possible to
detect whether monitoring is already enabled or not by calling
@code{starpu_profiling_status_get()}. Enabling monitoring also reinitializes all
previously collected feedback. The @code{STARPU_PROFILING} environment variable
can also be set to 1 to achieve the same effect.

Likewise, performance monitoring is stopped by calling
@code{starpu_profiling_status_set(STARPU_PROFILING_DISABLE)}. Note that this
does not reset the performance counters so that the application may consult
them later on.
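A minimal sketch of toggling monitoring around a region of interest:

@cartouche
@smallexample
/* Enable performance monitoring */
starpu_profiling_status_set(STARPU_PROFILING_ENABLE);

/* ... submit tasks and wait for their termination ... */

/* Stop monitoring; the collected counters remain available */
starpu_profiling_status_set(STARPU_PROFILING_DISABLE);
@end smallexample
@end cartouche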
More details about the performance monitoring API are available in section
@ref{Profiling API}.
@node Task feedback
@subsection Per-task feedback

If profiling is enabled, a pointer to a @code{starpu_task_profiling_info}
structure is put in the @code{.profiling_info} field of the @code{starpu_task}
structure when a task terminates.
This structure is automatically destroyed when the task structure is destroyed,
either automatically or by calling @code{starpu_task_destroy}.

The @code{starpu_task_profiling_info} structure indicates the date when the
task was submitted (@code{submit_time}), started (@code{start_time}), and
terminated (@code{end_time}), relative to the initialization of
StarPU with @code{starpu_init}. It also specifies the identifier of the worker
that has executed the task (@code{workerid}).
These dates are stored as @code{timespec} structures which the user may convert
into micro-seconds using the @code{starpu_timing_timespec_to_us} helper
function.

It is worth noting that the application may directly access this structure from
the callback executed at the end of the task. The @code{starpu_task} structure
associated to the callback currently being executed is indeed accessible with
the @code{starpu_get_current_task()} function.
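For instance, a callback could report how long the task it is attached to took
(a minimal sketch, assuming profiling was enabled):

@cartouche
@smallexample
void callback_func(void *arg)
@{
    struct starpu_task *task = starpu_get_current_task();
    struct starpu_task_profiling_info *info = task->profiling_info;

    double length = starpu_timing_timespec_delay_us(&info->start_time,
                                                    &info->end_time);
    fprintf(stderr, "task took %.2lf us on worker %d\n",
            length, info->workerid);
@}
@end smallexample
@end cartouche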
@node Codelet feedback
@subsection Per-codelet feedback

The @code{per_worker_stats} field of the @code{starpu_codelet_t} structure is
an array of counters. The i-th entry of the array is incremented every time a
task implementing the codelet is executed on the i-th worker.
This array is not reinitialized when profiling is enabled or disabled, and can
be displayed as sketched below.
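As a sketch (assuming a codelet @code{cl}; the cast accounts for the integer
type of the counters):

@cartouche
@smallexample
int worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
    fprintf(stderr, "%lu task(s) executed on worker %d\n",
            (unsigned long) cl.per_worker_stats[worker], worker);
@end smallexample
@end cartouche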
@node Worker feedback
@subsection Per-worker feedback

The second argument returned by the @code{starpu_worker_get_profiling_info}
function is a @code{starpu_worker_profiling_info} structure that gives
statistics about the specified worker. This structure specifies when StarPU
started collecting profiling information for that worker (@code{start_time}),
the duration of the profiling measurement interval (@code{total_time}), the
time spent executing kernels (@code{executing_time}), the time spent sleeping
because there is no task to execute at all (@code{sleeping_time}), and the
number of tasks that were executed while profiling was enabled.
These values give an estimation of the proportion of time spent doing real work,
and the time spent either sleeping because there are not enough executable
tasks or simply wasted in pure StarPU overhead.

Calling @code{starpu_worker_get_profiling_info} resets the profiling
information associated to a worker.

When an FxT trace is generated (see @ref{Generating traces}), it is also
possible to use the @code{starpu_top} script (described in @ref{starpu-top}) to
generate a graphic showing the evolution of these values over time, for
the different workers.
@node Bus feedback
@subsection Bus-related feedback

TODO

@c how to enable/disable performance monitoring
@c what kind of information do we get ?

@node Off-line
@section Off-line performance feedback

@menu
* Generating traces:: Generating traces with FxT
* Gantt diagram:: Creating a Gantt Diagram
* DAG:: Creating a DAG with graphviz
* starpu-top:: Monitoring activity
@end menu
@node Generating traces
@subsection Generating traces with FxT

StarPU can use the FxT library (see
@indicateurl{https://savannah.nongnu.org/projects/fkt/}) to generate traces
with a limited runtime overhead.

You can either get the FxT library from CVS (autotools are required):

@example
% cvs -d :pserver:anonymous@@cvs.sv.gnu.org:/sources/fkt co FxT
% ./bootstrap
@end example

If autotools are not available on your machine, or if you prefer it,
FxT's code is also available as a tarball:

@example
% wget http://download.savannah.gnu.org/releases/fkt/fxt-0.2.2.tar.gz
@end example

Compiling and installing the FxT library in the @code{$FXTDIR} path is
done following the standard procedure:

@example
% ./configure --prefix=$FXTDIR
% make
% make install
@end example

In order to have StarPU generate traces, StarPU should be configured with
the @code{--with-fxt} option:

@example
$ ./configure --with-fxt=$FXTDIR
@end example

When FxT is enabled, a trace is generated when StarPU is terminated by calling
@code{starpu_shutdown()}. The trace is a binary file whose name has the form
@code{prof_file_XXX_YYY} where @code{XXX} is the user name, and
@code{YYY} is the pid of the process that used StarPU. This file is saved in the
@code{/tmp/} directory by default, or in the directory specified by
the @code{STARPU_FXT_PREFIX} environment variable.
@node Gantt diagram
@subsection Creating a Gantt Diagram

When the FxT trace file @code{filename} has been generated, it is possible to
generate a trace in the Paje format by calling:

@example
% starpu_fxt_tool -i filename
@end example

Alternatively, setting the @code{STARPU_GENERATE_TRACE} environment variable
to 1 before application execution will make StarPU do it automatically at
application shutdown.

This will create a @code{paje.trace} file in the current directory that can be
inspected with the open-source trace visualizer ViTE. More information
about ViTE is available at @indicateurl{http://vite.gforge.inria.fr/}. It is
possible to open the @code{paje.trace} file with ViTE by using the following
command:

@example
% vite paje.trace
@end example
@node DAG
@subsection Creating a DAG with graphviz

When the FxT trace file @code{filename} has been generated, it is possible to
generate a task graph in the DOT format by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create a @code{dag.dot} file in the current directory. This file is a
task graph described using the DOT language. It is possible to get a
graphical output of the graph by using the graphviz library:

@example
$ dot -Tpdf dag.dot -o output.pdf
@end example
@node starpu-top
@subsection Monitoring activity

When the FxT trace file @code{filename} has been generated, it is possible to
generate an activity trace by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create an @code{activity.data} file in the current
directory. A profile of the application showing the activity of StarPU
during the execution of the program can be generated:

@example
$ starpu_top.sh activity.data
@end example

This will create a file named @code{activity.eps} in the current directory.
This picture is composed of two parts.
The first part shows the activity of the different workers. The green sections
indicate which proportion of the time was spent executing kernels on the
processing unit. The red sections indicate the proportion of time spent in
StarPU: an important overhead may indicate that the granularity is too
low, and that bigger tasks may be needed to use the processing unit more
efficiently. The black sections indicate that the processing unit was blocked
because there was no task to process: this may indicate a lack of parallelism,
which may be alleviated by creating more tasks when possible.

The second part of the @code{activity.eps} picture is a graph showing the
evolution of the number of tasks available in the system during the execution.
Ready tasks are shown in black, and tasks that are submitted but not
schedulable yet are shown in grey.
@node Codelet performance
@section Performance of codelets

The performance model of codelets can be examined by using the
@code{starpu_perfmodel_display} tool:

@example
$ starpu_perfmodel_display -l
file: <malloc_pinned.hannibal>
file: <starpu_slu_lu_model_21.hannibal>
file: <starpu_slu_lu_model_11.hannibal>
file: <starpu_slu_lu_model_22.hannibal>
file: <starpu_slu_lu_model_12.hannibal>
@end example

Here, the codelets of the lu example are available. We can examine the
performance of the 22 kernel:

@example
$ starpu_perfmodel_display -s starpu_slu_lu_model_22
performance model for cpu
# hash      size       mean          dev           n
57618ab0    19660800   2.851069e+05  1.829369e+04  109
performance model for cuda_0
# hash      size       mean          dev           n
57618ab0    19660800   1.164144e+04  1.556094e+01  315
performance model for cuda_1
# hash      size       mean          dev           n
57618ab0    19660800   1.164271e+04  1.330628e+01  360
performance model for cuda_2
# hash      size       mean          dev           n
57618ab0    19660800   1.166730e+04  3.390395e+02  456
@end example

We can see that for the given size, over a sample of a few hundred
executions, the GPUs are about 20 times faster than the CPUs (numbers are in
us). The standard deviation is extremely low for the GPUs, and less than 10% for
the CPUs.
@c ---------------------------------------------------------------------
@c MPI support
@c ---------------------------------------------------------------------

@node StarPU MPI support
@chapter StarPU MPI support

TODO: document include/starpu_mpi.h and explain a simple example (pingpong?)
@c ---------------------------------------------------------------------
@c Configuration options
@c ---------------------------------------------------------------------

@node Configuring StarPU
@chapter Configuring StarPU

@menu
* Compilation configuration::
* Execution configuration through environment variables::
@end menu

@node Compilation configuration
@section Compilation configuration

The following arguments can be given to the @code{configure} script.

@menu
* Common configuration::
* Configuring workers::
* Advanced configuration::
@end menu

@node Common configuration
@subsection Common configuration

@menu
* --enable-debug::
* --enable-fast::
* --enable-verbose::
* --enable-coverage::
@end menu
@node --enable-debug
@subsubsection @code{--enable-debug}
@table @asis

@item @emph{Description}:
Enable debugging messages.
@end table

@node --enable-fast
@subsubsection @code{--enable-fast}
@table @asis

@item @emph{Description}:
Do not enforce assertions; this saves a lot of time otherwise spent computing them.
@end table

@node --enable-verbose
@subsubsection @code{--enable-verbose}
@table @asis

@item @emph{Description}:
Augment the verbosity of the debugging messages. This can be disabled
at runtime by setting the environment variable @code{STARPU_SILENT} to
any value.

@smallexample
% STARPU_SILENT=1 ./vector_scal
@end smallexample
@end table

@node --enable-coverage
@subsubsection @code{--enable-coverage}
@table @asis

@item @emph{Description}:
Enable flags for the @code{gcov} coverage tool.
@end table
@node Configuring workers
@subsection Configuring workers

@menu
* --enable-nmaxcpus::
* --disable-cpu::
* --enable-maxcudadev::
* --disable-cuda::
* --with-cuda-dir::
* --with-cuda-include-dir::
* --with-cuda-lib-dir::
* --enable-maxopencldev::
* --disable-opencl::
* --with-opencl-dir::
* --with-opencl-include-dir::
* --with-opencl-lib-dir::
* --enable-gordon::
* --with-gordon-dir::
@end menu

@node --enable-nmaxcpus
@subsubsection @code{--enable-nmaxcpus=<number>}
@table @asis

@item @emph{Description}:
Defines the maximum number of CPU cores that StarPU will support, then
available as the @code{STARPU_NMAXCPUS} macro.
@end table

@node --disable-cpu
@subsubsection @code{--disable-cpu}
@table @asis

@item @emph{Description}:
Disable the use of the CPUs of the machine. Only GPUs etc. will be used.
@end table

@node --enable-maxcudadev
@subsubsection @code{--enable-maxcudadev=<number>}
@table @asis

@item @emph{Description}:
Defines the maximum number of CUDA devices that StarPU will support, then
available as the @code{STARPU_MAXCUDADEVS} macro.
@end table

@node --disable-cuda
@subsubsection @code{--disable-cuda}
@table @asis

@item @emph{Description}:
Disable the use of CUDA, even if a valid CUDA installation was detected.
@end table

@node --with-cuda-dir
@subsubsection @code{--with-cuda-dir=<path>}
@table @asis

@item @emph{Description}:
Specify the directory where CUDA is installed. This directory should notably contain
@code{include/cuda.h}.
@end table

@node --with-cuda-include-dir
@subsubsection @code{--with-cuda-include-dir=<path>}
@table @asis

@item @emph{Description}:
Specify the directory where CUDA headers are installed. This directory should
notably contain @code{cuda.h}. This defaults to @code{/include} appended to the
value given to @code{--with-cuda-dir}.
@end table

@node --with-cuda-lib-dir
@subsubsection @code{--with-cuda-lib-dir=<path>}
@table @asis

@item @emph{Description}:
Specify the directory where the CUDA library is installed. This directory should
notably contain the CUDA shared libraries (e.g. libcuda.so). This defaults to
@code{/lib} appended to the value given to @code{--with-cuda-dir}.
@end table

@node --enable-maxopencldev
@subsubsection @code{--enable-maxopencldev=<number>}
@table @asis

@item @emph{Description}:
Defines the maximum number of OpenCL devices that StarPU will support, then
available as the @code{STARPU_MAXOPENCLDEVS} macro.
@end table

@node --disable-opencl
@subsubsection @code{--disable-opencl}
@table @asis

@item @emph{Description}:
Disable the use of OpenCL, even if the SDK is detected.
@end table

@node --with-opencl-dir
@subsubsection @code{--with-opencl-dir=<path>}
@table @asis

@item @emph{Description}:
Specify the location of the OpenCL SDK. This directory should notably contain
@code{include/CL/cl.h} (or @code{include/OpenCL/cl.h} on Mac OS).
@end table

@node --with-opencl-include-dir
@subsubsection @code{--with-opencl-include-dir=<path>}
@table @asis

@item @emph{Description}:
Specify the location of OpenCL headers. This directory should notably contain
@code{CL/cl.h} (or @code{OpenCL/cl.h} on Mac OS). This defaults to
@code{/include} appended to the value given to @code{--with-opencl-dir}.
@end table

@node --with-opencl-lib-dir
@subsubsection @code{--with-opencl-lib-dir=<path>}
@table @asis

@item @emph{Description}:
Specify the location of the OpenCL library. This directory should notably
contain the OpenCL shared libraries (e.g. libOpenCL.so). This defaults to
@code{/lib} appended to the value given to @code{--with-opencl-dir}.
@end table

@node --enable-gordon
@subsubsection @code{--enable-gordon}
@table @asis

@item @emph{Description}:
Enable the use of the Gordon runtime for Cell SPUs.
@c TODO: rather default to enabled when detected
@end table

@node --with-gordon-dir
@subsubsection @code{--with-gordon-dir=<path>}
@table @asis

@item @emph{Description}:
Specify the location of the Gordon SDK.
@end table
@node Advanced configuration
@subsection Advanced configuration

@menu
* --enable-perf-debug::
* --enable-model-debug::
* --enable-stats::
* --enable-maxbuffers::
* --enable-allocation-cache::
* --enable-opengl-render::
* --enable-blas-lib::
* --with-magma::
* --with-fxt::
* --with-perf-model-dir::
* --with-mpicc::
* --with-goto-dir::
* --with-atlas-dir::
* --with-mkl-cflags::
* --with-mkl-ldflags::
@end menu

@node --enable-perf-debug
@subsubsection @code{--enable-perf-debug}
@table @asis

@item @emph{Description}:
Enable performance debugging.
@end table

@node --enable-model-debug
@subsubsection @code{--enable-model-debug}
@table @asis

@item @emph{Description}:
Enable performance model debugging.
@end table

@node --enable-stats
@subsubsection @code{--enable-stats}
@table @asis

@item @emph{Description}:
Enable statistics.
@end table

@node --enable-maxbuffers
@subsubsection @code{--enable-maxbuffers=<nbuffers>}
@table @asis

@item @emph{Description}:
Define the maximum number of buffers that tasks will be able to take
as parameters, then available as the @code{STARPU_NMAXBUFS} macro.
@end table

@node --enable-allocation-cache
@subsubsection @code{--enable-allocation-cache}
@table @asis

@item @emph{Description}:
Enable the use of a data allocation cache to avoid the cost of repeated
allocations with CUDA. Still experimental.
@end table

@node --enable-opengl-render
@subsubsection @code{--enable-opengl-render}
@table @asis

@item @emph{Description}:
Enable the use of OpenGL for the rendering of some examples.
@c TODO: rather default to enabled when detected
@end table

@node --enable-blas-lib
@subsubsection @code{--enable-blas-lib=<name>}
@table @asis

@item @emph{Description}:
Specify the BLAS library to be used by some of the examples. The
library has to be 'atlas' or 'goto'.
@end table

@node --with-magma
@subsubsection @code{--with-magma=<path>}
@table @asis

@item @emph{Description}:
Specify where MAGMA is installed. This directory should notably contain
@code{include/magmablas.h}.
@end table

@node --with-fxt
@subsubsection @code{--with-fxt=<path>}
@table @asis

@item @emph{Description}:
Specify the location of FxT (for generating traces and rendering them
using ViTE). This directory should notably contain
@code{include/fxt/fxt.h}.
@c TODO add ref to other section
@end table

@node --with-perf-model-dir
@subsubsection @code{--with-perf-model-dir=<dir>}
@table @asis

@item @emph{Description}:
Specify where performance models should be stored (instead of defaulting to the
current user's home).
@end table

@node --with-mpicc
@subsubsection @code{--with-mpicc=<path to mpicc>}
@table @asis

@item @emph{Description}:
Specify the location of the @code{mpicc} compiler to be used for starpumpi.
@end table

@node --with-goto-dir
@subsubsection @code{--with-goto-dir=<dir>}
@table @asis

@item @emph{Description}:
Specify the location of GotoBLAS.
@end table

@node --with-atlas-dir
@subsubsection @code{--with-atlas-dir=<dir>}
@table @asis

@item @emph{Description}:
Specify the location of ATLAS. This directory should notably contain
@code{include/cblas.h}.
@end table

@node --with-mkl-cflags
@subsubsection @code{--with-mkl-cflags=<cflags>}
@table @asis

@item @emph{Description}:
Specify the compilation flags for the MKL Library.
@end table

@node --with-mkl-ldflags
@subsubsection @code{--with-mkl-ldflags=<ldflags>}
@table @asis

@item @emph{Description}:
Specify the linking flags for the MKL Library. Note that the
@url{http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor/}
website provides a script to determine the linking flags.
@end table
@c ---------------------------------------------------------------------
@c Environment variables
@c ---------------------------------------------------------------------

@node Execution configuration through environment variables
@section Execution configuration through environment variables

@menu
* Workers:: Configuring workers
* Scheduling:: Configuring the Scheduling engine
* Misc:: Miscellaneous and debug
@end menu

Note: the values given in the @code{starpu_conf} structure passed when
calling @code{starpu_init} will override the values of the environment
variables.

@node Workers
@subsection Configuring workers

@menu
* STARPU_NCPUS:: Number of CPU workers
* STARPU_NCUDA:: Number of CUDA workers
* STARPU_NOPENCL:: Number of OpenCL workers
* STARPU_NGORDON:: Number of SPU workers (Cell)
* STARPU_WORKERS_CPUID:: Bind workers to specific CPUs
* STARPU_WORKERS_CUDAID:: Select specific CUDA devices
* STARPU_WORKERS_OPENCLID:: Select specific OpenCL devices
@end menu
@node STARPU_NCPUS
@subsubsection @code{STARPU_NCPUS} -- Number of CPU workers
@table @asis

@item @emph{Description}:
Specify the number of CPU workers (thus not including workers dedicated to control accelerators). Note that by default, StarPU will not allocate
more CPU workers than there are physical CPUs, and that some CPUs are used to control
the accelerators.
@end table
@node STARPU_NCUDA
@subsubsection @code{STARPU_NCUDA} -- Number of CUDA workers
@table @asis

@item @emph{Description}:
Specify the number of CUDA devices that StarPU can use. If
@code{STARPU_NCUDA} is lower than the number of physical devices, it is
possible to select which CUDA devices should be used by the means of the
@code{STARPU_WORKERS_CUDAID} environment variable. By default, StarPU will
create as many CUDA workers as there are CUDA devices.
@end table

@node STARPU_NOPENCL
@subsubsection @code{STARPU_NOPENCL} -- Number of OpenCL workers
@table @asis

@item @emph{Description}:
OpenCL equivalent of the @code{STARPU_NCUDA} environment variable.
@end table

@node STARPU_NGORDON
@subsubsection @code{STARPU_NGORDON} -- Number of SPU workers (Cell)
@table @asis

@item @emph{Description}:
Specify the number of SPUs that StarPU can use.
@end table

@node STARPU_WORKERS_CPUID
@subsubsection @code{STARPU_WORKERS_CPUID} -- Bind workers to specific CPUs
@table @asis

@item @emph{Description}:
Passing an array of integers (starting from 0) in @code{STARPU_WORKERS_CPUID}
specifies on which logical CPU the different workers should be
bound. For instance, if @code{STARPU_WORKERS_CPUID = "0 1 4 5"}, the first
worker will be bound to logical CPU #0, the second CPU worker will be bound to
logical CPU #1 and so on. Note that the logical ordering of the CPUs is either
determined by the OS, or provided by the @code{hwloc} library in case it is
available.

Note that the first workers correspond to the CUDA workers, then come the
OpenCL and the SPU, and finally the CPU workers. For example if
we have @code{STARPU_NCUDA=1}, @code{STARPU_NOPENCL=1}, @code{STARPU_NCPUS=2}
and @code{STARPU_WORKERS_CPUID = "0 2 1 3"}, the CUDA device will be controlled
by logical CPU #0, the OpenCL device will be controlled by logical CPU #2, and
the logical CPUs #1 and #3 will be used by the CPU workers.

If the number of workers is larger than the array given in
@code{STARPU_WORKERS_CPUID}, the workers are bound to the logical CPUs in a
round-robin fashion: if @code{STARPU_WORKERS_CPUID = "0 1"}, the first and the
third (resp. second and fourth) workers will be put on CPU #0 (resp. CPU #1).

This variable is ignored if the @code{use_explicit_workers_bindid} flag of the
@code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table

@node STARPU_WORKERS_CUDAID
@subsubsection @code{STARPU_WORKERS_CUDAID} -- Select specific CUDA devices
@table @asis

@item @emph{Description}:
Similarly to the @code{STARPU_WORKERS_CPUID} environment variable, it is
possible to select which CUDA devices should be used by StarPU. On a machine
equipped with 4 GPUs, setting @code{STARPU_WORKERS_CUDAID = "1 3"} and
@code{STARPU_NCUDA=2} specifies that 2 CUDA workers should be created, and that
they should use CUDA devices #1 and #3 (the logical ordering of the devices is
the one reported by CUDA).

This variable is ignored if the @code{use_explicit_workers_cuda_gpuid} flag of
the @code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table

@node STARPU_WORKERS_OPENCLID
@subsubsection @code{STARPU_WORKERS_OPENCLID} -- Select specific OpenCL devices
@table @asis

@item @emph{Description}:
OpenCL equivalent of the @code{STARPU_WORKERS_CUDAID} environment variable.

This variable is ignored if the @code{use_explicit_workers_opencl_gpuid} flag of
the @code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table
@node Scheduling
@subsection Configuring the Scheduling engine

@menu
* STARPU_SCHED:: Scheduling policy
* STARPU_CALIBRATE:: Calibrate performance models
* STARPU_PREFETCH:: Use data prefetch
* STARPU_SCHED_ALPHA:: Computation factor
* STARPU_SCHED_BETA:: Communication factor
@end menu

@node STARPU_SCHED
@subsubsection @code{STARPU_SCHED} -- Scheduling policy
@table @asis

@item @emph{Description}:
This chooses between the different scheduling policies proposed by StarPU: work
stealing, random, greedy, with performance models, etc.

Use @code{STARPU_SCHED=help} to get the list of available schedulers.
@end table

@node STARPU_CALIBRATE
@subsubsection @code{STARPU_CALIBRATE} -- Calibrate performance models
@table @asis

@item @emph{Description}:
If this variable is set to 1, the performance models are calibrated during
the execution. If it is set to 2, the previous values are dropped to restart
calibration from scratch. Setting this variable to 0 disables calibration; this
is the default behaviour.

Note: this currently only applies to the dm, dmda and heft scheduling policies.
@end table
@node STARPU_PREFETCH
@subsubsection @code{STARPU_PREFETCH} -- Use data prefetch
@table @asis

@item @emph{Description}:
This variable indicates whether data prefetching should be enabled (0 means
that it is disabled). If prefetching is enabled, when a task is scheduled to be
executed e.g. on a GPU, StarPU will request an asynchronous transfer in
advance, so that data is already present on the GPU when the task starts. As a
result, computation and data transfers are overlapped.
Note that prefetching is enabled by default in StarPU.
@end table

@node STARPU_SCHED_ALPHA
@subsubsection @code{STARPU_SCHED_ALPHA} -- Computation factor
@table @asis

@item @emph{Description}:
To estimate the cost of a task StarPU takes into account the estimated
computation time (obtained thanks to performance models). The alpha factor is
the coefficient to be applied to it before adding it to the communication part.
@end table

@node STARPU_SCHED_BETA
@subsubsection @code{STARPU_SCHED_BETA} -- Communication factor
@table @asis

@item @emph{Description}:
To estimate the cost of a task StarPU takes into account the estimated
data transfer time (obtained thanks to performance models). The beta factor is
the coefficient to be applied to it before adding it to the computation part.
@end table
@node Misc
@subsection Miscellaneous and debug

@menu
* STARPU_SILENT:: Disable verbose mode
* STARPU_LOGFILENAME:: Select debug file name
* STARPU_FXT_PREFIX:: FxT trace location
* STARPU_LIMIT_GPU_MEM:: Restrict memory size on the GPUs
* STARPU_GENERATE_TRACE:: Generate a Paje trace when StarPU is shut down
@end menu

@node STARPU_SILENT
@subsubsection @code{STARPU_SILENT} -- Disable verbose mode
@table @asis

@item @emph{Description}:
This variable allows verbose mode to be disabled at runtime when StarPU
has been configured with the option @code{--enable-verbose}.
@end table

@node STARPU_LOGFILENAME
@subsubsection @code{STARPU_LOGFILENAME} -- Select debug file name
@table @asis

@item @emph{Description}:
This variable specifies in which file the debugging output should be saved.
@end table

@node STARPU_FXT_PREFIX
@subsubsection @code{STARPU_FXT_PREFIX} -- FxT trace location
@table @asis

@item @emph{Description}
This variable specifies in which directory to save the trace generated if FxT is enabled.
@end table

@node STARPU_LIMIT_GPU_MEM
@subsubsection @code{STARPU_LIMIT_GPU_MEM} -- Restrict memory size on the GPUs
@table @asis

@item @emph{Description}
This variable specifies the maximum number of megabytes that should be
available to the application on each GPU. In case this value is smaller than
the size of the memory of a GPU, StarPU pre-allocates a buffer to waste memory
on the device. This variable is intended to be used for experimental purposes
as it emulates devices that have a limited amount of memory.
@end table

@node STARPU_GENERATE_TRACE
@subsubsection @code{STARPU_GENERATE_TRACE} -- Generate a Paje trace when StarPU is shut down
@table @asis

@item @emph{Description}
When set to 1, this variable indicates that StarPU should automatically
generate a Paje trace when @code{starpu_shutdown} is called.
@end table
  1953. @c ---------------------------------------------------------------------
  1954. @c StarPU API
  1955. @c ---------------------------------------------------------------------
  1956. @node StarPU API
  1957. @chapter StarPU API
  1958. @menu
  1959. * Initialization and Termination:: Initialization and Termination methods
  1960. * Workers' Properties:: Methods to enumerate workers' properties
  1961. * Data Library:: Methods to manipulate data
  1962. * Data Interfaces::
  1963. * Data Partition::
  1964. * Codelets and Tasks:: Methods to construct tasks
  1965. * Explicit Dependencies:: Explicit Dependencies
  1966. * Implicit Data Dependencies:: Implicit Data Dependencies
  1967. * Performance Model API::
  1968. * Profiling API:: Profiling API
  1969. * CUDA extensions:: CUDA extensions
  1970. * OpenCL extensions:: OpenCL extensions
  1971. * Cell extensions:: Cell extensions
  1972. * Miscellaneous helpers::
  1973. @end menu
  1974. @node Initialization and Termination
  1975. @section Initialization and Termination
  1976. @menu
  1977. * starpu_init:: Initialize StarPU
  1978. * struct starpu_conf:: StarPU runtime configuration
  1979. * starpu_conf_init:: Initialize starpu_conf structure
  1980. * starpu_shutdown:: Terminate StarPU
  1981. @end menu
  1982. @node starpu_init
  1983. @subsection @code{starpu_init} -- Initialize StarPU
  1984. @table @asis
  1985. @item @emph{Description}:
This is StarPU's initialization method, which must be called prior to any other
StarPU call. It is possible to specify StarPU's configuration (e.g. scheduling
policy, number of cores, etc.) by passing a non-null argument. The default
configuration is used if the passed argument is @code{NULL}.
  1990. @item @emph{Return value}:
  1991. Upon successful completion, this function returns 0. Otherwise, @code{-ENODEV}
  1992. indicates that no worker was available (so that StarPU was not initialized).
  1993. @item @emph{Prototype}:
  1994. @code{int starpu_init(struct starpu_conf *conf);}
  1995. @end table
  1996. @node struct starpu_conf
  1997. @subsection @code{struct starpu_conf} -- StarPU runtime configuration
  1998. @table @asis
  1999. @item @emph{Description}:
  2000. This structure is passed to the @code{starpu_init} function in order
  2001. to configure StarPU.
  2002. When the default value is used, StarPU automatically selects the number
  2003. of processing units and takes the default scheduling policy. This parameter
  2004. overwrites the equivalent environment variables.
  2005. @item @emph{Fields}:
  2006. @table @asis
  2007. @item @code{sched_policy_name} (default = NULL):
  2008. This is the name of the scheduling policy. This can also be specified with the
  2009. @code{STARPU_SCHED} environment variable.
  2010. @item @code{sched_policy} (default = NULL):
  2011. This is the definition of the scheduling policy. This field is ignored
  2012. if @code{sched_policy_name} is set.
  2013. @item @code{ncpus} (default = -1):
  2014. This is the number of CPU cores that StarPU can use. This can also be
  2015. specified with the @code{STARPU_NCPUS} environment variable.
  2016. @item @code{ncuda} (default = -1):
  2017. This is the number of CUDA devices that StarPU can use. This can also be
  2018. specified with the @code{STARPU_NCUDA} environment variable.
  2019. @item @code{nopencl} (default = -1):
  2020. This is the number of OpenCL devices that StarPU can use. This can also be
  2021. specified with the @code{STARPU_NOPENCL} environment variable.
  2022. @item @code{nspus} (default = -1):
  2023. This is the number of Cell SPUs that StarPU can use. This can also be
  2024. specified with the @code{STARPU_NGORDON} environment variable.
  2025. @item @code{use_explicit_workers_bindid} (default = 0)
  2026. If this flag is set, the @code{workers_bindid} array indicates where the
  2027. different workers are bound, otherwise StarPU automatically selects where to
  2028. bind the different workers unless the @code{STARPU_WORKERS_CPUID} environment
  2029. variable is set. The @code{STARPU_WORKERS_CPUID} environment variable is
  2030. ignored if the @code{use_explicit_workers_bindid} flag is set.
  2031. @item @code{workers_bindid[STARPU_NMAXWORKERS]}
  2032. If the @code{use_explicit_workers_bindid} flag is set, this array indicates
  2033. where to bind the different workers. The i-th entry of the
  2034. @code{workers_bindid} indicates the logical identifier of the processor which
  2035. should execute the i-th worker. Note that the logical ordering of the CPUs is
  2036. either determined by the OS, or provided by the @code{hwloc} library in case it
  2037. is available.
  2038. When this flag is set, the @ref{STARPU_WORKERS_CPUID} environment variable is
  2039. ignored.
  2040. @item @code{use_explicit_workers_cuda_gpuid} (default = 0)
  2041. If this flag is set, the CUDA workers will be attached to the CUDA devices
specified in the @code{workers_cuda_gpuid} array. Otherwise, StarPU assigns the
CUDA devices in a round-robin fashion.
  2044. When this flag is set, the @ref{STARPU_WORKERS_CUDAID} environment variable is
  2045. ignored.
  2046. @item @code{workers_cuda_gpuid[STARPU_NMAXWORKERS]}
  2047. If the @code{use_explicit_workers_cuda_gpuid} flag is set, this array contains
  2048. the logical identifiers of the CUDA devices (as used by @code{cudaGetDevice}).
  2049. @item @code{use_explicit_workers_opencl_gpuid} (default = 0)
  2050. If this flag is set, the OpenCL workers will be attached to the OpenCL devices
specified in the @code{workers_opencl_gpuid} array. Otherwise, StarPU assigns the
OpenCL devices in a round-robin fashion.
@item @code{workers_opencl_gpuid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_opencl_gpuid} flag is set, this array
contains the logical identifiers of the OpenCL devices.
  2054. @item @code{calibrate} (default = 0):
  2055. If this flag is set, StarPU will calibrate the performance models when
  2056. executing tasks. If this value is equal to -1, the default value is used. The
  2057. default value is overwritten by the @code{STARPU_CALIBRATE} environment
  2058. variable when it is set.
  2059. @end table
  2060. @end table
  2061. @node starpu_conf_init
  2062. @subsection @code{starpu_conf_init} -- Initialize starpu_conf structure
  2063. @table @asis
@item @emph{Description}:
This function initializes the @code{starpu_conf} structure passed as argument
with the default values. In case some configuration parameters are already
specified through environment variables, @code{starpu_conf_init} initializes
the fields of the structure according to the environment variables. For
instance if @code{STARPU_CALIBRATE} is set, its value is put in the
@code{.calibrate} field of the structure passed as argument.
  2070. @item @emph{Return value}:
  2071. Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
  2072. indicates that the argument was NULL.
  2073. @item @emph{Prototype}:
  2074. @code{int starpu_conf_init(struct starpu_conf *conf);}
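@item @emph{Example}:
A minimal sketch: initialize the structure with default values, override one
field (the value 2 for @code{ncpus} is an arbitrary illustration), and pass it
to @code{starpu_init}.
@cartouche
@smallexample
struct starpu_conf conf;
starpu_conf_init(&conf);
conf.ncpus = 2;  /* use at most two CPU workers */
if (starpu_init(&conf) != 0)
    return 1;    /* -ENODEV: no worker is available */
/* ... submit tasks ... */
starpu_shutdown();
@end smallexample
@end cartouche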
  2075. @end table
  2076. @node starpu_shutdown
  2077. @subsection @code{starpu_shutdown} -- Terminate StarPU
  2078. @table @asis
  2079. @item @emph{Description}:
This is StarPU's termination method. It must be called at the end of the
  2081. application: statistics and other post-mortem debugging information are not
  2082. guaranteed to be available until this method has been called.
  2083. @item @emph{Prototype}:
  2084. @code{void starpu_shutdown(void);}
  2085. @end table
  2086. @node Workers' Properties
  2087. @section Workers' Properties
  2088. @menu
  2089. * starpu_worker_get_count:: Get the number of processing units
  2090. * starpu_worker_get_count_by_type:: Get the number of processing units of a given type
* starpu_cpu_worker_get_count:: Get the number of CPUs controlled by StarPU
  2092. * starpu_cuda_worker_get_count:: Get the number of CUDA devices controlled by StarPU
  2093. * starpu_opencl_worker_get_count:: Get the number of OpenCL devices controlled by StarPU
  2094. * starpu_spu_worker_get_count:: Get the number of Cell SPUs controlled by StarPU
  2095. * starpu_worker_get_id:: Get the identifier of the current worker
  2096. * starpu_worker_get_ids_by_type:: Get the list of identifiers of workers with a given type
  2097. * starpu_worker_get_devid:: Get the device identifier of a worker
  2098. * starpu_worker_get_type:: Get the type of processing unit associated to a worker
  2099. * starpu_worker_get_name:: Get the name of a worker
  2100. * starpu_worker_get_memory_node:: Get the memory node of a worker
  2101. @end menu
  2102. @node starpu_worker_get_count
  2103. @subsection @code{starpu_worker_get_count} -- Get the number of processing units
  2104. @table @asis
  2105. @item @emph{Description}:
  2106. This function returns the number of workers (i.e. processing units executing
  2107. StarPU tasks). The returned value should be at most @code{STARPU_NMAXWORKERS}.
  2108. @item @emph{Prototype}:
  2109. @code{unsigned starpu_worker_get_count(void);}
  2110. @end table
  2111. @node starpu_worker_get_count_by_type
  2112. @subsection @code{starpu_worker_get_count_by_type} -- Get the number of processing units of a given type
  2113. @table @asis
  2114. @item @emph{Description}:
Returns the number of workers of the type indicated by the argument. A positive
(or zero) value is returned on success; otherwise @code{-EINVAL} indicates that
the type is not valid.
  2118. @item @emph{Prototype}:
  2119. @code{int starpu_worker_get_count_by_type(enum starpu_archtype type);}
  2120. @end table
  2121. @node starpu_cpu_worker_get_count
@subsection @code{starpu_cpu_worker_get_count} -- Get the number of CPUs controlled by StarPU
  2123. @table @asis
  2124. @item @emph{Description}:
  2125. This function returns the number of CPUs controlled by StarPU. The returned
  2126. value should be at most @code{STARPU_NMAXCPUS}.
  2127. @item @emph{Prototype}:
  2128. @code{unsigned starpu_cpu_worker_get_count(void);}
  2129. @end table
  2130. @node starpu_cuda_worker_get_count
  2131. @subsection @code{starpu_cuda_worker_get_count} -- Get the number of CUDA devices controlled by StarPU
  2132. @table @asis
  2133. @item @emph{Description}:
  2134. This function returns the number of CUDA devices controlled by StarPU. The returned
  2135. value should be at most @code{STARPU_MAXCUDADEVS}.
  2136. @item @emph{Prototype}:
  2137. @code{unsigned starpu_cuda_worker_get_count(void);}
  2138. @end table
  2139. @node starpu_opencl_worker_get_count
  2140. @subsection @code{starpu_opencl_worker_get_count} -- Get the number of OpenCL devices controlled by StarPU
  2141. @table @asis
  2142. @item @emph{Description}:
  2143. This function returns the number of OpenCL devices controlled by StarPU. The returned
  2144. value should be at most @code{STARPU_MAXOPENCLDEVS}.
  2145. @item @emph{Prototype}:
  2146. @code{unsigned starpu_opencl_worker_get_count(void);}
  2147. @end table
  2148. @node starpu_spu_worker_get_count
  2149. @subsection @code{starpu_spu_worker_get_count} -- Get the number of Cell SPUs controlled by StarPU
  2150. @table @asis
  2151. @item @emph{Description}:
  2152. This function returns the number of Cell SPUs controlled by StarPU.
  2153. @item @emph{Prototype}:
@code{unsigned starpu_spu_worker_get_count(void);}
  2155. @end table
  2156. @node starpu_worker_get_id
  2157. @subsection @code{starpu_worker_get_id} -- Get the identifier of the current worker
  2158. @table @asis
  2159. @item @emph{Description}:
  2160. This function returns the identifier of the worker associated to the calling
  2161. thread. The returned value is either -1 if the current context is not a StarPU
  2162. worker (i.e. when called from the application outside a task or a callback), or
  2163. an integer between 0 and @code{starpu_worker_get_count() - 1}.
  2164. @item @emph{Prototype}:
  2165. @code{int starpu_worker_get_id(void);}
  2166. @end table
  2167. @node starpu_worker_get_ids_by_type
  2168. @subsection @code{starpu_worker_get_ids_by_type} -- Get the list of identifiers of workers with a given type
  2169. @table @asis
  2170. @item @emph{Description}:
Fill the @code{workerids} array with the identifiers of the workers that have
the type indicated in the first argument. The @code{maxsize} argument indicates
the size of the @code{workerids} array. The returned value gives the number of
identifiers that were put in the array. @code{-ERANGE} is returned if
@code{maxsize} is lower than the number of workers with the appropriate type:
in that case, the array is filled with the first @code{maxsize} elements. To
avoid such overflows, the value of @code{maxsize} can be chosen by means of the
@code{starpu_worker_get_count_by_type} function, or by passing a value greater
than or equal to @code{STARPU_NMAXWORKERS}.
  2179. @item @emph{Prototype}:
  2180. @code{int starpu_worker_get_ids_by_type(enum starpu_archtype type, int *workerids, int maxsize);}
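@item @emph{Example}:
A minimal sketch: retrieve the identifiers of all CUDA workers, sizing the
array with @code{STARPU_NMAXWORKERS} as suggested above.
@cartouche
@smallexample
int workerids[STARPU_NMAXWORKERS];
int nworkers = starpu_worker_get_ids_by_type(STARPU_CUDA_WORKER,
                                             workerids, STARPU_NMAXWORKERS);
@end smallexample
@end cartouche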
  2181. @end table
  2182. @node starpu_worker_get_devid
  2183. @subsection @code{starpu_worker_get_devid} -- Get the device identifier of a worker
  2184. @table @asis
  2185. @item @emph{Description}:
This function returns the device id of the worker associated to an identifier
  2187. (as returned by the @code{starpu_worker_get_id} function). In the case of a
  2188. CUDA worker, this device identifier is the logical device identifier exposed by
  2189. CUDA (used by the @code{cudaGetDevice} function for instance). The device
  2190. identifier of a CPU worker is the logical identifier of the core on which the
  2191. worker was bound; this identifier is either provided by the OS or by the
  2192. @code{hwloc} library in case it is available.
  2193. @item @emph{Prototype}:
  2194. @code{int starpu_worker_get_devid(int id);}
  2195. @end table
  2196. @node starpu_worker_get_type
  2197. @subsection @code{starpu_worker_get_type} -- Get the type of processing unit associated to a worker
  2198. @table @asis
  2199. @item @emph{Description}:
  2200. This function returns the type of worker associated to an identifier (as
  2201. returned by the @code{starpu_worker_get_id} function). The returned value
  2202. indicates the architecture of the worker: @code{STARPU_CPU_WORKER} for a CPU
  2203. core, @code{STARPU_CUDA_WORKER} for a CUDA device,
@code{STARPU_OPENCL_WORKER} for an OpenCL device, and
  2205. @code{STARPU_GORDON_WORKER} for a Cell SPU. The value returned for an invalid
  2206. identifier is unspecified.
  2207. @item @emph{Prototype}:
  2208. @code{enum starpu_archtype starpu_worker_get_type(int id);}
  2209. @end table
  2210. @node starpu_worker_get_name
  2211. @subsection @code{starpu_worker_get_name} -- Get the name of a worker
  2212. @table @asis
  2213. @item @emph{Description}:
StarPU associates a unique human-readable string to each processing unit. This
function copies at most the first @code{maxlen} bytes of the unique string
associated to the worker identified by @code{id} into the @code{dst} buffer.
The caller is responsible for ensuring that @code{dst} is a valid pointer to a
buffer of at least @code{maxlen} bytes. Calling this function on an invalid
identifier results in unspecified behaviour.
  2220. @item @emph{Prototype}:
  2221. @code{void starpu_worker_get_name(int id, char *dst, size_t maxlen);}
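@item @emph{Example}:
A minimal sketch, assuming that at least one worker exists: retrieve the name
of worker 0.
@cartouche
@smallexample
char name[64];
starpu_worker_get_name(0, name, sizeof(name));
printf("worker 0: %s\n", name);
@end smallexample
@end cartouche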
  2222. @end table
  2223. @node starpu_worker_get_memory_node
  2224. @subsection @code{starpu_worker_get_memory_node} -- Get the memory node of a worker
  2225. @table @asis
  2226. @item @emph{Description}:
  2227. This function returns the identifier of the memory node associated to the
  2228. worker identified by @code{workerid}.
  2229. @item @emph{Prototype}:
  2230. @code{unsigned starpu_worker_get_memory_node(unsigned workerid);}
  2231. @end table
  2232. @node Data Library
  2233. @section Data Library
  2234. This section describes the data management facilities provided by StarPU.
  2235. We show how to use existing data interfaces in @ref{Data Interfaces}, but developers can
  2236. design their own data interfaces if required.
  2237. @menu
  2238. * starpu_malloc:: Allocate data and pin it
  2239. * starpu_access_mode:: Data access mode
  2240. * unsigned memory_node:: Memory node
  2241. * starpu_data_handle:: StarPU opaque data handle
  2242. * void *interface:: StarPU data interface
  2243. * starpu_data_register:: Register a piece of data to StarPU
  2244. * starpu_data_unregister:: Unregister a piece of data from StarPU
  2245. * starpu_data_invalidate:: Invalidate all data replicates
  2246. * starpu_data_acquire:: Access registered data from the application
  2247. * starpu_data_acquire_cb:: Access registered data from the application asynchronously
  2248. * starpu_data_release:: Release registered data from the application
  2249. * starpu_data_set_wt_mask:: Set the Write-Through mask
  2250. @end menu
  2251. @node starpu_malloc
  2252. @subsection @code{starpu_malloc} -- Allocate data and pin it
  2253. @table @asis
  2254. @item @emph{Description}:
  2255. This function allocates data of the given size. It will also try to pin it in
CUDA or OpenCL, so that data transfers from this buffer can be asynchronous, and
  2257. thus permit data transfer and computation overlapping. The allocated buffer must
  2258. be freed thanks to the @code{starpu_free} function.
  2259. @item @emph{Prototype}:
  2260. @code{int starpu_malloc(void **A, size_t dim);}
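@item @emph{Example}:
A minimal sketch (@code{NX} is an arbitrary element count): allocate a pinned
buffer and eventually release it with @code{starpu_free}.
@cartouche
@smallexample
float *buffer;
starpu_malloc((void **)&buffer, NX * sizeof(float));
/* ... register the buffer and use it in tasks ... */
starpu_free(buffer);
@end smallexample
@end cartouche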
  2261. @end table
  2262. @node starpu_access_mode
  2263. @subsection @code{starpu_access_mode} -- Data access mode
  2264. This datatype describes a data access mode. The different available modes are:
  2265. @table @asis
  2266. @table @asis
  2267. @item @code{STARPU_R} read-only mode.
  2268. @item @code{STARPU_W} write-only mode.
  2269. @item @code{STARPU_RW} read-write mode. This is equivalent to @code{STARPU_R|STARPU_W}.
@item @code{STARPU_SCRATCH} scratch memory. A temporary buffer is allocated for the task, but StarPU does not enforce data consistency: each device has its own buffer, independently of the others (even for CPUs). This is useful for temporary variables. For now, no behaviour is defined concerning the relation with the @code{STARPU_R}/@code{STARPU_W} modes and the value provided at registration, i.e. the value of the scratch buffer is undefined on entry to the codelet function, but this is being considered for future extensions.
  2271. @end table
  2272. @end table
  2273. @node unsigned memory_node
  2274. @subsection @code{unsigned memory_node} -- Memory node
  2275. @table @asis
  2276. @item @emph{Description}:
  2277. Every worker is associated to a memory node which is a logical abstraction of
  2278. the address space from which the processing unit gets its data. For instance,
  2279. the memory node associated to the different CPU workers represents main memory
(RAM), while the memory node associated to a GPU is the DRAM embedded on the device.
  2281. Every memory node is identified by a logical index which is accessible from the
  2282. @code{starpu_worker_get_memory_node} function. When registering a piece of data
  2283. to StarPU, the specified memory node indicates where the piece of data
  2284. initially resides (we also call this memory node the home node of a piece of
  2285. data).
  2286. @end table
  2287. @node starpu_data_handle
  2288. @subsection @code{starpu_data_handle} -- StarPU opaque data handle
  2289. @table @asis
  2290. @item @emph{Description}:
  2291. StarPU uses @code{starpu_data_handle} as an opaque handle to manage a piece of
  2292. data. Once a piece of data has been registered to StarPU, it is associated to a
  2293. @code{starpu_data_handle} which keeps track of the state of the piece of data
  2294. over the entire machine, so that we can maintain data consistency and locate
  2295. data replicates for instance.
  2296. @end table
  2297. @node void *interface
  2298. @subsection @code{void *interface} -- StarPU data interface
  2299. @table @asis
  2300. @item @emph{Description}:
  2301. Data management is done at a high-level in StarPU: rather than accessing a mere
  2302. list of contiguous buffers, the tasks may manipulate data that are described by
  2303. a high-level construct which we call data interface.
  2304. An example of data interface is the "vector" interface which describes a
contiguous data array on a specific memory node. This interface is a simple
  2306. structure containing the number of elements in the array, the size of the
  2307. elements, and the address of the array in the appropriate address space (this
  2308. address may be invalid if there is no valid copy of the array in the memory
node). More information on the data interfaces provided by StarPU is
  2310. given in @ref{Data Interfaces}.
  2311. When a piece of data managed by StarPU is used by a task, the task
  2312. implementation is given a pointer to an interface describing a valid copy of
  2313. the data that is accessible from the current processing unit.
  2314. @end table
  2315. @node starpu_data_register
  2316. @subsection @code{starpu_data_register} -- Register a piece of data to StarPU
  2317. @table @asis
  2318. @item @emph{Description}:
  2319. Register a piece of data into the handle located at the @code{handleptr}
  2320. address. The @code{interface} buffer contains the initial description of the
  2321. data in the home node. The @code{ops} argument is a pointer to a structure
  2322. describing the different methods used to manipulate this type of interface. See
  2323. @ref{struct starpu_data_interface_ops_t} for more details on this structure.
  2324. If @code{home_node} is -1, StarPU will automatically
  2325. allocate the memory when it is used for the
first time in write-only mode. Once such a data handle has been automatically
  2327. allocated, it is possible to access it using any access mode.
  2328. Note that StarPU supplies a set of predefined types of interface (e.g. vector or
matrix) which can be registered by means of helper functions (e.g.
  2330. @code{starpu_vector_data_register} or @code{starpu_matrix_data_register}).
  2331. @item @emph{Prototype}:
  2332. @code{void starpu_data_register(starpu_data_handle *handleptr,
  2333. uint32_t home_node,
  2334. void *interface,
  2335. struct starpu_data_interface_ops_t *ops);}
  2336. @end table
  2337. @node starpu_data_unregister
  2338. @subsection @code{starpu_data_unregister} -- Unregister a piece of data from StarPU
  2339. @table @asis
  2340. @item @emph{Description}:
  2341. This function unregisters a data handle from StarPU. If the data was
  2342. automatically allocated by StarPU because the home node was -1, all
  2343. automatically allocated buffers are freed. Otherwise, a valid copy of the data
  2344. is put back into the home node in the buffer that was initially registered.
Using a data handle that has been unregistered from StarPU results in undefined
behaviour.
  2347. @item @emph{Prototype}:
  2348. @code{void starpu_data_unregister(starpu_data_handle handle);}
  2349. @end table
  2350. @node starpu_data_invalidate
  2351. @subsection @code{starpu_data_invalidate} -- Invalidate all data replicates
  2352. @table @asis
  2353. @item @emph{Description}:
  2354. Destroy all replicates of the data handle. After data invalidation, the first
access to the handle must be performed in write-only mode. Accessing
invalidated data in read mode results in undefined behaviour.
  2357. @item @emph{Prototype}:
  2358. @code{void starpu_data_invalidate(starpu_data_handle handle);}
  2359. @end table
  2360. @c TODO create a specific sections about user interaction with the DSM ?
  2361. @node starpu_data_acquire
  2362. @subsection @code{starpu_data_acquire} -- Access registered data from the application
  2363. @table @asis
  2364. @item @emph{Description}:
  2365. The application must call this function prior to accessing registered data from
  2366. main memory outside tasks. StarPU ensures that the application will get an
  2367. up-to-date copy of the data in main memory located where the data was
  2368. originally registered, and that all concurrent accesses (e.g. from tasks) will
  2369. be consistent with the access mode specified in the @code{mode} argument.
  2370. @code{starpu_data_release} must be called once the application does not need to
  2371. access the piece of data anymore. Note that implicit data
  2372. dependencies are also enforced by @code{starpu_data_acquire}, i.e.
  2373. @code{starpu_data_acquire} will wait for all tasks scheduled to work on
the data, unless they have been explicitly disabled by calling
  2375. @code{starpu_data_set_default_sequential_consistency_flag} or
  2376. @code{starpu_data_set_sequential_consistency_flag}.
  2377. @code{starpu_data_acquire} is a blocking call, so that it cannot be called from
  2378. tasks or from their callbacks (in that case, @code{starpu_data_acquire} returns
  2379. @code{-EDEADLK}). Upon successful completion, this function returns 0.
  2380. @item @emph{Prototype}:
  2381. @code{int starpu_data_acquire(starpu_data_handle handle, starpu_access_mode mode);}
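@item @emph{Example}:
A minimal sketch (@code{vector_handle} refers to a previously registered piece
of data, as in the vector example of @ref{Data Interfaces}): read the data
from the application.
@cartouche
@smallexample
starpu_data_acquire(vector_handle, STARPU_R);
/* an up-to-date copy can safely be read from main memory here */
starpu_data_release(vector_handle);
@end smallexample
@end cartouche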
  2382. @end table
  2383. @node starpu_data_acquire_cb
  2384. @subsection @code{starpu_data_acquire_cb} -- Access registered data from the application asynchronously
  2385. @table @asis
  2386. @item @emph{Description}:
@code{starpu_data_acquire_cb} is the asynchronous equivalent of
@code{starpu_data_acquire}. When the data specified in the first argument is
  2389. available in the appropriate access mode, the callback function is executed.
  2390. The application may access the requested data during the execution of this
  2391. callback. The callback function must call @code{starpu_data_release} once the
  2392. application does not need to access the piece of data anymore.
  2393. Note that implicit data dependencies are also enforced by
  2394. @code{starpu_data_acquire_cb} in case they are enabled.
  2395. Contrary to @code{starpu_data_acquire}, this function is non-blocking and may
  2396. be called from task callbacks. Upon successful completion, this function
  2397. returns 0.
  2398. @item @emph{Prototype}:
  2399. @code{int starpu_data_acquire_cb(starpu_data_handle handle, starpu_access_mode mode, void (*callback)(void *), void *arg);}
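@item @emph{Example}:
A minimal sketch; @code{my_callback} and @code{vector_handle} are hypothetical
names. The handle is passed as the callback argument so that the callback can
release it once the data has been accessed.
@cartouche
@smallexample
void my_callback(void *arg)
@{
    starpu_data_handle handle = (starpu_data_handle)arg;
    /* the application may access the data here */
    starpu_data_release(handle);
@}

/* ... */
starpu_data_acquire_cb(vector_handle, STARPU_RW,
                       my_callback, vector_handle);
@end smallexample
@end cartouche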
  2400. @end table
  2401. @node starpu_data_release
  2402. @subsection @code{starpu_data_release} -- Release registered data from the application
  2403. @table @asis
  2404. @item @emph{Description}:
  2405. This function releases the piece of data acquired by the application either by
  2406. @code{starpu_data_acquire} or by @code{starpu_data_acquire_cb}.
  2407. @item @emph{Prototype}:
  2408. @code{void starpu_data_release(starpu_data_handle handle);}
  2409. @end table
  2410. @node starpu_data_set_wt_mask
  2411. @subsection @code{starpu_data_set_wt_mask} -- Set the Write-Through mask
  2412. @table @asis
  2413. @item @emph{Description}:
  2414. This function sets the write-through mask of a given data, i.e. a bitmask of
nodes where the data should always be replicated after modification.
  2416. @item @emph{Prototype}:
  2417. @code{void starpu_data_set_wt_mask(starpu_data_handle handle, uint32_t wt_mask);}
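@item @emph{Example}:
A minimal sketch: request that every modification of the data be written
through to memory node 0 (main memory).
@cartouche
@smallexample
starpu_data_set_wt_mask(handle, 1<<0);
@end smallexample
@end cartouche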
  2418. @end table
  2419. @node Data Interfaces
  2420. @section Data Interfaces
  2421. @menu
  2422. * Variable Interface::
  2423. * Vector Interface::
  2424. * Matrix Interface::
  2425. * 3D Matrix Interface::
  2426. * BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)::
  2427. * CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)::
  2428. @end menu
  2429. @node Variable Interface
  2430. @subsection Variable Interface
  2431. @table @asis
  2432. @item @emph{Description}:
  2433. This variant of @code{starpu_data_register} uses the variable interface,
i.e. for a single variable. @code{ptr} is the address of the variable,
  2435. and @code{elemsize} is the size of the variable.
  2436. @item @emph{Prototype}:
  2437. @code{void starpu_variable_data_register(starpu_data_handle *handle,
  2438. uint32_t home_node,
  2439. uintptr_t ptr, size_t elemsize);}
  2440. @item @emph{Example}:
  2441. @cartouche
  2442. @smallexample
  2443. float var;
  2444. starpu_data_handle var_handle;
  2445. starpu_variable_data_register(&var_handle, 0, (uintptr_t)&var, sizeof(var));
  2446. @end smallexample
  2447. @end cartouche
  2448. @end table
  2449. @node Vector Interface
  2450. @subsection Vector Interface
  2451. @table @asis
  2452. @item @emph{Description}:
  2453. This variant of @code{starpu_data_register} uses the vector interface,
  2454. i.e. for mere arrays of elements. @code{ptr} is the address of the first
  2455. element in the home node. @code{nx} is the number of elements in the vector.
  2456. @code{elemsize} is the size of each element.
  2457. @item @emph{Prototype}:
  2458. @code{void starpu_vector_data_register(starpu_data_handle *handle, uint32_t home_node,
  2459. uintptr_t ptr, uint32_t nx, size_t elemsize);}
  2460. @item @emph{Example}:
  2461. @cartouche
  2462. @smallexample
  2463. float vector[NX];
  2464. starpu_data_handle vector_handle;
  2465. starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
  2466. sizeof(vector[0]));
  2467. @end smallexample
  2468. @end cartouche
  2469. @end table
  2470. @node Matrix Interface
  2471. @subsection Matrix Interface
  2472. @table @asis
  2473. @item @emph{Description}:
  2474. This variant of @code{starpu_data_register} uses the matrix interface, i.e. for
  2475. matrices of elements. @code{ptr} is the address of the first element in the home
  2476. node. @code{ld} is the number of elements between rows. @code{nx} is the number
  2477. of elements in a row (this can be different from @code{ld} if there are extra
  2478. elements for alignment for instance). @code{ny} is the number of rows.
  2479. @code{elemsize} is the size of each element.
  2480. @item @emph{Prototype}:
  2481. @code{void starpu_matrix_data_register(starpu_data_handle *handle, uint32_t home_node,
  2482. uintptr_t ptr, uint32_t ld, uint32_t nx,
  2483. uint32_t ny, size_t elemsize);}
  2484. @item @emph{Example}:
  2485. @cartouche
  2486. @smallexample
  2487. float *matrix;
  2488. starpu_data_handle matrix_handle;
  2489. matrix = (float*)malloc(width * height * sizeof(float));
  2490. starpu_matrix_data_register(&matrix_handle, 0, (uintptr_t)matrix,
  2491. width, width, height, sizeof(float));
  2492. @end smallexample
  2493. @end cartouche
  2494. @end table
  2495. @node 3D Matrix Interface
  2496. @subsection 3D Matrix Interface
  2497. @table @asis
  2498. @item @emph{Description}:
  2499. This variant of @code{starpu_data_register} uses the 3D matrix interface.
@code{ptr} is the address of the first element in the home node.
  2501. @code{ldy} is the number of elements between rows. @code{ldz} is the number
  2502. of rows between z planes. @code{nx} is the number of elements in a row (this
  2503. can be different from @code{ldy} if there are extra elements for alignment
  2504. for instance). @code{ny} is the number of rows in a z plane (likewise with
  2505. @code{ldz}). @code{nz} is the number of z planes. @code{elemsize} is the size of
  2506. each element.
  2507. @item @emph{Prototype}:
  2508. @code{void starpu_block_data_register(starpu_data_handle *handle, uint32_t home_node,
  2509. uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t nx,
  2510. uint32_t ny, uint32_t nz, size_t elemsize);}
  2511. @item @emph{Example}:
  2512. @cartouche
  2513. @smallexample
  2514. float *block;
  2515. starpu_data_handle block_handle;
  2516. block = (float*)malloc(nx*ny*nz*sizeof(float));
  2517. starpu_block_data_register(&block_handle, 0, (uintptr_t)block,
  2518. nx, nx*ny, nx, ny, nz, sizeof(float));
  2519. @end smallexample
  2520. @end cartouche
  2521. @end table
  2522. @node BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)
  2523. @subsection BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)
  2524. @table @asis
  2525. @item @emph{Description}:
  2526. This variant of @code{starpu_data_register} uses the BCSR sparse matrix interface.
  2527. TODO
  2528. @item @emph{Prototype}:
  2529. @code{void starpu_bcsr_data_register(starpu_data_handle *handle, uint32_t home_node, uint32_t nnz, uint32_t nrow,
  2530. uintptr_t nzval, uint32_t *colind, uint32_t *rowptr, uint32_t firstentry, uint32_t r, uint32_t c, size_t elemsize);}
  2531. @item @emph{Example}:
  2532. @cartouche
  2533. @smallexample
  2534. @end smallexample
  2535. @end cartouche
  2536. @end table
  2537. @node CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)
  2538. @subsection CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)
  2539. @table @asis
  2540. @item @emph{Description}:
  2541. This variant of @code{starpu_data_register} uses the CSR sparse matrix interface.
  2542. TODO
  2543. @item @emph{Prototype}:
  2544. @code{void starpu_csr_data_register(starpu_data_handle *handle, uint32_t home_node, uint32_t nnz, uint32_t nrow,
  2545. uintptr_t nzval, uint32_t *colind, uint32_t *rowptr, uint32_t firstentry, size_t elemsize);}
  2546. @item @emph{Example}:
  2547. @cartouche
  2548. @smallexample
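/* A hedged sketch (the interpretation of the arguments follows the
   standard CSR layout): register a 3x3 matrix with 5 non-zero values,
   using 0-based indexing (firstentry = 0). */
float nzval[5] = @{ 1, 2, 3, 4, 5 @};
uint32_t colind[5] = @{ 0, 2, 1, 0, 2 @};
uint32_t rowptr[4] = @{ 0, 2, 3, 5 @};
starpu_data_handle csr_handle;
starpu_csr_data_register(&csr_handle, 0, 5, 3, (uintptr_t)nzval,
                         colind, rowptr, 0, sizeof(nzval[0]));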
  2549. @end smallexample
  2550. @end cartouche
  2551. @end table
  2552. @node Data Partition
  2553. @section Data Partition
  2554. @menu
  2555. * struct starpu_data_filter:: StarPU filter structure
  2556. * starpu_data_partition:: Partition Data
  2557. * starpu_data_unpartition:: Unpartition Data
  2558. * starpu_data_get_nb_children::
  2559. * starpu_data_get_sub_data::
  2560. * Predefined filter functions::
  2561. @end menu
  2562. @node struct starpu_data_filter
  2563. @subsection @code{struct starpu_data_filter} -- StarPU filter structure
  2564. @table @asis
  2565. @item @emph{Description}:
  2566. The filter structure describes a data partitioning operation, to be given to the
  2567. @code{starpu_data_partition} function, see @ref{starpu_data_partition} for an example.
  2568. @item @emph{Fields}:
  2569. @table @asis
  2570. @item @code{filter_func}:
  2571. This function fills the @code{child_interface} structure with interface
  2572. information for the @code{id}-th child of the parent @code{father_interface} (among @code{nparts}).
  2573. @code{void (*filter_func)(void *father_interface, void* child_interface, struct starpu_data_filter *, unsigned id, unsigned nparts);}
  2574. @item @code{nchildren}:
  2575. This is the number of parts to partition the data into.
  2576. @item @code{get_nchildren}:
  2577. This returns the number of children. This can be used instead of @code{nchildren} when the number of
  2578. children depends on the actual data (e.g. the number of blocks in a sparse
  2579. matrix).
  2580. @code{unsigned (*get_nchildren)(struct starpu_data_filter *, starpu_data_handle initial_handle);}
  2581. @item @code{get_child_ops}:
  2582. In case the resulting children use a different data interface, this function
  2583. returns which interface is used by child number @code{id}.
  2584. @code{struct starpu_data_interface_ops_t *(*get_child_ops)(struct starpu_data_filter *, unsigned id);}
  2585. @item @code{filter_arg}:
Some filters take an additional parameter, but this is usually unused.
  2587. @item @code{filter_arg_ptr}:
  2588. Some filters take an additional array parameter like the sizes of the parts, but
  2589. this is usually unused.
  2590. @end table
  2591. @end table
  2592. @node starpu_data_partition
  2593. @subsection starpu_data_partition -- Partition Data
  2594. @table @asis
  2595. @item @emph{Description}:
This requests the partitioning of one StarPU data @code{initial_handle} into
several pieces of subdata, according to the filter @code{f}.
  2598. @item @emph{Prototype}:
  2599. @code{void starpu_data_partition(starpu_data_handle initial_handle, struct starpu_data_filter *f);}
  2600. @item @emph{Example}:
  2601. @cartouche
  2602. @smallexample
  2603. struct starpu_data_filter f = @{
  2604. .filter_func = starpu_vertical_block_filter_func,
  2605. .nchildren = nslicesx,
  2606. .get_nchildren = NULL,
  2607. .get_child_ops = NULL
  2608. @};
  2609. starpu_data_partition(A_handle, &f);
  2610. @end smallexample
  2611. @end cartouche
  2612. @end table
  2613. @node starpu_data_unpartition
  2614. @subsection starpu_data_unpartition -- Unpartition data
  2615. @table @asis
  2616. @item @emph{Description}:
  2617. This unapplies one filter, thus unpartitioning the data. The pieces of data are
  2618. collected back into one big piece in the @code{gathering_node} (usually 0).
  2619. @item @emph{Prototype}:
  2620. @code{void starpu_data_unpartition(starpu_data_handle root_data, uint32_t gathering_node);}
  2621. @item @emph{Example}:
  2622. @cartouche
  2623. @smallexample
  2624. starpu_data_unpartition(A_handle, 0);
  2625. @end smallexample
  2626. @end cartouche
  2627. @end table
  2628. @node starpu_data_get_nb_children
  2629. @subsection starpu_data_get_nb_children
  2630. @table @asis
  2631. @item @emph{Description}:
  2632. This function returns the number of children.
  2633. @item @emph{Return value}:
  2634. The number of children.
  2635. @item @emph{Prototype}:
  2636. @code{int starpu_data_get_nb_children(starpu_data_handle handle);}
  2637. @end table
  2638. @c starpu_data_handle starpu_data_get_child(starpu_data_handle handle, unsigned i);
  2639. @node starpu_data_get_sub_data
  2640. @subsection starpu_data_get_sub_data
  2641. @table @asis
  2642. @item @emph{Description}:
  2643. After partitioning a StarPU data by applying a filter,
  2644. @code{starpu_data_get_sub_data} can be used to get handles for each of the data
  2645. portions. @code{root_data} is the parent data that was partitioned. @code{depth}
  2646. is the number of filters to traverse (in case several filters have been applied,
  2647. to e.g. partition in row blocks, and then in column blocks), and the subsequent
  2648. parameters are the indexes.
  2649. @item @emph{Return value}:
  2650. A handle to the subdata.
  2651. @item @emph{Prototype}:
  2652. @code{starpu_data_handle starpu_data_get_sub_data(starpu_data_handle root_data, unsigned depth, ... );}
  2653. @item @emph{Example}:
  2654. @cartouche
  2655. @smallexample
  2656. h = starpu_data_get_sub_data(A_handle, 1, taskx);
  2657. @end smallexample
  2658. @end cartouche
  2659. @end table
  2660. @node Predefined filter functions
  2661. @subsection Predefined filter functions
  2662. @menu
  2663. * Partitioning BCSR Data::
  2664. * Partitioning BLAS interface::
  2665. * Partitioning Vector Data::
  2666. * Partitioning Block Data::
  2667. @end menu
  2668. This section gives a partial list of the predefined partitioning functions.
  2669. Examples on how to use them are shown in @ref{Partitioning Data}. The complete
list can be found in @code{starpu_data_filters.h}.
  2671. @node Partitioning BCSR Data
  2672. @subsubsection Partitioning BCSR Data
  2673. @table @asis
  2674. @item @emph{Description}:
  2675. TODO
  2676. @item @emph{Prototype}:
  2677. @code{void starpu_canonical_block_filter_bcsr(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nparts);}
  2678. @end table
  2679. @table @asis
  2680. @item @emph{Description}:
  2681. TODO
  2682. @item @emph{Prototype}:
  2683. @code{void starpu_vertical_block_filter_func_csr(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nparts);}
  2684. @end table
  2685. @node Partitioning BLAS interface
  2686. @subsubsection Partitioning BLAS interface
  2687. @table @asis
  2688. @item @emph{Description}:
This partitions a dense matrix into horizontal blocks.
  2690. @item @emph{Prototype}:
  2691. @code{void starpu_block_filter_func(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nparts);}
  2692. @end table
  2693. @table @asis
  2694. @item @emph{Description}:
This partitions a dense matrix into vertical blocks.
  2696. @item @emph{Prototype}:
  2697. @code{void starpu_vertical_block_filter_func(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nparts);}
  2698. @end table
  2699. @node Partitioning Vector Data
  2700. @subsubsection Partitioning Vector Data
  2701. @table @asis
  2702. @item @emph{Description}:
  2703. This partitions a vector into blocks of the same size.
  2704. @item @emph{Prototype}:
  2705. @code{void starpu_block_filter_func_vector(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nparts);}
  2706. @end table
  2707. @table @asis
  2708. @item @emph{Description}:
  2709. This partitions a vector into blocks of sizes given in @code{filter_arg_ptr}.
  2710. @item @emph{Prototype}:
  2711. @code{void starpu_vector_list_filter_func(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nparts);}
  2712. @end table
  2713. @table @asis
  2714. @item @emph{Description}:
  2715. This partitions a vector into two blocks, the first block size being given in @code{filter_arg}.
  2716. @item @emph{Prototype}:
  2717. @code{void starpu_vector_divide_in_2_filter_func(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nparts);}
  2718. @end table
  2719. @node Partitioning Block Data
  2720. @subsubsection Partitioning Block Data
  2721. @table @asis
  2722. @item @emph{Description}:
  2723. This partitions a 3D matrix along the X axis.
  2724. @item @emph{Prototype}:
  2725. @code{void starpu_block_filter_func_block(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nparts);}
  2726. @end table
  2727. @node Codelets and Tasks
  2728. @section Codelets and Tasks
  2729. @menu
  2730. * struct starpu_codelet:: StarPU codelet structure
  2731. * struct starpu_task:: StarPU task structure
  2732. * starpu_task_init:: Initialize a Task
  2733. * starpu_task_create:: Allocate and Initialize a Task
  2734. * starpu_task_deinit:: Release all the resources used by a Task
  2735. * starpu_task_destroy:: Destroy a dynamically allocated Task
  2736. * starpu_task_wait:: Wait for the termination of a Task
  2737. * starpu_task_submit:: Submit a Task
  2738. * starpu_task_wait_for_all:: Wait for the termination of all Tasks
  2739. * starpu_get_current_task:: Return the task currently executed by the worker
  2740. * starpu_display_codelet_stats:: Display statistics
  2741. @end menu
  2742. @node struct starpu_codelet
  2743. @subsection @code{struct starpu_codelet} -- StarPU codelet structure
  2744. @table @asis
  2745. @item @emph{Description}:
  2746. The codelet structure describes a kernel that is possibly implemented on various
  2747. targets. For compatibility, make sure to initialize the whole structure to zero.
  2748. @item @emph{Fields}:
  2749. @table @asis
  2750. @item @code{where}:
  2751. Indicates which types of processing units are able to execute the codelet.
  2752. @code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
  2753. implemented for both CPU cores and CUDA devices while @code{STARPU_GORDON}
  2754. indicates that it is only available on Cell SPUs.
  2755. @item @code{cpu_func} (optional):
  2756. Is a function pointer to the CPU implementation of the codelet. Its prototype
must be: @code{void cpu_func(void *buffers[], void *cl_arg)}. The first
argument is the array of data managed by the data management library, and
the second argument is a pointer to the argument passed through the @code{cl_arg}
field of the @code{starpu_task} structure.
  2761. The @code{cpu_func} field is ignored if @code{STARPU_CPU} does not appear in
  2762. the @code{where} field, it must be non-null otherwise.
  2763. @item @code{cuda_func} (optional):
  2764. Is a function pointer to the CUDA implementation of the codelet. @emph{This
  2765. must be a host-function written in the CUDA runtime API}. Its prototype must
  2766. be: @code{void cuda_func(void *buffers[], void *cl_arg);}. The @code{cuda_func}
  2767. field is ignored if @code{STARPU_CUDA} does not appear in the @code{where}
  2768. field, it must be non-null otherwise.
  2769. @item @code{opencl_func} (optional):
  2770. Is a function pointer to the OpenCL implementation of the codelet. Its
  2771. prototype must be:
  2772. @code{void opencl_func(starpu_data_interface_t *descr, void *arg);}.
  2773. This pointer is ignored if @code{STARPU_OPENCL} does not appear in the
  2774. @code{where} field, it must be non-null otherwise.
  2775. @item @code{gordon_func} (optional):
  2776. This is the index of the Cell SPU implementation within the Gordon library.
  2777. See Gordon documentation for more details on how to register a kernel and
  2778. retrieve its index.
  2779. @item @code{nbuffers}:
  2780. Specifies the number of arguments taken by the codelet. These arguments are
  2781. managed by the DSM and are accessed from the @code{void *buffers[]}
  2782. array. The constant argument passed with the @code{cl_arg} field of the
  2783. @code{starpu_task} structure is not counted in this number. This value should
  2784. not be above @code{STARPU_NMAXBUFS}.
  2785. @item @code{model} (optional):
  2786. This is a pointer to the task duration performance model associated to this
  2787. codelet. This optional field is ignored when set to @code{NULL}.
  2788. TODO
  2789. @item @code{power_model} (optional):
  2790. This is a pointer to the task power consumption performance model associated
  2791. to this codelet. This optional field is ignored when set to @code{NULL}.
  2792. In the case of parallel codelets, this has to account for all processing units
  2793. involved in the parallel execution.
  2794. TODO
  2795. @end table
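@item @emph{Example}:
A minimal sketch of a codelet with a single CPU implementation taking one
buffer; @code{scal_cpu_func} is a hypothetical user function with the
prototype described above. The remaining fields are implicitly
zero-initialized.
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg);

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = scal_cpu_func,
    .nbuffers = 1
@};
@end smallexample
@end cartouche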
  2796. @end table
  2797. @node struct starpu_task
  2798. @subsection @code{struct starpu_task} -- StarPU task structure
  2799. @table @asis
  2800. @item @emph{Description}:
  2801. The @code{starpu_task} structure describes a task that can be offloaded on the various
  2802. processing units managed by StarPU. It instantiates a codelet. It can either be
  2803. allocated dynamically with the @code{starpu_task_create} method, or declared
  2804. statically. In the latter case, the programmer has to zero the
  2805. @code{starpu_task} structure and to fill the different fields properly. The
  2806. indicated default values correspond to the configuration of a task allocated
  2807. with @code{starpu_task_create}.
  2808. @item @emph{Fields}:
  2809. @table @asis
  2810. @item @code{cl}:
  2811. Is a pointer to the corresponding @code{starpu_codelet} data structure. This
  2812. describes where the kernel should be executed, and supplies the appropriate
implementations. When set to @code{NULL}, no code is executed during the task;
such empty tasks can be useful for synchronization purposes.
  2815. @item @code{buffers}:
  2816. Is an array of @code{starpu_buffer_descr_t} structures. It describes the
  2817. different pieces of data accessed by the task, and how they should be accessed.
  2818. The @code{starpu_buffer_descr_t} structure is composed of two fields, the
  2819. @code{handle} field specifies the handle of the piece of data, and the
  2820. @code{mode} field is the required access mode (eg @code{STARPU_RW}). The number
  2821. of entries in this array must be specified in the @code{nbuffers} field of the
@code{starpu_codelet} structure, and should not exceed @code{STARPU_NMAXBUFS}.
If insufficient, this value can be set with the @code{--enable-maxbuffers}
option when configuring StarPU.
  2825. @item @code{cl_arg} (optional) (default = NULL):
  2826. This pointer is passed to the codelet through the second argument
  2827. of the codelet implementation (e.g. @code{cpu_func} or @code{cuda_func}).
  2828. In the specific case of the Cell processor, see the @code{cl_arg_size}
  2829. argument.
  2830. @item @code{cl_arg_size} (optional, Cell specific):
  2831. In the case of the Cell processor, the @code{cl_arg} pointer is not directly
  2832. given to the SPU function. A buffer of size @code{cl_arg_size} is allocated on
  2833. the SPU. This buffer is then filled with the @code{cl_arg_size} bytes starting
  2834. at address @code{cl_arg}. In this case, the argument given to the SPU codelet
  2835. is therefore not the @code{cl_arg} pointer, but the address of the buffer in
  2836. local store (LS) instead. This field is ignored for CPU, CUDA and OpenCL
  2837. codelets, where the @code{cl_arg} pointer is given as such.
  2838. @item @code{callback_func} (optional) (default = @code{NULL}):
  2839. This is a function pointer of prototype @code{void (*f)(void *)} which
  2840. specifies a possible callback. If this pointer is non-null, the callback
  2841. function is executed @emph{on the host} after the execution of the task. The
  2842. callback is passed the value contained in the @code{callback_arg} field. No
  2843. callback is executed if the field is set to @code{NULL}.
  2844. @item @code{callback_arg} (optional) (default = @code{NULL}):
  2845. This is the pointer passed to the callback function. This field is ignored if
  2846. the @code{callback_func} is set to @code{NULL}.
  2847. @item @code{use_tag} (optional) (default = 0):
  2848. If set, this flag indicates that the task should be associated with the tag
contained in the @code{tag_id} field. Tags allow the application to synchronize
with the task and to express task dependencies easily.
  2851. @item @code{tag_id}:
This field contains the tag associated to the task if the @code{use_tag} field
was set; it is ignored otherwise.
  2854. @item @code{synchronous}:
  2855. If this flag is set, the @code{starpu_task_submit} function is blocking and
  2856. returns only when the task has been executed (or if no worker is able to
  2857. process the task). Otherwise, @code{starpu_task_submit} returns immediately.
  2858. @item @code{priority} (optional) (default = @code{STARPU_DEFAULT_PRIO}):
  2859. This field indicates a level of priority for the task. This is an integer value
  2860. that must be set between the return values of the
  2861. @code{starpu_sched_get_min_priority} function for the least important tasks,
  2862. and that of the @code{starpu_sched_get_max_priority} for the most important
  2863. tasks (included). The @code{STARPU_MIN_PRIO} and @code{STARPU_MAX_PRIO} macros
are provided for convenience and respectively return the values of
  2865. @code{starpu_sched_get_min_priority} and @code{starpu_sched_get_max_priority}.
  2866. Default priority is @code{STARPU_DEFAULT_PRIO}, which is always defined as 0 in
  2867. order to allow static task initialization. Scheduling strategies that take
  2868. priorities into account can use this parameter to take better scheduling
  2869. decisions, but the scheduling policy may also ignore it.
  2870. @item @code{execute_on_a_specific_worker} (default = 0):
If this flag is set, StarPU will bypass the scheduler and directly assign this
task to the worker specified by the @code{workerid} field.
  2873. @item @code{workerid} (optional):
  2874. If the @code{execute_on_a_specific_worker} field is set, this field indicates
the identifier of the worker that should process this task (as
returned by @code{starpu_worker_get_id}). This field is ignored if the
@code{execute_on_a_specific_worker} field is set to 0.
  2878. @item @code{detach} (optional) (default = 1):
  2879. If this flag is set, it is not possible to synchronize with the task
by means of @code{starpu_task_wait} later on. Internal data structures
  2881. are only guaranteed to be freed once @code{starpu_task_wait} is called if the
  2882. flag is not set.
  2883. @item @code{destroy} (optional) (default = 1):
  2884. If this flag is set, the task structure will automatically be freed, either
  2885. after the execution of the callback if the task is detached, or during
  2886. @code{starpu_task_wait} otherwise. If this flag is not set, dynamically
  2887. allocated data structures will not be freed until @code{starpu_task_destroy} is
  2888. called explicitly. Setting this flag for a statically allocated task structure
  2889. will result in undefined behaviour.
  2890. @item @code{predicted} (output field):
Predicted duration of the task. This field is only set if the scheduling
strategy uses performance models.
  2893. @end table
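@item @emph{Example}:
A minimal sketch: create a task, attach a codelet @code{cl} (such as the one
sketched above), bind one previously registered handle in read-write mode, and
submit the task synchronously.
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->buffers[0].handle = vector_handle;
task->buffers[0].mode = STARPU_RW;
task->synchronous = 1;
starpu_task_submit(task);
@end smallexample
@end cartouche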
  2894. @end table
  2895. @node starpu_task_init
  2896. @subsection @code{starpu_task_init} -- Initialize a Task
  2897. @table @asis
  2898. @item @emph{Description}:
  2899. Initialize a task structure with default values. This function is implicitly
  2900. called by @code{starpu_task_create}. By default, tasks initialized with
  2901. @code{starpu_task_init} must be deinitialized explicitly with
  2902. @code{starpu_task_deinit}. Tasks can also be initialized statically, using the
  2903. constant @code{STARPU_TASK_INITIALIZER}.
  2904. @item @emph{Prototype}:
  2905. @code{void starpu_task_init(struct starpu_task *task);}
  2906. @end table
  2907. @node starpu_task_create
  2908. @subsection @code{starpu_task_create} -- Allocate and Initialize a Task
  2909. @table @asis
  2910. @item @emph{Description}:
  2911. Allocate a task structure and initialize it with default values. Tasks
  2912. allocated dynamically with @code{starpu_task_create} are automatically freed when the
task is terminated. If the @code{destroy} flag is explicitly unset, the
resources used by the task must be freed by calling
@code{starpu_task_destroy}.
  2916. @item @emph{Prototype}:
  2917. @code{struct starpu_task *starpu_task_create(void);}
  2918. @end table
  2919. @node starpu_task_deinit
  2920. @subsection @code{starpu_task_deinit} -- Release all the resources used by a Task
  2921. @table @asis
  2922. @item @emph{Description}:
  2923. Release all the structures automatically allocated to execute the task. This is
  2924. called automatically by @code{starpu_task_destroy}, but the task structure itself is not
  2925. freed. This should be used for statically allocated tasks for instance.
  2926. @item @emph{Prototype}:
  2927. @code{void starpu_task_deinit(struct starpu_task *task);}
  2928. @end table
  2929. @node starpu_task_destroy
  2930. @subsection @code{starpu_task_destroy} -- Destroy a dynamically allocated Task
  2931. @table @asis
  2932. @item @emph{Description}:
Free the resources allocated during @code{starpu_task_create}. This function can be
  2934. called automatically after the execution of a task by setting the
  2935. @code{destroy} flag of the @code{starpu_task} structure (default behaviour).
  2936. Calling this function on a statically allocated task results in an undefined
  2937. behaviour.
  2938. @item @emph{Prototype}:
  2939. @code{void starpu_task_destroy(struct starpu_task *task);}
  2940. @end table
  2941. @node starpu_task_wait
  2942. @subsection @code{starpu_task_wait} -- Wait for the termination of a Task
  2943. @table @asis
  2944. @item @emph{Description}:
  2945. This function blocks until the task has been executed. It is not possible to
  2946. synchronize with a task more than once. It is not possible to wait for
  2947. synchronous or detached tasks.
  2948. @item @emph{Return value}:
  2949. Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
  2950. indicates that the specified task was either synchronous or detached.
  2951. @item @emph{Prototype}:
  2952. @code{int starpu_task_wait(struct starpu_task *task);}
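@item @emph{Example}:
A minimal sketch, reusing a codelet @code{cl} as above: the @code{detach} flag
must be unset for the task to be waited for.
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->detach = 0;  /* allow synchronization with starpu_task_wait */
starpu_task_submit(task);
starpu_task_wait(task);
@end smallexample
@end cartouche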
  2953. @end table
  2954. @node starpu_task_submit
  2955. @subsection @code{starpu_task_submit} -- Submit a Task
  2956. @table @asis
  2957. @item @emph{Description}:
  2958. This function submits a task to StarPU. Calling this function does
  2959. not mean that the task will be executed immediately as there can be data or task
  2960. (tag) dependencies that are not fulfilled yet: StarPU will take care of
  2961. scheduling this task with respect to such dependencies.
  2962. This function returns immediately if the @code{synchronous} field of the
@code{starpu_task} structure was set to 0, and blocks until the termination of
the task otherwise. It is also possible to synchronize the application with
asynchronous tasks by means of tags, using the @code{starpu_tag_wait}
  2966. function for instance.
  2967. @item @emph{Return value}:
  2968. In case of success, this function returns 0, a return value of @code{-ENODEV}
  2969. means that there is no worker able to process this task (e.g. there is no GPU
  2970. available and this task is only implemented for CUDA devices).
  2971. @item @emph{Prototype}:
  2972. @code{int starpu_task_submit(struct starpu_task *task);}
  2973. @end table
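As a sketch of typical usage (assuming an application-defined codelet
@code{cl}), an asynchronous task can be submitted, waited for, and destroyed
as follows:
@cartouche
@example
/* A sketch assuming a codelet cl defined by the application. */
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->synchronous = 0;

/* Keep the structure valid so starpu_task_wait can be used. */
task->detach = 0;
task->destroy = 0;

int ret = starpu_task_submit(task);
if (ret == -ENODEV)
    fprintf(stderr, "no worker can execute this task\n");

starpu_task_wait(task);
starpu_task_destroy(task);
@end example
@end cartouche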
@node starpu_task_wait_for_all
@subsection @code{starpu_task_wait_for_all} -- Wait for the termination of all Tasks
@table @asis
@item @emph{Description}:
This function blocks until all the tasks that were submitted are terminated.
@item @emph{Prototype}:
@code{void starpu_task_wait_for_all(void);}
@end table
@node starpu_get_current_task
@subsection @code{starpu_get_current_task} -- Return the task currently executed by the worker
@table @asis
@item @emph{Description}:
This function returns the task currently executed by the worker, or NULL if
it is called from a thread that is not executing a task, or if no task is
being executed at the moment.
@item @emph{Prototype}:
@code{struct starpu_task *starpu_get_current_task(void);}
@end table
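For instance, a CPU kernel implementation may inspect the task being executed
(a minimal sketch):
@cartouche
@example
void cpu_func(void *buffers[], void *cl_arg)
@{
    struct starpu_task *task = starpu_get_current_task();
    if (task && task->use_tag)
        fprintf(stderr, "executing the task with tag %llx\n",
                (unsigned long long)task->tag_id);
@}
@end example
@end cartouche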
@node starpu_display_codelet_stats
@subsection @code{starpu_display_codelet_stats} -- Display statistics
@table @asis
@item @emph{Description}:
Output on @code{stderr} some statistics on the codelet @code{cl}.
@item @emph{Prototype}:
@code{void starpu_display_codelet_stats(struct starpu_codelet_t *cl);}
@end table
@c Callbacks : what can we put in callbacks ?
@node Explicit Dependencies
@section Explicit Dependencies
@menu
* starpu_task_declare_deps_array:: starpu_task_declare_deps_array
* starpu_tag_t:: Task logical identifier
* starpu_tag_declare_deps:: Declare the Dependencies of a Tag
* starpu_tag_declare_deps_array:: Declare the Dependencies of a Tag
* starpu_tag_wait:: Block until a Tag is terminated
* starpu_tag_wait_array:: Block until a set of Tags is terminated
* starpu_tag_remove:: Destroy a Tag
* starpu_tag_notify_from_apps:: Feed a tag explicitly
@end menu
@node starpu_task_declare_deps_array
@subsection @code{starpu_task_declare_deps_array} -- Declare task dependencies
@table @asis
@item @emph{Description}:
Declare task dependencies between a @code{task} and an array of tasks of
length @code{ndeps}. This function must be called prior to the submission of
the task, but it may be called after the submission or the execution of the
tasks in the array, provided the tasks are still valid (i.e. they were not
automatically destroyed). Calling this function on a task that was already
submitted or with an entry of @code{task_array} that is not a valid task
anymore results in undefined behaviour. If @code{ndeps} is null, no dependency
is added. It is possible to call @code{starpu_task_declare_deps_array}
multiple times on the same task; in this case, the dependencies are added. It
is possible to have redundancy in the task dependencies.
@item @emph{Prototype}:
@code{void starpu_task_declare_deps_array(struct starpu_task *task, unsigned ndeps, struct starpu_task *task_array[]);}
@end table
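A minimal sketch, assuming @code{task_a}, @code{task_b} and @code{task_c} were
created earlier and are still valid:
@cartouche
@example
/* task_c will not start before task_a and task_b are terminated. */
struct starpu_task *deps[] = @{task_a, task_b@};
starpu_task_declare_deps_array(task_c, 2, deps);
starpu_task_submit(task_c);
@end example
@end cartouche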
@node starpu_tag_t
@subsection @code{starpu_tag_t} -- Task logical identifier
@table @asis
@item @emph{Description}:
It is possible to associate a task with a unique ``tag'' chosen by the
application, and to express dependencies between tasks by the means of those
tags. To do so, fill the @code{tag_id} field of the @code{starpu_task}
structure with a tag number (which can be arbitrary) and set the
@code{use_tag} field to 1. If @code{starpu_tag_declare_deps} is called with
this tag number, the task will not be started until the tasks which hold the
declared dependency tags are completed.
@end table
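For instance (a minimal sketch):
@cartouche
@example
/* Associate tag 0x42 with the task before it is submitted. */
struct starpu_task *task = starpu_task_create();
task->use_tag = 1;
task->tag_id = (starpu_tag_t)0x42;
@end example
@end cartouche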
@node starpu_tag_declare_deps
@subsection @code{starpu_tag_declare_deps} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
Specify the dependencies of the task identified by tag @code{id}. The first
argument specifies the tag which is configured, the second argument gives the
number of tag(s) on which @code{id} depends. The following arguments are the
tags which have to be terminated to unlock the task. This function must be
called before the associated task is submitted to StarPU with
@code{starpu_task_submit}.
@item @emph{Remark}
Because of the variable arity of @code{starpu_tag_declare_deps}, note that the
last arguments @emph{must} be of type @code{starpu_tag_t}: constant values
typically need to be explicitly cast. Using the
@code{starpu_tag_declare_deps_array} function avoids this hazard.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps(starpu_tag_t id, unsigned ndeps, ...);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_declare_deps((starpu_tag_t)0x1,
        2, (starpu_tag_t)0x32, (starpu_tag_t)0x52);
@end example
@end cartouche
@end table
@node starpu_tag_declare_deps_array
@subsection @code{starpu_tag_declare_deps_array} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
This function is similar to @code{starpu_tag_declare_deps}, except that it
does not take a variable number of arguments but an array of tags of size
@code{ndeps}.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps_array(starpu_tag_t id, unsigned ndeps, starpu_tag_t *array);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_t tag_array[2] = @{0x32, 0x52@};
starpu_tag_declare_deps_array((starpu_tag_t)0x1, 2, tag_array);
@end example
@end cartouche
@end table
@node starpu_tag_wait
@subsection @code{starpu_tag_wait} -- Block until a Tag is terminated
@table @asis
@item @emph{Description}:
This function blocks until the task associated to tag @code{id} has been
executed. This is a blocking call which must therefore not be called within
tasks or callbacks, but only from the application directly. It is possible to
synchronize with the same tag multiple times, as long as the
@code{starpu_tag_remove} function is not called. Note that it is still
possible to synchronize with a tag associated to a task whose
@code{starpu_task} data structure was freed (e.g. if the @code{destroy} flag
of the @code{starpu_task} was enabled).
@item @emph{Prototype}:
@code{void starpu_tag_wait(starpu_tag_t id);}
@end table
@node starpu_tag_wait_array
@subsection @code{starpu_tag_wait_array} -- Block until a set of Tags is terminated
@table @asis
@item @emph{Description}:
This function is similar to @code{starpu_tag_wait} except that it blocks until
@emph{all} the @code{ntags} tags contained in the @code{id} array are
terminated.
@item @emph{Prototype}:
@code{void starpu_tag_wait_array(unsigned ntags, starpu_tag_t *id);}
@end table
@node starpu_tag_remove
@subsection @code{starpu_tag_remove} -- Destroy a Tag
@table @asis
@item @emph{Description}:
This function releases the resources associated to tag @code{id}. It can be
called once the corresponding task has been executed and when there is no
other tag that depends on this tag anymore.
@item @emph{Prototype}:
@code{void starpu_tag_remove(starpu_tag_t id);}
@end table
@node starpu_tag_notify_from_apps
@subsection @code{starpu_tag_notify_from_apps} -- Feed a Tag explicitly
@table @asis
@item @emph{Description}:
This function explicitly unlocks tag @code{id}. It may be useful in the case
of applications which execute part of their computation outside StarPU tasks
(e.g. third-party libraries). It is also provided as a convenient tool for
the programmer, for instance to entirely construct the task DAG before
actually giving StarPU the opportunity to execute the tasks.
@item @emph{Prototype}:
@code{void starpu_tag_notify_from_apps(starpu_tag_t id);}
@end table
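A minimal sketch, where a task waits for a computation performed outside
StarPU:
@cartouche
@example
/* The task behind tag 0x1 also depends on tag 0x24, which is under
 * the sole control of the application. */
starpu_tag_declare_deps((starpu_tag_t)0x1, 1, (starpu_tag_t)0x24);

/* ... computation performed outside StarPU ... */

/* Unlock tag 0x24 so that the task behind tag 0x1 may start. */
starpu_tag_notify_from_apps((starpu_tag_t)0x24);
@end example
@end cartouche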
@node Implicit Data Dependencies
@section Implicit Data Dependencies
@menu
* starpu_data_set_default_sequential_consistency_flag:: starpu_data_set_default_sequential_consistency_flag
* starpu_data_get_default_sequential_consistency_flag:: starpu_data_get_default_sequential_consistency_flag
* starpu_data_set_sequential_consistency_flag:: starpu_data_set_sequential_consistency_flag
@end menu
In this section, we describe how StarPU makes it possible to insert implicit
task dependencies in order to enforce sequential data consistency. When this
data consistency is enabled on a specific data handle, any data access will
appear as sequentially consistent from the application. For instance, if the
application submits two tasks that access the same piece of data in read-only
mode, and then a third task that accesses it in write mode, dependencies will
be added between the first two tasks and the third one. Implicit data
dependencies are also inserted in the case of data accesses from the
application.
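For instance, assuming sequential consistency is enabled on @code{handle} and
that @code{task_r1}, @code{task_r2} and @code{task_w} are hypothetical tasks
accessing it in the modes described above, the following submission order is
sufficient:
@cartouche
@example
starpu_task_submit(task_r1); /* accesses handle in STARPU_R mode */
starpu_task_submit(task_r2); /* accesses handle in STARPU_R mode */
starpu_task_submit(task_w);  /* accesses handle in STARPU_W mode:
                                implicitly depends on both readers */
@end example
@end cartouche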
@node starpu_data_set_default_sequential_consistency_flag
@subsection @code{starpu_data_set_default_sequential_consistency_flag} -- Set default sequential consistency flag
@table @asis
@item @emph{Description}:
Set the default sequential consistency flag. If a non-zero value is passed,
sequential data consistency will be enforced for all handles registered after
this function call; otherwise it is disabled. By default, StarPU enables
sequential data consistency. It is also possible to select the data
consistency mode of a specific data handle with the
@code{starpu_data_set_sequential_consistency_flag} function.
@item @emph{Prototype}:
@code{void starpu_data_set_default_sequential_consistency_flag(unsigned flag);}
@end table
@node starpu_data_get_default_sequential_consistency_flag
@subsection @code{starpu_data_get_default_sequential_consistency_flag} -- Get current default sequential consistency flag
@table @asis
@item @emph{Description}:
This function returns the current default sequential consistency flag.
@item @emph{Prototype}:
@code{unsigned starpu_data_get_default_sequential_consistency_flag(void);}
@end table
@node starpu_data_set_sequential_consistency_flag
@subsection @code{starpu_data_set_sequential_consistency_flag} -- Set data sequential consistency mode
@table @asis
@item @emph{Description}:
Select the data consistency mode associated to a data handle. The consistency
mode set with this function takes priority over the default mode which can be
set with @code{starpu_data_set_default_sequential_consistency_flag}.
@item @emph{Prototype}:
@code{void starpu_data_set_sequential_consistency_flag(starpu_data_handle handle, unsigned flag);}
@end table
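A minimal sketch, assuming @code{handle} was registered previously:
@cartouche
@example
/* Opt this handle out of implicit dependencies. */
starpu_data_set_sequential_consistency_flag(handle, 0);
@end example
@end cartouche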
@node Performance Model API
@section Performance Model API
@menu
* starpu_load_history_debug::
* starpu_perfmodel_debugfilepath::
* starpu_perfmodel_get_arch_name::
* starpu_force_bus_sampling::
@end menu
@node starpu_load_history_debug
@subsection @code{starpu_load_history_debug}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_load_history_debug(const char *symbol, struct starpu_perfmodel_t *model);}
@end table
@node starpu_perfmodel_debugfilepath
@subsection @code{starpu_perfmodel_debugfilepath}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{void starpu_perfmodel_debugfilepath(struct starpu_perfmodel_t *model, enum starpu_perf_archtype arch, char *path, size_t maxlen);}
@end table
@node starpu_perfmodel_get_arch_name
@subsection @code{starpu_perfmodel_get_arch_name}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{void starpu_perfmodel_get_arch_name(enum starpu_perf_archtype arch, char *archname, size_t maxlen);}
@end table
@node starpu_force_bus_sampling
@subsection @code{starpu_force_bus_sampling}
@table @asis
@item @emph{Description}:
This forces sampling the bus performance model again.
@item @emph{Prototype}:
@code{void starpu_force_bus_sampling(void);}
@end table
@node Profiling API
@section Profiling API
@menu
* starpu_profiling_status_set:: starpu_profiling_status_set
* starpu_profiling_status_get:: starpu_profiling_status_get
* struct starpu_task_profiling_info:: task profiling information
* struct starpu_worker_profiling_info:: worker profiling information
* starpu_worker_get_profiling_info:: starpu_worker_get_profiling_info
* struct starpu_bus_profiling_info:: bus profiling information
* starpu_bus_get_count::
* starpu_bus_get_id::
* starpu_bus_get_src::
* starpu_bus_get_dst::
* starpu_timing_timespec_delay_us::
* starpu_timing_timespec_to_us::
* starpu_bus_profiling_helper_display_summary::
* starpu_worker_profiling_helper_display_summary::
@end menu
@node starpu_profiling_status_set
@subsection @code{starpu_profiling_status_set} -- Set current profiling status
@table @asis
@item @emph{Description}:
This function sets the profiling status. Profiling is activated by passing
@code{STARPU_PROFILING_ENABLE} in @code{status}. Passing
@code{STARPU_PROFILING_DISABLE} disables profiling. Calling this function
resets all profiling measurements. When profiling is enabled, the
@code{profiling_info} field of the @code{struct starpu_task} structure points
to a valid @code{struct starpu_task_profiling_info} structure containing
information about the execution of the task.
@item @emph{Return value}:
Negative return values indicate an error, otherwise the previous status is
returned.
@item @emph{Prototype}:
@code{int starpu_profiling_status_set(int status);}
@end table
@node starpu_profiling_status_get
@subsection @code{starpu_profiling_status_get} -- Get current profiling status
@table @asis
@item @emph{Description}:
Return the current profiling status, or a negative value in case there was an
error.
@item @emph{Prototype}:
@code{int starpu_profiling_status_get(void);}
@end table
@node struct starpu_task_profiling_info
@subsection @code{struct starpu_task_profiling_info} -- Task profiling information
@table @asis
@item @emph{Description}:
This structure contains information about the execution of a task. It is
accessible from the @code{.profiling_info} field of the @code{starpu_task}
structure if profiling was enabled.
@item @emph{Fields}:
@table @asis
@item @code{submit_time}:
Date of task submission (relative to the initialization of StarPU).
@item @code{start_time}:
Date of task execution beginning (relative to the initialization of StarPU).
@item @code{end_time}:
Date of task execution termination (relative to the initialization of StarPU).
@item @code{workerid}:
Identifier of the worker which has executed the task.
@end table
@end table
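As a sketch of typical usage, assuming profiling was enabled and the task was
submitted with its @code{destroy} flag unset and waited for, the execution
time can be computed with @code{starpu_timing_timespec_delay_us} (described
below):
@cartouche
@example
starpu_profiling_status_set(STARPU_PROFILING_ENABLE);
/* ... submit the task with task->destroy = 0 and wait for it ... */
struct starpu_task_profiling_info *info = task->profiling_info;
double length = starpu_timing_timespec_delay_us(&info->start_time,
                                                &info->end_time);
fprintf(stderr, "task took %f us on worker %d\n",
        length, info->workerid);
@end example
@end cartouche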
@node struct starpu_worker_profiling_info
@subsection @code{struct starpu_worker_profiling_info} -- Worker profiling information
@table @asis
@item @emph{Description}:
This structure contains the profiling information associated to a worker.
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
Starting date for the reported profiling measurements.
@item @code{total_time}:
Duration of the profiling measurement interval.
@item @code{executing_time}:
Time spent by the worker to execute tasks during the profiling measurement interval.
@item @code{sleeping_time}:
Time spent idling by the worker during the profiling measurement interval.
@item @code{executed_tasks}:
Number of tasks executed by the worker during the profiling measurement interval.
@end table
@end table
@node starpu_worker_get_profiling_info
@subsection @code{starpu_worker_get_profiling_info} -- Get worker profiling info
@table @asis
@item @emph{Description}:
Get the profiling info associated to the worker identified by @code{workerid},
and reset the profiling measurements. If the @code{worker_info} argument is
NULL, only reset the counters associated to worker @code{workerid}.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, a negative
value is returned.
@item @emph{Prototype}:
@code{int starpu_worker_get_profiling_info(int workerid, struct starpu_worker_profiling_info *worker_info);}
@end table
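A sketch displaying the activity of every worker; it assumes that
@code{starpu_worker_get_count} returns the number of workers, and uses
@code{starpu_timing_timespec_to_us} (described below):
@cartouche
@example
/* Print the ratio of busy time for every worker. */
unsigned worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
@{
    struct starpu_worker_profiling_info info;
    starpu_worker_get_profiling_info(worker, &info);

    double total = starpu_timing_timespec_to_us(&info.total_time);
    double exec = starpu_timing_timespec_to_us(&info.executing_time);
    fprintf(stderr, "worker %u: %.2f%% busy\n",
            worker, 100.0 * exec / total);
@}
@end example
@end cartouche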
@node struct starpu_bus_profiling_info
@subsection @code{struct starpu_bus_profiling_info} -- Bus profiling information
@table @asis
@item @emph{Description}:
TODO
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
TODO
@item @code{total_time}:
TODO
@item @code{transferred_bytes}:
TODO
@item @code{transfer_count}:
TODO
@end table
@end table
@node starpu_bus_get_count
@subsection @code{starpu_bus_get_count}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_bus_get_count(void);}
@end table
@node starpu_bus_get_id
@subsection @code{starpu_bus_get_id}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_bus_get_id(int src, int dst);}
@end table
@node starpu_bus_get_src
@subsection @code{starpu_bus_get_src}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_bus_get_src(int busid);}
@end table
@node starpu_bus_get_dst
@subsection @code{starpu_bus_get_dst}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_bus_get_dst(int busid);}
@end table
@node starpu_timing_timespec_delay_us
@subsection @code{starpu_timing_timespec_delay_us}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{double starpu_timing_timespec_delay_us(struct timespec *start, struct timespec *end);}
@end table
@node starpu_timing_timespec_to_us
@subsection @code{starpu_timing_timespec_to_us}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{double starpu_timing_timespec_to_us(struct timespec *ts);}
@end table
@node starpu_bus_profiling_helper_display_summary
@subsection @code{starpu_bus_profiling_helper_display_summary}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{void starpu_bus_profiling_helper_display_summary(void);}
@end table
@node starpu_worker_profiling_helper_display_summary
@subsection @code{starpu_worker_profiling_helper_display_summary}
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{void starpu_worker_profiling_helper_display_summary(void);}
@end table
@node CUDA extensions
@section CUDA extensions
@c void starpu_malloc(float **A, size_t dim);
@menu
* starpu_cuda_get_local_stream:: Get current worker's CUDA stream
* starpu_helper_cublas_init:: Initialize CUBLAS on every CUDA device
* starpu_helper_cublas_shutdown:: Deinitialize CUBLAS on every CUDA device
@end menu
@node starpu_cuda_get_local_stream
@subsection @code{starpu_cuda_get_local_stream} -- Get current worker's CUDA stream
@table @asis
@item @emph{Description}:
StarPU provides a stream for every CUDA device controlled by StarPU. This
function is only provided for convenience so that programmers can easily use
asynchronous operations within codelets without having to create a stream by
hand. Note that the application is not forced to use the stream provided by
@code{starpu_cuda_get_local_stream} and may also create its own streams.
Synchronizing with @code{cudaThreadSynchronize()} is allowed, but will reduce
the likelihood of having all transfers overlapped.
@item @emph{Prototype}:
@code{cudaStream_t *starpu_cuda_get_local_stream(void);}
@end table
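For instance, a CUDA codelet may enqueue an asynchronous transfer on the local
stream and wait for its completion (a sketch; @code{dst}, @code{src} and
@code{size} are hypothetical):
@cartouche
@example
cudaStream_t stream = *starpu_cuda_get_local_stream();
cudaMemcpyAsync(dst, src, size, cudaMemcpyDeviceToDevice, stream);
cudaStreamSynchronize(stream);
@end example
@end cartouche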
@node starpu_helper_cublas_init
@subsection @code{starpu_helper_cublas_init} -- Initialize CUBLAS on every CUDA device
@table @asis
@item @emph{Description}:
The CUBLAS library must be initialized prior to any CUBLAS call. Calling
@code{starpu_helper_cublas_init} will initialize CUBLAS on every CUDA device
controlled by StarPU. This call blocks until CUBLAS has been properly
initialized on every device.
@item @emph{Prototype}:
@code{void starpu_helper_cublas_init(void);}
@end table
@node starpu_helper_cublas_shutdown
@subsection @code{starpu_helper_cublas_shutdown} -- Deinitialize CUBLAS on every CUDA device
@table @asis
@item @emph{Description}:
This function synchronously deinitializes the CUBLAS library on every CUDA device.
@item @emph{Prototype}:
@code{void starpu_helper_cublas_shutdown(void);}
@end table
@node OpenCL extensions
@section OpenCL extensions
@menu
* Enabling OpenCL:: Enabling OpenCL
* Compiling OpenCL kernels:: Compiling OpenCL kernels
* Loading OpenCL kernels:: Loading OpenCL kernels
* OpenCL statistics:: Collecting statistics from OpenCL
@end menu
@node Enabling OpenCL
@subsection Enabling OpenCL
On GPU devices which can run both CUDA and OpenCL, CUDA will be enabled by
default. To enable OpenCL, you need either to disable CUDA when configuring
StarPU:
@example
% ./configure --disable-cuda
@end example
or when running applications:
@example
% STARPU_NCUDA=0 ./application
@end example
OpenCL will automatically be started on any device not yet used by CUDA. So
on a machine with 4 GPUs, it is possible to enable CUDA on 2 devices and
OpenCL on the 2 other devices as follows:
@example
% STARPU_NCUDA=2 ./application
@end example
@node Compiling OpenCL kernels
@subsection Compiling OpenCL kernels
Source codes for OpenCL kernels can be stored in a file or in a string. StarPU
provides functions to build the program executable for each available OpenCL
device as a @code{cl_program} object. This program executable can then be
loaded within a specific queue as explained in the next section. These are
only helpers; applications can also fill a @code{starpu_opencl_program} array
by hand for more advanced uses (e.g. to run different programs on the
different OpenCL devices, for relocation purposes).
@menu
* starpu_opencl_load_opencl_from_file:: Compiling OpenCL source code
* starpu_opencl_load_opencl_from_string:: Compiling OpenCL source code
* starpu_opencl_unload_opencl:: Releasing OpenCL code
@end menu
@node starpu_opencl_load_opencl_from_file
@subsubsection @code{starpu_opencl_load_opencl_from_file} -- Compiling OpenCL source code
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_opencl_load_opencl_from_file(char *source_file_name, struct starpu_opencl_program *opencl_programs, const char* build_options);}
@end table
@node starpu_opencl_load_opencl_from_string
@subsubsection @code{starpu_opencl_load_opencl_from_string} -- Compiling OpenCL source code
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_opencl_load_opencl_from_string(char *opencl_program_source, struct starpu_opencl_program *opencl_programs, const char* build_options);}
@end table
@node starpu_opencl_unload_opencl
@subsubsection @code{starpu_opencl_unload_opencl} -- Releasing OpenCL code
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_opencl_unload_opencl(struct starpu_opencl_program *opencl_programs);}
@end table
@node Loading OpenCL kernels
@subsection Loading OpenCL kernels
@menu
* starpu_opencl_load_kernel:: Loading a kernel
* starpu_opencl_release_kernel:: Releasing a kernel
@end menu
@node starpu_opencl_load_kernel
@subsubsection @code{starpu_opencl_load_kernel} -- Loading a kernel
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_opencl_load_kernel(cl_kernel *kernel, cl_command_queue *queue, struct starpu_opencl_program *opencl_programs, char *kernel_name, int devid);}
@end table
@node starpu_opencl_release_kernel
@subsubsection @code{starpu_opencl_release_kernel} -- Releasing a kernel
@table @asis
@item @emph{Description}:
TODO
@item @emph{Prototype}:
@code{int starpu_opencl_release_kernel(cl_kernel kernel);}
@end table
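A sketch of usage within an OpenCL codelet, assuming @code{opencl_programs}
was filled beforehand by @code{starpu_opencl_load_opencl_from_file} and that
the device identifier of the current worker is obtained with
@code{starpu_worker_get_devid}:
@cartouche
@example
cl_kernel kernel;
cl_command_queue queue;
int devid = starpu_worker_get_devid(starpu_worker_get_id());

starpu_opencl_load_kernel(&kernel, &queue, &opencl_programs,
                          "vector_scal", devid);
/* ... set the arguments and enqueue the kernel ... */
starpu_opencl_release_kernel(kernel);
@end example
@end cartouche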
@node OpenCL statistics
@subsection OpenCL statistics
@menu
* starpu_opencl_collect_stats:: Collect statistics on a kernel execution
@end menu
@node starpu_opencl_collect_stats
@subsubsection @code{starpu_opencl_collect_stats} -- Collect statistics on a kernel execution
@table @asis
@item @emph{Description}:
After termination of the kernels, the OpenCL codelet should call this function
and pass it the event returned by @code{clEnqueueNDRangeKernel}, to let StarPU
collect statistics about the kernel execution (used cycles, consumed power).
@item @emph{Prototype}:
@code{int starpu_opencl_collect_stats(cl_event event);}
@end table
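A sketch, assuming @code{queue}, @code{kernel}, @code{global} and @code{local}
were set up as in the previous section:
@cartouche
@example
cl_event event;
clEnqueueNDRangeKernel(queue, kernel, 1, NULL,
                       &global, &local, 0, NULL, &event);
clWaitForEvents(1, &event);
starpu_opencl_collect_stats(event);
clReleaseEvent(event);
@end example
@end cartouche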
@node Cell extensions
@section Cell extensions
Nothing yet.
@node Miscellaneous helpers
@section Miscellaneous helpers
@menu
* starpu_data_cpy:: Copy a data handle into another data handle
* starpu_execute_on_each_worker:: Execute a function on a subset of workers
@end menu
@node starpu_data_cpy
@subsection @code{starpu_data_cpy} -- Copy a data handle into another data handle
@table @asis
@item @emph{Description}:
Copy the content of @code{src_handle} into @code{dst_handle}. The
@code{asynchronous} parameter indicates whether the function should block or
not. In the case of an asynchronous call, it is possible to synchronize with
the termination of this operation either by the means of implicit
dependencies (if enabled) or by calling @code{starpu_task_wait_for_all()}. If
@code{callback_func} is not @code{NULL}, this callback function is executed
after the handle has been copied, and it is given the @code{callback_arg}
pointer as argument.
@item @emph{Prototype}:
@code{int starpu_data_cpy(starpu_data_handle dst_handle, starpu_data_handle src_handle, int asynchronous, void (*callback_func)(void*), void *callback_arg);}
@end table
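A minimal sketch, performing a synchronous copy between two registered
handles:
@cartouche
@example
/* Blocks until src_handle has been copied into dst_handle. */
starpu_data_cpy(dst_handle, src_handle, 0, NULL, NULL);
@end example
@end cartouche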
@node starpu_execute_on_each_worker
@subsection @code{starpu_execute_on_each_worker} -- Execute a function on a subset of workers
@table @asis
@item @emph{Description}:
When calling this function, the offloaded function specified by the first
argument is executed by every StarPU worker that may execute the function.
The second argument is passed to the offloaded function. The last argument
specifies on which types of processing units the function should be executed.
Similarly to the @code{where} field of the @code{starpu_codelet} structure, it
is possible to specify that the function should be executed on every CUDA
device and every CPU by passing @code{STARPU_CPU|STARPU_CUDA}. This function
blocks until the function has been executed on every appropriate processing
unit, so it must not be called from a callback function, for instance.
@item @emph{Prototype}:
@code{void starpu_execute_on_each_worker(void (*func)(void *), void *arg, uint32_t where);}
@end table
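A sketch running a hypothetical initialization function on every CPU and CUDA
worker:
@cartouche
@example
void init_worker(void *arg)
@{
    fprintf(stderr, "init on worker %d\n", starpu_worker_get_id());
@}

starpu_execute_on_each_worker(init_worker, NULL,
                              STARPU_CPU|STARPU_CUDA);
@end example
@end cartouche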
@c ---------------------------------------------------------------------
@c Advanced Topics
@c ---------------------------------------------------------------------
@node Advanced Topics
@chapter Advanced Topics
@menu
* Defining a new data interface::
* Defining a new scheduling policy::
@end menu
@node Defining a new data interface
@section Defining a new data interface
@menu
* struct starpu_data_interface_ops_t:: Per-interface methods
* struct starpu_data_copy_methods:: Per-interface data transfer methods
* An example of data interface:: An example of data interface
@end menu
@c void *starpu_data_get_interface_on_node(starpu_data_handle handle, unsigned memory_node); TODO
@node struct starpu_data_interface_ops_t
@subsection @code{struct starpu_data_interface_ops_t} -- Per-interface methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table
@node struct starpu_data_copy_methods
@subsection @code{struct starpu_data_copy_methods} -- Per-interface data transfer methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table
@node An example of data interface
@subsection An example of data interface
@table @asis
TODO
@end table
@node Defining a new scheduling policy
@section Defining a new scheduling policy
TODO
A full example showing how to define a new scheduling policy is available in
the StarPU sources in the directory @code{examples/scheduler/}.
@menu
* struct starpu_sched_policy_s::
* starpu_worker_set_sched_condition::
* starpu_sched_set_min_priority:: Set the minimum priority level
* starpu_sched_set_max_priority:: Set the maximum priority level
* starpu_push_local_task:: Assign a task to a worker
* Source code::
@end menu
@node struct starpu_sched_policy_s
@subsection @code{struct starpu_sched_policy_s} -- Scheduler methods
@table @asis
@item @emph{Description}:
This structure contains all the methods that implement a scheduling policy.
An application may specify which scheduling strategy to use in the
@code{sched_policy} field of the @code{starpu_conf} structure passed to the
@code{starpu_init} function.
@item @emph{Fields}:
@table @asis
@item @code{init_sched}:
Initialize the scheduling policy.
@item @code{deinit_sched}:
Cleanup the scheduling policy.
@item @code{push_task}:
Insert a task into the scheduler.
@item @code{push_prio_task}:
Insert a priority task into the scheduler.
@item @code{push_prio_notify}:
Notify the scheduler that a task was pushed on the worker. This method is
called when a task that was explicitly assigned to a worker is scheduled. It
therefore makes it possible to keep the state of the scheduler coherent even
when StarPU bypasses the scheduling strategy.
@item @code{pop_task}:
Get a task from the scheduler. The mutex associated to the worker is already
taken when this method is called. If this method is defined as @code{NULL},
the worker will only execute tasks from its local queue. In this case, the
@code{push_task} method should use the @code{starpu_push_local_task} method to
assign tasks to the different workers.
@item @code{pop_every_task}:
Remove all available tasks from the scheduler (tasks are chained by the means
of the @code{prev} and @code{next} fields of the @code{starpu_task}
structure). The mutex associated to the worker is already taken when this
method is called.
@item @code{post_exec_hook} (optional):
This method is called every time a task has been executed.
@item @code{policy_name}:
Name of the policy (optional).
@item @code{policy_description}:
Description of the policy (optional).
@end table
@end table
@node starpu_worker_set_sched_condition
@subsection @code{starpu_worker_set_sched_condition} -- Specify the condition variable associated to a worker
@table @asis
@item @emph{Description}:
When there is no available task for a worker, StarPU blocks this worker on a
condition variable. This function specifies which condition variable (and the
associated mutex) should be used to block (and to wake up) a worker. Note that
multiple workers may use the same condition variable. For instance, in the
case of a scheduling strategy with a single task queue, the same condition
variable would be used to block and wake up all workers. The initialization
method of a scheduling strategy (@code{init_sched}) must call this function
once per worker.
@item @emph{Prototype}:
@code{void starpu_worker_set_sched_condition(int workerid, pthread_cond_t *sched_cond, pthread_mutex_t *sched_mutex);}
@end table
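For instance, the @code{init_sched} method of a single-queue strategy might
look as follows (a sketch, assuming the @code{init_sched} prototype used by
the example scheduler shipped in @code{examples/scheduler/}):
@cartouche
@example
static pthread_cond_t sched_cond;
static pthread_mutex_t sched_mutex;

static void init_dummy_sched(struct starpu_machine_topology_s *topology,
                             struct starpu_sched_policy_s *policy)
@{
    pthread_cond_init(&sched_cond, NULL);
    pthread_mutex_init(&sched_mutex, NULL);

    /* All workers share the same condition variable. */
    unsigned worker;
    for (worker = 0; worker < topology->nworkers; worker++)
        starpu_worker_set_sched_condition(worker,
                                          &sched_cond, &sched_mutex);
@}
@end example
@end cartouche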
@node starpu_sched_set_min_priority
@subsection @code{starpu_sched_set_min_priority}
@table @asis
@item @emph{Description}:
Defines the minimum priority level supported by the scheduling policy. The
default minimum priority level is the same as the default priority level,
which is 0 by convention. The application may access that value by calling
the @code{starpu_sched_get_min_priority} function. This function should only
be called from the initialization method of the scheduling policy, and should
not be used directly from the application.
@item @emph{Prototype}:
@code{void starpu_sched_set_min_priority(int min_prio);}
@end table
@node starpu_sched_set_max_priority
@subsection @code{starpu_sched_set_max_priority}
@table @asis
@item @emph{Description}:
Defines the maximum priority level supported by the scheduling policy. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_get_max_priority} function. This function
should only be called from the initialization method of the scheduling
policy, and should not be used directly from the application.
@item @emph{Prototype}:
@code{void starpu_sched_set_max_priority(int max_prio);}
@end table
@node starpu_push_local_task
@subsection @code{starpu_push_local_task}
@table @asis
@item @emph{Description}:
The scheduling policy may put tasks directly into a worker's local queue so
that it is not always necessary to create its own queue when the local queue
is sufficient. If @code{back} is not zero, the task is put at the back of the
queue where the worker will pop tasks first. Setting @code{back} to 0
therefore ensures a FIFO ordering.
@item @emph{Prototype}:
@code{int starpu_push_local_task(int workerid, struct starpu_task *task, int back);}
@end table
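For instance, a @code{push_task} method that assigns every task to worker 0
in FIFO order could be sketched as:
@cartouche
@example
static int push_task_dummy(struct starpu_task *task)
@{
    /* back = 0 preserves FIFO ordering on the local queue. */
    return starpu_push_local_task(0, task, 0);
@}
@end example
@end cartouche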
@node Source code
@subsection Source code
@cartouche
@smallexample
static struct starpu_sched_policy_s dummy_sched_policy = @{
    .init_sched = init_dummy_sched,
    .deinit_sched = deinit_dummy_sched,
    .push_task = push_task_dummy,
    .push_prio_task = NULL,
    .pop_task = pop_task_dummy,
    .post_exec_hook = NULL,
    .pop_every_task = NULL,
    .policy_name = "dummy",
    .policy_description = "dummy scheduling strategy"
@};
@end smallexample
@end cartouche
@c ---------------------------------------------------------------------
@c Appendices
@c ---------------------------------------------------------------------
@c ---------------------------------------------------------------------
@c Full source code for the 'Scaling a Vector' example
@c ---------------------------------------------------------------------
@node Full source code for the 'Scaling a Vector' example
@appendix Full source code for the 'Scaling a Vector' example
@menu
* Main application::
* CPU Kernel::
* CUDA Kernel::
* OpenCL Kernel::
@end menu
@node Main application
@section Main application
@smallexample
@include vector_scal_c.texi
@end smallexample
@node CPU Kernel
@section CPU Kernel
@smallexample
@include vector_scal_cpu.texi
@end smallexample
@node CUDA Kernel
@section CUDA Kernel
@smallexample
@include vector_scal_cuda.texi
@end smallexample
@node OpenCL Kernel
@section OpenCL Kernel
@menu
* Invoking the kernel::
* Source of the kernel::
@end menu
@node Invoking the kernel
@subsection Invoking the kernel
@smallexample
@include vector_scal_opencl.texi
@end smallexample
@node Source of the kernel
@subsection Source of the kernel
@smallexample
@include vector_scal_opencl_codelet.texi
@end smallexample
@c
@c Indices.
@c
@node Function Index
@unnumbered Function Index
@printindex fn
@bye