\input texinfo @c -*-texinfo-*-
@c %**start of header
@setfilename starpu.info
@settitle StarPU Handbook
@c %**end of header
@include version.texi
@setchapternewpage odd
@titlepage
@title StarPU Handbook
@subtitle for StarPU @value{VERSION}
@page
@vskip 0pt plus 1fill
@comment For the @value{version-GCC} Version*
@end titlepage
@c @summarycontents
@contents
@page

@node Top
@top Preface
@cindex Preface

This manual documents the usage of StarPU version @value{VERSION}. It
was last updated on @value{UPDATED}.

@comment
@comment  When you add a new menu item, please keep the right hand
@comment  aligned to the same column. Do not use tabs. This provides
@comment  better formatting.
@comment
@menu
* Introduction::                A basic introduction to using StarPU
* Installing StarPU::           How to configure, build and install StarPU
* Using StarPU::                How to run a StarPU application
* Basic Examples::              Basic examples of the use of StarPU
* Performance optimization::    How to optimize performance with StarPU
* Performance feedback::        Performance debugging tools
* StarPU MPI support::          How to combine StarPU with MPI
* Configuring StarPU::          How to configure StarPU
* StarPU API::                  The API to use StarPU
* Advanced Topics::             Advanced use of StarPU
* Full source code for the 'Scaling a Vector' example::
* Function Index::              Index of C functions.
@end menu

@c ---------------------------------------------------------------------
@c Introduction to StarPU
@c ---------------------------------------------------------------------

@node Introduction
@chapter Introduction to StarPU

@menu
* Motivation::                  Why StarPU?
* StarPU in a Nutshell::        The Fundamentals of StarPU
@end menu

@node Motivation
@section Motivation

@c complex machines with heterogeneous cores/devices
The use of specialized hardware such as accelerators or coprocessors offers an
interesting approach to overcome the physical limits encountered by processor
architects. As a result, many machines are now equipped with one or several
accelerators (e.g. a GPU), in addition to the usual processor(s). While a lot of
effort has been devoted to offloading computation onto such accelerators, very
little attention has been paid to portability concerns on the one hand, and to the
possibility of having heterogeneous accelerators and processors interact on the other hand.

StarPU is a runtime system that offers support for heterogeneous multicore
architectures. It not only offers a unified view of the computational resources
(i.e. CPUs and accelerators at the same time), but it also takes care of
efficiently mapping and executing tasks on a heterogeneous machine while
transparently handling low-level issues such as data transfers in a portable
fashion.

@c this leads to a complicated distributed memory design
@c which is not (easily) manageable by hand
@c added value/benefits of StarPU
@c   - portability
@c   - scheduling, perf. portability

@node StarPU in a Nutshell
@section StarPU in a Nutshell

@menu
* Codelet and Tasks::
* StarPU Data Management Library::
* Glossary::
* Research Papers::
@end menu

From a programming point of view, StarPU is not a new language but a library
that executes tasks explicitly submitted by the application. The data that a
task manipulates are automatically transferred onto the accelerator so that the
programmer does not have to take care of complex data movements. StarPU also
takes particular care of scheduling those tasks efficiently and allows
scheduling experts to implement custom scheduling policies in a portable
fashion.

@c explain the notion of codelet and task (i.e. g(A, B)
@node Codelet and Tasks
@subsection Codelet and Tasks

One of StarPU's primary data structures is the @b{codelet}. A codelet describes a
computational kernel that can possibly be implemented on multiple architectures
such as a CPU, a CUDA device or a Cell's SPU.

@c TODO insert illustration f : f_spu, f_cpu, ...

Another important data structure is the @b{task}. Executing a StarPU task
consists in applying a codelet on a data set, on one of the architectures on
which the codelet is implemented. A task thus describes the codelet that it
uses, but also which data are accessed, and how they are
accessed during the computation (read and/or write).

StarPU tasks are asynchronous: submitting a task to StarPU is a non-blocking
operation. The task structure can also specify a @b{callback} function that is
called once StarPU has properly executed the task. It also contains optional
fields that the application may use to give hints to the scheduler (such as
priority levels).

By default, task dependencies are inferred from data dependencies (sequential
coherency) by StarPU. The application can however disable sequential coherency
for some data, and express dependencies by hand.

A task may be identified by a unique 64-bit number chosen by the application,
which we refer to as a @b{tag}.
Task dependencies can be enforced by hand either by means of callback functions, by
submitting other tasks, or by expressing dependencies
between tags (which can thus correspond to tasks that have not been submitted
yet).
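
For instance, @code{starpu_tag_declare_deps} can be used to make one tag
depend on others; a minimal sketch, assuming the two tasks in question set
their @code{tag_id} field to the values @code{0x32} and @code{0x52}:

@cartouche
@smallexample
/* Tag 0x1 will not be ready until tags 0x32 and 0x52 are,
 * i.e. until the corresponding tasks have completed. */
starpu_tag_declare_deps((starpu_tag_t)0x1,
                        2,  /* number of dependencies */
                        (starpu_tag_t)0x32, (starpu_tag_t)0x52);
@end smallexample
@end cartouche
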
@c TODO insert illustration f(Ar, Brw, Cr) + ..

@c DSM
@node StarPU Data Management Library
@subsection StarPU Data Management Library

Because StarPU schedules tasks at runtime, data transfers have to be
done automatically and ``just-in-time'' between processing units,
relieving the application programmer from explicit data transfers.
Moreover, to avoid unnecessary transfers, StarPU keeps data
where it was last needed, even if it was modified there, and it
allows multiple copies of the same data to reside at the same time on
several processing units as long as it is not modified.

@node Glossary
@subsection Glossary

A @b{codelet} records pointers to various implementations of the same
theoretical function.

A @b{memory node} can be either the main RAM or GPU-embedded memory.

A @b{bus} is a link between memory nodes.

A @b{data handle} keeps track of replicates of the same data (@b{registered} by the
application) over various memory nodes. The data management library manages
keeping them coherent.

The @b{home} memory node of a data handle is the memory node from which the data
was registered (usually the main memory node).

A @b{task} represents a scheduled execution of a codelet on some data handles.

A @b{tag} is a rendez-vous point. Tasks typically have their own tag, and can
depend on other tags. The value is chosen by the application.

A @b{worker} executes tasks. There is typically one per CPU computation core and
one per accelerator (for which a whole CPU core is dedicated).

A @b{driver} drives a given kind of worker. There are currently CPU, CUDA,
OpenCL and Gordon drivers. They usually start several workers to actually drive
them.

A @b{performance model} is a (dynamic or static) model of the performance of a
given codelet. Codelets can have an execution time performance model as well as
a power consumption performance model.

A data @b{interface} describes the layout of the data: for a vector, a pointer
to the start, the number of elements and the size of elements; for a matrix, a
pointer to the start, the number of elements per row, the offset between rows,
and the size of each element; etc. To access their data, codelet functions are
given interfaces for the local memory node replicates of the data handles of the
scheduled task.

@b{Partitioning} data means dividing the data of a given data handle (called
@b{father}) into a series of @b{children} data handles which designate various
portions of the former.

A @b{filter} is the function which computes children data handles from a father
data handle, and thus describes how the partitioning should be done (horizontal,
vertical, etc.).

@b{Acquiring} a data handle can be done from the main application, to safely
access the data of a data handle from its home node, without having to
unregister it.
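
For instance, a minimal sketch of the main application reading the (possibly
accelerator-modified) content of a registered vector from its home node:

@cartouche
@smallexample
/* Wait for pending tasks on the handle and fetch the data home. */
starpu_data_acquire(vector_handle, STARPU_R);
/* vector[] can now be read safely from the application. */
starpu_data_release(vector_handle);
@end smallexample
@end cartouche
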
@node Research Papers
@subsection Research Papers

Research papers about StarPU can be found at
@indicateurl{http://runtime.bordeaux.inria.fr/Publis/Keyword/STARPU.html}.
A good overview is notably available in the research report
@indicateurl{http://hal.archives-ouvertes.fr/inria-00467677}.

@c ---------------------------------------------------------------------
@c Installing StarPU
@c ---------------------------------------------------------------------

@node Installing StarPU
@chapter Installing StarPU

@menu
* Downloading StarPU::
* Configuration of StarPU::
* Building and Installing StarPU::
@end menu

StarPU can be built and installed by the standard means of the GNU
autotools. The following chapter briefly describes how these tools
can be used to install StarPU.

@node Downloading StarPU
@section Downloading StarPU

@menu
* Getting Sources::
* Optional dependencies::
@end menu

@node Getting Sources
@subsection Getting Sources

The simplest way to get the StarPU sources is to download the latest official
release tarball from @indicateurl{https://gforge.inria.fr/frs/?group_id=1570},
or the latest nightly snapshot from
@indicateurl{http://starpu.gforge.inria.fr/testing/}. The following documents
how to get the very latest version from the Subversion repository itself; this
should be needed only if you require the very latest changes (i.e. less than a
day old!).

The source code is managed by a Subversion server hosted by the
InriaGforge. To get the source code, you need:

@itemize
@item
To install the Subversion client if it is
not already available on your system. The software can be obtained from
@indicateurl{http://subversion.tigris.org}. If you are running
on Windows, you will probably prefer to use TortoiseSVN from
@indicateurl{http://tortoisesvn.tigris.org/}.

@item
You can check out the project's SVN repository through anonymous
access. This will provide you with read access to the
repository.

If you need write access to the StarPU project, you can also choose to
become a member of the project @code{starpu}. For this, you first need to get
an account on the gForge server. You can then send a request to join the project
(@indicateurl{https://gforge.inria.fr/project/request.php?group_id=1570}).

@item
More information on how to get a gForge account, to become a member of
a project, or on any other related task can be obtained from the
InriaGforge at @indicateurl{https://gforge.inria.fr/}. The most important
thing is to upload your public SSH key on the gForge server (see the
FAQ at @indicateurl{http://siteadmin.gforge.inria.fr/FAQ.html#Q6} for
instructions).
@end itemize

You can now check out the latest version from the Subversion server:
@itemize
@item
using the anonymous access via svn:
@example
% svn checkout svn://scm.gforge.inria.fr/svn/starpu/trunk
@end example
@item
using the anonymous access via https:
@example
% svn checkout --username anonsvn https://scm.gforge.inria.fr/svn/starpu/trunk
@end example
The password is @code{anonsvn}.
@item
using your gForge account
@example
% svn checkout svn+ssh://<login>@@scm.gforge.inria.fr/svn/starpu/trunk
@end example
@end itemize

The following step requires the availability of @code{autoconf} and
@code{automake} to generate the @code{./configure} script. This is
done by calling @code{./autogen.sh}. The required version for
@code{autoconf} is 2.60 or higher. You will also need @code{makeinfo}.

@example
% ./autogen.sh
@end example

If the autotools are not available on your machine or not recent
enough, you can choose to download the latest nightly tarball, which
is provided with a @code{configure} script.

@example
% wget http://starpu.gforge.inria.fr/testing/starpu-nightly-latest.tar.gz
@end example

@node Optional dependencies
@subsection Optional dependencies

The topology discovery library @code{hwloc} is not mandatory to use StarPU
but strongly recommended. It allows StarPU to increase performance and to
perform some topology-aware scheduling.

@code{hwloc} is available in major distributions and for most OSes and can be
downloaded from @indicateurl{http://www.open-mpi.org/software/hwloc}.

@node Configuration of StarPU
@section Configuration of StarPU

@menu
* Generating Makefiles and configuration scripts::
* Running the configuration::
@end menu

@node Generating Makefiles and configuration scripts
@subsection Generating Makefiles and configuration scripts

This step is not necessary when using the tarball releases of StarPU. If you
are using the source code from the svn repository, you first need to generate
the configure scripts and the Makefiles.

@example
% ./autogen.sh
@end example

@node Running the configuration
@subsection Running the configuration

@example
% ./configure
@end example

Details about options that are useful to give to @code{./configure} are given in
@ref{Compilation configuration}.

@node Building and Installing StarPU
@section Building and Installing StarPU

@menu
* Building::
* Sanity Checks::
* Installing::
@end menu

@node Building
@subsection Building

@example
% make
@end example

@node Sanity Checks
@subsection Sanity Checks

In order to make sure that StarPU is working properly on the system, it is also
possible to run a test suite.

@example
% make check
@end example

@node Installing
@subsection Installing

In order to install StarPU at the location that was specified during
configuration:

@example
% make install
@end example

@c ---------------------------------------------------------------------
@c Using StarPU
@c ---------------------------------------------------------------------

@node Using StarPU
@chapter Using StarPU

@menu
* Setting flags for compiling and linking applications::
* Running a basic StarPU application::
* Kernel threads started by StarPU::
* Using accelerators::
@end menu

@node Setting flags for compiling and linking applications
@section Setting flags for compiling and linking applications

Compiling and linking an application against StarPU may require specific
flags or libraries (for instance @code{CUDA} or @code{libspe2}).
To this end, it is possible to use the @code{pkg-config} tool.

If StarPU was not installed at some standard location, the path of StarPU's
library must be specified in the @code{PKG_CONFIG_PATH} environment variable so
that @code{pkg-config} can find it. For example if StarPU was installed in
@code{$prefix_dir}:

@example
% PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$prefix_dir/lib/pkgconfig
@end example

The flags required to compile or link against StarPU are then
accessible with the following commands:

@example
% pkg-config --cflags libstarpu  # options for the compiler
% pkg-config --libs libstarpu    # options for the linker
@end example

@node Running a basic StarPU application
@section Running a basic StarPU application

Basic examples using StarPU have been built in the directory
@code{$prefix_dir/lib/starpu/examples/}. You can for example run the
example @code{vector_scal}.

@example
% $prefix_dir/lib/starpu/examples/vector_scal
BEFORE : First element was 1.000000
AFTER First element is 3.140000
%
@end example

When StarPU is used for the first time, the directory
@code{$HOME/.starpu/} is created; performance models will be stored in
that directory.

Please note that buses are benchmarked when StarPU is launched for the
first time. This may take a few minutes, or less if @code{hwloc} is
installed. This step is done only once per user and per machine.

@node Kernel threads started by StarPU
@section Kernel threads started by StarPU

TODO: StarPU starts one thread per CPU core and binds them there, uses one of
them per GPU. The application is not supposed to do computations in its own
threads. TODO: add a StarPU function to bind an application thread (e.g. the
main thread) to a dedicated core (and thus disable the corresponding StarPU CPU
worker).

@node Using accelerators
@section Using accelerators

When both CUDA and OpenCL drivers are enabled, StarPU will launch an
OpenCL worker for NVIDIA GPUs only if CUDA is not already running on them.
This design choice was necessary as OpenCL and CUDA cannot run at the
same time on the same NVIDIA GPU, as there is currently no interoperability
between them.

Details on how to specify devices running OpenCL and the ones running
CUDA are given in @ref{Enabling OpenCL}.
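
For instance, a quick way to experiment with this (assuming your application
binary is called @code{my_app}) is to disable the CUDA workers from the
environment, so that the NVIDIA GPUs become available to the OpenCL driver:

@smallexample
% STARPU_NCUDA=0 ./my_app
@end smallexample
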
@c ---------------------------------------------------------------------
@c Basic Examples
@c ---------------------------------------------------------------------

@node Basic Examples
@chapter Basic Examples

@menu
* Compiling and linking options::
* Hello World::                 Submitting Tasks
* Scaling a Vector::            Manipulating Data
* Vector Scaling on an Hybrid CPU/GPU Machine::  Handling Heterogeneous Architectures
* Task and Worker Profiling::
* Partitioning Data::           Partitioning Data
* Performance model example::
* Theoretical lower bound on execution time::
* Insert Task Utility::
* More examples::               More examples shipped with StarPU
* Debugging::                   When things go wrong.
@end menu

@node Compiling and linking options
@section Compiling and linking options

Let's suppose StarPU has been installed in the directory
@code{$STARPU_DIR}. As explained in @ref{Setting flags for compiling and linking applications},
the variable @code{PKG_CONFIG_PATH} needs to be set. It is also
necessary to set the variable @code{LD_LIBRARY_PATH} to locate dynamic
libraries at runtime.

@example
% PKG_CONFIG_PATH=$STARPU_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
% LD_LIBRARY_PATH=$STARPU_DIR/lib:$LD_LIBRARY_PATH
@end example

The Makefile could for instance contain the following lines to define which
options must be given to the compiler and to the linker:

@cartouche
@example
CFLAGS  += $$(pkg-config --cflags libstarpu)
LDFLAGS += $$(pkg-config --libs libstarpu)
@end example
@end cartouche

@node Hello World
@section Hello World

@menu
* Required Headers::
* Defining a Codelet::
* Submitting a Task::
* Execution of Hello World::
@end menu

In this section, we show how to implement a simple program that submits a task to StarPU.

@node Required Headers
@subsection Required Headers

The @code{starpu.h} header should be included in any code using StarPU.

@cartouche
@smallexample
#include <starpu.h>
@end smallexample
@end cartouche

@node Defining a Codelet
@subsection Defining a Codelet

@cartouche
@smallexample
struct params @{
    int i;
    float f;
@};

void cpu_func(void *buffers[], void *cl_arg)
@{
    struct params *params = cl_arg;

    printf("Hello world (params = @{%i, %f@} )\n", params->i, params->f);
@}

starpu_codelet cl =
@{
    .where = STARPU_CPU,
    .cpu_func = cpu_func,
    .nbuffers = 0
@};
@end smallexample
@end cartouche

A codelet is a structure that represents a computational kernel. Such a codelet
may contain an implementation of the same kernel on different architectures
(e.g. CUDA, Cell's SPU, x86, ...).

The @code{nbuffers} field specifies the number of data buffers that are
manipulated by the codelet: here the codelet does not access or modify any data
that is controlled by our data management library. Note that the argument
passed to the codelet (the @code{cl_arg} field of the @code{starpu_task}
structure) does not count as a buffer since it is not managed by our data
management library, but just contains trivial parameters.

@c TODO need a crossref to the proper description of "where" see bla for more ...
We create a codelet which may only be executed on the CPUs. The @code{where}
field is a bitmask that defines where the codelet may be executed. Here, the
@code{STARPU_CPU} value means that only CPUs can execute this codelet
(@pxref{Codelets and Tasks} for more details on this field).

When a CPU core executes a codelet, it calls the @code{cpu_func} function,
which @emph{must} have the following prototype:

@code{void (*cpu_func)(void *buffers[], void *cl_arg);}

In this example, we can ignore the first argument of this function, which gives a
description of the input and output buffers (e.g. the size and the location of
the matrices), since there are none.
The second argument is a pointer to a buffer passed as an
argument to the codelet by means of the @code{cl_arg} field of the
@code{starpu_task} structure.

@c TODO rewrite so that it is a little clearer ?
Be aware that this may be a pointer to a
@emph{copy} of the actual buffer, and not the pointer given by the programmer:
if the codelet modifies this buffer, there is no guarantee that the initial
buffer will be modified as well. This for instance implies that the buffer
cannot be used as a synchronization medium. If synchronization is needed, data
has to be registered to StarPU, see @ref{Scaling a Vector}.

@node Submitting a Task
@subsection Submitting a Task

@cartouche
@smallexample
void callback_func(void *callback_arg)
@{
    printf("Callback function (arg %p)\n", callback_arg);
@}

int main(int argc, char **argv)
@{
    /* @b{initialize StarPU} */
    starpu_init(NULL);

    struct starpu_task *task = starpu_task_create();

    task->cl = &cl; /* @b{Pointer to the codelet defined above} */

    struct params params = @{ 1, 2.0f @};
    task->cl_arg = &params;
    task->cl_arg_size = sizeof(params);

    task->callback_func = callback_func;
    task->callback_arg = (void *)0x42;

    /* @b{starpu_task_submit will be a blocking call} */
    task->synchronous = 1;

    /* @b{submit the task to StarPU} */
    starpu_task_submit(task);

    /* @b{terminate StarPU} */
    starpu_shutdown();

    return 0;
@}
@end smallexample
@end cartouche

Before submitting any tasks to StarPU, @code{starpu_init} must be called. The
@code{NULL} argument specifies that we use the default configuration. Tasks cannot
be submitted after the termination of StarPU by a call to
@code{starpu_shutdown}.
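
In a real application, it is a good idea to check the value returned by
@code{starpu_init}; a minimal sketch:

@cartouche
@smallexample
int ret = starpu_init(NULL);
if (ret == -ENODEV)
@{
    /* No worker could be enabled: StarPU is unusable on this machine. */
    fprintf(stderr, "StarPU found no worker\n");
    return 77;
@}
@end smallexample
@end cartouche
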
In the example above, a task structure is allocated by a call to
@code{starpu_task_create}. This function only allocates and fills the
corresponding structure with the default settings (@pxref{Codelets and
Tasks, starpu_task_create}), but it does not submit the task to StarPU.

@c not really clear ;)
The @code{cl} field is a pointer to the codelet which the task will
execute: in other words, the codelet structure describes which computational
kernel should be offloaded on the different architectures, and the task
structure is a wrapper containing a codelet and the piece of data on which the
codelet should operate.

The optional @code{cl_arg} field is a pointer to a buffer (of size
@code{cl_arg_size}) with some parameters for the kernel
described by the codelet. For instance, if a codelet implements a computational
kernel that multiplies its input vector by a constant, the constant could be
specified by means of this buffer, instead of registering it as StarPU
data. It must however be noted that StarPU avoids making copies whenever possible
and rather passes the pointer as such, so the buffer pointed to must be
kept allocated until the task terminates, and if several tasks are submitted
with various parameters, each of them must be given a pointer to its own
buffer.
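
For instance, a sketch submitting several tasks, each with its own parameter
buffer (@code{NTASKS} is a constant assumed here; the array is static so that
it outlives the tasks):

@cartouche
@smallexample
static struct params task_params[NTASKS];  /* one argument buffer per task */
int j;
for (j = 0; j < NTASKS; j++)
@{
    struct starpu_task *t = starpu_task_create();
    t->cl = &cl;
    task_params[j].i = j;
    task_params[j].f = 2.0f * j;
    t->cl_arg = &task_params[j];       /* each task points to its own buffer */
    t->cl_arg_size = sizeof(task_params[j]);
    starpu_task_submit(t);
@}
@end smallexample
@end cartouche
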
Once a task has been executed, an optional callback function is called.
While the computational kernel could be offloaded on various architectures, the
callback function is always executed on a CPU. The @code{callback_arg}
pointer is passed as an argument of the callback. The prototype of a callback
function must be:

@code{void (*callback_function)(void *);}

If the @code{synchronous} field is non-zero, task submission will be
synchronous: the @code{starpu_task_submit} function will not return until the
task has been executed. Note that the @code{starpu_shutdown} method does not
guarantee that asynchronous tasks have been executed before it returns;
@code{starpu_task_wait_for_all} can be used to that effect, or data can be
unregistered (@code{starpu_data_unregister(vector_handle);}), which will
implicitly wait for all the tasks scheduled to work on it, unless explicitly
disabled thanks to @code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
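
For instance, a sketch of asynchronous submission, waiting for all tasks
before shutting down:

@cartouche
@smallexample
task->synchronous = 0;        /* submission returns immediately */
starpu_task_submit(task);
/* ... submit further tasks ... */
starpu_task_wait_for_all();   /* block until all submitted tasks complete */
starpu_shutdown();
@end smallexample
@end cartouche
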
  541. @node Execution of Hello World
  542. @subsection Execution of Hello World
  543. @smallexample
  544. % make hello_world
  545. cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) hello_world.c -o hello_world
  546. % ./hello_world
  547. Hello world (params = @{1, 2.000000@} )
  548. Callback function (arg 42)
  549. @end smallexample
  550. @node Scaling a Vector
  551. @section Manipulating Data: Scaling a Vector
  552. The previous example has shown how to submit tasks. In this section,
  553. we show how StarPU tasks can manipulate data. The full source code for
  554. this example is given in @ref{Full source code for the 'Scaling a Vector' example}.
  555. @menu
  556. * Source code of Vector Scaling::
  557. * Execution of Vector Scaling::
  558. @end menu
  559. @node Source code of Vector Scaling
  560. @subsection Source code of Vector Scaling
  561. Programmers can describe the data layout of their application so that StarPU is
  562. responsible for enforcing data coherency and availability across the machine.
  563. Instead of handling complex (and non-portable) mechanisms to perform data
  564. movements, programmers only declare which piece of data is accessed and/or
  565. modified by a task, and StarPU makes sure that when a computational kernel
  566. starts somewhere (e.g. on a GPU), its data are available locally.
  567. Before submitting those tasks, the programmer first needs to declare the
  568. different pieces of data to StarPU using the @code{starpu_*_data_register}
  569. functions. To ease the development of applications for StarPU, it is possible
  570. to describe multiple types of data layout. A type of data layout is called an
  571. @b{interface}. There are different predefined interfaces available in StarPU:
  572. here we will consider the @b{vector interface}.
  573. The following lines show how to declare an array of @code{NX} elements of type
  574. @code{float} using the vector interface:
  575. @cartouche
  576. @smallexample
  577. float vector[NX];
  578. starpu_data_handle vector_handle;
  579. starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
  580. sizeof(vector[0]));
  581. @end smallexample
  582. @end cartouche
  583. The first argument, called the @b{data handle}, is an opaque pointer which
  584. designates the array in StarPU. This is also the structure which is used to
  585. describe which data is used by a task. The second argument is the node number
  586. where the data originally resides. Here it is 0 since the @code{vector} array is in
  587. the main memory. Then comes the pointer @code{vector} where the data can be found in main memory,
  588. the number of elements in the vector and the size of each element.
  589. The following shows how to construct a StarPU task that will manipulate the
  590. vector and a constant factor.
  591. @cartouche
  592. @smallexample
  593. float factor = 3.14;
  594. struct starpu_task *task = starpu_task_create();
  595. task->cl = &cl; /* @b{Pointer to the codelet defined below} */
  596. task->buffers[0].handle = vector_handle; /* @b{First parameter of the codelet} */
  597. task->buffers[0].mode = STARPU_RW;
  598. task->cl_arg = &factor;
  599. task->cl_arg_size = sizeof(factor);
  600. task->synchronous = 1;
  601. starpu_task_submit(task);
  602. @end smallexample
  603. @end cartouche
  604. Since the factor is a mere constant float value parameter,
  605. it does not need a preliminary registration, and
  606. can just be passed through the @code{cl_arg} pointer like in the previous
  607. example. The vector parameter is described by its handle.
  608. There are two fields in each element of the @code{buffers} array.
  609. @code{handle} is the handle of the data, and @code{mode} specifies how the
  610. kernel will access the data (@code{STARPU_R} for read-only, @code{STARPU_W} for
  611. write-only and @code{STARPU_RW} for read and write access).
  612. The definition of the codelet can be written as follows:
  613. @cartouche
  614. @smallexample
  615. void scal_cpu_func(void *buffers[], void *cl_arg)
  616. @{
  617. unsigned i;
  618. float *factor = cl_arg;
  619. /* length of the vector */
  620. unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  621. /* CPU copy of the vector pointer */
  622. float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
  623. for (i = 0; i < n; i++)
  624. val[i] *= *factor;
  625. @}
  626. starpu_codelet cl = @{
  627. .where = STARPU_CPU,
  628. .cpu_func = scal_cpu_func,
  629. .nbuffers = 1
  630. @};
  631. @end smallexample
  632. @end cartouche
  633. The first argument is an array that gives
  634. a description of all the buffers passed in the @code{task->buffers}@ array. The
  635. size of this array is given by the @code{nbuffers} field of the codelet
  636. structure. For the sake of genericity, this array contains pointers to the
  637. different interfaces describing each buffer. In the case of the @b{vector
  638. interface}, the location of the vector (resp. its length) is accessible in the
  639. @code{ptr} (resp. @code{nx}) of this array. Since the vector is accessed in a
  640. read-write fashion, any modification will automatically affect future accesses
  641. to this vector made by other tasks.
  642. The second argument of the @code{scal_cpu_func} function contains a pointer to the
  643. parameters of the codelet (given in @code{task->cl_arg}), so that we read the
  644. constant factor from this pointer.
  645. @node Execution of Vector Scaling
  646. @subsection Execution of Vector Scaling
  647. @smallexample
  648. % make vector_scal
  649. cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) vector_scal.c -o vector_scal
  650. % ./vector_scal
  651. 0.000000 3.000000 6.000000 9.000000 12.000000
  652. @end smallexample
  653. @node Vector Scaling on an Hybrid CPU/GPU Machine
  654. @section Vector Scaling on an Hybrid CPU/GPU Machine
  655. Contrary to the previous examples, the task submitted in this example may not
  656. only be executed by the CPUs, but also by a CUDA device.
  657. @menu
  658. * Definition of the CUDA Kernel::
  659. * Definition of the OpenCL Kernel::
  660. * Definition of the Main Code::
  661. * Execution of Hybrid Vector Scaling::
  662. @end menu
  663. @node Definition of the CUDA Kernel
  664. @subsection Definition of the CUDA Kernel
  665. The CUDA implementation can be written as follows. It needs to be compiled with
  666. a CUDA compiler such as nvcc, the NVIDIA CUDA compiler driver. It must be noted
  667. that the vector pointer returned by STARPU_VECTOR_GET_PTR is here a pointer in GPU
  668. memory, so that it can be passed as such to the @code{vector_mult_cuda} kernel
  669. call.
  670. @cartouche
  671. @smallexample
  672. #include <starpu.h>
  673. static __global__ void vector_mult_cuda(float *val, unsigned n,
  674. float factor)
  675. @{
  676. unsigned i = blockIdx.x*blockDim.x + threadIdx.x;
  677. if (i < n)
  678. val[i] *= factor;
  679. @}
  680. extern "C" void scal_cuda_func(void *buffers[], void *_args)
  681. @{
  682. float *factor = (float *)_args;
  683. /* length of the vector */
  684. unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  685. /* CUDA copy of the vector pointer */
  686. float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
  687. unsigned threads_per_block = 64;
  688. unsigned nblocks = (n + threads_per_block-1) / threads_per_block;
  689. @i{ vector_mult_cuda<<<nblocks,threads_per_block, 0, starpu_cuda_get_local_stream()>>>(val, n, *factor);}
  690. @i{ cudaStreamSynchronize(starpu_cuda_get_local_stream());}
  691. @}
  692. @end smallexample
  693. @end cartouche
  694. @node Definition of the OpenCL Kernel
  695. @subsection Definition of the OpenCL Kernel
  696. The OpenCL implementation can be written as follows. StarPU provides
  697. tools to compile a OpenCL kernel stored in a file.
  698. @cartouche
  699. @smallexample
  700. __kernel void vector_mult_opencl(__global float* val, int nx, float factor)
  701. @{
  702. const int i = get_global_id(0);
  703. if (i < nx) @{
  704. val[i] *= factor;
  705. @}
  706. @}
  707. @end smallexample
  708. @end cartouche
  709. Similarly to CUDA, the pointer returned by @code{STARPU_VECTOR_GET_PTR} is here
  710. a device pointer, so that it is passed as such to the OpenCL kernel.
  711. @cartouche
  712. @smallexample
  713. #include <starpu.h>
  714. @i{#include <starpu_opencl.h>}
  715. @i{extern struct starpu_opencl_program programs;}
  716. void scal_opencl_func(void *buffers[], void *_args)
  717. @{
  718. float *factor = _args;
  719. @i{ int id, devid, err;}
  720. @i{ cl_kernel kernel;}
  721. @i{ cl_command_queue queue;}
  722. @i{ cl_event event;}
  723. /* length of the vector */
  724. unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  725. /* OpenCL copy of the vector pointer */
  726. cl_mem val = (cl_mem) STARPU_VECTOR_GET_PTR(buffers[0]);
  727. @i{ id = starpu_worker_get_id();}
  728. @i{ devid = starpu_worker_get_devid(id);}
  729. @i{ err = starpu_opencl_load_kernel(&kernel, &queue, &programs,}
  730. @i{ "vector_mult_opencl", devid); /* @b{Name of the codelet defined above} */}
  731. @i{ if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}
  732. @i{ err = clSetKernelArg(kernel, 0, sizeof(val), &val);}
  733. @i{ err |= clSetKernelArg(kernel, 1, sizeof(n), &n);}
  734. @i{ err |= clSetKernelArg(kernel, 2, sizeof(*factor), factor);}
  735. @i{ if (err) STARPU_OPENCL_REPORT_ERROR(err);}
  736. @i{ @{}
  737. @i{ size_t global=1;}
  738. @i{ size_t local=1;}
  739. @i{ err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 0, NULL, &event);}
  740. @i{ if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}
  741. @i{ @}}
  742. @i{ clFinish(queue);}
  743. @i{ starpu_opencl_collect_stats(event);}
  744. @i{ clReleaseEvent(event);}
  745. @i{ starpu_opencl_release_kernel(kernel);}
  746. @}
  747. @end smallexample
  748. @end cartouche
  749. @node Definition of the Main Code
  750. @subsection Definition of the Main Code
  751. The CPU implementation is the same as in the previous section.
  752. Here is the source of the main application. You can notice the value of the
  753. field @code{where} for the codelet. We specify
  754. @code{STARPU_CPU|STARPU_CUDA|STARPU_OPENCL} to indicate to StarPU that the codelet
  755. can be executed either on a CPU or on a CUDA or an OpenCL device.
@cartouche
@smallexample
#include <starpu.h>

#define NX 2048

extern void scal_cuda_func(void *buffers[], void *_args);
extern void scal_cpu_func(void *buffers[], void *_args);
extern void scal_opencl_func(void *buffers[], void *_args);

/* @b{Definition of the codelet} */
static starpu_codelet cl = @{
    .where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL, /* @b{It can be executed on a CPU,} */
                                  /* @b{on a CUDA device, or on an OpenCL device} */
    .cuda_func = scal_cuda_func,
    .cpu_func = scal_cpu_func,
    .opencl_func = scal_opencl_func,
    .nbuffers = 1
@};

#ifdef STARPU_USE_OPENCL
/* @b{The compiled version of the OpenCL program} */
struct starpu_opencl_program programs;
#endif

int main(int argc, char **argv)
@{
    float *vector;
    int i, ret;
    float factor=3.0;
    struct starpu_task *task;
    starpu_data_handle vector_handle;

    starpu_init(NULL);                          /* @b{Initialising StarPU} */

#ifdef STARPU_USE_OPENCL
    starpu_opencl_load_opencl_from_file(
            "examples/basic_examples/vector_scal_opencl_codelet.cl",
            &programs, NULL);
#endif

    vector = malloc(NX*sizeof(vector[0]));
    assert(vector);
    for(i=0 ; i<NX ; i++) vector[i] = i;
@end smallexample
@end cartouche
@cartouche
@smallexample
    /* @b{Registering data within StarPU} */
    starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector,
                                NX, sizeof(vector[0]));

    /* @b{Definition of the task} */
    task = starpu_task_create();
    task->cl = &cl;
    task->buffers[0].handle = vector_handle;
    task->buffers[0].mode = STARPU_RW;
    task->cl_arg = &factor;
    task->cl_arg_size = sizeof(factor);
@end smallexample
@end cartouche
@cartouche
@smallexample
    /* @b{Submitting the task} */
    ret = starpu_task_submit(task);
    if (ret == -ENODEV) @{
        fprintf(stderr, "No worker may execute this task\n");
        return 1;
    @}

@c TODO: Mmm, should rather be an unregistration with an implicit dependency, no?
    /* @b{Waiting for its termination} */
    starpu_task_wait_for_all();

    /* @b{Update the vector in RAM} */
    starpu_data_acquire(vector_handle, STARPU_R);
@end smallexample
@end cartouche
@cartouche
@smallexample
    /* @b{Access the data} */
    for(i=0 ; i<NX; i++) @{
        fprintf(stderr, "%f ", vector[i]);
    @}
    fprintf(stderr, "\n");

    /* @b{Release the RAM view of the data before unregistering it and shutting down StarPU} */
    starpu_data_release(vector_handle);
    starpu_data_unregister(vector_handle);
    starpu_shutdown();

    return 0;
@}
@end smallexample
@end cartouche
@node Execution of Hybrid Vector Scaling
@subsection Execution of Hybrid Vector Scaling

The Makefile given at the beginning of the section must be extended to
give the rules to compile the CUDA source code. Note that the source
file of the OpenCL kernel does not need to be compiled now: it will
be compiled at run-time when calling the function
@code{starpu_opencl_load_opencl_from_file()} (@pxref{starpu_opencl_load_opencl_from_file}).

@cartouche
@smallexample
CFLAGS  += $(shell pkg-config --cflags libstarpu)
LDFLAGS += $(shell pkg-config --libs libstarpu)
CC       = gcc

vector_scal: vector_scal.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o

%.o: %.cu
	nvcc $(CFLAGS) $< -c -o $@@

clean:
	rm -f vector_scal *.o
@end smallexample
@end cartouche

@smallexample
% make
@end smallexample

and to execute it, with the default configuration:

@smallexample
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

or for example, by disabling CPU devices:

@smallexample
% STARPU_NCPUS=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

or by disabling CUDA devices (which may then allow OpenCL devices to be used,
see @ref{Using accelerators}):

@smallexample
% STARPU_NCUDA=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
@node Task and Worker Profiling
@section Task and Worker Profiling

A full example showing how to use the profiling API is available in
the StarPU sources in the directory @code{examples/profiling/}.

@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->synchronous = 1;
/* We will destroy the task structure by hand so that we can
 * query the profiling info before the task is destroyed. */
task->destroy = 0;

/* Submit and wait for completion (since synchronous was set to 1) */
starpu_task_submit(task);

/* The task is finished, get profiling information */
struct starpu_task_profiling_info *info = task->profiling_info;

/* How much time did it take before the task started ? */
double delay = starpu_timing_timespec_delay_us(&info->submit_time, &info->start_time);

/* How long was the task execution ? */
double length = starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);

/* We don't need the task structure anymore */
starpu_task_destroy(task);
@end smallexample
@end cartouche
@cartouche
@smallexample
/* Display the occupancy of all workers during the test */
int worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
@{
    struct starpu_worker_profiling_info worker_info;
    int ret = starpu_worker_get_profiling_info(worker, &worker_info);
    STARPU_ASSERT(!ret);

    double total_time = starpu_timing_timespec_to_us(&worker_info.total_time);
    double executing_time = starpu_timing_timespec_to_us(&worker_info.executing_time);
    double sleeping_time = starpu_timing_timespec_to_us(&worker_info.sleeping_time);

    float executing_ratio = 100.0*executing_time/total_time;
    float sleeping_ratio = 100.0*sleeping_time/total_time;

    char workername[128];
    starpu_worker_get_name(worker, workername, 128);
    fprintf(stderr, "Worker %s:\n", workername);
    fprintf(stderr, "\ttotal time : %.2lf ms\n", total_time*1e-3);
    fprintf(stderr, "\texec time  : %.2lf ms (%.2f %%)\n", executing_time*1e-3,
            executing_ratio);
    fprintf(stderr, "\tblocked time : %.2lf ms (%.2f %%)\n", sleeping_time*1e-3,
            sleeping_ratio);
@}
@end smallexample
@end cartouche
@node Partitioning Data
@section Partitioning Data

An existing piece of data can be partitioned in sub parts to be used by different tasks, for instance:

@cartouche
@smallexample
int vector[NX];
starpu_data_handle handle;

/* Declare data to StarPU */
starpu_vector_data_register(&handle, 0, (uintptr_t)vector, NX, sizeof(vector[0]));

/* Partition the vector in PARTS sub-vectors */
starpu_filter f =
@{
    .filter_func = starpu_block_filter_func_vector,
    .nchildren = PARTS,
    .get_nchildren = NULL,
    .get_child_ops = NULL
@};
starpu_data_partition(handle, &f);
@end smallexample
@end cartouche
@cartouche
@smallexample
/* Submit a task on each sub-vector */
for (i=0; i<starpu_data_get_nb_children(handle); i++) @{
    /* Get subdata number i (there is only 1 dimension) */
    starpu_data_handle sub_handle = starpu_data_get_sub_data(handle, 1, i);
    struct starpu_task *task = starpu_task_create();

    task->buffers[0].handle = sub_handle;
    task->buffers[0].mode = STARPU_RW;
    task->cl = &cl;
    task->synchronous = 1;
    task->cl_arg = &factor;
    task->cl_arg_size = sizeof(factor);

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche
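
Once the tasks working on the sub-vectors have completed, the pieces can be
gathered back by giving @code{starpu_data_unpartition} the handle and the
memory node on which to gather them (0 for main memory). A brief sketch of
the typical end of such a sequence:

@cartouche
@smallexample
/* Gather the sub-vectors back into the initial vector in main memory
 * (memory node 0), then unregister it */
starpu_data_unpartition(handle, 0);
starpu_data_unregister(handle);
@end smallexample
@end cartouche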
Partitioning can be applied several times, see
@code{examples/basic_examples/mult.c} and @code{examples/filters/}.
@node Performance model example
@section Performance model example

To achieve good scheduling, StarPU scheduling policies need to be able to
estimate in advance the duration of a task. This is done by giving to codelets
a performance model. There are several kinds of performance models.

@itemize
@item
Providing an estimation from the application itself (@code{STARPU_COMMON} model type and @code{cost_model} field),
see for instance
@code{examples/common/blas_model.h} and @code{examples/common/blas_model.c}. It can also be provided for each architecture (@code{STARPU_PER_ARCH} model type and @code{per_arch} field).

@item
Measured at runtime (@code{STARPU_HISTORY_BASED} model type). This assumes that for a
given set of data input/output sizes, the performance will always be about the
same. This is very true for regular kernels on GPUs for instance (<0.1% error),
and just a bit less true on CPUs (~=1% error). This also assumes that there are
few different sets of data input/output sizes. StarPU will then keep record of
the average time of previous executions on the various processing units, and use
it as an estimation. History is done per task size, by using a hash of the input
and output sizes as an index.
It will also save it in @code{~/.starpu/sampling/codelets}
for further executions, and can be observed by using the
@code{starpu_perfmodel_display} command. The following is a small code example.
@cartouche
@smallexample
static struct starpu_perfmodel_t mult_perf_model = @{
    .type = STARPU_HISTORY_BASED,
    .symbol = "mult_perf_model"
@};

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = cpu_mult,
    .nbuffers = 3,
    /* for the scheduling policy to be able to use performance models */
    .model = &mult_perf_model
@};
@end smallexample
@end cartouche
@item
Measured at runtime and refined by regression (@code{STARPU_REGRESSION_*_BASED}
model type). This still assumes performance regularity, but can work
with various data input sizes, by applying regression over observed
execution times. @code{STARPU_REGRESSION_BASED} uses an a*n^b regression
form, while @code{STARPU_NL_REGRESSION_BASED} uses an a*n^b+c form (more precise than
@code{STARPU_REGRESSION_BASED}, but costs a lot more to compute).

@item
Provided explicitly by the application (@code{STARPU_PER_ARCH} model type): the
@code{.per_arch[i].cost_model} fields have to be filled with pointers to
functions which return the expected duration of the task in micro-seconds, one
per architecture (a sketch is given after this list).
@end itemize
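
As an illustration of the last item, here is a minimal sketch of a
@code{STARPU_PER_ARCH} model. The cost function name is ours, and its exact
prototype (assumed here to take the task's buffer descriptions) as well as the
@code{STARPU_CPU_DEFAULT} architecture index should be checked against the
headers of the installed StarPU version:

@cartouche
@smallexample
/* Hypothetical cost function: expected duration in micro-seconds on a
 * CPU core, as a function of the vector length (accessor name assumed) */
static double cpu_mult_cost(struct starpu_buffer_descr *descr)
@{
    unsigned n = starpu_vector_get_nx(descr[0].handle);
    return 0.01 * n;
@}

static struct starpu_perfmodel_t mult_per_arch_model = @{
    .type = STARPU_PER_ARCH
@};

/* e.g. in main(), before submitting tasks */
mult_per_arch_model.per_arch[STARPU_CPU_DEFAULT].cost_model = cpu_mult_cost;
@end smallexample
@end cartouche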
How to use schedulers which can benefit from such performance models is explained
in @ref{Task scheduling policy}.

The same can be done for task power consumption estimation, by setting the
@code{power_model} field the same way as the @code{model} field (a sketch is
given below). Note: for now, the application has to give to the power
consumption performance model a name which is different from the execution
time performance model.
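
For instance, reusing the history-based codelet above, a power model could be
attached as follows (the symbol name is ours; remember that it must differ
from the execution time model's symbol):

@cartouche
@smallexample
static struct starpu_perfmodel_t mult_power_model = @{
    .type = STARPU_HISTORY_BASED,
    /* must differ from the execution time model symbol */
    .symbol = "mult_power_model"
@};

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = cpu_mult,
    .nbuffers = 3,
    .model = &mult_perf_model,
    .power_model = &mult_power_model
@};
@end smallexample
@end cartouche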
@node Theoretical lower bound on execution time
@section Theoretical lower bound on execution time

For kernels with history-based performance models, StarPU can very easily provide a theoretical lower
bound for the execution time of a whole set of tasks. See for
instance @code{examples/lu/lu_example.c}: before submitting tasks,
call @code{starpu_bound_start}, and after complete execution, call
@code{starpu_bound_stop}. @code{starpu_bound_print_lp} or
@code{starpu_bound_print_mps} can then be used to output a Linear Programming
problem corresponding to the schedule of your tasks. Run it through
@code{lp_solve} or any other linear programming solver, and that will give you a
lower bound for the total execution time of your tasks. If StarPU was compiled
with the glpk library installed, @code{starpu_bound_compute} can be used to
solve it immediately and get the optimized minimum. Its @code{integer}
parameter allows deciding whether integer resolution should be computed
and returned.

The @code{deps} parameter tells StarPU whether to take tasks and implicit data
dependencies into account. Note that the size of the linear programming problem
is quadratic in the number of tasks, so solving it can take a very long time,
possibly minutes for just a few dozen tasks. You should
probably use @code{lp_solve -timeout 1 test.lp -wmps test.mps} to convert the
problem to MPS format and then use a better solver; @code{glpsol} might be
better than @code{lp_solve} for instance (the @code{--pcost} option may be
useful), but sometimes does not manage to converge. @code{cbc} might look
slower, but it is parallel. Be sure to try at least all the @code{-B} options
of @code{lp_solve}. For instance, we often just use
@code{lp_solve -cc -B1 -Bb -Bg -Bp -Bf -Br -BG -Bd -Bs -BB -Bo -Bc -Bi}, and
the @code{-gr} option can also be quite useful.

Setting @code{deps} to 0 will only take into account the actual computations
on processing units. It however still properly takes into account the varying
performances of kernels and processing units, which is quite more accurate than
just comparing StarPU performances with the fastest of the kernels being used.

The @code{prio} parameter tells StarPU whether to simulate taking into account
the priorities as the StarPU scheduler would, i.e. schedule prioritized
tasks before less prioritized tasks, to check to what extent this results
in a less optimal solution. This increases the computation time even more.

Note that for simplicity, all this however does not take into account data
transfers, which are assumed to be completely overlapped.
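
A minimal sketch of this workflow, assuming that @code{starpu_bound_start}
takes the @code{deps} and @code{prio} parameters described above and that
@code{starpu_bound_compute} returns the bound through its first argument
(check the exact prototypes and units in the StarPU headers):

@cartouche
@smallexample
double min_time, min_time_integer;
int deps = 1, prio = 0;   /* see the discussion above */

starpu_bound_start(deps, prio);
/* ... submit all the tasks ... */
starpu_task_wait_for_all();
starpu_bound_stop();

/* either dump the Linear Programming problem for an external solver... */
FILE *f = fopen("test.lp", "w");
starpu_bound_print_lp(f);
fclose(f);

/* ... or, if StarPU was built with glpk, solve it directly */
starpu_bound_compute(&min_time, &min_time_integer, 1);
@end smallexample
@end cartouche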
@node Insert Task Utility
@section Insert Task Utility

StarPU provides the wrapper function @code{starpu_insert_task} to ease
the creation and submission of tasks.

@deftypefun int starpu_insert_task (starpu_codelet *@var{cl}, ...)
Create and submit a task corresponding to @var{cl} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet can be of the following types:

@itemize
@item
@code{STARPU_R}, @code{STARPU_W}, @code{STARPU_RW}, @code{STARPU_SCRATCH}, @code{STARPU_REDUX}: an access mode followed by a data handle;
@item
@code{STARPU_VALUE} followed by a pointer to a constant value and
the size of the constant;
@item
@code{STARPU_CALLBACK} followed by a pointer to a callback function;
@item
@code{STARPU_CALLBACK_ARG} followed by a pointer to be given as an
argument to the callback function;
@item
@code{STARPU_PRIORITY} followed by an integer defining a priority level.
@end itemize

Parameters to be passed to the codelet implementation are defined
through the type @code{STARPU_VALUE}. The function
@code{starpu_unpack_cl_args} must be called within the codelet
implementation to retrieve them.
@end deftypefun
Here is the implementation of the codelet:

@smallexample
void func_cpu(void *descr[], void *_args)
@{
    int *x0 = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
    float *x1 = (float *)STARPU_VARIABLE_GET_PTR(descr[1]);
    int ifactor;
    float ffactor;

    starpu_unpack_cl_args(_args, &ifactor, &ffactor);
    *x0 = *x0 * ifactor;
    *x1 = *x1 * ffactor;
@}

starpu_codelet mycodelet = @{
    .where = STARPU_CPU,
    .cpu_func = func_cpu,
    .nbuffers = 2
@};
@end smallexample

And the call to the @code{starpu_insert_task} wrapper:

@smallexample
starpu_insert_task(&mycodelet,
                   STARPU_VALUE, &ifactor, sizeof(ifactor),
                   STARPU_VALUE, &ffactor, sizeof(ffactor),
                   STARPU_RW, data_handles[0], STARPU_RW, data_handles[1],
                   0);
@end smallexample
The call to @code{starpu_insert_task} is equivalent to the following
code:

@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &mycodelet;
task->buffers[0].handle = data_handles[0];
task->buffers[0].mode = STARPU_RW;
task->buffers[1].handle = data_handles[1];
task->buffers[1].mode = STARPU_RW;

char *arg_buffer;
size_t arg_buffer_size;
starpu_pack_cl_args(&arg_buffer, &arg_buffer_size,
                    STARPU_VALUE, &ifactor, sizeof(ifactor),
                    STARPU_VALUE, &ffactor, sizeof(ffactor),
                    0);
task->cl_arg = arg_buffer;
task->cl_arg_size = arg_buffer_size;

int ret = starpu_task_submit(task);
@end smallexample
@node Debugging
@section Debugging

StarPU provides several tools to help debugging applications. Execution traces
can be generated and displayed graphically, see @ref{Generating traces}. Some
gdb helpers are also provided to show the whole StarPU state:

@smallexample
(gdb) source tools/gdbinit
(gdb) help starpu
@end smallexample
@node More examples
@section More examples

More examples are available in the StarPU sources in the @code{examples/}
directory. Simple examples include:

@table @asis
@item @code{incrementer/}:
Trivial incrementation test.
@item @code{basic_examples/}:
Simple documented Hello world (as shown in @ref{Hello World}), vector/scalar product (as shown
in @ref{Vector Scaling on an Hybrid CPU/GPU Machine}), matrix
product examples (as shown in @ref{Performance model example}), an example using the blocked matrix data
interface, and an example using the variable data interface.
@item @code{matvecmult/}:
OpenCL example from NVidia, adapted to StarPU.
@item @code{axpy/}:
AXPY CUBLAS operation adapted to StarPU.
@item @code{fortran/}:
Example of Fortran bindings.
@end table

More advanced examples include:

@table @asis
@item @code{filters/}:
Examples using filters, as shown in @ref{Partitioning Data}.
@item @code{lu/}:
LU matrix factorization, see for instance @code{xlu_implicit.c}.
@item @code{cholesky/}:
Cholesky matrix factorization, see for instance @code{cholesky_implicit.c}.
@end table
@c ---------------------------------------------------------------------
@c Performance options
@c ---------------------------------------------------------------------

@node Performance optimization
@chapter How to optimize performance with StarPU

TODO: improve!

@menu
* Data management::
* Task submission::
* Task priorities::
* Task scheduling policy::
* Performance model calibration::
* Task distribution vs Data transfer::
* Data prefetch::
* Power-based scheduling::
* Profiling::
* CUDA-specific optimizations::
@end menu

Simply encapsulating application kernels into tasks already permits to
seamlessly support CPUs and GPUs at the same time. To achieve good performance,
a few additional changes are needed.
@node Data management
@section Data management

When the application allocates data, whenever possible it should use the
@code{starpu_malloc} function, which will ask CUDA or
OpenCL to make the allocation itself and pin the corresponding allocated
memory. This is needed to permit asynchronous data transfer, i.e. to permit
data transfers to overlap with computations.
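
For instance, a minimal sketch of allocating a pinned vector, assuming that
@code{starpu_malloc} takes a pointer to the pointer plus the size, with
@code{starpu_free} as its counterpart (check the exact prototypes in the
headers of the installed StarPU version):

@example
float *vector;
/* pinned allocation, so transfers of this buffer can be asynchronous */
starpu_malloc((void **)&vector, NX*sizeof(vector[0]));
/* ... fill the vector, register it, submit tasks ... */
starpu_free(vector);
@end example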
By default, StarPU leaves replicates of data wherever they were used, in case they
will be re-used by other tasks, thus saving the data transfer time. When some
task modifies some data, all the other replicates are invalidated, and only the
processing unit which ran that task will have a valid replicate of the data. If the application knows
that this data will not be re-used by further tasks, it should advise StarPU to
immediately replicate it to a desired list of memory nodes (given through a
bitmask). This can be understood like the write-through mode of CPU caches.

@example
starpu_data_set_wt_mask(img_handle, 1<<0);
@end example

will for instance request to always transfer a replicate into the main memory (node
0), as bit 0 of the write-through bitmask is being set.
@node Task submission
@section Task submission

To let StarPU make online optimizations, tasks should be submitted
asynchronously as much as possible. Ideally, all the tasks should be
submitted, and mere calls to @code{starpu_task_wait_for_all} or
@code{starpu_data_unregister} be done to wait for
termination. StarPU will then be able to rework the whole schedule, overlap
computation with communication, manage accelerator local memory usage, etc.
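
For instance, a sketch of such an asynchronous submission loop (the
@code{synchronous} field is assumed to default to 0 and is set here only for
emphasis):

@example
for (i = 0; i < ntasks; i++) @{
    struct starpu_task *task = starpu_task_create();
    task->cl = &cl;
    task->synchronous = 0;  /* submission returns immediately */
    /* ... set buffers and arguments ... */
    starpu_task_submit(task);
@}
/* a single synchronization point at the very end */
starpu_task_wait_for_all();
@end example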
@node Task priorities
@section Task priorities

By default, StarPU will consider the tasks in the order they are submitted by
the application. If the application programmer knows that some tasks should
be performed in priority (for instance because their output is needed by many
other tasks and may thus be a bottleneck if not executed early enough), the
@code{priority} field of the task structure should be set to transmit the
priority information to StarPU.
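
For instance, assuming the @code{STARPU_MAX_PRIO} macro denotes the highest
priority level supported by the scheduler (check the headers of your
version):

@example
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
/* hint the scheduler that this task is on the critical path */
task->priority = STARPU_MAX_PRIO;
starpu_task_submit(task);
@end example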
@node Task scheduling policy
@section Task scheduling policy

By default, StarPU uses the @code{eager} simple greedy scheduler. This is
because it provides correct load balance even if the application codelets do not
have performance models. If your application codelets have performance models
(@pxref{Performance model example} for examples showing how to do it),
you should change the scheduler thanks to the @code{STARPU_SCHED} environment
variable. For instance, @code{export STARPU_SCHED=dmda}. Setting it to
@code{help} lists the available schedulers.

The @b{eager} scheduler uses a central task queue, from which workers draw tasks
to work on. If a task has a non-zero priority, it is put at the front of the queue.

The @b{prio} scheduler also uses a central task queue, but sorts tasks by
priority (between -5 and 5).

The @b{random} scheduler distributes tasks randomly according to assumed worker
overall performance.

The @b{ws} (work stealing) scheduler schedules tasks on the local worker by
default. When a worker becomes idle, it steals a task from the most loaded
worker.

The @b{dm} (deque model) scheduler takes task execution performance models into
account to perform a HEFT-like scheduling strategy: it schedules tasks where
their termination time will be minimal.

The @b{dmda} (deque model data aware) scheduler is similar to dm, but it also
takes data transfer time into account.

The @b{dmdar} (deque model data aware ready) scheduler is similar to dmda, but
it also sorts tasks on per-worker queues by number of already-available data
buffers.

The @b{dmdas} (deque model data aware sorted) scheduler is similar to dmda, but
it also supports arbitrary priority values.

The @b{heft} (HEFT) scheduler is similar to dmda, but it also supports task
bundles.

The @b{pheft} (parallel HEFT) scheduler is similar to heft, but it also supports
parallel tasks (still experimental).

The @b{pgreedy} (parallel greedy) scheduler is similar to the eager scheduler,
but it also supports parallel tasks (still experimental).
@node Performance model calibration
@section Performance model calibration

Most schedulers are based on an estimation of codelet duration on each kind
of processing unit. For this to be possible, the application programmer needs
to configure a performance model for the codelets of the application (see
@ref{Performance model example} for instance). History-based performance models
use on-line calibration. StarPU will automatically calibrate codelets
which have never been calibrated yet. To force continuing calibration, use
@code{export STARPU_CALIBRATE=1}. This may be necessary if your application
has not-so-stable performance. Details on the current performance model status
can be obtained from the @code{starpu_perfmodel_display} command: the @code{-l}
option lists the available performance models, and the @code{-s} option permits
to choose the performance model to be displayed. The result looks like:

@example
$ starpu_perfmodel_display -s starpu_dlu_lu_model_22
performance model for cpu
# hash      size       mean          dev           n
5c6c3401    1572864    1.216300e+04  2.277778e+03  1240
@end example

which shows that for the LU 22 kernel with a 1.5MiB matrix, the average
execution time on CPUs was about 12ms, with a 2ms standard deviation, over
1240 samples. It is a good idea to check this before doing actual performance
measurements.

If a kernel source code was modified (e.g. performance improvement), the
calibration information is stale and should be dropped, to re-calibrate from
the start. This can be done by using @code{export STARPU_CALIBRATE=2}.

Note: due to CUDA limitations, to be able to measure kernel duration,
calibration mode needs to disable asynchronous data transfers. Calibration thus
disables data transfer / computation overlapping, and should thus not be used
for benchmarking. Note 2: history-based performance models get calibrated
only if a performance-model-based scheduler is chosen.
@node Task distribution vs Data transfer
@section Task distribution vs Data transfer

Distributing tasks to balance the load induces data transfer penalty. StarPU
thus needs to find a balance between both. The target function that the
@code{dmda} scheduler of StarPU
tries to minimize is @code{alpha * T_execution + beta * T_data_transfer}, where
@code{T_execution} is the estimated execution time of the codelet (usually
accurate), and @code{T_data_transfer} is the estimated data transfer time. The
latter is estimated based on bus calibration before execution start,
i.e. with an idle machine, thus without contention. You can force bus re-calibration by running
@code{starpu_calibrate_bus}. The beta parameter defaults to 1, but it can be
worth trying to tweak it by using @code{export STARPU_BETA=2} for instance,
since during real application execution, contention makes transfer times bigger.
This is of course imprecise, but in practice such a rough estimation already
gives results close to what a precise estimation would provide.
@node Data prefetch
@section Data prefetch

The @code{heft}, @code{dmda} and @code{pheft} scheduling policies perform data prefetch (see @ref{STARPU_PREFETCH}):
as soon as a scheduling decision is taken for a task, requests are issued to
transfer its required data to the target processing unit, if needed, so that
when the processing unit actually starts the task, its data will hopefully be
already available and it will not have to wait for the transfer to finish.

The application may want to perform some manual prefetching, for several reasons
such as excluding initial data transfers from performance measurements, or
setting up an initial statically-computed data distribution on the machine
before submitting tasks, which will thus guide StarPU toward an initial task
distribution (since StarPU will try to avoid further transfers).

This can be achieved by giving the @code{starpu_data_prefetch_on_node} function
the handle and the desired target memory node.
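
For instance, assuming that @code{starpu_data_prefetch_on_node} also takes an
asynchronous flag as its last parameter (check the exact prototype in the
headers), prefetching a handle to the memory node attached to a given worker
could look like:

@example
/* memory node attached to the first worker */
unsigned node = starpu_worker_get_memory_node(0);
/* start moving the data there in the background */
starpu_data_prefetch_on_node(vector_handle, node, 1);
@end example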
@node Power-based scheduling
@section Power-based scheduling

If the application can provide some power performance model (through
the @code{power_model} field of the codelet structure), StarPU will
take it into account when distributing tasks. The target function that
the @code{dmda} scheduler minimizes becomes @code{alpha * T_execution +
beta * T_data_transfer + gamma * Consumption}, where @code{Consumption}
is the estimated task consumption in Joules. To tune this parameter, use
@code{export STARPU_GAMMA=3000} for instance, to express that each Joule
(i.e. kW during 1000 us) is worth 3000 us of execution time penalty. Setting
@code{alpha} and @code{beta} to zero permits to only take power consumption
into account.

This is however not sufficient to correctly optimize power: the scheduler would
simply tend to run all computations on the most energy-conservative processing
unit. To account for the consumption of the whole machine (including idle
processing units), the idle power of the machine should be given by setting
@code{export STARPU_IDLE_POWER=200} for 200W, for instance. This value can often
be obtained from the machine power supplier.

The power actually consumed by the total execution can be displayed by setting
@code{export STARPU_PROFILING=1 STARPU_WORKER_STATS=1}.
@node Profiling
@section Profiling

A quick view of how many tasks each worker has executed can be obtained by setting
@code{export STARPU_WORKER_STATS=1}. This is a convenient way to check that
execution did happen on accelerators without penalizing performance with
the profiling overhead.

More detailed profiling information can be enabled by using @code{export STARPU_PROFILING=1} or by
calling @code{starpu_profiling_status_set} from the source code.
Statistics on the execution can then be obtained by using @code{export
STARPU_BUS_STATS=1} and @code{export STARPU_WORKER_STATS=1}.

More details on performance feedback are provided by the next chapter.
@node CUDA-specific optimizations
@section CUDA-specific optimizations

Due to CUDA limitations, StarPU will have a hard time overlapping its own
communications and the codelet computations if the application does not use a
dedicated CUDA stream for its computations. StarPU provides one by the use of
@code{starpu_cuda_get_local_stream()} which should be used by all CUDA codelet
operations. For instance:

@example
func <<<grid,block,0,starpu_cuda_get_local_stream()>>> (foo, bar);
cudaStreamSynchronize(starpu_cuda_get_local_stream());
@end example

Unfortunately, some CUDA libraries do not have stream variants of
kernels. That will lower the potential for overlapping.
@c ---------------------------------------------------------------------
@c Performance feedback
@c ---------------------------------------------------------------------

@node Performance feedback
@chapter Performance feedback

@menu
* On-line:: On-line performance feedback
* Off-line:: Off-line performance feedback
* Codelet performance:: Performance of codelets
@end menu

@node On-line
@section On-line performance feedback

@menu
* Enabling monitoring:: Enabling on-line performance monitoring
* Task feedback:: Per-task feedback
* Codelet feedback:: Per-codelet feedback
* Worker feedback:: Per-worker feedback
* Bus feedback:: Bus-related feedback
@end menu

@node Enabling monitoring
@subsection Enabling on-line performance monitoring

In order to enable online performance monitoring, the application can call
@code{starpu_profiling_status_set(STARPU_PROFILING_ENABLE)}. It is possible to
detect whether monitoring is already enabled or not by calling
@code{starpu_profiling_status_get()}. Enabling monitoring also reinitializes all
previously collected feedback. The @code{STARPU_PROFILING} environment variable
can also be set to 1 to achieve the same effect.

Likewise, performance monitoring is stopped by calling
@code{starpu_profiling_status_set(STARPU_PROFILING_DISABLE)}. Note that this
does not reset the performance counters so that the application may consult
them later on.

More details about the performance monitoring API are available in section
@ref{Profiling API}.
@node Task feedback
@subsection Per-task feedback

If profiling is enabled, a pointer to a @code{starpu_task_profiling_info}
structure is put in the @code{.profiling_info} field of the @code{starpu_task}
structure when a task terminates.
This structure is automatically destroyed when the task structure is destroyed,
either automatically or by calling @code{starpu_task_destroy}.

The @code{starpu_task_profiling_info} structure indicates the date when the
task was submitted (@code{submit_time}), started (@code{start_time}), and
terminated (@code{end_time}), relative to the initialization of
StarPU with @code{starpu_init}. It also specifies the identifier of the worker
that has executed the task (@code{workerid}).
These dates are stored as @code{timespec} structures which the user may convert
into micro-seconds using the @code{starpu_timing_timespec_to_us} helper
function.

It is worth noting that the application may directly access this structure from
the callback executed at the end of the task. The @code{starpu_task} structure
associated to the callback currently being executed is indeed accessible with
the @code{starpu_get_current_task()} function.
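
For instance, a sketch of a callback reporting how long the task it is
attached to has run (the callback name is ours):

@example
void perf_callback(void *arg)
@{
    struct starpu_task *task = starpu_get_current_task();
    struct starpu_task_profiling_info *info = task->profiling_info;
    double length = starpu_timing_timespec_delay_us(&info->start_time,
                                                    &info->end_time);
    fprintf(stderr, "task ran for %.2lf us on worker %d\n",
            length, info->workerid);
@}
@end example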
@node Codelet feedback
@subsection Per-codelet feedback

The @code{per_worker_stats} field of the @code{starpu_codelet_t} structure is
an array of counters. The i-th entry of the array is incremented every time a
task implementing the codelet is executed on the i-th worker.
This array is not reinitialized when profiling is enabled or disabled.
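
A sketch of dumping these counters after execution (the exact integer type of
the counters is an assumption here, hence the cast; check the field's
declaration in the headers):

@example
int worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
    fprintf(stderr, "codelet was run %lu times on worker %d\n",
            (unsigned long) cl.per_worker_stats[worker], worker);
@end example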
@node Worker feedback
@subsection Per-worker feedback

The second argument returned by the @code{starpu_worker_get_profiling_info}
function is a @code{starpu_worker_profiling_info} structure that gives
statistics about the specified worker. This structure specifies when StarPU
started collecting profiling information for that worker (@code{start_time}),
the duration of the profiling measurement interval (@code{total_time}), the
time spent executing kernels (@code{executing_time}), the time spent sleeping
because there is no task to execute at all (@code{sleeping_time}), and the
number of tasks that were executed while profiling was enabled.
These values give an estimation of the proportion of time spent doing real
work, and the time spent either sleeping because there are not enough
executable tasks or simply wasted in pure StarPU overhead.

Calling @code{starpu_worker_get_profiling_info} resets the profiling
information associated to a worker.

When an FxT trace is generated (see @ref{Generating traces}), it is also
possible to use the @code{starpu_top} script (described in @ref{starpu-top}) to
generate a graphic showing the evolution of these values over time, for
the different workers.
@node Bus feedback
@subsection Bus-related feedback

TODO

@c how to enable/disable performance monitoring
@c what kind of information do we get ?

@node Off-line
@section Off-line performance feedback

@menu
* Generating traces:: Generating traces with FxT
* Gantt diagram:: Creating a Gantt Diagram
* DAG:: Creating a DAG with graphviz
* starpu-top:: Monitoring activity
@end menu
@node Generating traces
@subsection Generating traces with FxT

StarPU can use the FxT library (see
@indicateurl{https://savannah.nongnu.org/projects/fkt/}) to generate traces
with a limited runtime overhead.

You can either get the FxT library from CVS (autotools are required):

@example
% cvs -d :pserver:anonymous@@cvs.sv.gnu.org:/sources/fkt co FxT
% ./bootstrap
@end example

If autotools are not available on your machine, or if you prefer to do so,
FxT's code is also available as a tarball:

@example
% wget http://download.savannah.gnu.org/releases/fkt/fxt-0.2.2.tar.gz
@end example

Compiling and installing the FxT library in the @code{$FXTDIR} path is
done following the standard procedure:

@example
% ./configure --prefix=$FXTDIR
% make
% make install
@end example

In order to have StarPU generate traces, StarPU should be configured with
the @code{--with-fxt} option:

@example
$ ./configure --with-fxt=$FXTDIR
@end example

When FxT is enabled, a trace is generated when StarPU is terminated by calling
@code{starpu_shutdown()}. The trace is a binary file whose name has the form
@code{prof_file_XXX_YYY} where @code{XXX} is the user name, and
@code{YYY} is the pid of the process that used StarPU. This file is saved in the
@code{/tmp/} directory by default, or in the directory specified by
the @code{STARPU_FXT_PREFIX} environment variable.
@node Gantt diagram
@subsection Creating a Gantt Diagram

When the FxT trace file @code{filename} has been generated, it is possible to
generate a trace in the Paje format by calling:

@example
% starpu_fxt_tool -i filename
@end example

Or alternatively, setting the @code{STARPU_GENERATE_TRACE} environment variable
to 1 before application execution will make StarPU do it automatically at
application shutdown.

This will create a @code{paje.trace} file in the current directory that can be
inspected with the ViTE open-source trace visualization tool. More information
about ViTE is available at @indicateurl{http://vite.gforge.inria.fr/}. It is
possible to open the @code{paje.trace} file with ViTE by using the following
command:

@example
% vite paje.trace
@end example
@node DAG
@subsection Creating a DAG with graphviz

When the FxT trace file @code{filename} has been generated, it is possible to
generate a task graph in the DOT format by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create a @code{dag.dot} file in the current directory. This file is a
task graph described using the DOT language. It is possible to get a
graphical output of the graph by using the graphviz library:

@example
$ dot -Tpdf dag.dot -o output.pdf
@end example
@node starpu-top
@subsection Monitoring activity

When the FxT trace file @code{filename} has been generated, it is possible to
generate an activity trace by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create an @code{activity.data} file in the current
directory. A profile of the application showing the activity of StarPU
during the execution of the program can be generated:

@example
$ starpu_top.sh activity.data
@end example

This will create a file named @code{activity.eps} in the current directory.
This picture is composed of two parts.
The first part shows the activity of the different workers. The green sections
indicate which proportion of the time was spent executing kernels on the
processing unit. The red sections indicate the proportion of time spent in
StarPU: an important overhead may indicate that the granularity is too
low, and that bigger tasks may be appropriate to use the processing unit more
efficiently. The black sections indicate that the processing unit was blocked
because there was no task to process: this may indicate a lack of parallelism
which may be alleviated by creating more tasks where possible.

The second part of the @code{activity.eps} picture is a graph showing the
evolution of the number of tasks available in the system during the execution.
Ready tasks are shown in black, and tasks that are submitted but not
schedulable yet are shown in grey.
@node Codelet performance
@section Performance of codelets

The performance model of codelets can be examined by using the
@code{starpu_perfmodel_display} tool:

@example
$ starpu_perfmodel_display -l
file: <malloc_pinned.hannibal>
file: <starpu_slu_lu_model_21.hannibal>
file: <starpu_slu_lu_model_11.hannibal>
file: <starpu_slu_lu_model_22.hannibal>
file: <starpu_slu_lu_model_12.hannibal>
@end example

Here, the codelets of the lu example are available. We can examine the
performance of the 22 kernel:

@example
$ starpu_perfmodel_display -s starpu_slu_lu_model_22
performance model for cpu
# hash      size       mean          dev           n
57618ab0    19660800   2.851069e+05  1.829369e+04  109
performance model for cuda_0
# hash      size       mean          dev           n
57618ab0    19660800   1.164144e+04  1.556094e+01  315
performance model for cuda_1
# hash      size       mean          dev           n
57618ab0    19660800   1.164271e+04  1.330628e+01  360
performance model for cuda_2
# hash      size       mean          dev           n
57618ab0    19660800   1.166730e+04  3.390395e+02  456
@end example

We can see that for the given size, over a sample of a few hundred
executions, the GPUs are about 20 times faster than the CPUs (numbers are in
us). The standard deviation is extremely low for the GPUs, and less than 10% for
CPUs.
@c ---------------------------------------------------------------------
@c MPI support
@c ---------------------------------------------------------------------

@node StarPU MPI support
@chapter StarPU MPI support

The integration of MPI transfers within task parallelism is done in a
very natural way by the means of asynchronous interactions between the
application and StarPU. This is implemented in a separate libstarpumpi library
which basically provides "StarPU" equivalents of @code{MPI_*} functions, where
@code{void *} buffers are replaced with @code{starpu_data_handle}s, and all
GPU-RAM-NIC transfers are handled efficiently by StarPU-MPI.

@menu
* The API::
* Simple Example::
* MPI Insert Task Utility::
@end menu
@node The API
@section The API

@subsection Initialisation

@deftypefun int starpu_mpi_initialize (void)
Initializes the starpumpi library. This must be called between calling
@code{starpu_init} and other @code{starpu_mpi} functions. This
function does not call @code{MPI_Init}, which should thus be called beforehand.
@end deftypefun

@deftypefun int starpu_mpi_initialize_extended (int *@var{rank}, int *@var{world_size})
Initializes the starpumpi library. This must be called between calling
@code{starpu_init} and other @code{starpu_mpi} functions.
This function calls @code{MPI_Init}, and therefore should be preferred
over the previous one for MPI implementations which are not thread-safe.
Returns the current MPI node rank and world size.
@end deftypefun

@deftypefun int starpu_mpi_shutdown (void)
Cleans up the starpumpi library. This must be called between calling
@code{starpu_mpi} functions and @code{starpu_shutdown}.
@code{MPI_Finalize} will be called if StarPU-MPI has been initialized
by calling @code{starpu_mpi_initialize_extended}.
@end deftypefun
@subsection Communication

@deftypefun int starpu_mpi_send (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
@end deftypefun

@deftypefun int starpu_mpi_recv (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, MPI_Status *@var{status})
@end deftypefun

@deftypefun int starpu_mpi_isend (starpu_data_handle @var{data_handle}, starpu_mpi_req *@var{req}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
@end deftypefun

@deftypefun int starpu_mpi_irecv (starpu_data_handle @var{data_handle}, starpu_mpi_req *@var{req}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm})
@end deftypefun

@deftypefun int starpu_mpi_isend_detached (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
@end deftypefun

@deftypefun int starpu_mpi_wait (starpu_mpi_req *@var{req}, MPI_Status *@var{status})
@end deftypefun

@deftypefun int starpu_mpi_test (starpu_mpi_req *@var{req}, int *@var{flag}, MPI_Status *@var{status})
@end deftypefun

@deftypefun int starpu_mpi_barrier (MPI_Comm @var{comm})
@end deftypefun

@deftypefun int starpu_mpi_isend_detached_unlock_tag (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
When the transfer is completed, the tag is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached_unlock_tag (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
@end deftypefun

@deftypefun int starpu_mpi_isend_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle *@var{data_handle}, int *@var{dest}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Asynchronously send an array of buffers, and unlock the tag once all
of them are transmitted.
@end deftypefun

@deftypefun int starpu_mpi_irecv_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle *@var{data_handle}, int *@var{source}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
@end deftypefun
@page
@node Simple Example
@section Simple Example

@cartouche
@smallexample
void increment_token(void)
@{
    struct starpu_task *task = starpu_task_create();

    task->cl = &increment_cl;
    task->buffers[0].handle = token_handle;
    task->buffers[0].mode = STARPU_RW;

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche

@cartouche
@smallexample
int main(int argc, char **argv)
@{
    int rank, size;

    starpu_init(NULL);
    starpu_mpi_initialize_extended(&rank, &size);

    starpu_vector_data_register(&token_handle, 0, (uintptr_t)&token, 1, sizeof(unsigned));

    unsigned nloops = NITER;
    unsigned loop;

    unsigned last_loop = nloops - 1;
    unsigned last_rank = size - 1;
@end smallexample
@end cartouche
@cartouche
@smallexample
    for (loop = 0; loop < nloops; loop++) @{
        int tag = loop*size + rank;

        if (loop == 0 && rank == 0)
        @{
            token = 0;
            fprintf(stdout, "Start with token value %d\n", token);
        @}
        else
        @{
            starpu_mpi_irecv_detached(token_handle, (rank+size-1)%size, tag,
                    MPI_COMM_WORLD, NULL, NULL);
        @}

        increment_token();

        if (loop == last_loop && rank == last_rank)
        @{
            starpu_data_acquire(token_handle, STARPU_R);
            fprintf(stdout, "Finished: token value %d\n", token);
            starpu_data_release(token_handle);
        @}
        else
        @{
            starpu_mpi_isend_detached(token_handle, (rank+1)%size, tag+1,
                    MPI_COMM_WORLD, NULL, NULL);
        @}
    @}

    starpu_task_wait_for_all();
@end smallexample
@end cartouche
@cartouche
@smallexample
    starpu_mpi_shutdown();
    starpu_shutdown();

    if (rank == last_rank)
    @{
        fprintf(stderr, "[%d] token = %d == %d * %d ?\n", rank, token, nloops, size);
        STARPU_ASSERT(token == nloops*size);
    @}
@end smallexample
@end cartouche
@page
@node MPI Insert Task Utility
@section MPI Insert Task Utility

@deftypefun void starpu_mpi_insert_task (MPI_Comm @var{comm}, starpu_codelet *@var{cl}, ...)
Create and submit a task corresponding to @var{cl} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet are of the same types as for the
function @code{starpu_insert_task} defined in @ref{Insert Task
Utility}. The extra argument @code{STARPU_EXECUTE} followed by an
integer allows to specify the node to execute the codelet.

The algorithm is as follows:
@enumerate
@item Find out whether we are to execute the codelet because we own the
data to be written to. If different tasks own data to be written to,
the argument @code{STARPU_EXECUTE} should be used to specify the
executing task @code{ET}.
@item Send and receive data as requested. Tasks owning data which need
to be read by the executing task @code{ET} send them to @code{ET}.
@item Execute the codelet. This is done by the task selected in the
first step of the algorithm.
@item In the case when different tasks own data to be written to, send the
W data back to their owners.
@end enumerate

The algorithm also includes a cache mechanism that avoids sending
data twice to the same task, unless the data has been modified.
@end deftypefun

@deftypefun void starpu_mpi_get_data_on_node (MPI_Comm @var{comm}, starpu_data_handle @var{data_handle}, int @var{node})
@end deftypefun
@page

Here is an example showing how to use @code{starpu_mpi_insert_task}. One
first needs to define a distribution function which specifies the
locality of the data. Note that the distribution information needs to
be given to StarPU by calling @code{starpu_data_set_rank}.

@cartouche
@smallexample
/* Returns the MPI node number where data is */
int my_distrib(int x, int y, int nb_nodes) @{
    /* Cyclic distrib */
    return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;
    // /* Linear distrib */
    // return x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * X;
@}
@end smallexample
@end cartouche
Now the data can be registered within StarPU. Data which are not
owned but will be needed for computations can be registered through
the lazy allocation mechanism, i.e. with a @code{home_node} set to -1.
StarPU will automatically allocate the memory when it is used for the
first time.

@cartouche
@smallexample
unsigned matrix[X][Y];
starpu_data_handle data_handles[X][Y];

for(x = 0; x < X; x++) @{
    for (y = 0; y < Y; y++) @{
        int mpi_rank = my_distrib(x, y, size);
        if (mpi_rank == rank)
            /* Owning data */
            starpu_variable_data_register(&data_handles[x][y], 0,
                                          (uintptr_t)&(matrix[x][y]), sizeof(unsigned));
        else if (rank == mpi_rank+1 || rank == mpi_rank-1)
            /* I don't own that index, but will need it for my computations */
            starpu_variable_data_register(&data_handles[x][y], -1,
                                          (uintptr_t)NULL, sizeof(unsigned));
        else
            /* I know it's useless to allocate anything for this */
            data_handles[x][y] = NULL;
        if (data_handles[x][y])
            starpu_data_set_rank(data_handles[x][y], mpi_rank);
    @}
@}
@end smallexample
@end cartouche
Now @code{starpu_mpi_insert_task()} can be called for the different
steps of the application.

@cartouche
@smallexample
for(loop=0 ; loop<niter; loop++)
    for (x = 1; x < X-1; x++)
        for (y = 1; y < Y-1; y++)
            starpu_mpi_insert_task(MPI_COMM_WORLD, &stencil5_cl,
                                   STARPU_RW, data_handles[x][y],
                                   STARPU_R, data_handles[x-1][y],
                                   STARPU_R, data_handles[x+1][y],
                                   STARPU_R, data_handles[x][y-1],
                                   STARPU_R, data_handles[x][y+1],
                                   0);

starpu_task_wait_for_all();
@end smallexample
@end cartouche
@c ---------------------------------------------------------------------
@c Configuration options
@c ---------------------------------------------------------------------

@node Configuring StarPU
@chapter Configuring StarPU

@menu
* Compilation configuration::
* Execution configuration through environment variables::
@end menu

@node Compilation configuration
@section Compilation configuration

The following arguments can be given to the @code{configure} script.

@menu
* Common configuration::
* Configuring workers::
* Advanced configuration::
@end menu

@node Common configuration
@subsection Common configuration

@menu
* --enable-debug::
* --enable-fast::
* --enable-verbose::
* --enable-coverage::
@end menu
@node --enable-debug
@subsubsection @code{--enable-debug}
@table @asis

@item @emph{Description}:
Enable debugging messages.
@end table

@node --enable-fast
@subsubsection @code{--enable-fast}
@table @asis

@item @emph{Description}:
Do not enforce assertions; this saves a lot of time otherwise spent computing them.
@end table

@node --enable-verbose
@subsubsection @code{--enable-verbose}
@table @asis

@item @emph{Description}:
Augment the verbosity of the debugging messages. This can be disabled
at runtime by setting the environment variable @code{STARPU_SILENT} to
any value.

@smallexample
% STARPU_SILENT=1 ./vector_scal
@end smallexample
@end table

@node --enable-coverage
@subsubsection @code{--enable-coverage}
@table @asis

@item @emph{Description}:
Enable flags for the @code{gcov} coverage tool.
@end table
@node Configuring workers
@subsection Configuring workers

@menu
* --enable-nmaxcpus::
* --disable-cpu::
* --enable-maxcudadev::
* --disable-cuda::
* --with-cuda-dir::
* --with-cuda-include-dir::
* --with-cuda-lib-dir::
* --enable-maxopencldev::
* --disable-opencl::
* --with-opencl-dir::
* --with-opencl-include-dir::
* --with-opencl-lib-dir::
* --enable-gordon::
* --with-gordon-dir::
@end menu
  1871. @node --enable-nmaxcpus
  1872. @subsubsection @code{--enable-nmaxcpus=<number>}
  1873. @table @asis
  1874. @item @emph{Description}:
  1875. Defines the maximum number of CPU cores that StarPU will support, then
  1876. available as the @code{STARPU_NMAXCPUS} macro.
  1877. @end table
  1878. @node --disable-cpu
  1879. @subsubsection @code{--disable-cpu}
  1880. @table @asis
  1881. @item @emph{Description}:
  1882. Disable the use of CPUs of the machine. Only GPUs etc. will be used.
  1883. @end table
  1884. @node --enable-maxcudadev
  1885. @subsubsection @code{--enable-maxcudadev=<number>}
  1886. @table @asis
  1887. @item @emph{Description}:
  1888. Defines the maximum number of CUDA devices that StarPU will support, then
  1889. available as the @code{STARPU_MAXCUDADEVS} macro.
  1890. @end table
  1891. @node --disable-cuda
  1892. @subsubsection @code{--disable-cuda}
  1893. @table @asis
  1894. @item @emph{Description}:
  1895. Disable the use of CUDA, even if a valid CUDA installation was detected.
  1896. @end table
  1897. @node --with-cuda-dir
  1898. @subsubsection @code{--with-cuda-dir=<path>}
  1899. @table @asis
  1900. @item @emph{Description}:
  1901. Specify the directory where CUDA is installed. This directory should notably contain
  1902. @code{include/cuda.h}.
  1903. @end table
  1904. @node --with-cuda-include-dir
  1905. @subsubsection @code{--with-cuda-include-dir=<path>}
  1906. @table @asis
  1907. @item @emph{Description}:
  1908. Specify the directory where CUDA headers are installed. This directory should
  1909. notably contain @code{cuda.h}. This defaults to @code{/include} appended to the
  1910. value given to @code{--with-cuda-dir}.
  1911. @end table
  1912. @node --with-cuda-lib-dir
  1913. @subsubsection @code{--with-cuda-lib-dir=<path>}
  1914. @table @asis
  1915. @item @emph{Description}:
  1916. Specify the directory where the CUDA library is installed. This directory should
  1917. notably contain the CUDA shared libraries (e.g. libcuda.so). This defaults to
  1918. @code{/lib} appended to the value given to @code{--with-cuda-dir}.
  1919. @end table
  1920. @node --enable-maxopencldev
  1921. @subsubsection @code{--enable-maxopencldev=<number>}
  1922. @table @asis
  1923. @item @emph{Description}:
  1924. Defines the maximum number of OpenCL devices that StarPU will support, then
  1925. available as the @code{STARPU_MAXOPENCLDEVS} macro.
  1926. @end table
  1927. @node --disable-opencl
  1928. @subsubsection @code{--disable-opencl}
  1929. @table @asis
  1930. @item @emph{Description}:
  1931. Disable the use of OpenCL, even if the SDK is detected.
  1932. @end table
  1933. @node --with-opencl-dir
  1934. @subsubsection @code{--with-opencl-dir=<path>}
  1935. @table @asis
  1936. @item @emph{Description}:
  1937. Specify the location of the OpenCL SDK. This directory should notably contain
  1938. @code{include/CL/cl.h} (or @code{include/OpenCL/cl.h} on Mac OS).
  1939. @end table
  1940. @node --with-opencl-include-dir
  1941. @subsubsection @code{--with-opencl-include-dir=<path>}
  1942. @table @asis
  1943. @item @emph{Description}:
  1944. Specify the location of OpenCL headers. This directory should notably contain
  1945. @code{CL/cl.h} (or @code{OpenCL/cl.h} on Mac OS). This defaults to
  1946. @code{/include} appended to the value given to @code{--with-opencl-dir}.
  1947. @end table
  1948. @node --with-opencl-lib-dir
  1949. @subsubsection @code{--with-opencl-lib-dir=<path>}
  1950. @table @asis
  1951. @item @emph{Description}:
  1952. Specify the location of the OpenCL library. This directory should notably
  1953. contain the OpenCL shared libraries (e.g. libOpenCL.so). This defaults to
  1954. @code{/lib} appended to the value given to @code{--with-opencl-dir}.
  1955. @end table
  1956. @node --enable-gordon
  1957. @subsubsection @code{--enable-gordon}
  1958. @table @asis
  1959. @item @emph{Description}:
  1960. Enable the use of the Gordon runtime for Cell SPUs.
  1961. @c TODO: rather default to enabled when detected
  1962. @end table
  1963. @node --with-gordon-dir
  1964. @subsubsection @code{--with-gordon-dir=<path>}
  1965. @table @asis
  1966. @item @emph{Description}:
  1967. Specify the location of the Gordon SDK.
  1968. @end table
@node Advanced configuration
@subsection Advanced configuration
@menu
* --enable-perf-debug::
* --enable-model-debug::
* --enable-stats::
* --enable-maxbuffers::
* --enable-allocation-cache::
* --enable-opengl-render::
* --enable-blas-lib::
* --with-magma::
* --with-fxt::
* --with-perf-model-dir::
* --with-mpicc::
* --with-goto-dir::
* --with-atlas-dir::
* --with-mkl-cflags::
* --with-mkl-ldflags::
@end menu
@node --enable-perf-debug
@subsubsection @code{--enable-perf-debug}
@table @asis
@item @emph{Description}:
Enable performance debugging.
@end table
@node --enable-model-debug
@subsubsection @code{--enable-model-debug}
@table @asis
@item @emph{Description}:
Enable performance model debugging.
@end table
@node --enable-stats
@subsubsection @code{--enable-stats}
@table @asis
@item @emph{Description}:
Enable statistics.
@end table
@node --enable-maxbuffers
@subsubsection @code{--enable-maxbuffers=<nbuffers>}
@table @asis
@item @emph{Description}:
Define the maximum number of buffers that tasks will be able to take
as parameters, then available as the @code{STARPU_NMAXBUFS} macro.
@end table
@node --enable-allocation-cache
@subsubsection @code{--enable-allocation-cache}
@table @asis
@item @emph{Description}:
Enable the use of a data allocation cache in order to avoid the cost of
repeated memory allocations with CUDA. Still experimental.
@end table
@node --enable-opengl-render
@subsubsection @code{--enable-opengl-render}
@table @asis
@item @emph{Description}:
Enable the use of OpenGL for the rendering of some examples.
@c TODO: rather default to enabled when detected
@end table
@node --enable-blas-lib
@subsubsection @code{--enable-blas-lib=<name>}
@table @asis
@item @emph{Description}:
Specify the BLAS library to be used by some of the examples. The
library has to be 'atlas' or 'goto'.
@end table
@node --with-magma
@subsubsection @code{--with-magma=<path>}
@table @asis
@item @emph{Description}:
Specify where MAGMA is installed. This directory should notably contain
@code{include/magmablas.h}.
@end table
@node --with-fxt
@subsubsection @code{--with-fxt=<path>}
@table @asis
@item @emph{Description}:
Specify the location of FxT (for generating traces and rendering them
using ViTE). This directory should notably contain
@code{include/fxt/fxt.h}.
@c TODO add ref to other section
@end table
@node --with-perf-model-dir
@subsubsection @code{--with-perf-model-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify where performance models should be stored (instead of defaulting to the
current user's home).
@end table
@node --with-mpicc
@subsubsection @code{--with-mpicc=<path to mpicc>}
@table @asis
@item @emph{Description}:
Specify the location of the @code{mpicc} compiler to be used for starpumpi.
@end table
@node --with-goto-dir
@subsubsection @code{--with-goto-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify the location of GotoBLAS.
@end table
@node --with-atlas-dir
@subsubsection @code{--with-atlas-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify the location of ATLAS. This directory should notably contain
@code{include/cblas.h}.
@end table
@node --with-mkl-cflags
@subsubsection @code{--with-mkl-cflags=<cflags>}
@table @asis
@item @emph{Description}:
Specify the compilation flags for the MKL library.
@end table
@node --with-mkl-ldflags
@subsubsection @code{--with-mkl-ldflags=<ldflags>}
@table @asis
@item @emph{Description}:
Specify the linking flags for the MKL library. Note that the
@url{http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor/}
website provides a script to determine the linking flags.
@end table
@c ---------------------------------------------------------------------
@c Environment variables
@c ---------------------------------------------------------------------
@node Execution configuration through environment variables
@section Execution configuration through environment variables
@menu
* Workers:: Configuring workers
* Scheduling:: Configuring the Scheduling engine
* Misc:: Miscellaneous and debug
@end menu
Note: the values given in the @code{starpu_conf} structure passed when
calling @code{starpu_init} will override the values of the environment
variables.
@node Workers
@subsection Configuring workers
@menu
* STARPU_NCPUS:: Number of CPU workers
* STARPU_NCUDA:: Number of CUDA workers
* STARPU_NOPENCL:: Number of OpenCL workers
* STARPU_NGORDON:: Number of SPU workers (Cell)
* STARPU_WORKERS_CPUID:: Bind workers to specific CPUs
* STARPU_WORKERS_CUDAID:: Select specific CUDA devices
* STARPU_WORKERS_OPENCLID:: Select specific OpenCL devices
@end menu
@node STARPU_NCPUS
@subsubsection @code{STARPU_NCPUS} -- Number of CPU workers
@table @asis
@item @emph{Description}:
Specify the number of CPU workers (thus not including workers dedicated to
controlling accelerators). Note that by default, StarPU will not allocate more
CPU workers than there are physical CPUs, and that some CPUs are used to
control the accelerators.
@end table
@node STARPU_NCUDA
@subsubsection @code{STARPU_NCUDA} -- Number of CUDA workers
@table @asis
@item @emph{Description}:
Specify the number of CUDA devices that StarPU can use. If
@code{STARPU_NCUDA} is lower than the number of physical devices, it is
possible to select which CUDA devices should be used by the means of the
@code{STARPU_WORKERS_CUDAID} environment variable. By default, StarPU will
create as many CUDA workers as there are CUDA devices.
@end table
@node STARPU_NOPENCL
@subsubsection @code{STARPU_NOPENCL} -- Number of OpenCL workers
@table @asis
@item @emph{Description}:
OpenCL equivalent of the @code{STARPU_NCUDA} environment variable.
@end table
@node STARPU_NGORDON
@subsubsection @code{STARPU_NGORDON} -- Number of SPU workers (Cell)
@table @asis
@item @emph{Description}:
Specify the number of SPUs that StarPU can use.
@end table
@node STARPU_WORKERS_CPUID
@subsubsection @code{STARPU_WORKERS_CPUID} -- Bind workers to specific CPUs
@table @asis
@item @emph{Description}:
Passing an array of integers (starting from 0) in @code{STARPU_WORKERS_CPUID}
specifies on which logical CPU the different workers should be
bound. For instance, if @code{STARPU_WORKERS_CPUID = "0 1 4 5"}, the first
worker will be bound to logical CPU #0, the second CPU worker will be bound to
logical CPU #1 and so on. Note that the logical ordering of the CPUs is either
determined by the OS, or provided by the @code{hwloc} library in case it is
available.
Note that the first workers correspond to the CUDA workers, then come the
OpenCL and the SPU, and finally the CPU workers. For example if
we have @code{STARPU_NCUDA=1}, @code{STARPU_NOPENCL=1}, @code{STARPU_NCPUS=2}
and @code{STARPU_WORKERS_CPUID = "0 2 1 3"}, the CUDA device will be controlled
by logical CPU #0, the OpenCL device will be controlled by logical CPU #2, and
the logical CPUs #1 and #3 will be used by the CPU workers.
If the number of workers is larger than the array given in
@code{STARPU_WORKERS_CPUID}, the workers are bound to the logical CPUs in a
round-robin fashion: if @code{STARPU_WORKERS_CPUID = "0 1"}, the first and the
third (resp. second and fourth) workers will be put on CPU #0 (resp. CPU #1).
This variable is ignored if the @code{use_explicit_workers_bindid} flag of the
@code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table
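As a sketch, the binding example above can be reproduced from the shell
(@code{./application} is a hypothetical binary name):
@smallexample
% STARPU_NCUDA=1 STARPU_NOPENCL=1 STARPU_NCPUS=2 \
  STARPU_WORKERS_CPUID="0 2 1 3" ./application
@end smallexample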
@node STARPU_WORKERS_CUDAID
@subsubsection @code{STARPU_WORKERS_CUDAID} -- Select specific CUDA devices
@table @asis
@item @emph{Description}:
Similarly to the @code{STARPU_WORKERS_CPUID} environment variable, it is
possible to select which CUDA devices should be used by StarPU. On a machine
equipped with 4 GPUs, setting @code{STARPU_WORKERS_CUDAID = "1 3"} and
@code{STARPU_NCUDA=2} specifies that 2 CUDA workers should be created, and that
they should use CUDA devices #1 and #3 (the logical ordering of the devices is
the one reported by CUDA).
This variable is ignored if the @code{use_explicit_workers_cuda_gpuid} flag of
the @code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table
@node STARPU_WORKERS_OPENCLID
@subsubsection @code{STARPU_WORKERS_OPENCLID} -- Select specific OpenCL devices
@table @asis
@item @emph{Description}:
OpenCL equivalent of the @code{STARPU_WORKERS_CUDAID} environment variable.
This variable is ignored if the @code{use_explicit_workers_opencl_gpuid} flag of
the @code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table
@node Scheduling
@subsection Configuring the Scheduling engine
@menu
* STARPU_SCHED:: Scheduling policy
* STARPU_CALIBRATE:: Calibrate performance models
* STARPU_PREFETCH:: Use data prefetch
* STARPU_SCHED_ALPHA:: Computation factor
* STARPU_SCHED_BETA:: Communication factor
@end menu
@node STARPU_SCHED
@subsubsection @code{STARPU_SCHED} -- Scheduling policy
@table @asis
@item @emph{Description}:
This chooses between the different scheduling policies proposed by StarPU:
random, work stealing, greedy, with performance models, etc.
Use @code{STARPU_SCHED=help} to get the list of available schedulers.
@end table
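For instance, one may list the available policies and then select one, here
the @code{dmda} policy mentioned below (@code{./application} stands for any
StarPU program):
@smallexample
% STARPU_SCHED=help ./application
% STARPU_SCHED=dmda ./application
@end smallexample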
@node STARPU_CALIBRATE
@subsubsection @code{STARPU_CALIBRATE} -- Calibrate performance models
@table @asis
@item @emph{Description}:
If this variable is set to 1, the performance models are calibrated during
the execution. If it is set to 2, the previous values are dropped to restart
calibration from scratch. Setting this variable to 0 disables calibration;
this is the default behaviour.
Note: this currently only applies to the @code{dm}, @code{dmda} and @code{heft} scheduling policies.
@end table
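As a sketch, a first calibration run with a performance-model-based policy
could look as follows (@code{./application} is a hypothetical binary):
@smallexample
% STARPU_CALIBRATE=1 STARPU_SCHED=dmda ./application
@end smallexample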
@node STARPU_PREFETCH
@subsubsection @code{STARPU_PREFETCH} -- Use data prefetch
@table @asis
@item @emph{Description}:
This variable indicates whether data prefetching should be enabled (0 means
that it is disabled). If prefetching is enabled, when a task is scheduled to be
executed e.g. on a GPU, StarPU will request an asynchronous transfer in
advance, so that data is already present on the GPU when the task starts. As a
result, computation and data transfers are overlapped.
Note that prefetching is enabled by default in StarPU.
@end table
@node STARPU_SCHED_ALPHA
@subsubsection @code{STARPU_SCHED_ALPHA} -- Computation factor
@table @asis
@item @emph{Description}:
To estimate the cost of a task, StarPU takes into account the estimated
computation time (obtained thanks to performance models). The alpha factor is
the coefficient to be applied to it before adding it to the communication part.
@end table
@node STARPU_SCHED_BETA
@subsubsection @code{STARPU_SCHED_BETA} -- Communication factor
@table @asis
@item @emph{Description}:
To estimate the cost of a task, StarPU takes into account the estimated
data transfer time (obtained thanks to performance models). The beta factor is
the coefficient to be applied to it before adding it to the computation part.
@end table
@node Misc
@subsection Miscellaneous and debug
@menu
* STARPU_SILENT:: Disable verbose mode
* STARPU_LOGFILENAME:: Select debug file name
* STARPU_FXT_PREFIX:: FxT trace location
* STARPU_LIMIT_GPU_MEM:: Restrict memory size on the GPUs
* STARPU_GENERATE_TRACE:: Generate a Paje trace when StarPU is shut down
@end menu
@node STARPU_SILENT
@subsubsection @code{STARPU_SILENT} -- Disable verbose mode
@table @asis
@item @emph{Description}:
This variable can be used to disable verbose mode at runtime when StarPU
has been configured with the option @code{--enable-verbose}.
@end table
@node STARPU_LOGFILENAME
@subsubsection @code{STARPU_LOGFILENAME} -- Select debug file name
@table @asis
@item @emph{Description}:
This variable specifies the file in which the debugging output should be saved.
@end table
@node STARPU_FXT_PREFIX
@subsubsection @code{STARPU_FXT_PREFIX} -- FxT trace location
@table @asis
@item @emph{Description}:
This variable specifies the directory in which to save the trace generated if FxT is enabled.
@end table
@node STARPU_LIMIT_GPU_MEM
@subsubsection @code{STARPU_LIMIT_GPU_MEM} -- Restrict memory size on the GPUs
@table @asis
@item @emph{Description}:
This variable specifies the maximum number of megabytes that should be
available to the application on each GPU. If this value is smaller than
the size of the memory of a GPU, StarPU pre-allocates a buffer that occupies
the remaining memory on the device. This variable is intended to be used for
experimental purposes, as it emulates devices that have a limited amount of
memory.
@end table
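For instance, to emulate GPUs with only 512 MB of memory (the value is only
an example):
@smallexample
% STARPU_LIMIT_GPU_MEM=512 ./application
@end smallexample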
@node STARPU_GENERATE_TRACE
@subsubsection @code{STARPU_GENERATE_TRACE} -- Generate a Paje trace when StarPU is shut down
@table @asis
@item @emph{Description}:
When set to 1, this variable indicates that StarPU should automatically
generate a Paje trace when @code{starpu_shutdown} is called.
@end table
@c ---------------------------------------------------------------------
@c StarPU API
@c ---------------------------------------------------------------------
@node StarPU API
@chapter StarPU API
@menu
* Initialization and Termination:: Initialization and Termination methods
* Workers' Properties:: Methods to enumerate workers' properties
* Data Library:: Methods to manipulate data
* Data Interfaces::
* Data Partition::
* Codelets and Tasks:: Methods to construct tasks
* Explicit Dependencies:: Explicit Dependencies
* Implicit Data Dependencies:: Implicit Data Dependencies
* Performance Model API::
* Profiling API:: Profiling API
* CUDA extensions:: CUDA extensions
* OpenCL extensions:: OpenCL extensions
* Cell extensions:: Cell extensions
* Miscellaneous helpers::
@end menu
@node Initialization and Termination
@section Initialization and Termination
@menu
* starpu_init:: Initialize StarPU
* struct starpu_conf:: StarPU runtime configuration
* starpu_conf_init:: Initialize starpu_conf structure
* starpu_shutdown:: Terminate StarPU
@end menu
@node starpu_init
@subsection @code{starpu_init} -- Initialize StarPU
@table @asis
@item @emph{Description}:
This is the StarPU initialization method, which must be called prior to any
other StarPU call. It is possible to specify StarPU's configuration (e.g.
scheduling policy, number of cores, ...) by passing a non-null argument. The
default configuration is used if the passed argument is @code{NULL}.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, @code{-ENODEV}
indicates that no worker was available (so that StarPU was not initialized).
@item @emph{Prototype}:
@code{int starpu_init(struct starpu_conf *conf);}
@end table
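As a minimal sketch, using the default configuration and checking for the
@code{-ENODEV} error case:
@cartouche
@smallexample
int ret = starpu_init(NULL);
if (ret == -ENODEV)
    return 1; /* no worker is available */
/* ... register data and submit tasks ... */
starpu_shutdown();
@end smallexample
@end cartouche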
@node struct starpu_conf
@subsection @code{struct starpu_conf} -- StarPU runtime configuration
@table @asis
@item @emph{Description}:
This structure is passed to the @code{starpu_init} function in order
to configure StarPU.
When the default value is used, StarPU automatically selects the number
of processing units and takes the default scheduling policy. This parameter
overwrites the equivalent environment variables.
@item @emph{Fields}:
@table @asis
@item @code{sched_policy_name} (default = NULL):
This is the name of the scheduling policy. This can also be specified with the
@code{STARPU_SCHED} environment variable.
@item @code{sched_policy} (default = NULL):
This is the definition of the scheduling policy. This field is ignored
if @code{sched_policy_name} is set.
@item @code{ncpus} (default = -1):
This is the number of CPU cores that StarPU can use. This can also be
specified with the @code{STARPU_NCPUS} environment variable.
@item @code{ncuda} (default = -1):
This is the number of CUDA devices that StarPU can use. This can also be
specified with the @code{STARPU_NCUDA} environment variable.
@item @code{nopencl} (default = -1):
This is the number of OpenCL devices that StarPU can use. This can also be
specified with the @code{STARPU_NOPENCL} environment variable.
@item @code{nspus} (default = -1):
This is the number of Cell SPUs that StarPU can use. This can also be
specified with the @code{STARPU_NGORDON} environment variable.
@item @code{use_explicit_workers_bindid} (default = 0):
If this flag is set, the @code{workers_bindid} array indicates where the
different workers are bound, otherwise StarPU automatically selects where to
bind the different workers unless the @code{STARPU_WORKERS_CPUID} environment
variable is set. The @code{STARPU_WORKERS_CPUID} environment variable is
ignored if the @code{use_explicit_workers_bindid} flag is set.
@item @code{workers_bindid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_bindid} flag is set, this array indicates
where to bind the different workers. The i-th entry of the
@code{workers_bindid} array indicates the logical identifier of the processor
which should execute the i-th worker. Note that the logical ordering of the
CPUs is either determined by the OS, or provided by the @code{hwloc} library
in case it is available.
When this flag is set, the @ref{STARPU_WORKERS_CPUID} environment variable is
ignored.
@item @code{use_explicit_workers_cuda_gpuid} (default = 0):
If this flag is set, the CUDA workers will be attached to the CUDA devices
specified in the @code{workers_cuda_gpuid} array. Otherwise, StarPU assigns
CUDA devices in a round-robin fashion.
When this flag is set, the @ref{STARPU_WORKERS_CUDAID} environment variable is
ignored.
@item @code{workers_cuda_gpuid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_cuda_gpuid} flag is set, this array contains
the logical identifiers of the CUDA devices (as used by @code{cudaGetDevice}).
@item @code{use_explicit_workers_opencl_gpuid} (default = 0):
If this flag is set, the OpenCL workers will be attached to the OpenCL devices
specified in the @code{workers_opencl_gpuid} array. Otherwise, StarPU assigns
OpenCL devices in a round-robin fashion.
When this flag is set, the @ref{STARPU_WORKERS_OPENCLID} environment variable
is ignored.
@item @code{workers_opencl_gpuid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_opencl_gpuid} flag is set, this array
contains the logical identifiers of the OpenCL devices.
@item @code{calibrate} (default = 0):
If this flag is set, StarPU will calibrate the performance models when
executing tasks. If this value is equal to -1, the default value is used. The
default value is overwritten by the @code{STARPU_CALIBRATE} environment
variable when it is set.
@end table
@end table
@node starpu_conf_init
@subsection @code{starpu_conf_init} -- Initialize starpu_conf structure
@table @asis
@item @emph{Description}:
This function initializes the @code{starpu_conf} structure passed as argument
with the default values. In case some configuration parameters are already
specified through environment variables, @code{starpu_conf_init} initializes
the fields of the structure according to the environment variables. For
instance if @code{STARPU_CALIBRATE} is set, its value is put in the
@code{.calibrate} field of the structure passed as argument.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the argument was NULL.
@item @emph{Prototype}:
@code{int starpu_conf_init(struct starpu_conf *conf);}
@end table
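As a sketch, the structure can be initialized to the default values and then
selectively overridden before calling @code{starpu_init} (the policy name and
worker count below are only examples):
@cartouche
@smallexample
struct starpu_conf conf;
starpu_conf_init(&conf);
conf.sched_policy_name = "dmda"; /* same effect as STARPU_SCHED=dmda */
conf.ncpus = 2;                  /* same effect as STARPU_NCPUS=2 */
if (starpu_init(&conf) == -ENODEV)
    return 1;
@end smallexample
@end cartouche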
@node starpu_shutdown
@subsection @code{starpu_shutdown} -- Terminate StarPU
@deftypefun void starpu_shutdown (void)
This is the StarPU termination method. It must be called at the end of the
application: statistics and other post-mortem debugging information are not
guaranteed to be available until this method has been called.
@end deftypefun
@node Workers' Properties
@section Workers' Properties
@menu
* starpu_worker_get_count:: Get the number of processing units
* starpu_worker_get_count_by_type:: Get the number of processing units of a given type
* starpu_cpu_worker_get_count:: Get the number of CPUs controlled by StarPU
* starpu_cuda_worker_get_count:: Get the number of CUDA devices controlled by StarPU
* starpu_opencl_worker_get_count:: Get the number of OpenCL devices controlled by StarPU
* starpu_spu_worker_get_count:: Get the number of Cell SPUs controlled by StarPU
* starpu_worker_get_id:: Get the identifier of the current worker
* starpu_worker_get_ids_by_type:: Get the list of identifiers of workers with a given type
* starpu_worker_get_devid:: Get the device identifier of a worker
* starpu_worker_get_type:: Get the type of processing unit associated to a worker
* starpu_worker_get_name:: Get the name of a worker
* starpu_worker_get_memory_node:: Get the memory node of a worker
@end menu
@node starpu_worker_get_count
@subsection @code{starpu_worker_get_count} -- Get the number of processing units
@deftypefun unsigned starpu_worker_get_count (void)
This function returns the number of workers (i.e. processing units executing
StarPU tasks). The returned value should be at most @code{STARPU_NMAXWORKERS}.
@end deftypefun
@node starpu_worker_get_count_by_type
@subsection @code{starpu_worker_get_count_by_type} -- Get the number of processing units of a given type
@deftypefun int starpu_worker_get_count_by_type ({enum starpu_archtype} @var{type})
Returns the number of workers of the type indicated by the argument. A positive
(or zero) value is returned on success; otherwise, @code{-EINVAL} indicates
that the type is not valid.
@end deftypefun
@node starpu_cpu_worker_get_count
@subsection @code{starpu_cpu_worker_get_count} -- Get the number of CPUs controlled by StarPU
@deftypefun unsigned starpu_cpu_worker_get_count (void)
This function returns the number of CPUs controlled by StarPU. The returned
value should be at most @code{STARPU_NMAXCPUS}.
@end deftypefun
@node starpu_cuda_worker_get_count
@subsection @code{starpu_cuda_worker_get_count} -- Get the number of CUDA devices controlled by StarPU
@deftypefun unsigned starpu_cuda_worker_get_count (void)
This function returns the number of CUDA devices controlled by StarPU. The returned
value should be at most @code{STARPU_MAXCUDADEVS}.
@end deftypefun
@node starpu_opencl_worker_get_count
@subsection @code{starpu_opencl_worker_get_count} -- Get the number of OpenCL devices controlled by StarPU
@deftypefun unsigned starpu_opencl_worker_get_count (void)
This function returns the number of OpenCL devices controlled by StarPU. The returned
value should be at most @code{STARPU_MAXOPENCLDEVS}.
@end deftypefun
@node starpu_spu_worker_get_count
@subsection @code{starpu_spu_worker_get_count} -- Get the number of Cell SPUs controlled by StarPU
@deftypefun unsigned starpu_spu_worker_get_count (void)
This function returns the number of Cell SPUs controlled by StarPU.
@end deftypefun
@node starpu_worker_get_id
@subsection @code{starpu_worker_get_id} -- Get the identifier of the current worker
@deftypefun int starpu_worker_get_id (void)
This function returns the identifier of the worker associated to the calling
thread. The returned value is either -1 if the current context is not a StarPU
worker (i.e. when called from the application outside a task or a callback), or
an integer between 0 and @code{starpu_worker_get_count() - 1}.
@end deftypefun
@node starpu_worker_get_ids_by_type
@subsection @code{starpu_worker_get_ids_by_type} -- Get the list of identifiers of workers with a given type
@deftypefun int starpu_worker_get_ids_by_type ({enum starpu_archtype} @var{type}, int *@var{workerids}, int @var{maxsize})
Fill the @var{workerids} array with the identifiers of the workers that have
the type indicated in the first argument. The @var{maxsize} argument indicates
the size of the @var{workerids} array. The returned value gives the number of
identifiers that were put in the array. @code{-ERANGE} is returned if
@var{maxsize} is lower than the number of workers with the appropriate type: in
that case, the array is filled with the @var{maxsize} first elements. To avoid
such overflows, the value of @var{maxsize} can be chosen by means of the
@code{starpu_worker_get_count_by_type} function, or by passing a value greater
than or equal to @code{STARPU_NMAXWORKERS}.
@end deftypefun
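For instance, a sketch collecting the identifiers of all CUDA workers:
@cartouche
@smallexample
int workerids[STARPU_NMAXWORKERS];
int ncuda = starpu_worker_get_ids_by_type(STARPU_CUDA_WORKER,
                                          workerids, STARPU_NMAXWORKERS);
@end smallexample
@end cartouche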
@node starpu_worker_get_devid
@subsection @code{starpu_worker_get_devid} -- Get the device identifier of a worker
@deftypefun int starpu_worker_get_devid (int @var{id})
This function returns the device id of the worker associated to an identifier
(as returned by the @code{starpu_worker_get_id} function). In the case of a
CUDA worker, this device identifier is the logical device identifier exposed by
CUDA (used by the @code{cudaGetDevice} function for instance). The device
identifier of a CPU worker is the logical identifier of the core on which the
worker was bound; this identifier is either provided by the OS or by the
@code{hwloc} library in case it is available.
@end deftypefun
@node starpu_worker_get_type
@subsection @code{starpu_worker_get_type} -- Get the type of processing unit associated to a worker
@deftypefun {enum starpu_archtype} starpu_worker_get_type (int @var{id})
This function returns the type of worker associated to an identifier (as
returned by the @code{starpu_worker_get_id} function). The returned value
indicates the architecture of the worker: @code{STARPU_CPU_WORKER} for a CPU
core, @code{STARPU_CUDA_WORKER} for a CUDA device,
@code{STARPU_OPENCL_WORKER} for an OpenCL device, and
@code{STARPU_GORDON_WORKER} for a Cell SPU. The value returned for an invalid
identifier is unspecified.
@end deftypefun
@node starpu_worker_get_name
@subsection @code{starpu_worker_get_name} -- Get the name of a worker
@deftypefun void starpu_worker_get_name (int @var{id}, char *@var{dst}, size_t @var{maxlen})
StarPU associates a unique human-readable string to each processing unit. This
function copies at most the @var{maxlen} first bytes of the unique string
associated to a worker identified by its identifier @var{id} into the
@var{dst} buffer. The caller is responsible for ensuring that @var{dst}
is a valid pointer to a buffer of @var{maxlen} bytes at least. Calling this
function on an invalid identifier results in an unspecified behaviour.
@end deftypefun
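As a sketch, enumerating all workers and printing their names (the buffer
size is arbitrary):
@cartouche
@smallexample
char name[64];
unsigned worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
@{
    starpu_worker_get_name(worker, name, sizeof(name));
    printf("worker %u is %s\n", worker, name);
@}
@end smallexample
@end cartouche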
@node starpu_worker_get_memory_node
@subsection @code{starpu_worker_get_memory_node} -- Get the memory node of a worker
@deftypefun unsigned starpu_worker_get_memory_node (unsigned @var{workerid})
This function returns the identifier of the memory node associated to the
worker identified by @var{workerid}.
@end deftypefun
@node Data Library
@section Data Library
This section describes the data management facilities provided by StarPU.
We show how to use existing data interfaces in @ref{Data Interfaces}, but developers can
design their own data interfaces if required.
@menu
* starpu_malloc:: Allocate data and pin it
* starpu_access_mode:: Data access mode
* unsigned memory_node:: Memory node
* starpu_data_handle:: StarPU opaque data handle
* void *interface:: StarPU data interface
* starpu_data_register:: Register a piece of data to StarPU
* starpu_data_unregister:: Unregister a piece of data from StarPU
* starpu_data_invalidate:: Invalidate all data replicates
* starpu_data_acquire:: Access registered data from the application
* starpu_data_acquire_cb:: Access registered data from the application asynchronously
* starpu_data_release:: Release registered data from the application
* starpu_data_set_wt_mask:: Set the Write-Through mask
* starpu_data_prefetch_on_node:: Prefetch data to a given node
@end menu
@node starpu_malloc
@subsection @code{starpu_malloc} -- Allocate data and pin it
@deftypefun int starpu_malloc (void **@var{A}, size_t @var{dim})
This function allocates data of the given size in main memory. It will also
try to pin it for CUDA or OpenCL, so that data transfers from this buffer can
be asynchronous, and thus permit overlapping data transfers with computation.
The allocated buffer must be freed thanks to the @code{starpu_free} function.
@end deftypefun
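As a sketch, allocating a pinned buffer that can later be registered with
@code{starpu_vector_data_register} (@code{NX} is a hypothetical size):
@cartouche
@smallexample
float *vector;
starpu_malloc((void **)&vector, NX * sizeof(vector[0]));
/* ... register the buffer, submit tasks, unregister ... */
starpu_free(vector);
@end smallexample
@end cartouche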
@node starpu_access_mode
@subsection @code{starpu_access_mode} -- Data access mode
This datatype describes a data access mode. The different available modes are:
@table @asis
@item @code{STARPU_R} read-only mode.
@item @code{STARPU_W} write-only mode.
@item @code{STARPU_RW} read-write mode. This is equivalent to @code{STARPU_R|STARPU_W}.
@item @code{STARPU_SCRATCH} scratch memory. A temporary buffer is allocated for
the task, but StarPU does not enforce data consistency, i.e. each device has
its own buffer, independent of the others (even for CPUs). This is useful for
temporary variables. For now, no behaviour is defined concerning the relation
with the STARPU_R/W modes and the value provided at registration, i.e. the
value of the scratch buffer is undefined at entry of the codelet function, but
this is being considered for future extensions.
@item @code{STARPU_REDUX} reduction mode. TODO: document, as well as @code{starpu_data_set_reduction_methods}
@end table
@node unsigned memory_node
@subsection @code{unsigned memory_node} -- Memory node
@table @asis
@item @emph{Description}:
Every worker is associated to a memory node which is a logical abstraction of
the address space from which the processing unit gets its data. For instance,
the memory node associated to the different CPU workers represents main memory
(RAM), while the memory node associated to a GPU is the DRAM embedded on the
device. Every memory node is identified by a logical index which is accessible
through the @code{starpu_worker_get_memory_node} function. When registering a
piece of data to StarPU, the specified memory node indicates where the piece of
data initially resides (we also call this memory node the home node of a piece
of data).
@end table
@node starpu_data_handle
@subsection @code{starpu_data_handle} -- StarPU opaque data handle
@table @asis
@item @emph{Description}:
StarPU uses @code{starpu_data_handle} as an opaque handle to manage a piece of
data. Once a piece of data has been registered to StarPU, it is associated to a
@code{starpu_data_handle} which keeps track of the state of the piece of data
over the entire machine, so that we can maintain data consistency and locate
data replicates for instance.
@end table
@node void *interface
@subsection @code{void *interface} -- StarPU data interface
@table @asis
@item @emph{Description}:
Data management is done at a high level in StarPU: rather than accessing a mere
list of contiguous buffers, the tasks may manipulate data that are described by
a high-level construct which we call data interface.
An example of data interface is the "vector" interface which describes a
contiguous data array on a specific memory node. This interface is a simple
structure containing the number of elements in the array, the size of the
elements, and the address of the array in the appropriate address space (this
address may be invalid if there is no valid copy of the array in the memory
node). More information on the data interfaces provided by StarPU is
given in @ref{Data Interfaces}.
When a piece of data managed by StarPU is used by a task, the task
implementation is given a pointer to an interface describing a valid copy of
the data that is accessible from the current processing unit.
@end table
@node starpu_data_register
@subsection @code{starpu_data_register} -- Register a piece of data to StarPU
@deftypefun void starpu_data_register (starpu_data_handle *@var{handleptr}, uint32_t @var{home_node}, void *@var{interface}, {struct starpu_data_interface_ops_t} *@var{ops})
Register a piece of data into the handle located at the @var{handleptr}
address. The @var{interface} buffer contains the initial description of the
data in the home node. The @var{ops} argument is a pointer to a structure
describing the different methods used to manipulate this type of interface. See
@ref{struct starpu_data_interface_ops_t} for more details on this structure.
If @code{home_node} is -1, StarPU will automatically
allocate the memory when it is used for the
first time in write-only mode. Once such a data handle has been automatically
allocated, it is possible to access it using any access mode.
Note that StarPU supplies a set of predefined types of interface (e.g. vector or
matrix) which can be registered by the means of helper functions (e.g.
@code{starpu_vector_data_register} or @code{starpu_matrix_data_register}).
@end deftypefun
@node starpu_data_unregister
@subsection @code{starpu_data_unregister} -- Unregister a piece of data from StarPU
@deftypefun void starpu_data_unregister (starpu_data_handle @var{handle})
This function unregisters a data handle from StarPU. If the data was
automatically allocated by StarPU because the home node was -1, all
automatically allocated buffers are freed. Otherwise, a valid copy of the data
is put back into the home node in the buffer that was initially registered.
Using a data handle that has been unregistered from StarPU results in an
undefined behaviour.
@end deftypefun
@node starpu_data_invalidate
@subsection @code{starpu_data_invalidate} -- Invalidate all data replicates
@deftypefun void starpu_data_invalidate (starpu_data_handle @var{handle})
Destroy all replicates of the data handle. After data invalidation, the first
access to the handle must be performed in write-only mode. Accessing an
invalidated data in read mode results in undefined behaviour.
@end deftypefun
@c TODO create a specific section about user interaction with the DSM?
@node starpu_data_acquire
@subsection @code{starpu_data_acquire} -- Access registered data from the application
@deftypefun int starpu_data_acquire (starpu_data_handle @var{handle}, starpu_access_mode @var{mode})
The application must call this function prior to accessing registered data from
main memory outside tasks. StarPU ensures that the application will get an
up-to-date copy of the data in main memory located where the data was
originally registered, and that all concurrent accesses (e.g. from tasks) will
be consistent with the access mode specified in the @var{mode} argument.
@code{starpu_data_release} must be called once the application does not need to
access the piece of data anymore. Note that implicit data
dependencies are also enforced by @code{starpu_data_acquire}, i.e.
@code{starpu_data_acquire} will wait for all tasks scheduled to work on
the data, unless they have been explicitly disabled by calling
@code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
@code{starpu_data_acquire} is a blocking call, so it cannot be called from
tasks or from their callbacks (in that case, @code{starpu_data_acquire} returns
@code{-EDEADLK}). Upon successful completion, this function returns 0.
@end deftypefun
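As a sketch, reading a registered vector from the application between task
submissions (@code{vector_handle} refers to a previously registered piece of
data):
@cartouche
@smallexample
starpu_data_acquire(vector_handle, STARPU_R);
/* the application may now safely read the vector in main memory */
starpu_data_release(vector_handle);
@end smallexample
@end cartouche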
@node starpu_data_acquire_cb
@subsection @code{starpu_data_acquire_cb} -- Access registered data from the application asynchronously
@deftypefun int starpu_data_acquire_cb (starpu_data_handle @var{handle}, starpu_access_mode @var{mode}, void (*@var{callback})(void *), void *@var{arg})
@code{starpu_data_acquire_cb} is the asynchronous equivalent of
@code{starpu_data_acquire}. When the data specified in the first argument is
available in the appropriate access mode, the callback function is executed.
The application may access the requested data during the execution of this
callback. The callback function must call @code{starpu_data_release} once the
application does not need to access the piece of data anymore.
Note that implicit data dependencies are also enforced by
@code{starpu_data_acquire_cb} in case they are enabled.
Contrary to @code{starpu_data_acquire}, this function is non-blocking and may
be called from task callbacks. Upon successful completion, this function
returns 0.
@end deftypefun
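As a sketch, the same access can be performed asynchronously; passing the
handle as the callback argument, as done here, is only one possible
convention:
@cartouche
@smallexample
void acquired_callback(void *arg)
@{
    starpu_data_handle handle = (starpu_data_handle)arg;
    /* the application may now access the piece of data */
    starpu_data_release(handle);
@}

/* ... somewhere in the application ... */
starpu_data_acquire_cb(vector_handle, STARPU_R,
                       acquired_callback, vector_handle);
@end smallexample
@end cartouche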
@node starpu_data_release
@subsection @code{starpu_data_release} -- Release registered data from the application
@deftypefun void starpu_data_release (starpu_data_handle @var{handle})
This function releases the piece of data acquired by the application either by
@code{starpu_data_acquire} or by @code{starpu_data_acquire_cb}.
@end deftypefun
@node starpu_data_set_wt_mask
@subsection @code{starpu_data_set_wt_mask} -- Set the Write-Through mask
@deftypefun void starpu_data_set_wt_mask (starpu_data_handle @var{handle}, uint32_t @var{wt_mask})
This function sets the write-through mask of a given data, i.e. a bitmask of
nodes where the data should always be replicated after modification.
@end deftypefun
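For instance, assuming that node 0 designates main memory as in the previous
examples, the following sketch requests that an up-to-date copy be kept in
main memory after each modification:
@cartouche
@smallexample
starpu_data_set_wt_mask(vector_handle, 1<<0);
@end smallexample
@end cartouche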
@node starpu_data_prefetch_on_node
@subsection @code{starpu_data_prefetch_on_node} -- Prefetch data to a given node
@deftypefun int starpu_data_prefetch_on_node (starpu_data_handle @var{handle}, unsigned @var{node}, unsigned @var{async})
Issue a prefetch request for a given data to a given node, i.e.
requests that the data be replicated to the given node, so that it is available
there for tasks. If the @var{async} parameter is 0, the call will block until
the transfer is completed, otherwise the call will return as soon as the
request is scheduled (which may however have to wait for a task completion).
@end deftypefun
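As a sketch, asynchronously replicating a piece of data to the memory node of
a given worker (@code{workerid} is assumed to have been obtained beforehand):
@cartouche
@smallexample
unsigned node = starpu_worker_get_memory_node(workerid);
starpu_data_prefetch_on_node(vector_handle, node, 1);
@end smallexample
@end cartouche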
@node Data Interfaces
@section Data Interfaces
@menu
* Variable Interface::
* Vector Interface::
* Matrix Interface::
* 3D Matrix Interface::
* BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)::
* CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)::
@end menu
@node Variable Interface
@subsection Variable Interface
@table @asis
@item @emph{Description}:
This variant of @code{starpu_data_register} uses the variable interface,
i.e. for a mere single variable. @code{ptr} is the address of the variable,
and @code{elemsize} is the size of the variable.
@item @emph{Prototype}:
@code{void starpu_variable_data_register(starpu_data_handle *handle,
uint32_t home_node, uintptr_t ptr, size_t elemsize);}
@item @emph{Example}:
@cartouche
@smallexample
float var;
starpu_data_handle var_handle;
starpu_variable_data_register(&var_handle, 0, (uintptr_t)&var, sizeof(var));
@end smallexample
@end cartouche
@end table
@node Vector Interface
@subsection Vector Interface
@table @asis
@item @emph{Description}:
This variant of @code{starpu_data_register} uses the vector interface,
i.e. for mere arrays of elements. @code{ptr} is the address of the first
element in the home node. @code{nx} is the number of elements in the vector.
@code{elemsize} is the size of each element.
@item @emph{Prototype}:
@code{void starpu_vector_data_register(starpu_data_handle *handle, uint32_t home_node,
uintptr_t ptr, uint32_t nx, size_t elemsize);}
@item @emph{Example}:
@cartouche
@smallexample
float vector[NX];
starpu_data_handle vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end smallexample
@end cartouche
@end table
@node Matrix Interface
@subsection Matrix Interface
@table @asis
@item @emph{Description}:
This variant of @code{starpu_data_register} uses the matrix interface, i.e. for
matrices of elements. @code{ptr} is the address of the first element in the home
node. @code{ld} is the number of elements between rows. @code{nx} is the number
of elements in a row (this can be different from @code{ld} if there are extra
elements for alignment for instance). @code{ny} is the number of rows.
@code{elemsize} is the size of each element.
@item @emph{Prototype}:
@code{void starpu_matrix_data_register(starpu_data_handle *handle, uint32_t home_node,
uintptr_t ptr, uint32_t ld, uint32_t nx,
uint32_t ny, size_t elemsize);}
@item @emph{Example}:
@cartouche
@smallexample
float *matrix;
starpu_data_handle matrix_handle;
matrix = (float*)malloc(width * height * sizeof(float));
starpu_matrix_data_register(&matrix_handle, 0, (uintptr_t)matrix,
                            width, width, height, sizeof(float));
@end smallexample
@end cartouche
@end table
@node 3D Matrix Interface
@subsection 3D Matrix Interface
@table @asis
@item @emph{Description}:
This variant of @code{starpu_data_register} uses the 3D matrix interface.
@code{ptr} is the address of the first element in the home node.
@code{ldy} is the number of elements between rows. @code{ldz} is the number
of rows between z planes. @code{nx} is the number of elements in a row (this
can be different from @code{ldy} if there are extra elements for alignment
for instance). @code{ny} is the number of rows in a z plane (likewise with
@code{ldz}). @code{nz} is the number of z planes. @code{elemsize} is the size of
each element.
@item @emph{Prototype}:
@code{void starpu_block_data_register(starpu_data_handle *handle, uint32_t home_node,
uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t nx,
uint32_t ny, uint32_t nz, size_t elemsize);}
@item @emph{Example}:
@cartouche
@smallexample
float *block;
starpu_data_handle block_handle;
block = (float*)malloc(nx*ny*nz*sizeof(float));
starpu_block_data_register(&block_handle, 0, (uintptr_t)block,
                           nx, nx*ny, nx, ny, nz, sizeof(float));
@end smallexample
@end cartouche
@end table
@node BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)
@subsection BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)
@deftypefun void starpu_bcsr_data_register (starpu_data_handle *@var{handle}, uint32_t @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, uint32_t @var{r}, uint32_t @var{c}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the BCSR sparse matrix interface.
TODO
@end deftypefun
@node CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)
@subsection CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)
@deftypefun void starpu_csr_data_register (starpu_data_handle *@var{handle}, uint32_t @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the CSR sparse matrix interface.
TODO
@end deftypefun
@node Data Partition
@section Data Partition
@menu
* struct starpu_data_filter:: StarPU filter structure
* starpu_data_partition:: Partition Data
* starpu_data_unpartition:: Unpartition Data
* starpu_data_get_nb_children::
* starpu_data_get_sub_data::
* Predefined filter functions::
@end menu
@node struct starpu_data_filter
@subsection @code{struct starpu_data_filter} -- StarPU filter structure
@table @asis
@item @emph{Description}:
The filter structure describes a data partitioning operation, to be given to the
@code{starpu_data_partition} function, see @ref{starpu_data_partition} for an example.
@item @emph{Fields}:
@table @asis
@item @code{filter_func}:
This function fills the @code{child_interface} structure with interface
information for the @code{id}-th child of the parent @code{father_interface} (among @code{nparts}).
@code{void (*filter_func)(void *father_interface, void* child_interface, struct starpu_data_filter *, unsigned id, unsigned nparts);}
@item @code{nchildren}:
This is the number of parts to partition the data into.
@item @code{get_nchildren}:
This returns the number of children. This can be used instead of @code{nchildren} when the number of
children depends on the actual data (e.g. the number of blocks in a sparse
matrix).
@code{unsigned (*get_nchildren)(struct starpu_data_filter *, starpu_data_handle initial_handle);}
@item @code{get_child_ops}:
In case the resulting children use a different data interface, this function
returns which interface is used by child number @code{id}.
@code{struct starpu_data_interface_ops_t *(*get_child_ops)(struct starpu_data_filter *, unsigned id);}
@item @code{filter_arg}:
Some filters take an additional parameter, but this is usually unused.
@item @code{filter_arg_ptr}:
Some filters take an additional array parameter like the sizes of the parts, but
this is usually unused.
@end table
@end table
@node starpu_data_partition
@subsection starpu_data_partition -- Partition Data
@table @asis
@item @emph{Description}:
This requests partitioning one StarPU data @code{initial_handle} into several
subdata according to the filter @code{f}.
@item @emph{Prototype}:
@code{void starpu_data_partition(starpu_data_handle initial_handle, struct starpu_data_filter *f);}
@item @emph{Example}:
@cartouche
@smallexample
struct starpu_data_filter f = @{
    .filter_func = starpu_vertical_block_filter_func,
    .nchildren = nslicesx,
    .get_nchildren = NULL,
    .get_child_ops = NULL
@};
starpu_data_partition(A_handle, &f);
@end smallexample
@end cartouche
@end table
@node starpu_data_unpartition
@subsection starpu_data_unpartition -- Unpartition data
@table @asis
@item @emph{Description}:
This unapplies one filter, thus unpartitioning the data. The pieces of data are
collected back into one big piece in the @code{gathering_node} (usually 0).
@item @emph{Prototype}:
@code{void starpu_data_unpartition(starpu_data_handle root_data, uint32_t gathering_node);}
@item @emph{Example}:
@cartouche
@smallexample
starpu_data_unpartition(A_handle, 0);
@end smallexample
@end cartouche
@end table
@node starpu_data_get_nb_children
@subsection starpu_data_get_nb_children
@table @asis
@item @emph{Description}:
This function returns the number of children.
@item @emph{Return value}:
The number of children.
@item @emph{Prototype}:
@code{int starpu_data_get_nb_children(starpu_data_handle handle);}
@end table
@c starpu_data_handle starpu_data_get_child(starpu_data_handle handle, unsigned i);
@node starpu_data_get_sub_data
@subsection starpu_data_get_sub_data
@table @asis
@item @emph{Description}:
After partitioning a StarPU data by applying a filter,
@code{starpu_data_get_sub_data} can be used to get handles for each of the data
portions. @code{root_data} is the parent data that was partitioned. @code{depth}
is the number of filters to traverse (in case several filters have been applied,
to e.g. partition in row blocks, and then in column blocks), and the subsequent
parameters are the indexes.
@item @emph{Return value}:
A handle to the subdata.
@item @emph{Prototype}:
@code{starpu_data_handle starpu_data_get_sub_data(starpu_data_handle root_data, unsigned depth, ... );}
@item @emph{Example}:
@cartouche
@smallexample
h = starpu_data_get_sub_data(A_handle, 1, taskx);
@end smallexample
@end cartouche
@end table
@node Predefined filter functions
@subsection Predefined filter functions
@menu
* Partitioning BCSR Data::
* Partitioning BLAS interface::
* Partitioning Vector Data::
* Partitioning Block Data::
@end menu
This section gives a partial list of the predefined partitioning functions.
Examples on how to use them are shown in @ref{Partitioning Data}. The complete
list can be found in @code{starpu_data_filters.h}.
@node Partitioning BCSR Data
@subsubsection Partitioning BCSR Data
@deftypefun void starpu_canonical_block_filter_bcsr (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
TODO
@end deftypefun
@deftypefun void starpu_vertical_block_filter_func_csr (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
TODO
@end deftypefun
@node Partitioning BLAS interface
@subsubsection Partitioning BLAS interface
@deftypefun void starpu_block_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix into horizontal blocks.
@end deftypefun
@deftypefun void starpu_vertical_block_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix into vertical blocks.
@end deftypefun
@node Partitioning Vector Data
@subsubsection Partitioning Vector Data
@deftypefun void starpu_block_filter_func_vector (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into blocks of the same size.
@end deftypefun
@deftypefun void starpu_vector_list_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into blocks of sizes given in @var{filter_arg_ptr}.
@end deftypefun
@deftypefun void starpu_vector_divide_in_2_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into two blocks, the first block size being given in @var{filter_arg}.
@end deftypefun
@node Partitioning Block Data
@subsubsection Partitioning Block Data
@deftypefun void starpu_block_filter_func_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a 3D matrix along the X axis.
@end deftypefun
@node Codelets and Tasks
@section Codelets and Tasks
This section describes the interface to manipulate codelets and tasks.
@deftp {Data Type} {struct starpu_codelet}
The codelet structure describes a kernel that is possibly implemented on various
targets. For compatibility, make sure to initialize the whole structure to zero.
@table @asis
@item @code{where}
Indicates which types of processing units are able to execute the codelet.
@code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
implemented for both CPU cores and CUDA devices while @code{STARPU_GORDON}
indicates that it is only available on Cell SPUs.
@item @code{cpu_func} (optional)
Is a function pointer to the CPU implementation of the codelet. Its prototype
must be: @code{void cpu_func(void *buffers[], void *cl_arg)}. The first
argument is the array of data managed by the data management library, and
the second argument is a pointer to the argument passed from the @code{cl_arg}
field of the @code{starpu_task} structure.
The @code{cpu_func} field is ignored if @code{STARPU_CPU} does not appear in
the @code{where} field; it must be non-null otherwise.
@item @code{cuda_func} (optional)
Is a function pointer to the CUDA implementation of the codelet. @emph{This
must be a host-function written in the CUDA runtime API}. Its prototype must
be: @code{void cuda_func(void *buffers[], void *cl_arg);}. The @code{cuda_func}
field is ignored if @code{STARPU_CUDA} does not appear in the @code{where}
field; it must be non-null otherwise.
@item @code{opencl_func} (optional)
Is a function pointer to the OpenCL implementation of the codelet. Its
prototype must be:
@code{void opencl_func(starpu_data_interface_t *descr, void *arg);}.
This pointer is ignored if @code{STARPU_OPENCL} does not appear in the
@code{where} field; it must be non-null otherwise.
@item @code{gordon_func} (optional)
This is the index of the Cell SPU implementation within the Gordon library.
See the Gordon documentation for more details on how to register a kernel and
retrieve its index.
@item @code{nbuffers}
Specifies the number of arguments taken by the codelet. These arguments are
managed by the DSM and are accessed from the @code{void *buffers[]}
array. The constant argument passed with the @code{cl_arg} field of the
@code{starpu_task} structure is not counted in this number. This value should
not be above @code{STARPU_NMAXBUFS}.
@item @code{model} (optional)
This is a pointer to the task duration performance model associated to this
codelet. This optional field is ignored when set to @code{NULL}.
TODO
@item @code{power_model} (optional)
This is a pointer to the task power consumption performance model associated
to this codelet. This optional field is ignored when set to @code{NULL}.
In the case of parallel codelets, this has to account for all processing units
involved in the parallel execution.
TODO
@end table
@end deftp
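As an illustration, here is a minimal sketch of a codelet which only provides a
CPU implementation; the @code{scal_cpu_func} function and the single buffer are
assumptions made for this example:
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg);

struct starpu_codelet scal_cl = @{
    .where = STARPU_CPU,
    .cpu_func = scal_cpu_func,
    .nbuffers = 1
@};
@end smallexample
@end cartouche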
@deftp {Data Type} {struct starpu_task}
The @code{starpu_task} structure describes a task that can be offloaded on the various
processing units managed by StarPU. It instantiates a codelet. It can either be
allocated dynamically with the @code{starpu_task_create} method, or declared
statically. In the latter case, the programmer has to zero the
@code{starpu_task} structure and to fill the different fields properly. The
indicated default values correspond to the configuration of a task allocated
with @code{starpu_task_create}.
@table @asis
@item @code{cl}
Is a pointer to the corresponding @code{starpu_codelet} data structure. This
describes where the kernel should be executed, and supplies the appropriate
implementations. When set to @code{NULL}, no code is executed during the task;
such empty tasks can be useful for synchronization purposes.
@item @code{buffers}
Is an array of @code{starpu_buffer_descr_t} structures. It describes the
different pieces of data accessed by the task, and how they should be accessed.
The @code{starpu_buffer_descr_t} structure is composed of two fields, the
@code{handle} field specifies the handle of the piece of data, and the
@code{mode} field is the required access mode (e.g. @code{STARPU_RW}). The number
of entries in this array must be specified in the @code{nbuffers} field of the
@code{starpu_codelet} structure, and should not exceed @code{STARPU_NMAXBUFS}.
If insufficient, this value can be set with the @code{--enable-maxbuffers}
option when configuring StarPU.
@item @code{cl_arg} (optional; default: @code{NULL})
This pointer is passed to the codelet through the second argument
of the codelet implementation (e.g. @code{cpu_func} or @code{cuda_func}).
In the specific case of the Cell processor, see the @code{cl_arg_size}
argument.
@item @code{cl_arg_size} (optional, Cell-specific)
In the case of the Cell processor, the @code{cl_arg} pointer is not directly
given to the SPU function. A buffer of size @code{cl_arg_size} is allocated on
the SPU. This buffer is then filled with the @code{cl_arg_size} bytes starting
at address @code{cl_arg}. In this case, the argument given to the SPU codelet
is therefore not the @code{cl_arg} pointer, but the address of the buffer in
local store (LS) instead. This field is ignored for CPU, CUDA and OpenCL
codelets, where the @code{cl_arg} pointer is given as such.
@item @code{callback_func} (optional) (default: @code{NULL})
This is a function pointer of prototype @code{void (*f)(void *)} which
specifies a possible callback. If this pointer is non-null, the callback
function is executed @emph{on the host} after the execution of the task. The
callback is passed the value contained in the @code{callback_arg} field. No
callback is executed if the field is set to @code{NULL}.
@item @code{callback_arg} (optional) (default: @code{NULL})
This is the pointer passed to the callback function. This field is ignored if
the @code{callback_func} field is set to @code{NULL}.
@item @code{use_tag} (optional) (default: @code{0})
If set, this flag indicates that the task should be associated with the tag
contained in the @code{tag_id} field. Tags allow the application to synchronize
with the task and to express task dependencies easily.
@item @code{tag_id}
This field contains the tag associated with the task if the @code{use_tag} field
was set; it is ignored otherwise.
@item @code{synchronous}
If this flag is set, the @code{starpu_task_submit} function is blocking and
returns only when the task has been executed (or if no worker is able to
process the task). Otherwise, @code{starpu_task_submit} returns immediately.
@item @code{priority} (optional) (default: @code{STARPU_DEFAULT_PRIO})
This field indicates a level of priority for the task. This is an integer value
that must be set between the return values of the
@code{starpu_sched_get_min_priority} function for the least important tasks,
and that of the @code{starpu_sched_get_max_priority} function for the most
important tasks (included). The @code{STARPU_MIN_PRIO} and @code{STARPU_MAX_PRIO}
macros are provided for convenience and respectively return the values of
@code{starpu_sched_get_min_priority} and @code{starpu_sched_get_max_priority}.
Default priority is @code{STARPU_DEFAULT_PRIO}, which is always defined as 0 in
order to allow static task initialization. Scheduling strategies that take
priorities into account can use this parameter to take better scheduling
decisions, but the scheduling policy may also ignore it.
@item @code{execute_on_a_specific_worker} (default: @code{0})
If this flag is set, StarPU will bypass the scheduler and directly assign this
task to the worker specified by the @code{workerid} field.
@item @code{workerid} (optional)
If the @code{execute_on_a_specific_worker} flag is set, this field indicates
the identifier of the worker that should process this task (as
returned by @code{starpu_worker_get_id}). This field is ignored if the
@code{execute_on_a_specific_worker} field is set to 0.
@item @code{detach} (optional) (default: @code{1})
If this flag is set, it is not possible to synchronize with the task
by the means of @code{starpu_task_wait} later on. Internal data structures
are only guaranteed to be freed once @code{starpu_task_wait} is called if the
flag is not set.
@item @code{destroy} (optional) (default: @code{1})
If this flag is set, the task structure will automatically be freed, either
after the execution of the callback if the task is detached, or during
@code{starpu_task_wait} otherwise. If this flag is not set, dynamically
allocated data structures will not be freed until @code{starpu_task_destroy} is
called explicitly. Setting this flag for a statically allocated task structure
will result in undefined behaviour.
@item @code{predicted} (output field)
Predicted duration of the task. This field is only set if the scheduling
strategy uses performance models.
@end table
@end deftp
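As an illustration, here is a minimal sketch which fills a dynamically
allocated task; @code{scal_cl}, @code{vector_handle} and @code{factor} are
assumed to be defined elsewhere:
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &scal_cl;
task->buffers[0].handle = vector_handle;
task->buffers[0].mode = STARPU_RW;
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->synchronous = 1;
@end smallexample
@end cartouche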
@deftypefun void starpu_task_init ({struct starpu_task} *@var{task})
Initialize @var{task} with default values. This function is implicitly
called by @code{starpu_task_create}. By default, tasks initialized with
@code{starpu_task_init} must be deinitialized explicitly with
@code{starpu_task_deinit}. Tasks can also be initialized statically, using the
constant @code{STARPU_TASK_INITIALIZER}.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_create (void)
Allocate a task structure and initialize it with default values. Tasks
allocated dynamically with @code{starpu_task_create} are automatically freed when the
task is terminated. If the destroy flag is explicitly unset, the resources used
by the task are freed by calling
@code{starpu_task_destroy}.
@end deftypefun
@deftypefun void starpu_task_deinit ({struct starpu_task} *@var{task})
Release all the structures automatically allocated to execute @var{task}. This is
called automatically by @code{starpu_task_destroy}, but the task structure itself is not
freed. This should be used for statically allocated tasks for instance.
@end deftypefun
@deftypefun void starpu_task_destroy ({struct starpu_task} *@var{task})
Free the resources allocated during @code{starpu_task_create} and
associated with @var{task}. This function can be called automatically
after the execution of a task by setting the @code{destroy} flag of the
@code{starpu_task} structure (default behaviour). Calling this function
on a statically allocated task results in undefined behaviour.
@end deftypefun
@deftypefun int starpu_task_wait ({struct starpu_task} *@var{task})
This function blocks until @var{task} has been executed. It is not possible to
synchronize with a task more than once. It is not possible to wait for
synchronous or detached tasks.
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the specified task was either synchronous or detached.
@end deftypefun
@deftypefun int starpu_task_submit ({struct starpu_task} *@var{task})
This function submits @var{task} to StarPU. Calling this function does
not mean that the task will be executed immediately as there can be data or task
(tag) dependencies that are not fulfilled yet: StarPU will take care of
scheduling this task with respect to such dependencies.
This function returns immediately if the @code{synchronous} field of the
@code{starpu_task} structure was set to 0, and blocks until the termination of
the task otherwise. It is also possible to synchronize the application with
asynchronous tasks by the means of tags, using the @code{starpu_tag_wait}
function for instance.
In case of success, this function returns 0; a return value of @code{-ENODEV}
means that there is no worker able to process this task (e.g. there is no GPU
available and this task is only implemented for CUDA devices).
@end deftypefun
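For instance, a submission might check for the @code{-ENODEV} case as in the
following sketch:
@cartouche
@smallexample
int ret = starpu_task_submit(task);
if (ret == -ENODEV)
    fprintf(stderr, "no worker can execute this task\n");
@end smallexample
@end cartouche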
@deftypefun int starpu_task_wait_for_all (void)
This function blocks until all the tasks that were submitted are terminated.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_get_current_task (void)
This function returns the task currently executed by the worker, or
@code{NULL} if it is called either from a thread that is not executing a task,
or simply because there is no task being executed at the moment.
@end deftypefun
@deftypefun void starpu_display_codelet_stats ({struct starpu_codelet_t} *@var{cl})
Output on @code{stderr} some statistics on the codelet @var{cl}.
@end deftypefun
@c Callbacks : what can we put in callbacks ?
@node Explicit Dependencies
@section Explicit Dependencies
@menu
* starpu_task_declare_deps_array:: starpu_task_declare_deps_array
* starpu_tag_t:: Task logical identifier
* starpu_tag_declare_deps:: Declare the Dependencies of a Tag
* starpu_tag_declare_deps_array:: Declare the Dependencies of a Tag
* starpu_tag_wait:: Block until a Tag is terminated
* starpu_tag_wait_array:: Block until a set of Tags is terminated
* starpu_tag_remove:: Destroy a Tag
* starpu_tag_notify_from_apps:: Feed a tag explicitly
@end menu
@node starpu_task_declare_deps_array
@subsection @code{starpu_task_declare_deps_array} -- Declare task dependencies
@deftypefun void starpu_task_declare_deps_array ({struct starpu_task} *@var{task}, unsigned @var{ndeps}, {struct starpu_task} *@var{task_array}[])
Declare task dependencies between a @var{task} and an array of tasks of length
@var{ndeps}. This function must be called prior to the submission of the task,
but it may be called after the submission or the execution of the tasks in the
array, provided the tasks are still valid (i.e. they were not automatically
destroyed). Calling this function on a task that was already submitted or with
an entry of @var{task_array} that is not a valid task anymore results in
undefined behaviour. If @var{ndeps} is zero, no dependency is added. It is
possible to call @code{starpu_task_declare_deps_array} multiple times on the
same task; in this case, the dependencies are added. It is possible to have
redundancy in the task dependencies.
@end deftypefun
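A minimal sketch, assuming @code{task_a} and @code{task_b} were created
beforehand and are still valid:
@cartouche
@smallexample
/* task will not start before task_a and task_b have terminated */
struct starpu_task *deps[] = @{task_a, task_b@};
starpu_task_declare_deps_array(task, 2, deps);
@end smallexample
@end cartouche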
@node starpu_tag_t
@subsection @code{starpu_tag_t} -- Task logical identifier
@table @asis
@item @emph{Description}:
It is possible to associate a task with a unique ``tag'' chosen by the application, and to express
dependencies between tasks by the means of those tags. To do so, fill the
@code{tag_id} field of the @code{starpu_task} structure with a tag number (can
be arbitrary) and set the @code{use_tag} field to 1.
If @code{starpu_tag_declare_deps} is called with this tag number, the task will
not be started until the tasks which hold the declared dependency tags have
completed.
@end table
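For instance, associating a task with an arbitrary tag value could be sketched
as follows:
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->use_tag = 1;
task->tag_id = (starpu_tag_t)0x42;
@end smallexample
@end cartouche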
@node starpu_tag_declare_deps
@subsection @code{starpu_tag_declare_deps} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
Specify the dependencies of the task identified by tag @code{id}. The first
argument specifies the tag which is configured, the second argument gives the
number of tag(s) on which @code{id} depends. The following arguments are the
tags which have to be terminated to unlock the task.
This function must be called before the associated task is submitted to StarPU
with @code{starpu_task_submit}.
@item @emph{Remark}
Because of the variable arity of @code{starpu_tag_declare_deps}, note that the
last arguments @emph{must} be of type @code{starpu_tag_t}: constant values
typically need to be explicitly cast. Using the
@code{starpu_tag_declare_deps_array} function avoids this hazard.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps(starpu_tag_t id, unsigned ndeps, ...);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_declare_deps((starpu_tag_t)0x1,
        2, (starpu_tag_t)0x32, (starpu_tag_t)0x52);
@end example
@end cartouche
@end table
@node starpu_tag_declare_deps_array
@subsection @code{starpu_tag_declare_deps_array} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
This function is similar to @code{starpu_tag_declare_deps}, except that it
does not take a variable number of arguments but an array of tags of size
@code{ndeps}.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps_array(starpu_tag_t id, unsigned ndeps, starpu_tag_t *array);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_t tag_array[2] = @{0x32, 0x52@};
starpu_tag_declare_deps_array((starpu_tag_t)0x1, 2, tag_array);
@end example
@end cartouche
@end table
@node starpu_tag_wait
@subsection @code{starpu_tag_wait} -- Block until a Tag is terminated
@deftypefun void starpu_tag_wait (starpu_tag_t @var{id})
This function blocks until the task associated to tag @var{id} has been
executed. This is a blocking call which must therefore not be called within
tasks or callbacks, but only from the application directly. It is possible to
synchronize with the same tag multiple times, as long as the
@code{starpu_tag_remove} function is not called. Note that it is still
possible to synchronize with a tag associated to a task whose @code{starpu_task}
data structure was freed (e.g. if the @code{destroy} flag of the
@code{starpu_task} was enabled).
@end deftypefun
@node starpu_tag_wait_array
@subsection @code{starpu_tag_wait_array} -- Block until a set of Tags is terminated
@deftypefun void starpu_tag_wait_array (unsigned @var{ntags}, starpu_tag_t *@var{id})
This function is similar to @code{starpu_tag_wait} except that it blocks until
@emph{all} the @var{ntags} tags contained in the @var{id} array are
terminated.
@end deftypefun
@node starpu_tag_remove
@subsection @code{starpu_tag_remove} -- Destroy a Tag
@deftypefun void starpu_tag_remove (starpu_tag_t @var{id})
This function releases the resources associated with tag @var{id}. It can be
called once the corresponding task has been executed and when there is
no other tag that depends on this tag anymore.
@end deftypefun
@node starpu_tag_notify_from_apps
@subsection @code{starpu_tag_notify_from_apps} -- Feed a Tag explicitly
@deftypefun void starpu_tag_notify_from_apps (starpu_tag_t @var{id})
This function explicitly unlocks tag @var{id}. It may be useful in the
case of applications which execute part of their computation outside StarPU
tasks (e.g. third-party libraries). It is also provided as a
convenient tool for the programmer, for instance to entirely construct the task
DAG before actually giving StarPU the opportunity to execute the tasks.
@end deftypefun
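For instance, here is a sketch where a computation performed outside of StarPU
unlocks the tasks depending on tag @code{0x24} (the tag value is arbitrary):
@cartouche
@smallexample
/* ... computation performed outside of StarPU ... */
starpu_tag_notify_from_apps((starpu_tag_t)0x24);
@end smallexample
@end cartouche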
@node Implicit Data Dependencies
@section Implicit Data Dependencies
@menu
* starpu_data_set_default_sequential_consistency_flag:: starpu_data_set_default_sequential_consistency_flag
* starpu_data_get_default_sequential_consistency_flag:: starpu_data_get_default_sequential_consistency_flag
* starpu_data_set_sequential_consistency_flag:: starpu_data_set_sequential_consistency_flag
@end menu
In this section, we describe how StarPU makes it possible to insert implicit
task dependencies in order to enforce sequential data consistency. When this
data consistency is enabled on a specific data handle, any data access will
appear as sequentially consistent from the application. For instance, if the
application submits two tasks that access the same piece of data in read-only
mode, and then a third task that accesses it in write mode, dependencies will be
added between the first two tasks and the third one. Implicit data dependencies
are also inserted in the case of data accesses from the application.
@node starpu_data_set_default_sequential_consistency_flag
@subsection @code{starpu_data_set_default_sequential_consistency_flag} -- Set default sequential consistency flag
@deftypefun void starpu_data_set_default_sequential_consistency_flag (unsigned @var{flag})
Set the default sequential consistency flag. If a non-zero value is passed,
sequential data consistency will be enforced for all handles registered after
this function call, otherwise it is disabled. By default, StarPU enables
sequential data consistency. It is also possible to select the data consistency
mode of a specific data handle with the
@code{starpu_data_set_sequential_consistency_flag} function.
@end deftypefun
@node starpu_data_get_default_sequential_consistency_flag
@subsection @code{starpu_data_get_default_sequential_consistency_flag} -- Get current default sequential consistency flag
@deftypefun unsigned starpu_data_get_default_sequential_consistency_flag (void)
This function returns the current default sequential consistency flag.
@end deftypefun
@node starpu_data_set_sequential_consistency_flag
@subsection @code{starpu_data_set_sequential_consistency_flag} -- Set data sequential consistency mode
@deftypefun void starpu_data_set_sequential_consistency_flag (starpu_data_handle @var{handle}, unsigned @var{flag})
Select the data consistency mode associated to a data handle. The consistency
mode set using this function takes priority over the default mode, which can
be set with @code{starpu_data_set_default_sequential_consistency_flag}.
@end deftypefun
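For instance, disabling implicit data dependencies for a given handle, so that
only explicit dependencies apply to it, could be sketched as follows:
@cartouche
@smallexample
starpu_data_set_sequential_consistency_flag(vector_handle, 0);
@end smallexample
@end cartouche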
@node Performance Model API
@section Performance Model API
@menu
* starpu_load_history_debug::
* starpu_perfmodel_debugfilepath::
* starpu_perfmodel_get_arch_name::
* starpu_force_bus_sampling::
@end menu
@node starpu_load_history_debug
@subsection @code{starpu_load_history_debug}
@deftypefun int starpu_load_history_debug ({const char} *@var{symbol}, {struct starpu_perfmodel_t} *@var{model})
TODO
@end deftypefun
@node starpu_perfmodel_debugfilepath
@subsection @code{starpu_perfmodel_debugfilepath}
@deftypefun void starpu_perfmodel_debugfilepath ({struct starpu_perfmodel_t} *@var{model}, {enum starpu_perf_archtype} @var{arch}, char *@var{path}, size_t @var{maxlen})
TODO
@end deftypefun
@node starpu_perfmodel_get_arch_name
@subsection @code{starpu_perfmodel_get_arch_name}
@deftypefun void starpu_perfmodel_get_arch_name ({enum starpu_perf_archtype} @var{arch}, char *@var{archname}, size_t @var{maxlen})
TODO
@end deftypefun
@node starpu_force_bus_sampling
@subsection @code{starpu_force_bus_sampling}
@deftypefun void starpu_force_bus_sampling (void)
This forces sampling the bus performance model again.
@end deftypefun
@node Profiling API
@section Profiling API
@menu
* starpu_profiling_status_set:: starpu_profiling_status_set
* starpu_profiling_status_get:: starpu_profiling_status_get
* struct starpu_task_profiling_info:: task profiling information
* struct starpu_worker_profiling_info:: worker profiling information
* starpu_worker_get_profiling_info:: starpu_worker_get_profiling_info
* struct starpu_bus_profiling_info:: bus profiling information
* starpu_bus_get_count::
* starpu_bus_get_id::
* starpu_bus_get_src::
* starpu_bus_get_dst::
* starpu_timing_timespec_delay_us::
* starpu_timing_timespec_to_us::
* starpu_bus_profiling_helper_display_summary::
* starpu_worker_profiling_helper_display_summary::
@end menu
@node starpu_profiling_status_set
@subsection @code{starpu_profiling_status_set} -- Set current profiling status
@table @asis
@item @emph{Description}:
This function sets the profiling status. Profiling is activated by passing
@code{STARPU_PROFILING_ENABLE} in @code{status}. Passing
@code{STARPU_PROFILING_DISABLE} disables profiling. Calling this function
resets all profiling measurements. When profiling is enabled, the
@code{profiling_info} field of the @code{struct starpu_task} structure points
to a valid @code{struct starpu_task_profiling_info} structure containing
information about the execution of the task.
@item @emph{Return value}:
Negative return values indicate an error, otherwise the previous status is
returned.
@item @emph{Prototype}:
@code{int starpu_profiling_status_set(int status);}
@end table
@node starpu_profiling_status_get
@subsection @code{starpu_profiling_status_get} -- Get current profiling status
@deftypefun int starpu_profiling_status_get (void)
Return the current profiling status or a negative value in case there was an error.
@end deftypefun
@node struct starpu_task_profiling_info
@subsection @code{struct starpu_task_profiling_info} -- Task profiling information
@table @asis
@item @emph{Description}:
This structure contains information about the execution of a task. It is
accessible from the @code{profiling_info} field of the @code{starpu_task}
structure if profiling was enabled.
@item @emph{Fields}:
@table @asis
@item @code{submit_time}:
Date of task submission (relative to the initialization of StarPU).
@item @code{start_time}:
Date of task execution beginning (relative to the initialization of StarPU).
@item @code{end_time}:
Date of task execution termination (relative to the initialization of StarPU).
@item @code{workerid}:
Identifier of the worker which has executed the task.
@end table
@end table
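As a sketch, the execution length of a task could be computed from these fields
with @code{starpu_timing_timespec_delay_us}, assuming profiling was enabled
before the task was submitted and that the dates are stored as
@code{struct timespec} values:
@cartouche
@smallexample
starpu_profiling_status_set(STARPU_PROFILING_ENABLE);
/* ... submit the task and wait for its termination ... */
struct starpu_task_profiling_info *info = task->profiling_info;
double length_us = starpu_timing_timespec_delay_us(&info->start_time,
                                                   &info->end_time);
@end smallexample
@end cartouche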
@node struct starpu_worker_profiling_info
@subsection @code{struct starpu_worker_profiling_info} -- Worker profiling information
@table @asis
@item @emph{Description}:
This structure contains the profiling information associated to a worker.
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
Starting date for the reported profiling measurements.
@item @code{total_time}:
Duration of the profiling measurement interval.
@item @code{executing_time}:
Time spent by the worker to execute tasks during the profiling measurement interval.
@item @code{sleeping_time}:
Time spent idling by the worker during the profiling measurement interval.
@item @code{executed_tasks}:
Number of tasks executed by the worker during the profiling measurement interval.
@end table
@end table
@node starpu_worker_get_profiling_info
@subsection @code{starpu_worker_get_profiling_info} -- Get worker profiling info
@table @asis
@item @emph{Description}:
Get the profiling info associated to the worker identified by @code{workerid},
and reset the profiling measurements. If the @code{worker_info} argument is
@code{NULL}, only reset the counters associated to worker @code{workerid}.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, a negative
value is returned.
@item @emph{Prototype}:
@code{int starpu_worker_get_profiling_info(int workerid, struct starpu_worker_profiling_info *worker_info);}
@end table
@node struct starpu_bus_profiling_info
@subsection @code{struct starpu_bus_profiling_info} -- Bus profiling information
@table @asis
@item @emph{Description}:
TODO
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
TODO
@item @code{total_time}:
TODO
@item @code{transferred_bytes}:
TODO
@item @code{transfer_count}:
TODO
@end table
@end table
@node starpu_bus_get_count
@subsection @code{starpu_bus_get_count}
@deftypefun int starpu_bus_get_count (void)
TODO
@end deftypefun
@node starpu_bus_get_id
@subsection @code{starpu_bus_get_id}
@deftypefun int starpu_bus_get_id (int @var{src}, int @var{dst})
TODO
@end deftypefun
@node starpu_bus_get_src
@subsection @code{starpu_bus_get_src}
@deftypefun int starpu_bus_get_src (int @var{busid})
TODO
@end deftypefun
@node starpu_bus_get_dst
@subsection @code{starpu_bus_get_dst}
@deftypefun int starpu_bus_get_dst (int @var{busid})
TODO
@end deftypefun
@node starpu_timing_timespec_delay_us
@subsection @code{starpu_timing_timespec_delay_us}
@deftypefun double starpu_timing_timespec_delay_us ({struct timespec} *@var{start}, {struct timespec} *@var{end})
TODO
@end deftypefun
@node starpu_timing_timespec_to_us
@subsection @code{starpu_timing_timespec_to_us}
@deftypefun double starpu_timing_timespec_to_us ({struct timespec} *@var{ts})
TODO
@end deftypefun
@node starpu_bus_profiling_helper_display_summary
@subsection @code{starpu_bus_profiling_helper_display_summary}
@deftypefun void starpu_bus_profiling_helper_display_summary (void)
TODO
@end deftypefun
@node starpu_worker_profiling_helper_display_summary
@subsection @code{starpu_worker_profiling_helper_display_summary}
@deftypefun void starpu_worker_profiling_helper_display_summary (void)
TODO
@end deftypefun
@node CUDA extensions
@section CUDA extensions
@c void starpu_malloc(float **A, size_t dim);
@menu
* starpu_cuda_get_local_stream:: Get current worker's CUDA stream
* starpu_helper_cublas_init:: Initialize CUBLAS on every CUDA device
* starpu_helper_cublas_shutdown:: Deinitialize CUBLAS on every CUDA device
@end menu
@node starpu_cuda_get_local_stream
@subsection @code{starpu_cuda_get_local_stream} -- Get current worker's CUDA stream
@deftypefun {cudaStream_t *} starpu_cuda_get_local_stream (void)
StarPU provides a stream for every CUDA device controlled by StarPU. This
function is only provided for convenience so that programmers can easily use
asynchronous operations within codelets without having to create a stream by
hand. Note that the application is not forced to use the stream provided by
@code{starpu_cuda_get_local_stream} and may also create its own streams.
Synchronizing with @code{cudaThreadSynchronize()} is allowed, but will reduce
the likelihood of having all transfers overlapped.
@end deftypefun
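For instance, a CUDA codelet might launch its kernel on the local stream and
synchronize before returning; the @code{scal_cuda_kernel} kernel and its
parameters are assumptions made for this sketch:
@cartouche
@smallexample
scal_cuda_kernel<<<nblocks, threads_per_block, 0,
                   *starpu_cuda_get_local_stream()>>>(n, val, factor);
cudaStreamSynchronize(*starpu_cuda_get_local_stream());
@end smallexample
@end cartouche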
@node starpu_helper_cublas_init
@subsection @code{starpu_helper_cublas_init} -- Initialize CUBLAS on every CUDA device
@deftypefun void starpu_helper_cublas_init (void)
The CUBLAS library must be initialized prior to any CUBLAS call. Calling
@code{starpu_helper_cublas_init} will initialize CUBLAS on every CUDA device
controlled by StarPU. This call blocks until CUBLAS has been properly
initialized on every device.
@end deftypefun
@node starpu_helper_cublas_shutdown
@subsection @code{starpu_helper_cublas_shutdown} -- Deinitialize CUBLAS on every CUDA device
@deftypefun void starpu_helper_cublas_shutdown (void)
This function synchronously deinitializes the CUBLAS library on every CUDA device.
@end deftypefun
@node OpenCL extensions
@section OpenCL extensions
@menu
* Enabling OpenCL:: Enabling OpenCL
* Compiling OpenCL kernels:: Compiling OpenCL kernels
* Loading OpenCL kernels:: Loading OpenCL kernels
* OpenCL statistics:: Collecting statistics from OpenCL
@end menu
@node Enabling OpenCL
@subsection Enabling OpenCL
On GPU devices which can run both CUDA and OpenCL, CUDA will be
enabled by default. To enable OpenCL, you need either to disable CUDA
when configuring StarPU:
@example
% ./configure --disable-cuda
@end example
or when running applications:
@example
% STARPU_NCUDA=0 ./application
@end example
OpenCL will automatically be started on any device not yet used by
CUDA. On a machine with 4 GPUs, it is therefore possible to
enable CUDA on 2 devices, and OpenCL on the other 2 devices, as
follows:
@example
% STARPU_NCUDA=2 ./application
@end example
@node Compiling OpenCL kernels
@subsection Compiling OpenCL kernels
Source codes for OpenCL kernels can be stored in a file or in a
string. StarPU provides functions to build the program executable for
each available OpenCL device as a @code{cl_program} object. This
program executable can then be loaded within a specific queue as
explained in the next section. These are only helpers; applications
can also fill a @code{starpu_opencl_program} array by hand for more advanced
use (e.g. different programs on the different OpenCL devices, for
instance for relocation purposes).
@menu
* starpu_opencl_load_opencl_from_file:: Compiling OpenCL source code
* starpu_opencl_load_opencl_from_string:: Compiling OpenCL source code
* starpu_opencl_unload_opencl:: Releasing OpenCL code
@end menu
@node starpu_opencl_load_opencl_from_file
@subsubsection @code{starpu_opencl_load_opencl_from_file} -- Compiling OpenCL source code
@deftypefun int starpu_opencl_load_opencl_from_file (char *@var{source_file_name}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
TODO
@end deftypefun
@node starpu_opencl_load_opencl_from_string
@subsubsection @code{starpu_opencl_load_opencl_from_string} -- Compiling OpenCL source code
@deftypefun int starpu_opencl_load_opencl_from_string (char *@var{opencl_program_source}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
TODO
@end deftypefun
@node starpu_opencl_unload_opencl
@subsubsection @code{starpu_opencl_unload_opencl} -- Releasing OpenCL code
@deftypefun int starpu_opencl_unload_opencl ({struct starpu_opencl_program} *@var{opencl_programs})
TODO
@end deftypefun
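A minimal sketch, assuming the kernel source is stored in a (hypothetical)
@code{vector_scal_kernel.cl} file:
@cartouche
@smallexample
struct starpu_opencl_program programs;
starpu_opencl_load_opencl_from_file("vector_scal_kernel.cl",
                                    &programs, NULL);
/* ... submit tasks whose codelets use the compiled programs ... */
starpu_opencl_unload_opencl(&programs);
@end smallexample
@end cartouche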
@node Loading OpenCL kernels
@subsection Loading OpenCL kernels
@menu
* starpu_opencl_load_kernel:: Loading a kernel
* starpu_opencl_release_kernel:: Releasing a kernel
@end menu
@node starpu_opencl_load_kernel
@subsubsection @code{starpu_opencl_load_kernel} -- Loading a kernel
@deftypefun int starpu_opencl_load_kernel (cl_kernel *@var{kernel}, cl_command_queue *@var{queue}, {struct starpu_opencl_program} *@var{opencl_programs}, char *@var{kernel_name}, int @var{devid})
TODO
@end deftypefun
@node starpu_opencl_release_kernel
@subsubsection @code{starpu_opencl_release_kernel} -- Releasing a kernel
@deftypefun int starpu_opencl_release_kernel (cl_kernel @var{kernel})
TODO
@end deftypefun
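Here is a sketch of how an OpenCL codelet might retrieve and release a kernel;
the @code{programs} array is assumed to have been filled beforehand, and the
kernel is assumed to be named @code{vector_scal}:
@cartouche
@smallexample
cl_kernel kernel;
cl_command_queue queue;
int devid = starpu_worker_get_devid(starpu_worker_get_id());
starpu_opencl_load_kernel(&kernel, &queue, &programs,
                          "vector_scal", devid);
/* ... set the kernel arguments and enqueue it on queue ... */
starpu_opencl_release_kernel(kernel);
@end smallexample
@end cartouche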
@node OpenCL statistics
@subsection OpenCL statistics
@menu
* starpu_opencl_collect_stats:: Collect statistics on a kernel execution
@end menu
@node starpu_opencl_collect_stats
@subsubsection @code{starpu_opencl_collect_stats} -- Collect statistics on a kernel execution
@deftypefun int starpu_opencl_collect_stats (cl_event @var{event})
After termination of the kernels, the OpenCL codelet should call this function
to pass it the event returned by @code{clEnqueueNDRangeKernel}, to let StarPU
collect statistics about the kernel execution (used cycles, consumed power).
@end deftypefun
@node Cell extensions
@section Cell extensions
Nothing yet.
@node Miscellaneous helpers
@section Miscellaneous helpers
@menu
* starpu_data_cpy:: Copy a data handle into another data handle
* starpu_execute_on_each_worker:: Execute a function on a subset of workers
@end menu
@node starpu_data_cpy
@subsection @code{starpu_data_cpy} -- Copy a data handle into another data handle
@deftypefun int starpu_data_cpy (starpu_data_handle @var{dst_handle}, starpu_data_handle @var{src_handle}, int @var{asynchronous}, void (*@var{callback_func})(void*), void *@var{callback_arg})
Copy the content of the @var{src_handle} into the @var{dst_handle} handle.
The @var{asynchronous} parameter indicates whether the function should
block or not. In the case of an asynchronous call, it is possible to
synchronize with the termination of this operation either by the means of
implicit dependencies (if enabled) or by calling
@code{starpu_task_wait_for_all()}. If @var{callback_func} is not @code{NULL},
this callback function is executed after the handle has been copied, and it is
given the @var{callback_arg} pointer as argument.
@end deftypefun
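For instance, a blocking copy without any callback could be sketched as
follows:
@cartouche
@smallexample
starpu_data_cpy(dst_handle, src_handle, 0, NULL, NULL);
@end smallexample
@end cartouche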
@node starpu_execute_on_each_worker
@subsection @code{starpu_execute_on_each_worker} -- Execute a function on a subset of workers
@deftypefun void starpu_execute_on_each_worker (void (*@var{func})(void *), void *@var{arg}, uint32_t @var{where})
When calling this method, the offloaded function specified by the first argument is
executed by every StarPU worker that may execute the function.
The second argument is passed to the offloaded function.
The last argument specifies on which types of processing units the function
should be executed. Similarly to the @var{where} field of the
@code{starpu_codelet} structure, it is possible to specify that the function
should be executed on every CUDA device and every CPU by passing
@code{STARPU_CPU|STARPU_CUDA}.
This function blocks until the function has been executed on every appropriate
processing unit, so it must not be called from a callback function, for
instance.
@end deftypefun
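For instance, a (hypothetical) initialization function could be run once by
every CPU and CUDA worker as follows:
@cartouche
@smallexample
void init_func(void *arg)
@{
    /* executed once by each selected worker */
@}

starpu_execute_on_each_worker(init_func, NULL, STARPU_CPU|STARPU_CUDA);
@end smallexample
@end cartouche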
@c ---------------------------------------------------------------------
@c Advanced Topics
@c ---------------------------------------------------------------------
@node Advanced Topics
@chapter Advanced Topics
@menu
* Defining a new data interface::
* Defining a new scheduling policy::
@end menu
@node Defining a new data interface
@section Defining a new data interface
@menu
* struct starpu_data_interface_ops_t:: Per-interface methods
* struct starpu_data_copy_methods:: Per-interface data transfer methods
* An example of data interface:: An example of data interface
@end menu
@c void *starpu_data_get_interface_on_node(starpu_data_handle handle, unsigned memory_node); TODO
@node struct starpu_data_interface_ops_t
@subsection @code{struct starpu_data_interface_ops_t} -- Per-interface methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table
@node struct starpu_data_copy_methods
@subsection @code{struct starpu_data_copy_methods} -- Per-interface data transfer methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table
@node An example of data interface
@subsection An example of data interface
@table @asis
TODO
See @code{src/datawizard/interfaces/vector_interface.c} for now.
@end table
@node Defining a new scheduling policy
@section Defining a new scheduling policy
TODO
A full example showing how to define a new scheduling policy is available in
the StarPU sources in the directory @code{examples/scheduler/}.
@menu
* struct starpu_sched_policy_s::
* starpu_worker_set_sched_condition::
* starpu_sched_set_min_priority:: Set the minimum priority level
* starpu_sched_set_max_priority:: Set the maximum priority level
* starpu_push_local_task:: Assign a task to a worker
* Source code::
@end menu
@node struct starpu_sched_policy_s
@subsection @code{struct starpu_sched_policy_s} -- Scheduler methods
@table @asis
@item @emph{Description}:
This structure contains all the methods that implement a scheduling policy. An
application may specify which scheduling strategy to use in the @code{sched_policy}
field of the @code{starpu_conf} structure passed to the @code{starpu_init}
function.
@item @emph{Fields}:
@table @asis
@item @code{init_sched}:
Initialize the scheduling policy.
@item @code{deinit_sched}:
Cleanup the scheduling policy.
@item @code{push_task}:
Insert a task into the scheduler.
@item @code{push_prio_task}:
Insert a priority task into the scheduler.
@item @code{push_prio_notify}:
Notify the scheduler that a task was pushed on the worker. This method is
called when a task that was explicitly assigned to a worker is scheduled. This
method therefore makes it possible to keep the state of the scheduler coherent
even when StarPU bypasses the scheduling strategy.
@item @code{pop_task}:
Get a task from the scheduler. The mutex associated to the worker is already
taken when this method is called. If this method is defined as @code{NULL}, the
worker will only execute tasks from its local queue. In this case, the
@code{push_task} method should use the @code{starpu_push_local_task} method to
assign tasks to the different workers.
@item @code{pop_every_task}:
Remove all available tasks from the scheduler (tasks are chained by the means
of the prev and next fields of the starpu_task structure). The mutex associated
to the worker is already taken when this method is called.
@item @code{post_exec_hook} (optional):
This method is called every time a task has been executed.
@item @code{policy_name}:
Name of the policy (optional).
@item @code{policy_description}:
Description of the policy (optional).
@end table
@end table
@node starpu_worker_set_sched_condition
@subsection @code{starpu_worker_set_sched_condition} -- Specify the condition variable associated to a worker
@deftypefun void starpu_worker_set_sched_condition (int @var{workerid}, pthread_cond_t *@var{sched_cond}, pthread_mutex_t *@var{sched_mutex})
When there is no available task for a worker, StarPU blocks this worker on a
condition variable. This function specifies which condition variable (and the
associated mutex) should be used to block (and to wake up) a worker. Note that
multiple workers may use the same condition variable. For instance, in the case
of a scheduling strategy with a single task queue, the same condition variable
would be used to block and wake up all workers.
The initialization method of a scheduling strategy (@code{init_sched}) must
call this function once per worker.
@end deftypefun
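For instance, the initialization method of a strategy with a single task queue
might use one shared condition variable for all workers. This is a sketch;
@code{sched_cond} and @code{sched_mutex} are assumed to be global variables of
the policy:
@cartouche
@smallexample
pthread_cond_init(&sched_cond, NULL);
pthread_mutex_init(&sched_mutex, NULL);

unsigned workerid;
for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
    starpu_worker_set_sched_condition(workerid, &sched_cond, &sched_mutex);
@end smallexample
@end cartouche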
@node starpu_sched_set_min_priority
@subsection @code{starpu_sched_set_min_priority}
@deftypefun void starpu_sched_set_min_priority (int @var{min_prio})
Defines the minimum priority level supported by the scheduling policy. The
default minimum priority level is the same as the default priority level which
is 0 by convention. The application may access that value by calling the
@code{starpu_sched_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly from the application.
@end deftypefun
@node starpu_sched_set_max_priority
@subsection @code{starpu_sched_set_max_priority}
@deftypefun void starpu_sched_set_max_priority (int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly from the application.
@end deftypefun
@node starpu_push_local_task
@subsection @code{starpu_push_local_task}
@deftypefun int starpu_push_local_task (int @var{workerid}, {struct starpu_task} *@var{task}, int @var{back})
The scheduling policy may put tasks directly into a worker's local queue so
that it is not always necessary to create its own queue when the local queue
is sufficient. If @var{back} is not zero, the task is put at the back of the
queue where the worker will pop tasks first. Setting @var{back} to 0 therefore
ensures a FIFO ordering.
@end deftypefun
@node Source code
@subsection Source code
@cartouche
@smallexample
static struct starpu_sched_policy_s dummy_sched_policy = @{
    .init_sched = init_dummy_sched,
    .deinit_sched = deinit_dummy_sched,
    .push_task = push_task_dummy,
    .push_prio_task = NULL,
    .pop_task = pop_task_dummy,
    .post_exec_hook = NULL,
    .pop_every_task = NULL,
    .policy_name = "dummy",
    .policy_description = "dummy scheduling strategy"
@};
@end smallexample
@end cartouche
@c ---------------------------------------------------------------------
@c Appendices
@c ---------------------------------------------------------------------
@c ---------------------------------------------------------------------
@c Full source code for the 'Scaling a Vector' example
@c ---------------------------------------------------------------------
@node Full source code for the 'Scaling a Vector' example
@appendix Full source code for the 'Scaling a Vector' example
@menu
* Main application::
* CPU Kernel::
* CUDA Kernel::
* OpenCL Kernel::
@end menu
@node Main application
@section Main application
@smallexample
@include vector_scal_c.texi
@end smallexample
@node CPU Kernel
@section CPU Kernel
@smallexample
@include vector_scal_cpu.texi
@end smallexample
@node CUDA Kernel
@section CUDA Kernel
@smallexample
@include vector_scal_cuda.texi
@end smallexample
@node OpenCL Kernel
@section OpenCL Kernel
@menu
* Invoking the kernel::
* Source of the kernel::
@end menu
@node Invoking the kernel
@subsection Invoking the kernel
@smallexample
@include vector_scal_opencl.texi
@end smallexample
@node Source of the kernel
@subsection Source of the kernel
@smallexample
@include vector_scal_opencl_codelet.texi
@end smallexample
@c
@c Indices.
@c
@node Function Index
@unnumbered Function Index
@printindex fn
@bye