\input texinfo @c -*-texinfo-*-
@c %**start of header
@setfilename starpu.info
@settitle StarPU Handbook
@c %**end of header
@include version.texi
@copying
Copyright @copyright{} 2009--2011 Universit@'e de Bordeaux 1
@noindent
Copyright @copyright{} 2010, 2011 Centre National de la Recherche Scientifique
@noindent
Copyright @copyright{} 2011 Institut National de Recherche en Informatique et Automatique
@quotation
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3
or any later version published by the Free Software Foundation;
with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
Texts. A copy of the license is included in the section entitled ``GNU
Free Documentation License''.
@end quotation
@end copying
@setchapternewpage odd
@dircategory Development
@direntry
* StarPU: (starpu). StarPU Handbook
@end direntry
@titlepage
@title StarPU Handbook
@subtitle for StarPU @value{VERSION}
@page
@vskip 0pt plus 1fill
@insertcopying
@end titlepage
@c @summarycontents
@contents
@page
@node Top
@top Preface
This manual documents the usage of StarPU version @value{VERSION}. It
was last updated on @value{UPDATED}.
@ifnottex
@insertcopying
@end ifnottex
@comment
@comment When you add a new menu item, please keep the right hand
@comment aligned to the same column. Do not use tabs. This provides
@comment better formatting.
@comment
@menu
* Introduction::                A basic introduction to using StarPU
* Installing StarPU::           How to configure, build and install StarPU
* Using StarPU::                How to run StarPU applications
* Basic Examples::              Basic examples of the use of StarPU
* Performance optimization::    How to optimize performance with StarPU
* Performance feedback::        Performance debugging tools
* StarPU MPI support::          How to combine StarPU with MPI
* Tips and Tricks::             Tips and tricks to know about
* Configuring StarPU::          How to configure StarPU
* StarPU API::                  The API to use StarPU
* Advanced Topics::             Advanced use of StarPU
* C Extensions::                Easier StarPU programming with GCC
* Full source code for the 'Scaling a Vector' example::
* Function Index::              Index of C functions.
* GNU Free Documentation License::  How you can copy and share this manual.
@end menu
@c ---------------------------------------------------------------------
@c Introduction to StarPU
@c ---------------------------------------------------------------------
@node Introduction
@chapter Introduction to StarPU
@menu
* Motivation:: Why StarPU?
* StarPU in a Nutshell:: The Fundamentals of StarPU
@end menu
@node Motivation
@section Motivation
@c complex machines with heterogeneous cores/devices
The use of specialized hardware such as accelerators or coprocessors offers an
interesting approach to overcome the physical limits encountered by processor
architects. As a result, many machines are now equipped with one or several
accelerators (e.g. a GPU), in addition to the usual processor(s). While much
effort has been devoted to offloading computation onto such accelerators, very
little attention has been paid to portability concerns on the one hand, and to the
possibility of having heterogeneous accelerators and processors interact on the other hand.
StarPU is a runtime system that offers support for heterogeneous multicore
architectures. It not only offers a unified view of the computational resources
(i.e. CPUs and accelerators at the same time), but it also takes care of
efficiently mapping and executing tasks onto a heterogeneous machine while
transparently handling low-level issues such as data transfers in a portable
fashion.
@c this leads to a complicated distributed memory design
@c which is not (easily) manageable by hand
@c added value/benefits of StarPU
@c - portability
@c - scheduling, perf. portability
@node StarPU in a Nutshell
@section StarPU in a Nutshell
@menu
* Codelet and Tasks::
* StarPU Data Management Library::
* Glossary::
* Research Papers::
@end menu
From a programming point of view, StarPU is not a new language but a library
that executes tasks explicitly submitted by the application. The data that a
task manipulates are automatically transferred onto the accelerator so that the
programmer does not have to take care of complex data movements. StarPU also
takes particular care of scheduling those tasks efficiently and allows
scheduling experts to implement custom scheduling policies in a portable
fashion.
@c explain the notion of codelet and task (i.e. g(A, B)
@node Codelet and Tasks
@subsection Codelet and Tasks
One of the StarPU primary data structures is the @b{codelet}. A codelet describes a
computational kernel that can possibly be implemented on multiple architectures
such as a CPU, a CUDA device or a Cell's SPU.
@c TODO insert illustration f : f_spu, f_cpu, ...
Another important data structure is the @b{task}. Executing a StarPU task
consists in applying a codelet on a data set, on one of the architectures on
which the codelet is implemented. A task thus describes the codelet that it
uses, but also which data are accessed, and how they are
accessed during the computation (read and/or write).
StarPU tasks are asynchronous: submitting a task to StarPU is a non-blocking
operation. The task structure can also specify a @b{callback} function that is
called once StarPU has properly executed the task. It also contains optional
fields that the application may use to give hints to the scheduler (such as
priority levels).
By default, task dependencies are inferred from data dependency (sequential
coherence) by StarPU. The application can however disable sequential coherency
for some data, and dependencies can then be expressed by hand.
A task may be identified by a unique 64-bit number chosen by the application,
which we refer to as a @b{tag}.
Task dependencies can be enforced by hand either by the means of callback functions, by
submitting other tasks, or by expressing dependencies
between tags (which can thus correspond to tasks that have not been submitted
yet).
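For instance, here is a minimal sketch of a tag dependency, assuming a
@code{task} pointer obtained from @code{starpu_task_create} (the tag values
are arbitrary numbers chosen for illustration):
@cartouche
@smallexample
/* Tag 0x42 becomes ready only once tags 0x32 and 0x52 are. */
starpu_tag_declare_deps((starpu_tag_t)0x42, 2,
                        (starpu_tag_t)0x32, (starpu_tag_t)0x52);

/* Attach tag 0x42 to a task so that it waits for these dependencies. */
task->use_tag = 1;
task->tag_id = (starpu_tag_t)0x42;
@end smallexample
@end cartouche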
@c TODO insert illustration f(Ar, Brw, Cr) + ..
@c DSM
@node StarPU Data Management Library
@subsection StarPU Data Management Library
Because StarPU schedules tasks at runtime, data transfers have to be
done automatically and ``just-in-time'' between processing units,
relieving the application programmer from explicit data transfers.
Moreover, to avoid unnecessary transfers, StarPU keeps data
where it was last needed, even if it was modified there, and it
allows multiple copies of the same data to reside at the same time on
several processing units as long as it is not modified.
@node Glossary
@subsection Glossary
A @b{codelet} records pointers to various implementations of the same
theoretical function.
A @b{memory node} can be either the main RAM or GPU-embedded memory.
A @b{bus} is a link between memory nodes.
A @b{data handle} keeps track of replicates of the same data (@b{registered} by the
application) over various memory nodes. The data management library manages
keeping them coherent.
The @b{home} memory node of a data handle is the memory node from which the data
was registered (usually the main memory node).
A @b{task} represents a scheduled execution of a codelet on some data handles.
A @b{tag} is a rendez-vous point. Tasks typically have their own tag, and can
depend on other tags. The value is chosen by the application.
A @b{worker} executes tasks. There is typically one per CPU computation core and
one per accelerator (for which a whole CPU core is dedicated).
A @b{driver} drives a given kind of worker. There are currently CPU, CUDA,
OpenCL and Gordon drivers. They usually start several workers to actually drive
them.
A @b{performance model} is a (dynamic or static) model of the performance of a
given codelet. Codelets can have an execution time performance model as well as
a power consumption performance model.
A data @b{interface} describes the layout of the data: for a vector, a pointer
for the start, the number of elements and the size of elements; for a matrix, a
pointer for the start, the number of elements per row, the offset between rows,
and the size of each element; etc. To access their data, codelet functions are
given interfaces for the local memory node replicates of the data handles of the
scheduled task.
@b{Partitioning} data means dividing the data of a given data handle (called
@b{father}) into a series of @b{children} data handles which designate various
portions of the former.
A @b{filter} is the function which computes children data handles from a father
data handle, and thus describes how the partitioning should be done (horizontal,
vertical, etc.).
@b{Acquiring} a data handle can be done from the main application, to safely
access the data of a data handle from its home node, without having to
unregister it.
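As a minimal sketch, assuming a data handle @code{vector_handle} registered
beforehand:
@cartouche
@smallexample
/* Wait until StarPU has synchronized the data back to its home node,
   then access it directly from the application. */
starpu_data_acquire(vector_handle, STARPU_R);
/* ... safely read the vector here ... */
starpu_data_release(vector_handle);
@end smallexample
@end cartouche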
@node Research Papers
@subsection Research Papers
Research papers about StarPU can be found at
@indicateurl{http://runtime.bordeaux.inria.fr/Publis/Keyword/STARPU.html}.
Notably, a good overview is given in the research report
@indicateurl{http://hal.archives-ouvertes.fr/inria-00467677}.
@c ---------------------------------------------------------------------
@c Installing StarPU
@c ---------------------------------------------------------------------
@node Installing StarPU
@chapter Installing StarPU
@menu
* Downloading StarPU::
* Configuration of StarPU::
* Building and Installing StarPU::
@end menu
StarPU can be built and installed by the standard means of the GNU
autotools. The following chapter briefly explains how these tools
can be used to install StarPU.
@node Downloading StarPU
@section Downloading StarPU
@menu
* Getting Sources::
* Optional dependencies::
@end menu
@node Getting Sources
@subsection Getting Sources
The simplest way to get StarPU sources is to download the latest official
release tarball from @indicateurl{https://gforge.inria.fr/frs/?group_id=1570},
or the latest nightly snapshot from
@indicateurl{http://starpu.gforge.inria.fr/testing/}. The following documents
how to get the very latest version from the Subversion repository itself; this
should only be needed if you require the very latest changes (i.e. less than a
day old!)
The source code is managed by a Subversion server hosted by the
InriaGforge. To get the source code, you need:
@itemize
@item
To install the client side of the Subversion software if it is
not already available on your system. The software can be obtained from
@indicateurl{http://subversion.tigris.org}. If you are running
on Windows, you will probably prefer to use TortoiseSVN from
@indicateurl{http://tortoisesvn.tigris.org/}.
@item
You can check out the project's SVN repository through anonymous
access. This will provide you with read access to the
repository.
If you need write access on the StarPU project, you can also choose to
become a member of the project @code{starpu}. For this, you first need to get
an account on the gForge server. You can then send a request to join the project
(@indicateurl{https://gforge.inria.fr/project/request.php?group_id=1570}).
@item
More information on how to get a gForge account, to become a member of
a project, or on any other related task can be obtained from the
InriaGforge at @indicateurl{https://gforge.inria.fr/}. The most important
thing is to upload your public SSH key on the gForge server (see the
FAQ at @indicateurl{http://siteadmin.gforge.inria.fr/FAQ.html#Q6} for
instructions).
@end itemize
You can now check out the latest version from the Subversion server:
@itemize
@item
using the anonymous access via svn:
@example
% svn checkout svn://scm.gforge.inria.fr/svn/starpu/trunk
@end example
@item
using the anonymous access via https:
@example
% svn checkout --username anonsvn https://scm.gforge.inria.fr/svn/starpu/trunk
@end example
The password is @code{anonsvn}.
@item
using your gForge account
@example
% svn checkout svn+ssh://<login>@@scm.gforge.inria.fr/svn/starpu/trunk
@end example
@end itemize
The following step requires the availability of @code{autoconf} and
@code{automake} to generate the @code{./configure} script. This is
done by calling @code{./autogen.sh}. The required version for
@code{autoconf} is 2.60 or higher. You will also need @code{makeinfo}.
@example
% ./autogen.sh
@end example
If the autotools are not available on your machine or not recent
enough, you can choose to download the latest nightly tarball, which
is provided with a @code{configure} script.
@example
% wget http://starpu.gforge.inria.fr/testing/starpu-nightly-latest.tar.gz
@end example
@node Optional dependencies
@subsection Optional dependencies
The topology discovery library @code{hwloc} is not mandatory to use StarPU
but is strongly recommended. It allows StarPU to increase performance and to
perform some topology-aware scheduling.
@code{hwloc} is available in major distributions and for most OSes and can be
downloaded from @indicateurl{http://www.open-mpi.org/software/hwloc}.
@node Configuration of StarPU
@section Configuration of StarPU
@menu
* Generating Makefiles and configuration scripts::
* Running the configuration::
@end menu
@node Generating Makefiles and configuration scripts
@subsection Generating Makefiles and configuration scripts
This step is not necessary when using the tarball releases of StarPU. If you
are using the source code from the svn repository, you first need to generate
the configure scripts and the Makefiles.
@example
% ./autogen.sh
@end example
@node Running the configuration
@subsection Running the configuration
@example
% ./configure
@end example
Details about options that are useful to give to @code{./configure} are given in
@ref{Compilation configuration}.
@node Building and Installing StarPU
@section Building and Installing StarPU
@menu
* Building::
* Sanity Checks::
* Installing::
@end menu
@node Building
@subsection Building
@example
% make
@end example
@node Sanity Checks
@subsection Sanity Checks
In order to make sure that StarPU is working properly on the system, it is also
possible to run a test suite.
@example
% make check
@end example
@node Installing
@subsection Installing
In order to install StarPU at the location that was specified during
configuration:
@example
% make install
@end example
@c ---------------------------------------------------------------------
@c Using StarPU
@c ---------------------------------------------------------------------
@node Using StarPU
@chapter Using StarPU
@menu
* Setting flags for compiling and linking applications::
* Running a basic StarPU application::
* Kernel threads started by StarPU::
* Enabling OpenCL::
@end menu
@node Setting flags for compiling and linking applications
@section Setting flags for compiling and linking applications
Compiling and linking an application against StarPU may require specific
flags or libraries (for instance @code{CUDA} or @code{libspe2}).
To this end, it is possible to use the @code{pkg-config} tool.
If StarPU was not installed at some standard location, the path of StarPU's
library must be specified in the @code{PKG_CONFIG_PATH} environment variable so
that @code{pkg-config} can find it. For example if StarPU was installed in
@code{$prefix_dir}:
@example
% PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$prefix_dir/lib/pkgconfig
@end example
The flags required to compile or link against StarPU are then
accessible with the following commands:
@example
% pkg-config --cflags libstarpu  # options for the compiler
% pkg-config --libs libstarpu    # options for the linker
@end example
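For instance, a program @code{hello.c} (a hypothetical file name) can be
compiled and linked in one step:
@example
% cc hello.c -o hello $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu)
@end example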
@node Running a basic StarPU application
@section Running a basic StarPU application
Basic examples using StarPU are built in the directory
@code{examples/basic_examples/} (and installed in
@code{$prefix_dir/lib/starpu/examples/}). You can for example run the example
@code{vector_scal}.
@example
% ./examples/basic_examples/vector_scal
BEFORE : First element was 1.000000
AFTER First element is 3.140000
%
@end example
When StarPU is used for the first time, the directory
@code{$HOME/.starpu/} is created; performance models will be stored in
that directory.
Please note that buses are benchmarked when StarPU is launched for the
first time. This may take a few minutes, or less if @code{hwloc} is
installed. This step is done only once per user and per machine.
@node Kernel threads started by StarPU
@section Kernel threads started by StarPU
StarPU automatically binds one thread per CPU core. It does not use
SMT/hyperthreading because kernels are usually already optimized for using a
full core, and using hyperthreading would make kernel calibration rather random.
Since driving GPUs is a CPU-consuming task, StarPU dedicates one core per GPU.
While StarPU tasks are executing, the application is not supposed to do
computations in the threads it starts itself; tasks should be used instead.
TODO: add a StarPU function to bind an application thread (e.g. the main thread)
to a dedicated core (and thus disable the corresponding StarPU CPU worker).
@node Enabling OpenCL
@section Enabling OpenCL
When both CUDA and OpenCL drivers are enabled, StarPU will launch an
OpenCL worker for NVIDIA GPUs only if CUDA is not already running on them.
This design choice was necessary as OpenCL and CUDA cannot run at the
same time on the same NVIDIA GPU, as there is currently no interoperability
between them.
To enable OpenCL, you need either to disable CUDA when configuring StarPU:
@example
% ./configure --disable-cuda
@end example
or when running applications:
@example
% STARPU_NCUDA=0 ./application
@end example
OpenCL will automatically be started on any device not yet used by
CUDA. On a machine with 4 GPUs, it is therefore possible to
enable CUDA on 2 devices and OpenCL on the 2 other devices by doing
so:
@example
% STARPU_NCUDA=2 ./application
@end example
@c ---------------------------------------------------------------------
@c Basic Examples
@c ---------------------------------------------------------------------
@node Basic Examples
@chapter Basic Examples
@menu
* Compiling and linking options::
* Hello World::                 Submitting Tasks
* Scaling a Vector::            Manipulating Data
* Vector Scaling on a Hybrid CPU/GPU Machine::  Handling Heterogeneous Architectures
* Using multiple implementations of a codelet::
* Task and Worker Profiling::
* Partitioning Data::           Partitioning Data
* Performance model example::
* Theoretical lower bound on execution time::
* Insert Task Utility::
* More examples::               More examples shipped with StarPU
* Debugging::                   When things go wrong.
@end menu
@node Compiling and linking options
@section Compiling and linking options
Let's suppose StarPU has been installed in the directory
@code{$STARPU_DIR}. As explained in @ref{Setting flags for compiling and linking applications},
the variable @code{PKG_CONFIG_PATH} needs to be set. It is also
necessary to set the variable @code{LD_LIBRARY_PATH} to locate dynamic
libraries at runtime.
@example
% PKG_CONFIG_PATH=$STARPU_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
% LD_LIBRARY_PATH=$STARPU_DIR/lib:$LD_LIBRARY_PATH
@end example
The Makefile could for instance contain the following lines to define which
options must be given to the compiler and to the linker:
@cartouche
@example
CFLAGS  += $$(pkg-config --cflags libstarpu)
LDFLAGS += $$(pkg-config --libs libstarpu)
@end example
@end cartouche
@node Hello World
@section Hello World
@menu
* Required Headers::
* Defining a Codelet::
* Submitting a Task::
* Execution of Hello World::
@end menu
In this section, we show how to implement a simple program that submits a task to StarPU.
@node Required Headers
@subsection Required Headers
The @code{starpu.h} header should be included in any code using StarPU.
@cartouche
@smallexample
#include <starpu.h>
@end smallexample
@end cartouche
@node Defining a Codelet
@subsection Defining a Codelet
@cartouche
@smallexample
struct params @{
    int i;
    float f;
@};

void cpu_func(void *buffers[], void *cl_arg)
@{
    struct params *params = cl_arg;

    printf("Hello world (params = @{%i, %f@} )\n", params->i, params->f);
@}

starpu_codelet cl =
@{
    .where = STARPU_CPU,
    .cpu_func = cpu_func,
    .nbuffers = 0
@};
@end smallexample
@end cartouche
A codelet is a structure that represents a computational kernel. Such a codelet
may contain an implementation of the same kernel on different architectures
(e.g. CUDA, Cell's SPU, x86, ...).
The @code{nbuffers} field specifies the number of data buffers that are
manipulated by the codelet: here the codelet does not access or modify any data
that is controlled by our data management library. Note that the argument
passed to the codelet (the @code{cl_arg} field of the @code{starpu_task}
structure) does not count as a buffer since it is not managed by our data
management library, but just contains trivial parameters.
@c TODO need a crossref to the proper description of "where" see bla for more ...
We create a codelet which may only be executed on the CPUs. The @code{where}
field is a bitmask that defines where the codelet may be executed. Here, the
@code{STARPU_CPU} value means that only CPUs can execute this codelet
(@pxref{Codelets and Tasks} for more details on this field).
When a CPU core executes a codelet, it calls the @code{cpu_func} function,
which @emph{must} have the following prototype:
@code{void (*cpu_func)(void *buffers[], void *cl_arg);}
In this example, we can ignore the first argument of this function which gives a
description of the input and output buffers (e.g. the size and the location of
the matrices) since there is none.
The second argument is a pointer to a buffer passed as an
argument to the codelet by the means of the @code{cl_arg} field of the
@code{starpu_task} structure.
@c TODO rewrite so that it is a little clearer ?
Be aware that this may be a pointer to a
@emph{copy} of the actual buffer, and not the pointer given by the programmer:
if the codelet modifies this buffer, there is no guarantee that the initial
buffer will be modified as well: this for instance implies that the buffer
cannot be used as a synchronization medium. If synchronization is needed, data
has to be registered to StarPU, see @ref{Scaling a Vector}.
@node Submitting a Task
@subsection Submitting a Task
@cartouche
@smallexample
void callback_func(void *callback_arg)
@{
    printf("Callback function (arg %x)\n", callback_arg);
@}

int main(int argc, char **argv)
@{
    /* @b{initialize StarPU} */
    starpu_init(NULL);

    struct starpu_task *task = starpu_task_create();

    task->cl = &cl; /* @b{Pointer to the codelet defined above} */

    struct params params = @{ 1, 2.0f @};
    task->cl_arg = &params;
    task->cl_arg_size = sizeof(params);

    task->callback_func = callback_func;
    task->callback_arg = 0x42;

    /* @b{starpu_task_submit will be a blocking call} */
    task->synchronous = 1;

    /* @b{submit the task to StarPU} */
    starpu_task_submit(task);

    /* @b{terminate StarPU} */
    starpu_shutdown();

    return 0;
@}
@end smallexample
@end cartouche
Before submitting any tasks to StarPU, @code{starpu_init} must be called. The
@code{NULL} argument specifies that we use the default configuration. Tasks cannot
be submitted after the termination of StarPU by a call to
@code{starpu_shutdown}.
In the example above, a task structure is allocated by a call to
@code{starpu_task_create}. This function only allocates and fills the
corresponding structure with the default settings (@pxref{Codelets and
Tasks, starpu_task_create}), but it does not submit the task to StarPU.
@c not really clear ;)
The @code{cl} field is a pointer to the codelet which the task will
execute: in other words, the codelet structure describes which computational
kernel should be offloaded on the different architectures, and the task
structure is a wrapper containing a codelet and the piece of data on which the
codelet should operate.
The optional @code{cl_arg} field is a pointer to a buffer (of size
@code{cl_arg_size}) with some parameters for the kernel
described by the codelet. For instance, if a codelet implements a computational
kernel that multiplies its input vector by a constant, the constant could be
specified by the means of this buffer, instead of registering it as a StarPU
data. It must however be noted that StarPU avoids making copies whenever possible
and rather passes the pointer as such, so the buffer which is pointed at must be
kept allocated until the task terminates, and if several tasks are submitted
with various parameters, each of them must be given a pointer to its own
buffer.
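As a minimal sketch (with hypothetical @code{task1} and @code{task2} pointers,
reusing the @code{struct params} type from above), submitting two tasks with
different parameters thus requires two distinct buffers:
@cartouche
@smallexample
/* One parameter buffer per task; both must remain allocated until the
   corresponding task has terminated. */
static struct params params_array[2] = @{ @{ 1, 2.0f @}, @{ 2, 4.0f @} @};

task1->cl_arg = &params_array[0];
task1->cl_arg_size = sizeof(params_array[0]);
task2->cl_arg = &params_array[1];
task2->cl_arg_size = sizeof(params_array[1]);
@end smallexample
@end cartouche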
Once a task has been executed, an optional callback function is called.
While the computational kernel could be offloaded on various architectures, the
callback function is always executed on a CPU. The @code{callback_arg}
pointer is passed as an argument of the callback. The prototype of a callback
function must be:
@code{void (*callback_function)(void *);}
If the @code{synchronous} field is non-zero, task submission will be
synchronous: the @code{starpu_task_submit} function will not return until the
task has been executed. Note that the @code{starpu_shutdown} method does not
guarantee that asynchronous tasks have been executed before it returns,
@code{starpu_task_wait_for_all} can be used to that effect, or data can be
unregistered (@code{starpu_data_unregister(vector_handle);}), which will
implicitly wait for all the tasks scheduled to work on it, unless explicitly
disabled thanks to @code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
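As a minimal sketch, asynchronous submission followed by a global barrier could
look like this:
@cartouche
@smallexample
task->synchronous = 0;       /* submission returns immediately */
starpu_task_submit(task);
/* ... submit further tasks ... */
starpu_task_wait_for_all();  /* block until all submitted tasks have completed */
@end smallexample
@end cartouche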
@node Execution of Hello World
@subsection Execution of Hello World
@smallexample
% make hello_world
cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) hello_world.c -o hello_world
% ./hello_world
Hello world (params = @{1, 2.000000@} )
Callback function (arg 42)
@end smallexample
@node Scaling a Vector
@section Manipulating Data: Scaling a Vector
The previous example has shown how to submit tasks. In this section,
we show how StarPU tasks can manipulate data. The full source code for
this example is given in @ref{Full source code for the 'Scaling a Vector' example}.
@menu
* Source code of Vector Scaling::
* Execution of Vector Scaling::
@end menu
@node Source code of Vector Scaling
@subsection Source code of Vector Scaling
Programmers can describe the data layout of their application so that StarPU is
responsible for enforcing data coherency and availability across the machine.
Instead of handling complex (and non-portable) mechanisms to perform data
movements, programmers only declare which piece of data is accessed and/or
modified by a task, and StarPU makes sure that when a computational kernel
starts somewhere (e.g. on a GPU), its data are available locally.
Before submitting those tasks, the programmer first needs to declare the
different pieces of data to StarPU using the @code{starpu_*_data_register}
functions. To ease the development of applications for StarPU, it is possible
to describe multiple types of data layout. A type of data layout is called an
@b{interface}. There are different predefined interfaces available in StarPU:
here we will consider the @b{vector interface}.
The following lines show how to declare an array of @code{NX} elements of type
@code{float} using the vector interface:
@cartouche
@smallexample
float vector[NX];

starpu_data_handle vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end smallexample
@end cartouche
The first argument, called the @b{data handle}, is an opaque pointer which
designates the array in StarPU. This is also the structure which is used to
describe which data is used by a task. The second argument is the node number
where the data originally resides. Here it is 0 since the @code{vector} array is in
the main memory. Then comes the pointer @code{vector} where the data can be found in main memory,
the number of elements in the vector and the size of each element.
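Conversely, once the application no longer needs StarPU to manage this array,
it can be unregistered (a minimal sketch):
@cartouche
@smallexample
/* Bring the up-to-date data back to main memory and destroy the handle. */
starpu_data_unregister(vector_handle);
@end smallexample
@end cartouche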
The following shows how to construct a StarPU task that will manipulate the
vector and a constant factor.
@cartouche
@smallexample
float factor = 3.14;
struct starpu_task *task = starpu_task_create();

task->cl = &cl;                          /* @b{Pointer to the codelet defined below} */
task->buffers[0].handle = vector_handle; /* @b{First parameter of the codelet} */
task->buffers[0].mode = STARPU_RW;
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->synchronous = 1;

starpu_task_submit(task);
@end smallexample
@end cartouche
Since the factor is a mere constant float value parameter,
it does not need a preliminary registration, and
can just be passed through the @code{cl_arg} pointer like in the previous
example. The vector parameter is described by its handle.
There are two fields in each element of the @code{buffers} array.
@code{handle} is the handle of the data, and @code{mode} specifies how the
kernel will access the data (@code{STARPU_R} for read-only, @code{STARPU_W} for
write-only and @code{STARPU_RW} for read and write access).
The definition of the codelet can be written as follows:
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned i;
    float *factor = cl_arg;

    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* CPU copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);

    for (i = 0; i < n; i++)
        val[i] *= *factor;
@}

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = scal_cpu_func,
    .nbuffers = 1
@};
@end smallexample
@end cartouche
The first argument is an array that gives
a description of all the buffers passed in the @code{task->buffers} array. The
size of this array is given by the @code{nbuffers} field of the codelet
structure. For the sake of genericity, this array contains pointers to the
different interfaces describing each buffer. In the case of the @b{vector
interface}, the location of the vector (resp. its length) is accessible in the
@code{ptr} (resp. @code{nx}) field of this interface. Since the vector is accessed in a
read-write fashion, any modification will automatically affect future accesses
to this vector made by other tasks.
The second argument of the @code{scal_cpu_func} function contains a pointer to the
parameters of the codelet (given in @code{task->cl_arg}), so that we read the
constant factor from this pointer.
@node Execution of Vector Scaling
@subsection Execution of Vector Scaling
@smallexample
% make vector_scal
cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) vector_scal.c -o vector_scal
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
@node Vector Scaling on a Hybrid CPU/GPU Machine
@section Vector Scaling on a Hybrid CPU/GPU Machine
Contrary to the previous examples, the task submitted in this example may not
only be executed by the CPUs, but also by a CUDA device.
@menu
* Definition of the CUDA Kernel::
* Definition of the OpenCL Kernel::
* Definition of the Main Code::
* Execution of Hybrid Vector Scaling::
@end menu
@node Definition of the CUDA Kernel
@subsection Definition of the CUDA Kernel
The CUDA implementation can be written as follows. It needs to be compiled with
a CUDA compiler such as nvcc, the NVIDIA CUDA compiler driver. It must be noted
that the vector pointer returned by @code{STARPU_VECTOR_GET_PTR} is here a pointer in GPU
memory, so that it can be passed as such to the @code{vector_mult_cuda} kernel
call.
@cartouche
@smallexample
#include <starpu.h>
#include <starpu_cuda.h>

static __global__ void vector_mult_cuda(float *val, unsigned n,
                                        float factor)
@{
    unsigned i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        val[i] *= factor;
@}

extern "C" void scal_cuda_func(void *buffers[], void *_args)
@{
    float *factor = (float *)_args;

    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* CUDA copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned threads_per_block = 64;
    unsigned nblocks = (n + threads_per_block-1) / threads_per_block;

@i{    vector_mult_cuda<<<nblocks,threads_per_block, 0, starpu_cuda_get_local_stream()>>>(val, n, *factor);}

@i{    cudaStreamSynchronize(starpu_cuda_get_local_stream());}
@}
@end smallexample
@end cartouche

@node Definition of the OpenCL Kernel
@subsection Definition of the OpenCL Kernel

The OpenCL implementation can be written as follows. StarPU provides
tools to compile an OpenCL kernel stored in a file.

@cartouche
@smallexample
__kernel void vector_mult_opencl(__global float* val, int nx, float factor)
@{
  const int i = get_global_id(0);
  if (i < nx) @{
    val[i] *= factor;
  @}
@}
@end smallexample
@end cartouche

Similarly to CUDA, the pointer returned by @code{STARPU_VECTOR_GET_PTR} is here
a device pointer, so that it is passed as such to the OpenCL kernel.

@cartouche
@smallexample
#include <starpu.h>
@i{#include <starpu_opencl.h>}

@i{extern struct starpu_opencl_program programs;}
void scal_opencl_func(void *buffers[], void *_args)
@{
  float *factor = _args;
@i{  int id, devid, err;}
@i{  cl_kernel kernel;}
@i{  cl_command_queue queue;}
@i{  cl_event event;}

  /* length of the vector */
  unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  /* OpenCL copy of the vector pointer */
  cl_mem val = (cl_mem) STARPU_VECTOR_GET_PTR(buffers[0]);

@i{  id = starpu_worker_get_id();}
@i{  devid = starpu_worker_get_devid(id);}

@i{  err = starpu_opencl_load_kernel(&kernel, &queue, &programs,}
@i{                  "vector_mult_opencl", devid);   /* @b{Name of the kernel defined above} */}
@i{  if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}

@i{  err = clSetKernelArg(kernel, 0, sizeof(val), &val);}
@i{  err |= clSetKernelArg(kernel, 1, sizeof(n), &n);}
@i{  err |= clSetKernelArg(kernel, 2, sizeof(*factor), factor);}
@i{  if (err) STARPU_OPENCL_REPORT_ERROR(err);}

@i{  @{}
@i{    size_t global=1;}
@i{    size_t local=1;}
@i{    err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 0, NULL, &event);}
@i{    if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}
@i{  @}}

@i{  clFinish(queue);}
@i{  starpu_opencl_collect_stats(event);}
@i{  clReleaseEvent(event);}

@i{  starpu_opencl_release_kernel(kernel);}
@}
@end smallexample
@end cartouche

@node Definition of the Main Code
@subsection Definition of the Main Code

The CPU implementation is the same as in the previous section.

Here is the source of the main application. You can notice the value of the
field @code{where} for the codelet. We specify
@code{STARPU_CPU|STARPU_CUDA|STARPU_OPENCL} to indicate to StarPU that the codelet
can be executed either on a CPU, on a CUDA device, or on an OpenCL device.

@cartouche
@smallexample
#include <starpu.h>

#define NX 2048

extern void scal_cuda_func(void *buffers[], void *_args);
extern void scal_cpu_func(void *buffers[], void *_args);
extern void scal_opencl_func(void *buffers[], void *_args);

/* @b{Definition of the codelet} */
static starpu_codelet cl = @{
  .where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL, /* @b{It can be executed on a CPU,} */
                                      /* @b{on a CUDA device, or on an OpenCL device} */
  .cuda_func = scal_cuda_func,
  .cpu_func = scal_cpu_func,
  .opencl_func = scal_opencl_func,
  .nbuffers = 1
@};

#ifdef STARPU_USE_OPENCL
/* @b{The compiled version of the OpenCL program} */
struct starpu_opencl_program programs;
#endif

int main(int argc, char **argv)
@{
  float *vector;
  int i, ret;
  float factor=3.0;
  struct starpu_task *task;
  starpu_data_handle vector_handle;

  starpu_init(NULL);                            /* @b{Initialising StarPU} */

#ifdef STARPU_USE_OPENCL
  starpu_opencl_load_opencl_from_file(
          "examples/basic_examples/vector_scal_opencl_codelet.cl",
          &programs, NULL);
#endif

  vector = malloc(NX*sizeof(vector[0]));
  assert(vector);
  for(i=0 ; i<NX ; i++) vector[i] = i;
@end smallexample
@end cartouche

@cartouche
@smallexample
  /* @b{Registering data within StarPU} */
  starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector,
                              NX, sizeof(vector[0]));

  /* @b{Definition of the task} */
  task = starpu_task_create();
  task->cl = &cl;
  task->buffers[0].handle = vector_handle;
  task->buffers[0].mode = STARPU_RW;
  task->cl_arg = &factor;
  task->cl_arg_size = sizeof(factor);
@end smallexample
@end cartouche

@cartouche
@smallexample
  /* @b{Submitting the task} */
  ret = starpu_task_submit(task);
  if (ret == -ENODEV) @{
    fprintf(stderr, "No worker may execute this task\n");
    return 1;
  @}

@c TODO: Mmm, should rather be an unregistration with an implicit dependency, no?
  /* @b{Waiting for its termination} */
  starpu_task_wait_for_all();

  /* @b{Update the vector in RAM} */
  starpu_data_acquire(vector_handle, STARPU_R);
@end smallexample
@end cartouche

@cartouche
@smallexample
  /* @b{Access the data} */
  for(i=0 ; i<NX; i++) @{
    fprintf(stderr, "%f ", vector[i]);
  @}
  fprintf(stderr, "\n");

  /* @b{Release the RAM view of the data before unregistering it and shutting down StarPU} */
  starpu_data_release(vector_handle);
  starpu_data_unregister(vector_handle);
  starpu_shutdown();

  return 0;
@}
@end smallexample
@end cartouche

@node Execution of Hybrid Vector Scaling
@subsection Execution of Hybrid Vector Scaling

The Makefile given at the beginning of the section must be extended to
give the rules to compile the CUDA source code. Note that the source
file of the OpenCL kernel does not need to be compiled now; it will
be compiled at run-time when calling the function
@code{starpu_opencl_load_opencl_from_file()} (@pxref{starpu_opencl_load_opencl_from_file}).

@cartouche
@smallexample
CFLAGS  += $(shell pkg-config --cflags libstarpu)
LDFLAGS += $(shell pkg-config --libs libstarpu)
CC       = gcc

vector_scal: vector_scal.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o

%.o: %.cu
	nvcc $(CFLAGS) $< -c -o $@@

clean:
	rm -f vector_scal *.o
@end smallexample
@end cartouche

@smallexample
% make
@end smallexample

and to execute it, with the default configuration:

@smallexample
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

or for example, by disabling CPU devices:

@smallexample
% STARPU_NCPUS=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

or by disabling CUDA devices (which may enable the use of OpenCL,
see @ref{Enabling OpenCL}):

@smallexample
% STARPU_NCUDA=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

@node Using multiple implementations of a codelet
@section Using multiple implementations of a codelet

One may want to write multiple implementations of a codelet for a single type of
device and let StarPU choose which one to run. As an example, we will show how
to use SSE to scale a vector. The codelet can be written as follows:

@cartouche
@smallexample
#include <xmmintrin.h>

void scal_sse_func(void *buffers[], void *cl_arg)
@{
  float *vector = (float *) STARPU_VECTOR_GET_PTR(buffers[0]);
  unsigned int n = STARPU_VECTOR_GET_NX(buffers[0]);
  unsigned int n_iterations = n/4;
  if (n % 4 != 0)
    n_iterations++;

  __m128 *VECTOR = (__m128*) vector;
  __m128 factor __attribute__((aligned(16)));
  factor = _mm_set1_ps(*(float *) cl_arg);

  unsigned int i;
  for (i = 0; i < n_iterations; i++)
    VECTOR[i] = _mm_mul_ps(factor, VECTOR[i]);
@}
@end smallexample
@end cartouche

The @code{cpu_func} field of the @code{starpu_codelet} structure has to be set
to the special value @code{STARPU_MULTIPLE_CPU_IMPLEMENTATIONS}. Note that
@code{STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS} and
@code{STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS} are also available.

@cartouche
@smallexample
starpu_codelet cl = @{
  .where = STARPU_CPU,
  .cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS,
  .cpu_funcs = @{ scal_cpu_func, scal_sse_func @},
  .nbuffers = 1
@};
@end smallexample
@end cartouche

The scheduler will measure the performance of all the implementations it was
given, and pick the one that seems to be the fastest.

@node Task and Worker Profiling
@section Task and Worker Profiling

A full example showing how to use the profiling API is available in
the StarPU sources in the directory @code{examples/profiling/}.

@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->synchronous = 1;
/* We will destroy the task structure by hand so that we can
 * query the profiling info before the task is destroyed. */
task->destroy = 0;

/* Submit and wait for completion (since synchronous was set to 1) */
starpu_task_submit(task);

/* The task is finished, get profiling information */
struct starpu_task_profiling_info *info = task->profiling_info;

/* How much time did it take before the task started? */
double delay = starpu_timing_timespec_delay_us(&info->submit_time, &info->start_time);

/* How long was the task execution? */
double length = starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);

/* We don't need the task structure anymore */
starpu_task_destroy(task);
@end smallexample
@end cartouche

@cartouche
@smallexample
/* Display the occupancy of all workers during the test */
int worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
@{
  struct starpu_worker_profiling_info worker_info;
  int ret = starpu_worker_get_profiling_info(worker, &worker_info);
  STARPU_ASSERT(!ret);

  double total_time = starpu_timing_timespec_to_us(&worker_info.total_time);
  double executing_time = starpu_timing_timespec_to_us(&worker_info.executing_time);
  double sleeping_time = starpu_timing_timespec_to_us(&worker_info.sleeping_time);

  float executing_ratio = 100.0*executing_time/total_time;
  float sleeping_ratio = 100.0*sleeping_time/total_time;

  char workername[128];
  starpu_worker_get_name(worker, workername, 128);
  fprintf(stderr, "Worker %s:\n", workername);
  fprintf(stderr, "\ttotal time: %.2lf ms\n", total_time*1e-3);
  fprintf(stderr, "\texec time: %.2lf ms (%.2f %%)\n", executing_time*1e-3,
          executing_ratio);
  fprintf(stderr, "\tblocked time: %.2lf ms (%.2f %%)\n", sleeping_time*1e-3,
          sleeping_ratio);
@}
@end smallexample
@end cartouche

@node Partitioning Data
@section Partitioning Data

An existing piece of data can be partitioned into sub-parts to be used by different tasks, for instance:

@cartouche
@smallexample
int vector[NX];
starpu_data_handle handle;

/* Declare data to StarPU */
starpu_vector_data_register(&handle, 0, (uintptr_t)vector, NX, sizeof(vector[0]));

/* Partition the vector in PARTS sub-vectors */
starpu_filter f =
@{
  .filter_func = starpu_block_filter_func_vector,
  .nchildren = PARTS
@};
starpu_data_partition(handle, &f);
@end smallexample
@end cartouche

@cartouche
@smallexample
/* Submit a task on each sub-vector */
for (i=0; i<starpu_data_get_nb_children(handle); i++) @{
  /* Get subdata number i (there is only 1 dimension) */
  starpu_data_handle sub_handle = starpu_data_get_sub_data(handle, 1, i);
  struct starpu_task *task = starpu_task_create();

  task->buffers[0].handle = sub_handle;
  task->buffers[0].mode = STARPU_RW;
  task->cl = &cl;
  task->synchronous = 1;
  task->cl_arg = &factor;
  task->cl_arg_size = sizeof(factor);

  starpu_task_submit(task);
@}
@end smallexample
@end cartouche

Partitioning can be applied several times, see
@code{examples/basic_examples/mult.c} and @code{examples/filters/}.
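
Once the tasks working on the sub-data have completed, the pieces can be
gathered back into the initial handle before it is unregistered; a minimal
sketch (node 0 denotes main memory, as in the registration call above):

@cartouche
@smallexample
/* Gather the sub-vectors back into the original handle on node 0 (RAM),
 * then unregister it as usual. */
starpu_data_unpartition(handle, 0);
starpu_data_unregister(handle);
@end smallexample
@end cartouche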

@node Performance model example
@section Performance model example

To achieve good scheduling, StarPU scheduling policies need to be able to
estimate in advance the duration of a task. This is done by giving to codelets
a performance model, by defining a @code{starpu_perfmodel_t} structure and
providing its address in the @code{model} field of the @code{starpu_codelet}
structure. The @code{symbol} and @code{type} fields of @code{starpu_perfmodel_t}
are mandatory, to give a name to the model and to specify its type, since
there are several kinds of performance models.

@itemize
@item
Measured at runtime (@code{STARPU_HISTORY_BASED} model type). This assumes that for a
given set of data input/output sizes, the performance will always be about the
same. This is very true for regular kernels on GPUs for instance (<0.1% error),
and just a bit less true on CPUs (~=1% error). This also assumes that there are
few different sets of data input/output sizes. StarPU will then keep record of
the average time of previous executions on the various processing units, and use
it as an estimation. History is done per task size, by using a hash of the input
and output sizes as an index.
It will also save it in @code{~/.starpu/sampling/codelets}
for further executions, and can be observed by using the
@code{starpu_perfmodel_display} command, or drawn by using
the @code{starpu_perfmodel_plot} tool. The models are indexed by machine name. To
share the models between machines (e.g. for a homogeneous cluster), use
@code{export STARPU_HOSTNAME=some_global_name}. The following is a small code
example.

If e.g. the code is recompiled with other compilation options, or several
variants of the code are used, the symbol string should be changed to reflect
that, in order to recalibrate a new model from zero. The symbol string can even
be constructed dynamically at execution time, as long as this is done before
submitting any task using it.

@cartouche
@smallexample
static struct starpu_perfmodel_t mult_perf_model = @{
  .type = STARPU_HISTORY_BASED,
  .symbol = "mult_perf_model"
@};

starpu_codelet cl = @{
  .where = STARPU_CPU,
  .cpu_func = cpu_mult,
  .nbuffers = 3,
  /* for the scheduling policy to be able to use performance models */
  .model = &mult_perf_model
@};
@end smallexample
@end cartouche

@item
Measured at runtime and refined by regression (@code{STARPU_REGRESSION_*_BASED}
model type). This still assumes performance regularity, but can work
with various data input sizes, by applying regression over observed
execution times. @code{STARPU_REGRESSION_BASED} uses an @code{a*n^b} regression
form, while @code{STARPU_NL_REGRESSION_BASED} uses an @code{a*n^b+c} form (more precise than
@code{STARPU_REGRESSION_BASED}, but costs a lot more to compute). For instance,
@code{tests/perfmodels/regression_based.c} uses a regression-based performance
model for the @code{memset} operation.

@item
Provided as an estimation from the application itself (@code{STARPU_COMMON} model type and @code{cost_model} field),
see for instance
@code{examples/common/blas_model.h} and @code{examples/common/blas_model.c}.

@item
Provided explicitly by the application (@code{STARPU_PER_ARCH} model type): the
@code{.per_arch[i].cost_model} fields have to be filled with pointers to
functions which return the expected duration of the task in micro-seconds, one
per architecture.
@end itemize

How to use schedulers which can benefit from such performance models is explained
in @ref{Task scheduling policy}.

The same can be done for task power consumption estimation, by setting the
@code{power_model} field in the same way as the @code{model} field. Note: for
now, the application has to give the power consumption performance model
a name which is different from the execution time performance model.
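
For instance, a history-based power model can be declared alongside the
execution time model as follows (a sketch; the symbol strings and the
@code{cpu_func} name are arbitrary placeholders):

@cartouche
@smallexample
static struct starpu_perfmodel_t time_model = @{
  .type = STARPU_HISTORY_BASED,
  .symbol = "my_time_model"
@};

static struct starpu_perfmodel_t power_model = @{
  .type = STARPU_HISTORY_BASED,
  .symbol = "my_power_model" /* @b{must differ from the time model symbol} */
@};

starpu_codelet cl = @{
  .where = STARPU_CPU,
  .cpu_func = cpu_func,
  .nbuffers = 1,
  .model = &time_model,
  .power_model = &power_model
@};
@end smallexample
@end cartouche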

The application can request time estimations from the StarPU performance
models by filling a task structure as usual without actually submitting
it. The data handles can be created by calling @code{starpu_data_register}
functions with a @code{NULL} pointer (and need to be unregistered as usual)
and the desired data sizes. The @code{starpu_task_expected_length} and
@code{starpu_task_expected_power} functions can then be called to get an
estimation of the task duration on a given arch. @code{starpu_task_destroy}
needs to be called to destroy the dummy task afterwards. See
@code{tests/perfmodels/regression_based.c} for an example.

@node Theoretical lower bound on execution time
@section Theoretical lower bound on execution time

For kernels with history-based performance models, StarPU can very easily provide a theoretical lower
bound for the execution time of a whole set of tasks. See for
instance @code{examples/lu/lu_example.c}: before submitting tasks,
call @code{starpu_bound_start}, and after complete execution, call
@code{starpu_bound_stop}. @code{starpu_bound_print_lp} or
@code{starpu_bound_print_mps} can then be used to output a Linear Programming
problem corresponding to the schedule of your tasks. Run it through
@code{lp_solve} or any other linear programming solver, and that will give you a
lower bound for the total execution time of your tasks. If StarPU was compiled
with the glpk library installed, @code{starpu_bound_compute} can be used to
solve it immediately and get the optimized minimum, in ms. Its @code{integer}
parameter permits deciding whether integer resolution should be computed
and returned too.

The @code{deps} parameter tells StarPU whether to take tasks and implicit data
dependencies into account. It must be understood that the linear programming
problem size is quadratic with the number of tasks, and thus the time to solve it
can be very long; it can take minutes for just a few dozen tasks. You should
probably use @code{lp_solve -timeout 1 test.pl -wmps test.mps} to convert the
problem to MPS format and then use a better solver: @code{glpsol} might be
better than @code{lp_solve} for instance (the @code{--pcost} option may be
useful), but sometimes does not manage to converge. @code{cbc} might look
slower, but it is parallel. Be sure to try at least all the @code{-B} options
of @code{lp_solve}. For instance, we often just use
@code{lp_solve -cc -B1 -Bb -Bg -Bp -Bf -Br -BG -Bd -Bs -BB -Bo -Bc -Bi}, and
the @code{-gr} option can also be quite useful.

Setting @code{deps} to 0 will only take into account the actual computations
on processing units. It however still properly takes into account the varying
performances of kernels and processing units, which is much more accurate than
just comparing StarPU performances with the fastest of the kernels being used.

The @code{prio} parameter tells StarPU whether to simulate taking into account
the priorities as the StarPU scheduler would, i.e. schedule prioritized
tasks before less prioritized tasks, to check to what extent this results
in a less optimal solution. This increases computation time even more.

Note that, for simplicity, all this does not take data transfers into account;
they are assumed to be completely overlapped.
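
In practice, the sequence of calls can be sketched as follows (assuming glpk
support for @code{starpu_bound_compute}, and @code{deps}/@code{prio} flags as
described above):

@cartouche
@smallexample
int deps = 1, prio = 0;

starpu_bound_start(deps, prio);
/* ... submit the whole set of tasks ... */
starpu_task_wait_for_all();
starpu_bound_stop();

/* Either dump a Linear Programming problem for an external solver, */
FILE *f = fopen("bound.lp", "w");
starpu_bound_print_lp(f);
fclose(f);

/* or solve it directly with glpk: the lower bound is returned in ms. */
double res, integer_res;
starpu_bound_compute(&res, &integer_res, 1);
@end smallexample
@end cartouche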

@node Insert Task Utility
@section Insert Task Utility

StarPU provides the wrapper function @code{starpu_insert_task} to ease
the creation and submission of tasks.

@deftypefun int starpu_insert_task (starpu_codelet *@var{cl}, ...)
Create and submit a task corresponding to @var{cl} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet can be of the following types:

@itemize
@item
@code{STARPU_R}, @code{STARPU_W}, @code{STARPU_RW}, @code{STARPU_SCRATCH}, @code{STARPU_REDUX}: an access mode followed by a data handle;
@item
@code{STARPU_VALUE} followed by a pointer to a constant value and
the size of the constant;
@item
@code{STARPU_CALLBACK} followed by a pointer to a callback function;
@item
@code{STARPU_CALLBACK_ARG} followed by a pointer to be given as an
argument to the callback function;
@item
@code{STARPU_CALLBACK_WITH_ARG} followed by two pointers: one to a callback
function, and the other to be given as an argument to the callback
function; this is equivalent to using both @code{STARPU_CALLBACK} and
@code{STARPU_CALLBACK_ARG};
@item
@code{STARPU_PRIORITY} followed by an integer defining a priority level.
@end itemize

Parameters to be passed to the codelet implementation are defined
through the type @code{STARPU_VALUE}. The function
@code{starpu_unpack_cl_args} must be called within the codelet
implementation to retrieve them.
@end deftypefun

Here is the implementation of the codelet:

@smallexample
void func_cpu(void *descr[], void *_args)
@{
  int *x0 = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
  float *x1 = (float *)STARPU_VARIABLE_GET_PTR(descr[1]);
  int ifactor;
  float ffactor;

  starpu_unpack_cl_args(_args, &ifactor, &ffactor);
  *x0 = *x0 * ifactor;
  *x1 = *x1 * ffactor;
@}

starpu_codelet mycodelet = @{
  .where = STARPU_CPU,
  .cpu_func = func_cpu,
  .nbuffers = 2
@};
@end smallexample

And the call to the @code{starpu_insert_task} wrapper:

@smallexample
starpu_insert_task(&mycodelet,
                   STARPU_VALUE, &ifactor, sizeof(ifactor),
                   STARPU_VALUE, &ffactor, sizeof(ffactor),
                   STARPU_RW, data_handles[0], STARPU_RW, data_handles[1],
                   0);
@end smallexample

The call to @code{starpu_insert_task} is equivalent to the following
code:

@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &mycodelet;
task->buffers[0].handle = data_handles[0];
task->buffers[0].mode = STARPU_RW;
task->buffers[1].handle = data_handles[1];
task->buffers[1].mode = STARPU_RW;

char *arg_buffer;
size_t arg_buffer_size;
starpu_pack_cl_args(&arg_buffer, &arg_buffer_size,
                    STARPU_VALUE, &ifactor, sizeof(ifactor),
                    STARPU_VALUE, &ffactor, sizeof(ffactor),
                    0);
task->cl_arg = arg_buffer;
task->cl_arg_size = arg_buffer_size;

int ret = starpu_task_submit(task);
@end smallexample

If some part of the task insertion depends on the value of some computation,
the @code{STARPU_DATA_ACQUIRE_CB} macro can be very convenient. For
instance, assuming that the index variable @code{i} was registered as handle
@code{i_handle}:

@smallexample
/* Compute which portion we will work on, e.g. pivot */
starpu_insert_task(&which_index, STARPU_W, i_handle, 0);

/* And submit the corresponding task */
STARPU_DATA_ACQUIRE_CB(i_handle, STARPU_R, starpu_insert_task(&work, STARPU_RW, A_handle[i], 0));
@end smallexample

The @code{STARPU_DATA_ACQUIRE_CB} macro submits an asynchronous request for
acquiring data @code{i} for the main application, and will execute the code
given as its third parameter when it is acquired. In other words, as soon as the
value of @code{i} computed by the @code{which_index} codelet can be read, the
portion of code passed as third parameter of @code{STARPU_DATA_ACQUIRE_CB} will
be executed, and is allowed to read from @code{i} to use it e.g. as an
index. Note that this macro is only available when compiling StarPU with
the compiler @code{gcc}.

@node Debugging
@section Debugging

StarPU provides several tools to help debugging applications. Execution traces
can be generated and displayed graphically, see @ref{Generating traces}. Some
gdb helpers are also provided to show the whole StarPU state:

@smallexample
(gdb) source tools/gdbinit
(gdb) help starpu
@end smallexample

@node More examples
@section More examples

More examples are available in the StarPU sources in the @code{examples/}
directory. Simple examples include:

@table @asis
@item @code{incrementer/}:
Trivial incrementation test.
@item @code{basic_examples/}:
Simple documented Hello world (as shown in @ref{Hello World}), vector/scalar product (as shown
in @ref{Vector Scaling on an Hybrid CPU/GPU Machine}), matrix
product examples (as shown in @ref{Performance model example}), an example using the blocked matrix data
interface, and an example using the variable data interface.
@item @code{matvecmult/}:
OpenCL example from NVidia, adapted to StarPU.
@item @code{axpy/}:
AXPY CUBLAS operation adapted to StarPU.
@item @code{fortran/}:
Example of Fortran bindings.
@end table

More advanced examples include:

@table @asis
@item @code{filters/}:
Examples using filters, as shown in @ref{Partitioning Data}.
@item @code{lu/}:
LU matrix factorization, see for instance @code{xlu_implicit.c}.
@item @code{cholesky/}:
Cholesky matrix factorization, see for instance @code{cholesky_implicit.c}.
@end table

@c ---------------------------------------------------------------------
@c Performance options
@c ---------------------------------------------------------------------

@node Performance optimization
@chapter How to optimize performance with StarPU

TODO: improve!

@menu
* Data management::
* Task submission::
* Task priorities::
* Task scheduling policy::
* Performance model calibration::
* Task distribution vs Data transfer::
* Data prefetch::
* Power-based scheduling::
* Profiling::
* CUDA-specific optimizations::
@end menu

Simply encapsulating application kernels into tasks already permits
seamless support of CPUs and GPUs at the same time. To achieve good performance, a
few additional changes are needed.

@node Data management
@section Data management

When the application allocates data, whenever possible it should use the
@code{starpu_malloc} function, which will ask CUDA or
OpenCL to make the allocation itself and pin the corresponding allocated
memory. This is needed to permit asynchronous data transfer, i.e. to permit data
transfer to overlap with computations. Otherwise, the trace will show that the
@code{DriverCopyAsync} state takes a lot of time: this is because CUDA or OpenCL
then reverts to synchronous transfers.
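
A minimal sketch, with @code{NX} as in the examples above (a matching
@code{starpu_free} call is assumed to be available in this version for the
deallocation):

@cartouche
@smallexample
float *vector;
/* Ask StarPU for pinned memory, so transfers can be asynchronous. */
starpu_malloc((void **)&vector, NX*sizeof(vector[0]));
/* ... register the data, submit tasks, unregister ... */
starpu_free(vector); /* assumed matching deallocation */
@end smallexample
@end cartouche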

By default, StarPU leaves replicates of data wherever they were used, in case they
will be re-used by other tasks, thus saving the data transfer time. When some
task modifies some data, all the other replicates are invalidated, and only the
processing unit which ran that task will have a valid replicate of the data. If the application knows
that this data will not be re-used by further tasks, it should advise StarPU to
immediately replicate it to a desired list of memory nodes (given through a
bitmask). This can be understood like the write-through mode of CPU caches.

@example
starpu_data_set_wt_mask(img_handle, 1<<0);
@end example

will for instance request to always automatically transfer a replicate into the
main memory (node 0), as bit 0 of the write-through bitmask is being set.

@example
starpu_data_set_wt_mask(img_handle, ~0U);
@end example

will request to always automatically broadcast the updated data to all memory
nodes.

@node Task submission
@section Task submission

To let StarPU make online optimizations, tasks should be submitted
asynchronously as much as possible. Ideally, all the tasks should be
submitted first, with mere calls to @code{starpu_task_wait_for_all} or
@code{starpu_data_unregister} done afterwards to wait for
termination. StarPU will then be able to rework the whole schedule, overlap
computation with communication, manage accelerator local memory usage, etc.
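
For instance, a sketch of this pattern, reusing the @code{cl} codelet from the
previous examples:

@cartouche
@smallexample
int i, ntasks = 1000;
for (i = 0; i < ntasks; i++) @{
  struct starpu_task *task = starpu_task_create();
  task->cl = &cl;
  task->synchronous = 0; /* the default: submission returns immediately */
  /* ... set buffers and cl_arg ... */
  starpu_task_submit(task);
@}
/* Only wait once, after everything has been submitted. */
starpu_task_wait_for_all();
@end smallexample
@end cartouche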

@node Task priorities
@section Task priorities

By default, StarPU will consider the tasks in the order they are submitted by
the application. If the application programmer knows that some tasks should
be performed in priority (for instance because their output is needed by many
other tasks and may thus be a bottleneck if not executed early enough), the
@code{priority} field of the task structure should be set to transmit the
priority information to StarPU.
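
For instance (a sketch, assuming @code{STARPU_MAX_PRIO} denotes the highest
priority level in this version):

@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
/* This task is on the critical path: raise its priority. */
task->priority = STARPU_MAX_PRIO;
starpu_task_submit(task);
@end smallexample
@end cartouche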

@node Task scheduling policy
@section Task scheduling policy

By default, StarPU uses the @code{eager} simple greedy scheduler. This is
because it provides correct load balance even if the application codelets do not
have performance models. If your application codelets have performance models
(@pxref{Performance model example} for examples showing how to do it),
you should change the scheduler thanks to the @code{STARPU_SCHED} environment
variable. For instance @code{export STARPU_SCHED=dmda}. Use @code{help} as the
value to get the list of available schedulers.
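
For instance, from the shell, reusing the example program built above:

@example
% STARPU_SCHED=dmda ./vector_scal
% STARPU_SCHED=help ./vector_scal
@end example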

The @b{eager} scheduler uses a central task queue, from which workers draw tasks
to work on. This however does not permit prefetching data, since the scheduling
decision is taken late. If a task has a non-zero priority, it is put at the front of the queue.

The @b{prio} scheduler also uses a central task queue, but sorts tasks by
priority (between -5 and 5).

The @b{random} scheduler distributes tasks randomly according to assumed worker
overall performance.

The @b{ws} (work stealing) scheduler schedules tasks on the local worker by
default. When a worker becomes idle, it steals a task from the most loaded
worker.

The @b{dm} (deque model) scheduler takes task execution performance models into account to
perform an HEFT-like scheduling strategy: it schedules tasks where their
termination time will be minimal.

The @b{dmda} (deque model data aware) scheduler is similar to dm, but it also takes
into account data transfer time.

The @b{dmdar} (deque model data aware ready) scheduler is similar to dmda,
but it also sorts tasks on per-worker queues by number of already-available data
buffers.

The @b{dmdas} (deque model data aware sorted) scheduler is similar to dmda, but it
also supports arbitrary priority values.

The @b{heft} (HEFT) scheduler is similar to dmda, but it also supports task bundles.

The @b{pheft} (parallel HEFT) scheduler is similar to heft, but it also supports
parallel tasks (still experimental).

The @b{pgreedy} (parallel greedy) scheduler is similar to eager, but it also
supports parallel tasks (still experimental).

@node Performance model calibration
@section Performance model calibration

Most schedulers are based on an estimation of codelet duration on each kind
of processing unit. For this to be possible, the application programmer needs
to configure a performance model for the codelets of the application (see
@ref{Performance model example} for instance). History-based performance models
use on-line calibration. StarPU will automatically calibrate codelets
which have never been calibrated yet, and save the result in
@code{~/.starpu/sampling/codelets}.
The models are indexed by machine name. To share the models between machines (e.g. for a homogeneous cluster), use @code{export STARPU_HOSTNAME=some_global_name}. To force continuing calibration, use
@code{export STARPU_CALIBRATE=1}. This may be necessary if your application
has not-so-stable performance. StarPU will force calibration (and thus ignore
the current result) until 10 (@code{STARPU_CALIBRATION_MINIMUM}) measurements have been
made on each architecture, to avoid badly scheduling tasks just because the
first measurements were not so good. Details on the current performance model status
can be obtained from the @code{starpu_perfmodel_display} command: the @code{-l}
option lists the available performance models, and the @code{-s} option permits
choosing the performance model to be displayed. The result looks like:

@example
$ starpu_perfmodel_display -s starpu_dlu_lu_model_22
performance model for cpu
# hash      size       mean          dev           n
880805ba    98304      2.731309e+02  6.010210e+01  1240
b50b6605    393216     1.469926e+03  1.088828e+02  1240
5c6c3401    1572864    1.125983e+04  3.265296e+03  1240
@end example

This shows that for the LU 22 kernel with a 1.5MiB matrix, the average
execution time on CPUs was about 11ms, with a 3ms standard deviation, over
1240 samples. It is a good idea to check this before doing actual performance
measurements.

A graph can be drawn by using the @code{starpu_perfmodel_plot} tool:

@example
$ starpu_perfmodel_plot -s starpu_dlu_lu_model_22
98304 393216 1572864
$ gnuplot starpu_starpu_dlu_lu_model_22.gp
$ gv starpu_starpu_dlu_lu_model_22.eps
@end example

If a kernel source code was modified (e.g. performance improvement), the
calibration information is stale and should be dropped, so as to re-calibrate from
scratch. This can be done by using @code{export STARPU_CALIBRATE=2}.

Note: due to CUDA limitations, to be able to measure kernel duration,
calibration mode needs to disable asynchronous data transfers. Calibration thus
disables data transfer / computation overlapping, and should thus not be used
for benchmarking. Note 2: history-based performance models get calibrated
only if a performance-model-based scheduler is chosen.

@node Task distribution vs Data transfer
@section Task distribution vs Data transfer

Distributing tasks to balance the load induces data transfer penalty. StarPU
thus needs to find a balance between both. The target function that the
@code{dmda} scheduler of StarPU
tries to minimize is @code{alpha * T_execution + beta * T_data_transfer}, where
@code{T_execution} is the estimated execution time of the codelet (usually
accurate), and @code{T_data_transfer} is the estimated data transfer time. The
latter is estimated based on bus calibration before execution start,
i.e. with an idle machine, thus without contention. You can force bus re-calibration by running
the @code{starpu_calibrate_bus} tool. The @code{beta} parameter defaults to 1, but it can be
worth trying to tweak it by using @code{export STARPU_BETA=2} for instance,
since during real application execution, contention makes transfer times bigger.
This is of course imprecise, but in practice, a rough estimation already gives
the good results that a precise estimation would give.

@node Data prefetch
@section Data prefetch

The @code{heft}, @code{dmda} and @code{pheft} scheduling policies perform data prefetch (see @ref{STARPU_PREFETCH}):
as soon as a scheduling decision is taken for a task, requests are issued to
transfer its required data to the target processing unit, if needed, so that
when the processing unit actually starts the task, its data will hopefully be
already available and it will not have to wait for the transfer to finish.

The application may want to perform some manual prefetching, for several reasons
such as excluding initial data transfers from performance measurements, or
setting up an initial statically-computed data distribution on the machine
before submitting tasks, which will thus guide StarPU toward an initial task
distribution (since StarPU will try to avoid further transfers).

This can be achieved by giving the @code{starpu_data_prefetch_on_node} function
the handle and the desired target memory node.
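
A sketch: push a handle to the memory node of a given worker before submitting
the tasks that will run there (the trailing asynchronous flag of
@code{starpu_data_prefetch_on_node} is an assumption for this version):

@cartouche
@smallexample
int workerid = 0; /* hypothetical target worker */
/* Map the worker to its memory node, then issue the prefetch. */
unsigned node = starpu_worker_get_memory_node(workerid);
starpu_data_prefetch_on_node(vector_handle, node, 0);
@end smallexample
@end cartouche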

@node Power-based scheduling
@section Power-based scheduling

If the application can provide some power performance model (through
the @code{power_model} field of the codelet structure), StarPU will
take it into account when distributing tasks. The target function that
the @code{dmda} scheduler minimizes becomes @code{alpha * T_execution +
beta * T_data_transfer + gamma * Consumption}, where @code{Consumption}
is the estimated task consumption in Joules. To tune this parameter, use
@code{export STARPU_GAMMA=3000} for instance, to express that each Joule
(i.e. 1 kW during 1000 us) is worth 3000 us of execution time penalty. Setting
@code{alpha} and @code{beta} to zero permits taking only power consumption into account.

This is however not sufficient to correctly optimize power: the scheduler would
simply tend to run all computations on the most energy-conservative processing
unit. To account for the consumption of the whole machine (including idle
processing units), the idle power of the machine should be given by setting
@code{export STARPU_IDLE_POWER=200} for 200W, for instance. This value can often
be obtained from the machine power supplier.

The power actually consumed by the total execution can be displayed by setting
@code{export STARPU_PROFILING=1 STARPU_WORKER_STATS=1}.

@node Profiling
@section Profiling

A quick view of how many tasks each worker has executed can be obtained by setting
@code{export STARPU_WORKER_STATS=1}. This is a convenient way to check that
execution did happen on accelerators without penalizing performance with
the profiling overhead.

A quick view of how many data transfers have been issued can be obtained by setting
@code{export STARPU_BUS_STATS=1}.

More detailed profiling information can be enabled by using @code{export STARPU_PROFILING=1} or by
calling @code{starpu_profiling_status_set} from the source code.
Statistics on the execution can then be obtained by using @code{export
STARPU_BUS_STATS=1} and @code{export STARPU_WORKER_STATS=1}.
More details on performance feedback are provided by the next chapter.

@node CUDA-specific optimizations
@section CUDA-specific optimizations

Due to CUDA limitations, StarPU will have a hard time overlapping its own
communications and the codelet computations if the application does not use a
dedicated CUDA stream for its computations. StarPU provides one by the use of
@code{starpu_cuda_get_local_stream()} which should be used by all CUDA codelet
operations. For instance:

@example
func <<<grid,block,0,starpu_cuda_get_local_stream()>>> (foo, bar);
cudaStreamSynchronize(starpu_cuda_get_local_stream());
@end example

StarPU already does appropriate calls for the CUBLAS library.

Unfortunately, some CUDA libraries do not have stream variants of
kernels. That will lower the potential for overlapping.

@c ---------------------------------------------------------------------
@c Performance feedback
@c ---------------------------------------------------------------------

@node Performance feedback
@chapter Performance feedback

@menu
* On-line:: On-line performance feedback
* Off-line:: Off-line performance feedback
* Codelet performance:: Performance of codelets
@end menu

@node On-line
@section On-line performance feedback

@menu
* Enabling monitoring:: Enabling on-line performance monitoring
* Task feedback:: Per-task feedback
* Codelet feedback:: Per-codelet feedback
* Worker feedback:: Per-worker feedback
* Bus feedback:: Bus-related feedback
* StarPU-Top:: StarPU-Top interface
@end menu

@node Enabling monitoring
@subsection Enabling on-line performance monitoring

In order to enable online performance monitoring, the application can call
@code{starpu_profiling_status_set(STARPU_PROFILING_ENABLE)}. It is possible to
detect whether monitoring is already enabled or not by calling
@code{starpu_profiling_status_get()}. Enabling monitoring also reinitializes all
previously collected feedback. The @code{STARPU_PROFILING} environment variable
can also be set to 1 to achieve the same effect.

Likewise, performance monitoring is stopped by calling
@code{starpu_profiling_status_set(STARPU_PROFILING_DISABLE)}. Note that this
does not reset the performance counters so that the application may consult
them later on.
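
For instance:

@example
starpu_profiling_status_set(STARPU_PROFILING_ENABLE);
/* ... submit and run tasks, collect feedback ... */
starpu_profiling_status_set(STARPU_PROFILING_DISABLE);
/* The counters are kept, and can still be consulted here. */
@end example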

More details about the performance monitoring API are available in section
@ref{Profiling API}.

@node Task feedback
@subsection Per-task feedback

If profiling is enabled, a pointer to a @code{starpu_task_profiling_info}
structure is put in the @code{.profiling_info} field of the @code{starpu_task}
structure when a task terminates.
This structure is automatically destroyed when the task structure is destroyed,
either automatically or by calling @code{starpu_task_destroy}.

The @code{starpu_task_profiling_info} structure indicates the date when the
task was submitted (@code{submit_time}), started (@code{start_time}), and
terminated (@code{end_time}), relative to the initialization of
StarPU with @code{starpu_init}. It also specifies the identifier of the worker
that has executed the task (@code{workerid}).

These dates are stored as @code{timespec} structures which the user may convert
into micro-seconds using the @code{starpu_timing_timespec_to_us} helper
function.

It is worth noting that the application may directly access this structure from
the callback executed at the end of the task. The @code{starpu_task} structure
associated to the callback currently being executed is indeed accessible with
the @code{starpu_get_current_task()} function.
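
A sketch of such a callback, reusing the helpers shown above (the callback name
is an arbitrary placeholder):

@cartouche
@smallexample
void finished_callback(void *arg)
@{
  /* The task whose callback is currently being executed. */
  struct starpu_task *task = starpu_get_current_task();
  struct starpu_task_profiling_info *info = task->profiling_info;
  double length = starpu_timing_timespec_delay_us(&info->start_time,
                                                  &info->end_time);
  fprintf(stderr, "task ran for %f us on worker %d\n", length, info->workerid);
@}
@end smallexample
@end cartouche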

@node Codelet feedback
@subsection Per-codelet feedback

The @code{per_worker_stats} field of the @code{starpu_codelet_t} structure is
an array of counters. The i-th entry of the array is incremented every time a
task implementing the codelet is executed on the i-th worker.
This array is not reinitialized when profiling is enabled or disabled.

@node Worker feedback
@subsection Per-worker feedback

The second argument returned by the @code{starpu_worker_get_profiling_info}
function is a @code{starpu_worker_profiling_info} structure that gives
statistics about the specified worker. This structure specifies when StarPU
started collecting profiling information for that worker (@code{start_time}),
the duration of the profiling measurement interval (@code{total_time}), the
time spent executing kernels (@code{executing_time}), the time spent sleeping
because there is no task to execute at all (@code{sleeping_time}), and the
number of tasks that were executed while profiling was enabled.
These values give an estimation of the proportion of time spent doing real work,
and of the time spent either sleeping because there are not enough executable
tasks or simply wasted in pure StarPU overhead.

Calling @code{starpu_worker_get_profiling_info} resets the profiling
information associated to a worker.

When an FxT trace is generated (see @ref{Generating traces}), it is also
possible to use the @code{starpu_top} script (described in @ref{starpu-top}) to
generate a graphic showing the evolution of these values over time, for
the different workers.

@node Bus feedback
@subsection Bus-related feedback

TODO
@c how to enable/disable performance monitoring
@c what kind of information do we get ?

The bus speed measured by StarPU can be displayed by using the
@code{starpu_machine_display} tool, for instance:

@example
StarPU has found :
  3 CUDA devices
    CUDA 0 (Tesla C2050 02:00.0)
    CUDA 1 (Tesla C2050 03:00.0)
    CUDA 2 (Tesla C2050 84:00.0)
from      to RAM       to CUDA 0    to CUDA 1    to CUDA 2
RAM       0.000000     5176.530428  5176.492994  5191.710722
CUDA 0    4523.732446  0.000000     2414.074751  2417.379201
CUDA 1    4523.718152  2414.078822  0.000000     2417.375119
CUDA 2    4534.229519  2417.069025  2417.060863  0.000000
@end example

@node StarPU-Top
@subsection StarPU-Top interface

StarPU-Top is an interface which remotely displays the on-line state of a StarPU
application and permits the user to change parameters on the fly.

Variables to be monitored can be registered by calling the
@code{starputop_add_data_boolean}, @code{starputop_add_data_integer},
@code{starputop_add_data_float} functions, e.g.:

@example
starputop_data *data = starputop_add_data_integer("mynum", 0, 100, 1);
@end example

The application should then call @code{starputop_init_and_wait} to give its name
and wait for StarPU-Top to get a start request from the user. The name is used
by StarPU-Top to quickly reload a previously-saved layout of parameter display.

@example
starputop_init_and_wait("the application");
@end example

The new values can then be provided thanks to
@code{starputop_update_data_boolean}, @code{starputop_update_data_integer},
@code{starputop_update_data_float}, e.g.:

@example
starputop_update_data_integer(data, mynum);
@end example

Updateable parameters can be registered thanks to @code{starputop_register_parameter_boolean}, @code{starputop_register_parameter_integer}, @code{starputop_register_parameter_float}, e.g.:

@example
float alpha;
starputop_register_parameter_float("alpha", &alpha, 0, 10, modif_hook);
@end example

@code{modif_hook} is a function which will be called when the parameter is being modified; it can for instance print the new value:

@example
void modif_hook(struct starputop_param_t *d) @{
  fprintf(stderr,"%s has been modified: %f\n", d->name, alpha);
@}
@end example

Task schedulers should notify StarPU-Top when they have decided when a task will be
scheduled, so that it can show it in its Gantt chart, for instance:

@example
starputop_task_prevision(task, workerid, begin, end);
@end example

Starting StarPU-Top and the application can be done in two ways:

@itemize
@item The application is started by hand on some machine (and thus already
waiting for the start event). In the Preference dialog of StarPU-Top, the SSH
checkbox should be unchecked, and the hostname and port (default is 2011) on
which the application is already running should be specified. Clicking on the
connection button will thus connect to the already-running application.
@item StarPU-Top is started first, and clicking on the connection button will
start the application itself (possibly on a remote machine). The SSH checkbox
should be checked, and a command line provided, e.g.:

@example
ssh myserver STARPU_SCHED=heft ./application
@end example

If port 2011 of the remote machine cannot be accessed directly, an SSH port forwarding should be added:

@example
ssh -L 2011:localhost:2011 myserver STARPU_SCHED=heft ./application
@end example

and "localhost" should be used as the IP address to connect to.
@end itemize

@node Off-line
@section Off-line performance feedback

@menu
* Generating traces:: Generating traces with FxT
* Gantt diagram:: Creating a Gantt Diagram
* DAG:: Creating a DAG with graphviz
* starpu-top:: Monitoring activity
@end menu

@node Generating traces
@subsection Generating traces with FxT

StarPU can use the FxT library (see
@indicateurl{https://savannah.nongnu.org/projects/fkt/}) to generate traces
with a limited runtime overhead.

You can either get a tarball:

@example
% wget http://download.savannah.gnu.org/releases/fkt/fxt-0.2.2.tar.gz
@end example

or use the FxT library from CVS (autotools are required):

@example
% cvs -d :pserver:anonymous@@cvs.sv.gnu.org:/sources/fkt co FxT
% ./bootstrap
@end example

Compiling and installing the FxT library in the @code{$FXTDIR} path is
done following the standard procedure:

@example
% ./configure --prefix=$FXTDIR
% make
% make install
@end example

In order to have StarPU generate traces, StarPU should be configured with
the @code{--with-fxt} option:

@example
$ ./configure --with-fxt=$FXTDIR
@end example

Or you can simply point the @code{PKG_CONFIG_PATH} to
@code{$FXTDIR/lib/pkgconfig} and pass @code{--with-fxt} to @code{./configure}.

When FxT is enabled, a trace is generated when StarPU is terminated by calling
@code{starpu_shutdown()}. The trace is a binary file whose name has the form
@code{prof_file_XXX_YYY} where @code{XXX} is the user name, and
@code{YYY} is the pid of the process that used StarPU. This file is saved in the
@code{/tmp/} directory by default, or in the directory specified by
the @code{STARPU_FXT_PREFIX} environment variable.

@node Gantt diagram
@subsection Creating a Gantt Diagram

When the FxT trace file @code{filename} has been generated, it is possible to
generate a trace in the Paje format by calling:

@example
% starpu_fxt_tool -i filename
@end example

Or alternatively, setting the @code{STARPU_GENERATE_TRACE} environment variable
to 1 before application execution will make StarPU do it automatically at
application shutdown.

This will create a @code{paje.trace} file in the current directory that can be
inspected with the ViTE open-source trace visualizer. More information
about ViTE is available at @indicateurl{http://vite.gforge.inria.fr/}. It is
possible to open the @code{paje.trace} file with ViTE by using the following
command:

@example
% vite paje.trace
@end example

@node DAG
@subsection Creating a DAG with graphviz

When the FxT trace file @code{filename} has been generated, it is possible to
generate a task graph in the DOT format by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create a @code{dag.dot} file in the current directory. This file is a
task graph described using the DOT language. It is possible to get a
graphical output of the graph by using the graphviz library:

@example
$ dot -Tpdf dag.dot -o output.pdf
@end example

@node starpu-top
@subsection Monitoring activity

When the FxT trace file @code{filename} has been generated, it is possible to
generate an activity trace by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create an @code{activity.data} file in the current
directory. A profile of the application showing the activity of StarPU
during the execution of the program can be generated:

@example
$ starpu_top activity.data
@end example

This will create a file named @code{activity.eps} in the current directory.
This picture is composed of two parts.

The first part shows the activity of the different workers. The green sections
indicate which proportion of the time was spent executing kernels on the
processing unit. The red sections indicate the proportion of time spent in
StarPU: an important overhead may indicate that the granularity may be too
low, and that bigger tasks may be appropriate to use the processing unit more
efficiently. The black sections indicate that the processing unit was blocked
because there was no task to process: this may indicate a lack of parallelism
which may be alleviated by creating more tasks when it is possible.

The second part of the @code{activity.eps} picture is a graph showing the
evolution of the number of tasks available in the system during the execution.
Ready tasks are shown in black, and tasks that are submitted but not
schedulable yet are shown in grey.

@node Codelet performance
@section Performance of codelets

The performance model of codelets can be examined by using the
@code{starpu_perfmodel_display} tool:

@example
$ starpu_perfmodel_display -l
file: <malloc_pinned.hannibal>
file: <starpu_slu_lu_model_21.hannibal>
file: <starpu_slu_lu_model_11.hannibal>
file: <starpu_slu_lu_model_22.hannibal>
file: <starpu_slu_lu_model_12.hannibal>
@end example

Here, the codelets of the lu example are available. We can examine the
performance of the 22 kernel:

@example
$ starpu_perfmodel_display -s starpu_slu_lu_model_22
performance model for cpu
# hash      size       mean          dev           n
57618ab0    19660800   2.851069e+05  1.829369e+04  109
performance model for cuda_0
# hash      size       mean          dev           n
57618ab0    19660800   1.164144e+04  1.556094e+01  315
performance model for cuda_1
# hash      size       mean          dev           n
57618ab0    19660800   1.164271e+04  1.330628e+01  360
performance model for cuda_2
# hash      size       mean          dev           n
57618ab0    19660800   1.166730e+04  3.390395e+02  456
@end example

We can see that for the given size, over a sample of a few hundred
executions, the GPUs are about 20 times faster than the CPUs (numbers are in
us). The standard deviation is extremely low for the GPUs, and less than 10% for
CPUs.

The @code{starpu_regression_display} tool does the same for regression-based
performance models. It also writes a @code{.gp} file in the current directory,
to be run in the @code{gnuplot} tool, which shows the corresponding curve.
  1801. @c ---------------------------------------------------------------------
  1802. @c MPI support
  1803. @c ---------------------------------------------------------------------
  1804. @node StarPU MPI support
  1805. @chapter StarPU MPI support
  1806. The integration of MPI transfers within task parallelism is done in a
  1807. very natural way by the means of asynchronous interactions between the
  1808. application and StarPU. This is implemented in a separate libstarpumpi library
  1809. which basically provides "StarPU" equivalents of @code{MPI_*} functions, where
  1810. @code{void *} buffers are replaced with @code{starpu_data_handle}s, and all
  1811. GPU-RAM-NIC transfers are handled efficiently by StarPU-MPI. The user has to
  1812. use the usual @code{mpirun} command of the MPI implementation to start StarPU on
  1813. the different MPI nodes.
  1814. An MPI Insert Task function provides an even more seamless transition to a
  1815. distributed application, by automatically issuing all required data transfers
  1816. according to the task graph and an application-provided distribution.
  1817. @menu
  1818. * The API::
  1819. * Simple Example::
  1820. * MPI Insert Task Utility::
  1821. * MPI Collective Operations::
  1822. @end menu
  1823. @node The API
  1824. @section The API
  1825. @subsection Compilation
  1826. The flags required to compile or link against the MPI layer are then
  1827. accessible with the following commands:
  1828. @example
  1829. % pkg-config --cflags libstarpumpi # options for the compiler
  1830. % pkg-config --libs libstarpumpi # options for the linker
  1831. @end example
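For instance, a small MPI program could be built in one go as follows (the
@code{ring.c} file name is just an illustration):
@example
% mpicc ring.c -o ring $(pkg-config --cflags libstarpumpi) \
        $(pkg-config --libs libstarpumpi)
@end example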
  1832. @subsection Initialisation
  1833. @deftypefun int starpu_mpi_initialize (void)
Initializes the starpumpi library. This must be called after @code{starpu_init}
and before any other @code{starpu_mpi} function. This function does not call
@code{MPI_Init}; the application must call it beforehand.
  1837. @end deftypefun
  1838. @deftypefun int starpu_mpi_initialize_extended (int *@var{rank}, int *@var{world_size})
Initializes the starpumpi library. This must be called after @code{starpu_init}
and before any other @code{starpu_mpi} function.
This function calls @code{MPI_Init}, and should therefore be preferred
to the previous one for MPI implementations which are not thread-safe.
It returns the current MPI node rank and world size through its arguments.
  1844. @end deftypefun
  1845. @deftypefun int starpu_mpi_shutdown (void)
Cleans up the starpumpi library. This must be called after the last
@code{starpu_mpi} function call and before @code{starpu_shutdown}.
  1848. @code{MPI_Finalize} will be called if StarPU-MPI has been initialized
  1849. by calling @code{starpu_mpi_initialize_extended}.
  1850. @end deftypefun
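The resulting call ordering can be summarized by the following sketch (error
checking omitted):
@cartouche
@smallexample
int rank, world_size;

starpu_init(NULL);
starpu_mpi_initialize_extended(&rank, &world_size);

/* ... register data and submit tasks ... */

starpu_mpi_shutdown();
starpu_shutdown();
@end smallexample
@end cartouche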
  1851. @subsection Communication
  1852. @deftypefun int starpu_mpi_send (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
  1853. @end deftypefun
  1854. @deftypefun int starpu_mpi_recv (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, MPI_Status *@var{status})
  1855. @end deftypefun
  1856. @deftypefun int starpu_mpi_isend (starpu_data_handle @var{data_handle}, starpu_mpi_req *@var{req}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
  1857. @end deftypefun
  1858. @deftypefun int starpu_mpi_irecv (starpu_data_handle @var{data_handle}, starpu_mpi_req *@var{req}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm})
  1859. @end deftypefun
  1860. @deftypefun int starpu_mpi_isend_detached (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
  1861. @end deftypefun
  1862. @deftypefun int starpu_mpi_irecv_detached (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
  1863. @end deftypefun
  1864. @deftypefun int starpu_mpi_wait (starpu_mpi_req *@var{req}, MPI_Status *@var{status})
  1865. @end deftypefun
  1866. @deftypefun int starpu_mpi_test (starpu_mpi_req *@var{req}, int *@var{flag}, MPI_Status *@var{status})
  1867. @end deftypefun
  1868. @deftypefun int starpu_mpi_barrier (MPI_Comm @var{comm})
  1869. @end deftypefun
  1870. @deftypefun int starpu_mpi_isend_detached_unlock_tag (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
When the transfer is completed, the tag is unlocked.
  1872. @end deftypefun
  1873. @deftypefun int starpu_mpi_irecv_detached_unlock_tag (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
  1874. @end deftypefun
  1875. @deftypefun int starpu_mpi_isend_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle *@var{data_handle}, int *@var{dest}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Asynchronously sends an array of buffers, and unlocks the tag once all
of them have been transmitted.
  1878. @end deftypefun
  1879. @deftypefun int starpu_mpi_irecv_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle *@var{data_handle}, int *@var{source}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
  1880. @end deftypefun
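These functions combine naturally with explicit tag dependencies. For instance,
here is a sketch of blocking until a detached send completes, assuming
@code{handle}, @code{dest} and @code{mpi_tag} are already defined:
@cartouche
@smallexample
starpu_tag_t tag = 0x42;

starpu_mpi_isend_detached_unlock_tag(handle, dest, mpi_tag,
                                     MPI_COMM_WORLD, tag);
/* Block until the transfer has completed */
starpu_tag_wait(tag);
@end smallexample
@end cartouche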
  1881. @page
  1882. @node Simple Example
  1883. @section Simple Example
  1884. @cartouche
  1885. @smallexample
  1886. void increment_token(void)
  1887. @{
    struct starpu_task *task = starpu_task_create();

    task->cl = &increment_cl;
    task->buffers[0].handle = token_handle;
    task->buffers[0].mode = STARPU_RW;

    starpu_task_submit(task);
  1893. @}
  1894. @end smallexample
  1895. @end cartouche
  1896. @cartouche
  1897. @smallexample
  1898. int main(int argc, char **argv)
  1899. @{
    int rank, size;

    starpu_init(NULL);
    starpu_mpi_initialize_extended(&rank, &size);
    starpu_vector_data_register(&token_handle, 0, (uintptr_t)&token,
                                1, sizeof(unsigned));

    unsigned nloops = NITER;
    unsigned loop;

    unsigned last_loop = nloops - 1;
    unsigned last_rank = size - 1;
  1908. @end smallexample
  1909. @end cartouche
  1910. @cartouche
  1911. @smallexample
    for (loop = 0; loop < nloops; loop++) @{
        int tag = loop*size + rank;

        if (loop == 0 && rank == 0)
        @{
            token = 0;
            fprintf(stdout, "Start with token value %d\n", token);
        @}
        else
        @{
            starpu_mpi_irecv_detached(token_handle, (rank+size-1)%size, tag,
                                      MPI_COMM_WORLD, NULL, NULL);
        @}

        increment_token();

        if (loop == last_loop && rank == last_rank)
        @{
            starpu_data_acquire(token_handle, STARPU_R);
            fprintf(stdout, "Finished: token value %d\n", token);
            starpu_data_release(token_handle);
        @}
        else
        @{
            starpu_mpi_isend_detached(token_handle, (rank+1)%size, tag+1,
                                      MPI_COMM_WORLD, NULL, NULL);
        @}
    @}

    starpu_task_wait_for_all();
  1938. @end smallexample
  1939. @end cartouche
  1940. @cartouche
  1941. @smallexample
    starpu_mpi_shutdown();
    starpu_shutdown();

    if (rank == last_rank)
    @{
        fprintf(stderr, "[%d] token = %d == %d * %d ?\n", rank, token, nloops, size);
        STARPU_ASSERT(token == nloops*size);
    @}
  1949. @end smallexample
  1950. @end cartouche
  1951. @page
  1952. @node MPI Insert Task Utility
  1953. @section MPI Insert Task Utility
To save the programmer from having to make all communications explicit, StarPU
provides an "MPI Insert Task Utility". The principle is that the application
decides a distribution of the data over the MPI nodes by allocating it and
notifying StarPU of that decision, i.e. telling StarPU which MPI node "owns"
which data. All MPI nodes then process the whole task graph, and StarPU
automatically determines which node actually executes which task, as well as
the required MPI transfers.
  1961. @deftypefun int starpu_data_set_rank (starpu_data_handle @var{handle}, int @var{mpi_rank})
  1962. Tell StarPU-MPI which MPI node "owns" a given data, that is, the node which will
  1963. always keep an up-to-date value, and will by default execute tasks which write
  1964. to it.
  1965. @end deftypefun
  1966. @deftypefun int starpu_data_get_rank (starpu_data_handle @var{handle})
  1967. Returns the last value set by @code{starpu_data_set_rank}.
  1968. @end deftypefun
  1969. @deftypefun void starpu_mpi_insert_task (MPI_Comm @var{comm}, starpu_codelet *@var{cl}, ...)
  1970. Create and submit a task corresponding to @var{cl} with the following
  1971. arguments. The argument list must be zero-terminated.
The arguments following the codelet have the same types as for the
@code{starpu_insert_task} function defined in @ref{Insert Task
Utility}. The extra argument @code{STARPU_EXECUTE_ON_NODE}, followed by an
integer, makes it possible to specify the MPI node that should execute the
codelet. It is also possible to specify that the node owning a specific
piece of data will execute the codelet, by using @code{STARPU_EXECUTE_ON_DATA}
followed by a data handle.
  1979. The internal algorithm is as follows:
  1980. @enumerate
  1981. @item Find out whether we (as an MPI node) are to execute the codelet
  1982. because we own the data to be written to. If different nodes own data
  1983. to be written to, the argument @code{STARPU_EXECUTE_ON_NODE} or
  1984. @code{STARPU_EXECUTE_ON_DATA} has to be used to specify which MPI node will
  1985. execute the task.
@item Send and receive data as requested. Nodes owning data which needs to be
read by the task send it to the MPI node which will execute it; the latter
receives it.
  1989. @item Execute the codelet. This is done by the MPI node selected in the
  1990. 1st step of the algorithm.
  1991. @item In the case when different MPI nodes own data to be written to, send
  1992. written data back to their owners.
  1993. @end enumerate
The algorithm also includes a cache mechanism that avoids sending the same
data twice to the same MPI node, unless the data has been modified in the
meantime.
  1996. @end deftypefun
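For instance, here is a sketch of a call which forces MPI node 1 to execute
the task; the @code{cl} codelet and @code{handle} data handle names are just
illustrations:
@cartouche
@smallexample
starpu_mpi_insert_task(MPI_COMM_WORLD, &cl,
                       STARPU_RW, handle,
                       STARPU_EXECUTE_ON_NODE, 1,
                       0);
@end smallexample
@end cartouche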
  1997. @deftypefun void starpu_mpi_get_data_on_node (MPI_Comm @var{comm}, starpu_data_handle @var{data_handle}, int @var{node})
  1998. @end deftypefun
  1999. @page
Here is a stencil example showing how to use @code{starpu_mpi_insert_task}. One
first needs to define a distribution function which specifies the
locality of the data. Note that the distribution information needs to
be given to StarPU by calling @code{starpu_data_set_rank}.
  2004. @cartouche
  2005. @smallexample
/* Returns the MPI node number where data is */
int my_distrib(int x, int y, int nb_nodes) @{
    /* Cyclic distrib */
    return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;

    // /* Linear distrib */
    // return x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * X;
@}
  2013. @end smallexample
  2014. @end cartouche
  2015. Now the data can be registered within StarPU. Data which are not
  2016. owned but will be needed for computations can be registered through
  2017. the lazy allocation mechanism, i.e. with a @code{home_node} set to -1.
  2018. StarPU will automatically allocate the memory when it is used for the
  2019. first time.
  2020. One can note an optimization here (the @code{else if} test): we only register
  2021. data which will be needed by the tasks that we will execute.
  2022. @cartouche
  2023. @smallexample
unsigned matrix[X][Y];
starpu_data_handle data_handles[X][Y];

for(x = 0; x < X; x++) @{
    for (y = 0; y < Y; y++) @{
        int mpi_rank = my_distrib(x, y, size);
        if (mpi_rank == rank)
            /* Owning data */
            starpu_variable_data_register(&data_handles[x][y], 0,
                                          (uintptr_t)&(matrix[x][y]), sizeof(unsigned));
        else if (rank == mpi_rank+1 || rank == mpi_rank-1)
            /* I don't own that index, but will need it for my computations */
            starpu_variable_data_register(&data_handles[x][y], -1,
                                          (uintptr_t)NULL, sizeof(unsigned));
        else
            /* I know it's useless to allocate anything for this */
            data_handles[x][y] = NULL;
        if (data_handles[x][y])
            starpu_data_set_rank(data_handles[x][y], mpi_rank);
    @}
@}
  2044. @end smallexample
  2045. @end cartouche
  2046. Now @code{starpu_mpi_insert_task()} can be called for the different
  2047. steps of the application.
  2048. @cartouche
  2049. @smallexample
for(loop=0 ; loop<niter; loop++)
    for (x = 1; x < X-1; x++)
        for (y = 1; y < Y-1; y++)
            starpu_mpi_insert_task(MPI_COMM_WORLD, &stencil5_cl,
                                   STARPU_RW, data_handles[x][y],
                                   STARPU_R, data_handles[x-1][y],
                                   STARPU_R, data_handles[x+1][y],
                                   STARPU_R, data_handles[x][y-1],
                                   STARPU_R, data_handles[x][y+1],
                                   0);

starpu_task_wait_for_all();
  2061. @end smallexample
  2062. @end cartouche
That is, all MPI nodes process the whole task graph, but, as mentioned above,
for each task, only the MPI node which owns the data being written to (here,
@code{data_handles[x][y]}) will actually run the task. The other MPI nodes
will automatically send it the required data.
  2067. @node MPI Collective Operations
  2068. @section MPI Collective Operations
  2069. @deftypefun int starpu_mpi_scatter_detached (starpu_data_handle *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm})
Scatter data among the processes of the communicator based on the ownership of
the data. For each piece of data in the array @var{data_handles}, the
process @var{root} sends it to the process owning it.
Processes receiving data must have valid data handles to receive them.
  2074. @end deftypefun
  2075. @deftypefun int starpu_mpi_gather_detached (starpu_data_handle *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm})
Gather data from the different processes of the communicator onto the
process @var{root}. Each process owning a data handle in the array
@var{data_handles} will send it to the process @var{root}. The
process @var{root} must have valid data handles to receive the data.
  2080. @end deftypefun
  2081. @page
  2082. @cartouche
  2083. @smallexample
if (rank == root)
@{
    /* Allocate the vector */
    vector = malloc(nblocks * sizeof(float *));
    for(x=0 ; x<nblocks ; x++)
    @{
        starpu_malloc((void **)&vector[x], block_size*sizeof(float));
    @}
@}

/* Allocate data handles and register data to StarPU */
data_handles = malloc(nblocks*sizeof(starpu_data_handle *));
for(x = 0; x < nblocks ; x++)
@{
    int mpi_rank = my_distrib(x, nodes);
    if (rank == root) @{
        starpu_vector_data_register(&data_handles[x], 0, (uintptr_t)vector[x],
                                    block_size, sizeof(float));
    @}
    else if ((mpi_rank == rank) || (rank == mpi_rank+1 || rank == mpi_rank-1)) @{
        /* I own that index, or I will need it for my computations */
        starpu_vector_data_register(&data_handles[x], -1, (uintptr_t)NULL,
                                    block_size, sizeof(float));
    @}
    else @{
        /* I know it's useless to allocate anything for this */
        data_handles[x] = NULL;
    @}
    if (data_handles[x]) @{
        starpu_data_set_rank(data_handles[x], mpi_rank);
    @}
@}

/* Scatter the matrix among the nodes */
starpu_mpi_scatter_detached(data_handles, nblocks, root, MPI_COMM_WORLD);

/* Calculation */
for(x = 0; x < nblocks ; x++) @{
    if (data_handles[x]) @{
        int owner = starpu_data_get_rank(data_handles[x]);
        if (owner == rank) @{
            starpu_insert_task(&cl, STARPU_RW, data_handles[x], 0);
        @}
    @}
@}

/* Gather the matrix on the root node */
starpu_mpi_gather_detached(data_handles, nblocks, root, MPI_COMM_WORLD);
  2128. @end smallexample
  2129. @end cartouche
  2130. @c ---------------------------------------------------------------------
  2131. @c Tips and Tricks
  2132. @c ---------------------------------------------------------------------
  2133. @node Tips and Tricks
  2134. @chapter Tips and Tricks to know about
  2135. @menu
  2136. * Per-worker library initialization:: How to initialize a computation library once for each worker?
  2137. @end menu
  2138. @node Per-worker library initialization
  2139. @section How to initialize a computation library once for each worker?
Some libraries need to be initialized once for each concurrent instance that
may run on the machine. For instance, a C++ computation class may not be
thread-safe by itself, while several instantiated objects of that class
can be used concurrently. This can be handled in StarPU by initializing one
such object per worker. For instance, the libstarpufft example does the
following to be able to use FFTW.
A global array stores the instantiated objects:
  2146. @smallexample
  2147. fftw_plan plan_cpu[STARPU_NMAXWORKERS];
  2148. @end smallexample
At initialization time of libstarpu, the objects are initialized:
  2150. @smallexample
int workerid;
for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) @{
    switch (starpu_worker_get_type(workerid)) @{
        case STARPU_CPU_WORKER:
            plan_cpu[workerid] = fftw_plan(...);
            break;
    @}
@}
  2159. @end smallexample
  2160. And in the codelet body, they are used:
  2161. @smallexample
static void fft(void *descr[], void *_args)
@{
    int workerid = starpu_worker_get_id();
    fftw_plan plan = plan_cpu[workerid];
    ...

    fftw_execute(plan, ...);
@}
  2169. @end smallexample
  2170. To also deal with the CUDA CUFFT implementation, the @code{fftw_plan} type can
  2171. be replaced with a union of @code{fftw_plan} and @code{cufftHandle}, and the
  2172. @code{switch} statement extended with @code{STARPU_CUDA_WORKER}.
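A sketch of this extension could look as follows; the @code{plans} union array
is just an illustration, and the CUFFT plan creation itself is elided:
@smallexample
/* One plan per worker; which union member is valid depends on
   the type of the worker. */
union @{
    fftw_plan cpu;
    cufftHandle gpu;
@} plans[STARPU_NMAXWORKERS];

switch (starpu_worker_get_type(workerid)) @{
    case STARPU_CPU_WORKER:
        plans[workerid].cpu = fftw_plan(...);
        break;
    case STARPU_CUDA_WORKER:
        plans[workerid].gpu = ...; /* create the CUFFT plan */
        break;
@}
@end smallexample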
  2173. @c ---------------------------------------------------------------------
  2174. @c Configuration options
  2175. @c ---------------------------------------------------------------------
  2176. @node Configuring StarPU
  2177. @chapter Configuring StarPU
  2178. @menu
  2179. * Compilation configuration::
  2180. * Execution configuration through environment variables::
  2181. @end menu
  2182. @node Compilation configuration
  2183. @section Compilation configuration
  2184. The following arguments can be given to the @code{configure} script.
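For instance, a purely illustrative invocation combining some of the options
described below (the FxT installation path is hypothetical):
@smallexample
$ ./configure --enable-verbose --with-fxt=/opt/fxt
@end smallexample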
  2185. @menu
  2186. * Common configuration::
  2187. * Configuring workers::
  2188. * Advanced configuration::
  2189. @end menu
  2190. @node Common configuration
  2191. @subsection Common configuration
  2192. @menu
  2193. * --enable-debug::
  2194. * --enable-fast::
  2195. * --enable-verbose::
  2196. * --enable-coverage::
  2197. @end menu
  2198. @node --enable-debug
  2199. @subsubsection @code{--enable-debug}
  2200. @table @asis
  2201. @item @emph{Description}:
  2202. Enable debugging messages.
  2203. @end table
  2204. @node --enable-fast
  2205. @subsubsection @code{--enable-fast}
  2206. @table @asis
  2207. @item @emph{Description}:
Do not enforce assertions. This saves a lot of time otherwise spent computing them.
  2209. @end table
  2210. @node --enable-verbose
  2211. @subsubsection @code{--enable-verbose}
  2212. @table @asis
  2213. @item @emph{Description}:
Increase the verbosity of the debugging messages. This can be disabled
at runtime by setting the @code{STARPU_SILENT} environment variable to
any value.
  2217. @smallexample
  2218. % STARPU_SILENT=1 ./vector_scal
  2219. @end smallexample
  2220. @end table
  2221. @node --enable-coverage
  2222. @subsubsection @code{--enable-coverage}
  2223. @table @asis
  2224. @item @emph{Description}:
  2225. Enable flags for the @code{gcov} coverage tool.
  2226. @end table
  2227. @node Configuring workers
  2228. @subsection Configuring workers
  2229. @menu
  2230. * --enable-maxcpus::
  2231. * --disable-cpu::
  2232. * --enable-maxcudadev::
  2233. * --disable-cuda::
  2234. * --with-cuda-dir::
  2235. * --with-cuda-include-dir::
  2236. * --with-cuda-lib-dir::
  2237. * --disable-cuda-memcpy-peer::
  2238. * --enable-maxopencldev::
  2239. * --disable-opencl::
  2240. * --with-opencl-dir::
  2241. * --with-opencl-include-dir::
  2242. * --with-opencl-lib-dir::
  2243. * --enable-gordon::
  2244. * --with-gordon-dir::
  2245. * --enable-maximplementations::
  2246. @end menu
  2247. @node --enable-maxcpus
  2248. @subsubsection @code{--enable-maxcpus=<number>}
  2249. @table @asis
  2250. @item @emph{Description}:
  2251. Defines the maximum number of CPU cores that StarPU will support, then
  2252. available as the @code{STARPU_MAXCPUS} macro.
  2253. @end table
  2254. @node --disable-cpu
  2255. @subsubsection @code{--disable-cpu}
  2256. @table @asis
  2257. @item @emph{Description}:
  2258. Disable the use of CPUs of the machine. Only GPUs etc. will be used.
  2259. @end table
  2260. @node --enable-maxcudadev
  2261. @subsubsection @code{--enable-maxcudadev=<number>}
  2262. @table @asis
  2263. @item @emph{Description}:
  2264. Defines the maximum number of CUDA devices that StarPU will support, then
  2265. available as the @code{STARPU_MAXCUDADEVS} macro.
  2266. @end table
  2267. @node --disable-cuda
  2268. @subsubsection @code{--disable-cuda}
  2269. @table @asis
  2270. @item @emph{Description}:
  2271. Disable the use of CUDA, even if a valid CUDA installation was detected.
  2272. @end table
  2273. @node --with-cuda-dir
  2274. @subsubsection @code{--with-cuda-dir=<path>}
  2275. @table @asis
  2276. @item @emph{Description}:
  2277. Specify the directory where CUDA is installed. This directory should notably contain
  2278. @code{include/cuda.h}.
  2279. @end table
  2280. @node --with-cuda-include-dir
  2281. @subsubsection @code{--with-cuda-include-dir=<path>}
  2282. @table @asis
  2283. @item @emph{Description}:
  2284. Specify the directory where CUDA headers are installed. This directory should
  2285. notably contain @code{cuda.h}. This defaults to @code{/include} appended to the
  2286. value given to @code{--with-cuda-dir}.
  2287. @end table
  2288. @node --with-cuda-lib-dir
  2289. @subsubsection @code{--with-cuda-lib-dir=<path>}
  2290. @table @asis
  2291. @item @emph{Description}:
  2292. Specify the directory where the CUDA library is installed. This directory should
  2293. notably contain the CUDA shared libraries (e.g. libcuda.so). This defaults to
  2294. @code{/lib} appended to the value given to @code{--with-cuda-dir}.
  2295. @end table
  2296. @node --disable-cuda-memcpy-peer
  2297. @subsubsection @code{--disable-cuda-memcpy-peer}
  2298. @table @asis
@item @emph{Description}:
Explicitly disables peer transfers when using CUDA 4.0.
  2301. @end table
  2302. @node --enable-maxopencldev
  2303. @subsubsection @code{--enable-maxopencldev=<number>}
  2304. @table @asis
  2305. @item @emph{Description}:
  2306. Defines the maximum number of OpenCL devices that StarPU will support, then
  2307. available as the @code{STARPU_MAXOPENCLDEVS} macro.
  2308. @end table
  2309. @node --disable-opencl
  2310. @subsubsection @code{--disable-opencl}
  2311. @table @asis
  2312. @item @emph{Description}:
  2313. Disable the use of OpenCL, even if the SDK is detected.
  2314. @end table
  2315. @node --with-opencl-dir
  2316. @subsubsection @code{--with-opencl-dir=<path>}
  2317. @table @asis
  2318. @item @emph{Description}:
  2319. Specify the location of the OpenCL SDK. This directory should notably contain
  2320. @code{include/CL/cl.h} (or @code{include/OpenCL/cl.h} on Mac OS).
  2321. @end table
  2322. @node --with-opencl-include-dir
  2323. @subsubsection @code{--with-opencl-include-dir=<path>}
  2324. @table @asis
  2325. @item @emph{Description}:
  2326. Specify the location of OpenCL headers. This directory should notably contain
  2327. @code{CL/cl.h} (or @code{OpenCL/cl.h} on Mac OS). This defaults to
  2328. @code{/include} appended to the value given to @code{--with-opencl-dir}.
  2329. @end table
  2330. @node --with-opencl-lib-dir
  2331. @subsubsection @code{--with-opencl-lib-dir=<path>}
  2332. @table @asis
  2333. @item @emph{Description}:
  2334. Specify the location of the OpenCL library. This directory should notably
  2335. contain the OpenCL shared libraries (e.g. libOpenCL.so). This defaults to
  2336. @code{/lib} appended to the value given to @code{--with-opencl-dir}.
  2337. @end table
  2338. @node --enable-gordon
  2339. @subsubsection @code{--enable-gordon}
  2340. @table @asis
  2341. @item @emph{Description}:
  2342. Enable the use of the Gordon runtime for Cell SPUs.
  2343. @c TODO: rather default to enabled when detected
  2344. @end table
  2345. @node --with-gordon-dir
  2346. @subsubsection @code{--with-gordon-dir=<path>}
  2347. @table @asis
  2348. @item @emph{Description}:
  2349. Specify the location of the Gordon SDK.
  2350. @end table
  2351. @node --enable-maximplementations
  2352. @subsubsection @code{--enable-maximplementations=<number>}
  2353. @table @asis
  2354. @item @emph{Description}:
Defines the maximum number of implementations that can be defined for a single kind of
  2356. device. It is then available as the @code{STARPU_MAXIMPLEMENTATIONS} macro.
  2357. @end table
  2358. @node Advanced configuration
  2359. @subsection Advanced configuration
  2360. @menu
  2361. * --enable-perf-debug::
  2362. * --enable-model-debug::
  2363. * --enable-stats::
  2364. * --enable-maxbuffers::
  2365. * --enable-allocation-cache::
  2366. * --enable-opengl-render::
  2367. * --enable-blas-lib::
  2368. * --with-magma::
  2369. * --with-fxt::
  2370. * --with-perf-model-dir::
  2371. * --with-mpicc::
  2372. * --with-goto-dir::
  2373. * --with-atlas-dir::
  2374. * --with-mkl-cflags::
  2375. * --with-mkl-ldflags::
  2376. @end menu
  2377. @node --enable-perf-debug
  2378. @subsubsection @code{--enable-perf-debug}
  2379. @table @asis
  2380. @item @emph{Description}:
  2381. Enable performance debugging through gprof.
  2382. @end table
  2383. @node --enable-model-debug
  2384. @subsubsection @code{--enable-model-debug}
  2385. @table @asis
  2386. @item @emph{Description}:
  2387. Enable performance model debugging.
  2388. @end table
  2389. @node --enable-stats
  2390. @subsubsection @code{--enable-stats}
  2391. @table @asis
  2392. @item @emph{Description}:
  2393. Enable statistics.
  2394. @end table
  2395. @node --enable-maxbuffers
  2396. @subsubsection @code{--enable-maxbuffers=<nbuffers>}
  2397. @table @asis
  2398. @item @emph{Description}:
  2399. Define the maximum number of buffers that tasks will be able to take
  2400. as parameters, then available as the @code{STARPU_NMAXBUFS} macro.
  2401. @end table
  2402. @node --enable-allocation-cache
  2403. @subsubsection @code{--enable-allocation-cache}
  2404. @table @asis
  2405. @item @emph{Description}:
Enable the use of a data allocation cache to avoid the cost of memory
allocations with CUDA. Still experimental.
  2408. @end table
  2409. @node --enable-opengl-render
  2410. @subsubsection @code{--enable-opengl-render}
  2411. @table @asis
  2412. @item @emph{Description}:
  2413. Enable the use of OpenGL for the rendering of some examples.
  2414. @c TODO: rather default to enabled when detected
  2415. @end table
  2416. @node --enable-blas-lib
  2417. @subsubsection @code{--enable-blas-lib=<name>}
  2418. @table @asis
  2419. @item @emph{Description}:
Specify the BLAS library to be used by some of the examples. The
library name has to be 'atlas' or 'goto'.
  2422. @end table
  2423. @node --with-magma
  2424. @subsubsection @code{--with-magma=<path>}
  2425. @table @asis
  2426. @item @emph{Description}:
  2427. Specify where magma is installed. This directory should notably contain
  2428. @code{include/magmablas.h}.
  2429. @end table
  2430. @node --with-fxt
  2431. @subsubsection @code{--with-fxt=<path>}
  2432. @table @asis
  2433. @item @emph{Description}:
  2434. Specify the location of FxT (for generating traces and rendering them
  2435. using ViTE). This directory should notably contain
  2436. @code{include/fxt/fxt.h}.
  2437. @c TODO add ref to other section
  2438. @end table
  2439. @node --with-perf-model-dir
  2440. @subsubsection @code{--with-perf-model-dir=<dir>}
  2441. @table @asis
  2442. @item @emph{Description}:
  2443. Specify where performance models should be stored (instead of defaulting to the
  2444. current user's home).
  2445. @end table
  2446. @node --with-mpicc
  2447. @subsubsection @code{--with-mpicc=<path to mpicc>}
  2448. @table @asis
  2449. @item @emph{Description}:
  2450. Specify the location of the @code{mpicc} compiler to be used for starpumpi.
  2451. @end table
  2452. @node --with-goto-dir
  2453. @subsubsection @code{--with-goto-dir=<dir>}
  2454. @table @asis
  2455. @item @emph{Description}:
  2456. Specify the location of GotoBLAS.
  2457. @end table
  2458. @node --with-atlas-dir
  2459. @subsubsection @code{--with-atlas-dir=<dir>}
  2460. @table @asis
  2461. @item @emph{Description}:
  2462. Specify the location of ATLAS. This directory should notably contain
  2463. @code{include/cblas.h}.
  2464. @end table
  2465. @node --with-mkl-cflags
  2466. @subsubsection @code{--with-mkl-cflags=<cflags>}
  2467. @table @asis
  2468. @item @emph{Description}:
  2469. Specify the compilation flags for the MKL Library.
  2470. @end table
  2471. @node --with-mkl-ldflags
  2472. @subsubsection @code{--with-mkl-ldflags=<ldflags>}
  2473. @table @asis
  2474. @item @emph{Description}:
  2475. Specify the linking flags for the MKL Library. Note that the
  2476. @url{http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor/}
  2477. website provides a script to determine the linking flags.
  2478. @end table
  2479. @c ---------------------------------------------------------------------
  2480. @c Environment variables
  2481. @c ---------------------------------------------------------------------
  2482. @node Execution configuration through environment variables
  2483. @section Execution configuration through environment variables
  2484. @menu
  2485. * Workers:: Configuring workers
  2486. * Scheduling:: Configuring the Scheduling engine
  2487. * Misc:: Miscellaneous and debug
  2488. @end menu
Note: the values given in the @code{starpu_conf} structure passed when
calling @code{starpu_init} will override the values of the environment
variables.
  2492. @node Workers
  2493. @subsection Configuring workers
  2494. @menu
  2495. * STARPU_NCPUS:: Number of CPU workers
  2496. * STARPU_NCUDA:: Number of CUDA workers
  2497. * STARPU_NOPENCL:: Number of OpenCL workers
  2498. * STARPU_NGORDON:: Number of SPU workers (Cell)
  2499. * STARPU_WORKERS_CPUID:: Bind workers to specific CPUs
  2500. * STARPU_WORKERS_CUDAID:: Select specific CUDA devices
  2501. * STARPU_WORKERS_OPENCLID:: Select specific OpenCL devices
  2502. @end menu
  2503. @node STARPU_NCPUS
  2504. @subsubsection @code{STARPU_NCPUS} -- Number of CPU workers
  2505. @table @asis
  2506. @item @emph{Description}:
Specify the number of CPU workers (thus not including workers dedicated to
controlling accelerators). Note that by default, StarPU will not allocate
more CPU workers than there are physical CPUs, and that some CPUs are used
to control the accelerators.
  2510. @end table
  2511. @node STARPU_NCUDA
  2512. @subsubsection @code{STARPU_NCUDA} -- Number of CUDA workers
  2513. @table @asis
  2514. @item @emph{Description}:
  2515. Specify the number of CUDA devices that StarPU can use. If
  2516. @code{STARPU_NCUDA} is lower than the number of physical devices, it is
  2517. possible to select which CUDA devices should be used by the means of the
  2518. @code{STARPU_WORKERS_CUDAID} environment variable. By default, StarPU will
  2519. create as many CUDA workers as there are CUDA devices.
  2520. @end table
  2521. @node STARPU_NOPENCL
  2522. @subsubsection @code{STARPU_NOPENCL} -- Number of OpenCL workers
  2523. @table @asis
  2524. @item @emph{Description}:
  2525. OpenCL equivalent of the @code{STARPU_NCUDA} environment variable.
  2526. @end table
  2527. @node STARPU_NGORDON
  2528. @subsubsection @code{STARPU_NGORDON} -- Number of SPU workers (Cell)
  2529. @table @asis
  2530. @item @emph{Description}:
  2531. Specify the number of SPUs that StarPU can use.
  2532. @end table
  2533. @node STARPU_WORKERS_CPUID
  2534. @subsubsection @code{STARPU_WORKERS_CPUID} -- Bind workers to specific CPUs
  2535. @table @asis
  2536. @item @emph{Description}:
  2537. Passing an array of integers (starting from 0) in @code{STARPU_WORKERS_CPUID}
  2538. specifies on which logical CPU the different workers should be
  2539. bound. For instance, if @code{STARPU_WORKERS_CPUID = "0 1 4 5"}, the first
  2540. worker will be bound to logical CPU #0, the second CPU worker will be bound to
  2541. logical CPU #1 and so on. Note that the logical ordering of the CPUs is either
  2542. determined by the OS, or provided by the @code{hwloc} library in case it is
  2543. available.
  2544. Note that the first workers correspond to the CUDA workers, then come the
  2545. OpenCL and the SPU, and finally the CPU workers. For example if
  2546. we have @code{STARPU_NCUDA=1}, @code{STARPU_NOPENCL=1}, @code{STARPU_NCPUS=2}
  2547. and @code{STARPU_WORKERS_CPUID = "0 2 1 3"}, the CUDA device will be controlled
  2548. by logical CPU #0, the OpenCL device will be controlled by logical CPU #2, and
  2549. the logical CPUs #1 and #3 will be used by the CPU workers.
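For instance, the binding just described would be obtained by running (reusing
the @code{vector_scal} example program):
@smallexample
% STARPU_NCUDA=1 STARPU_NOPENCL=1 STARPU_NCPUS=2 \
  STARPU_WORKERS_CPUID="0 2 1 3" ./vector_scal
@end smallexample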
  2550. If the number of workers is larger than the array given in
  2551. @code{STARPU_WORKERS_CPUID}, the workers are bound to the logical CPUs in a
  2552. round-robin fashion: if @code{STARPU_WORKERS_CPUID = "0 1"}, the first and the
  2553. third (resp. second and fourth) workers will be put on CPU #0 (resp. CPU #1).
  2554. This variable is ignored if the @code{use_explicit_workers_bindid} flag of the
  2555. @code{starpu_conf} structure passed to @code{starpu_init} is set.
  2556. @end table
  2557. @node STARPU_WORKERS_CUDAID
  2558. @subsubsection @code{STARPU_WORKERS_CUDAID} -- Select specific CUDA devices
  2559. @table @asis
  2560. @item @emph{Description}:
  2561. Similarly to the @code{STARPU_WORKERS_CPUID} environment variable, it is
  2562. possible to select which CUDA devices should be used by StarPU. On a machine
  2563. equipped with 4 GPUs, setting @code{STARPU_WORKERS_CUDAID = "1 3"} and
  2564. @code{STARPU_NCUDA=2} specifies that 2 CUDA workers should be created, and that
  2565. they should use CUDA devices #1 and #3 (the logical ordering of the devices is
  2566. the one reported by CUDA).
  2567. This variable is ignored if the @code{use_explicit_workers_cuda_gpuid} flag of
  2568. the @code{starpu_conf} structure passed to @code{starpu_init} is set.
  2569. @end table
  2570. @node STARPU_WORKERS_OPENCLID
  2571. @subsubsection @code{STARPU_WORKERS_OPENCLID} -- Select specific OpenCL devices
  2572. @table @asis
  2573. @item @emph{Description}:
  2574. OpenCL equivalent of the @code{STARPU_WORKERS_CUDAID} environment variable.
  2575. This variable is ignored if the @code{use_explicit_workers_opencl_gpuid} flag of
  2576. the @code{starpu_conf} structure passed to @code{starpu_init} is set.
  2577. @end table
  2578. @node Scheduling
  2579. @subsection Configuring the Scheduling engine
  2580. @menu
  2581. * STARPU_SCHED:: Scheduling policy
  2582. * STARPU_CALIBRATE:: Calibrate performance models
  2583. * STARPU_PREFETCH:: Use data prefetch
  2584. * STARPU_SCHED_ALPHA:: Computation factor
  2585. * STARPU_SCHED_BETA:: Communication factor
  2586. @end menu
  2587. @node STARPU_SCHED
  2588. @subsubsection @code{STARPU_SCHED} -- Scheduling policy
  2589. @table @asis
  2590. @item @emph{Description}:
This chooses between the different scheduling policies proposed by StarPU:
random, work stealing, greedy, with performance models, etc.
  2593. Use @code{STARPU_SCHED=help} to get the list of available schedulers.
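For instance, to select the @code{dmda} policy mentioned below (reusing the
@code{vector_scal} example program):
@smallexample
% STARPU_SCHED=dmda ./vector_scal
@end smallexample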
  2594. @end table
  2595. @node STARPU_CALIBRATE
  2596. @subsubsection @code{STARPU_CALIBRATE} -- Calibrate performance models
  2597. @table @asis
  2598. @item @emph{Description}:
  2599. If this variable is set to 1, the performance models are calibrated during
  2600. the execution. If it is set to 2, the previous values are dropped to restart
calibration from scratch. Setting this variable to 0 disables calibration,
which is the default behaviour.
  2603. Note: this currently only applies to @code{dm}, @code{dmda} and @code{heft} scheduling policies.
  2604. @end table
  2605. @node STARPU_PREFETCH
  2606. @subsubsection @code{STARPU_PREFETCH} -- Use data prefetch
  2607. @table @asis
  2608. @item @emph{Description}:
  2609. This variable indicates whether data prefetching should be enabled (0 means
  2610. that it is disabled). If prefetching is enabled, when a task is scheduled to be
  2611. executed e.g. on a GPU, StarPU will request an asynchronous transfer in
  2612. advance, so that data is already present on the GPU when the task starts. As a
  2613. result, computation and data transfers are overlapped.
  2614. Note that prefetching is enabled by default in StarPU.
  2615. @end table
  2616. @node STARPU_SCHED_ALPHA
  2617. @subsubsection @code{STARPU_SCHED_ALPHA} -- Computation factor
  2618. @table @asis
  2619. @item @emph{Description}:
  2620. To estimate the cost of a task StarPU takes into account the estimated
  2621. computation time (obtained thanks to performance models). The alpha factor is
  2622. the coefficient to be applied to it before adding it to the communication part.
  2623. @end table
  2624. @node STARPU_SCHED_BETA
  2625. @subsubsection @code{STARPU_SCHED_BETA} -- Communication factor
  2626. @table @asis
  2627. @item @emph{Description}:
  2628. To estimate the cost of a task StarPU takes into account the estimated
  2629. data transfer time (obtained thanks to performance models). The beta factor is
  2630. the coefficient to be applied to it before adding it to the computation part.
  2631. @end table
  2632. @node Misc
  2633. @subsection Miscellaneous and debug
  2634. @menu
  2635. * STARPU_SILENT:: Disable verbose mode
  2636. * STARPU_LOGFILENAME:: Select debug file name
  2637. * STARPU_FXT_PREFIX:: FxT trace location
  2638. * STARPU_LIMIT_GPU_MEM:: Restrict memory size on the GPUs
  2639. * STARPU_GENERATE_TRACE:: Generate a Paje trace when StarPU is shut down
  2640. @end menu
  2641. @node STARPU_SILENT
  2642. @subsubsection @code{STARPU_SILENT} -- Disable verbose mode
  2643. @table @asis
  2644. @item @emph{Description}:
This variable makes it possible to disable verbose mode at runtime when StarPU
has been configured with the @code{--enable-verbose} option.
  2647. @end table
  2648. @node STARPU_LOGFILENAME
  2649. @subsubsection @code{STARPU_LOGFILENAME} -- Select debug file name
  2650. @table @asis
  2651. @item @emph{Description}:
This variable specifies the file to which the debugging output should be saved.
  2653. @end table
  2654. @node STARPU_FXT_PREFIX
  2655. @subsubsection @code{STARPU_FXT_PREFIX} -- FxT trace location
  2656. @table @asis
@item @emph{Description}:
  2658. This variable specifies in which directory to save the trace generated if FxT is enabled. It needs to have a trailing '/' character.
  2659. @end table
  2660. @node STARPU_LIMIT_GPU_MEM
  2661. @subsubsection @code{STARPU_LIMIT_GPU_MEM} -- Restrict memory size on the GPUs
  2662. @table @asis
@item @emph{Description}:
This variable specifies the maximum number of megabytes that should be
available to the application on each GPU. If this value is smaller than
the size of the memory of a GPU, StarPU pre-allocates a buffer that occupies
the rest of the memory on the device. This variable is intended to be used
for experimental purposes, as it emulates devices that have a limited amount
of memory.
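For instance, to emulate GPUs with only 1 GB of memory (again with the
@code{vector_scal} example program):
@smallexample
% STARPU_LIMIT_GPU_MEM=1024 ./vector_scal
@end smallexample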
  2669. @end table
  2670. @node STARPU_GENERATE_TRACE
  2671. @subsubsection @code{STARPU_GENERATE_TRACE} -- Generate a Paje trace when StarPU is shut down
  2672. @table @asis
@item @emph{Description}:
When set to 1, this variable indicates that StarPU should automatically
generate a Paje trace when @code{starpu_shutdown} is called.
  2676. @end table
  2677. @c ---------------------------------------------------------------------
  2678. @c StarPU API
  2679. @c ---------------------------------------------------------------------
  2680. @node StarPU API
  2681. @chapter StarPU API
  2682. @menu
  2683. * Initialization and Termination:: Initialization and Termination methods
  2684. * Workers' Properties:: Methods to enumerate workers' properties
  2685. * Data Library:: Methods to manipulate data
  2686. * Data Interfaces::
  2687. * Data Partition::
  2688. * Codelets and Tasks:: Methods to construct tasks
  2689. * Explicit Dependencies:: Explicit Dependencies
  2690. * Implicit Data Dependencies:: Implicit Data Dependencies
  2691. * Performance Model API::
  2692. * Profiling API:: Profiling API
  2693. * CUDA extensions:: CUDA extensions
  2694. * OpenCL extensions:: OpenCL extensions
  2695. * Cell extensions:: Cell extensions
  2696. * Miscellaneous helpers::
  2697. @end menu
  2698. @node Initialization and Termination
  2699. @section Initialization and Termination
  2700. @menu
  2701. * starpu_init:: Initialize StarPU
  2702. * struct starpu_conf:: StarPU runtime configuration
  2703. * starpu_conf_init:: Initialize starpu_conf structure
  2704. * starpu_shutdown:: Terminate StarPU
  2705. @end menu
  2706. @node starpu_init
  2707. @subsection @code{starpu_init} -- Initialize StarPU
  2708. @table @asis
  2709. @item @emph{Description}:
This is the StarPU initialization method, which must be called prior to any other
  2711. StarPU call. It is possible to specify StarPU's configuration (e.g. scheduling
  2712. policy, number of cores, ...) by passing a non-null argument. Default
  2713. configuration is used if the passed argument is @code{NULL}.
  2714. @item @emph{Return value}:
  2715. Upon successful completion, this function returns 0. Otherwise, @code{-ENODEV}
  2716. indicates that no worker was available (so that StarPU was not initialized).
  2717. @item @emph{Prototype}:
  2718. @code{int starpu_init(struct starpu_conf *conf);}
  2719. @end table
  2720. @node struct starpu_conf
  2721. @subsection @code{struct starpu_conf} -- StarPU runtime configuration
  2722. @table @asis
  2723. @item @emph{Description}:
  2724. This structure is passed to the @code{starpu_init} function in order
  2725. to configure StarPU.
  2726. When the default value is used, StarPU automatically selects the number
  2727. of processing units and takes the default scheduling policy. This parameter
  2728. overwrites the equivalent environment variables.
  2729. @item @emph{Fields}:
  2730. @table @asis
  2731. @item @code{sched_policy_name} (default = NULL):
  2732. This is the name of the scheduling policy. This can also be specified with the
  2733. @code{STARPU_SCHED} environment variable.
  2734. @item @code{sched_policy} (default = NULL):
  2735. This is the definition of the scheduling policy. This field is ignored
  2736. if @code{sched_policy_name} is set.
  2737. @item @code{ncpus} (default = -1):
  2738. This is the number of CPU cores that StarPU can use. This can also be
  2739. specified with the @code{STARPU_NCPUS} environment variable.
  2740. @item @code{ncuda} (default = -1):
  2741. This is the number of CUDA devices that StarPU can use. This can also be
  2742. specified with the @code{STARPU_NCUDA} environment variable.
  2743. @item @code{nopencl} (default = -1):
  2744. This is the number of OpenCL devices that StarPU can use. This can also be
  2745. specified with the @code{STARPU_NOPENCL} environment variable.
  2746. @item @code{nspus} (default = -1):
  2747. This is the number of Cell SPUs that StarPU can use. This can also be
  2748. specified with the @code{STARPU_NGORDON} environment variable.
  2749. @item @code{use_explicit_workers_bindid} (default = 0)
  2750. If this flag is set, the @code{workers_bindid} array indicates where the
  2751. different workers are bound, otherwise StarPU automatically selects where to
  2752. bind the different workers unless the @code{STARPU_WORKERS_CPUID} environment
  2753. variable is set. The @code{STARPU_WORKERS_CPUID} environment variable is
  2754. ignored if the @code{use_explicit_workers_bindid} flag is set.
  2755. @item @code{workers_bindid[STARPU_NMAXWORKERS]}
  2756. If the @code{use_explicit_workers_bindid} flag is set, this array indicates
  2757. where to bind the different workers. The i-th entry of the
  2758. @code{workers_bindid} indicates the logical identifier of the processor which
  2759. should execute the i-th worker. Note that the logical ordering of the CPUs is
  2760. either determined by the OS, or provided by the @code{hwloc} library in case it
  2761. is available.
  2762. When this flag is set, the @ref{STARPU_WORKERS_CPUID} environment variable is
  2763. ignored.
  2764. @item @code{use_explicit_workers_cuda_gpuid} (default = 0)
  2765. If this flag is set, the CUDA workers will be attached to the CUDA devices
  2766. specified in the @code{workers_cuda_gpuid} array. Otherwise, StarPU affects the
  2767. CUDA devices in a round-robin fashion.
  2768. When this flag is set, the @ref{STARPU_WORKERS_CUDAID} environment variable is
  2769. ignored.
  2770. @item @code{workers_cuda_gpuid[STARPU_NMAXWORKERS]}
  2771. If the @code{use_explicit_workers_cuda_gpuid} flag is set, this array contains
  2772. the logical identifiers of the CUDA devices (as used by @code{cudaGetDevice}).
  2773. @item @code{use_explicit_workers_opencl_gpuid} (default = 0)
  2774. If this flag is set, the OpenCL workers will be attached to the OpenCL devices
  2775. specified in the @code{workers_opencl_gpuid} array. Otherwise, StarPU affects the
  2776. OpenCL devices in a round-robin fashion.
@item @code{workers_opencl_gpuid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_opencl_gpuid} flag is set, this array
contains the logical identifiers of the OpenCL devices to be used.
  2778. @item @code{calibrate} (default = 0):
  2779. If this flag is set, StarPU will calibrate the performance models when
  2780. executing tasks. If this value is equal to -1, the default value is used. The
  2781. default value is overwritten by the @code{STARPU_CALIBRATE} environment
  2782. variable when it is set.
@item @code{single_combined_worker} (default = 0):
By default, StarPU creates various combined workers according to the machine
structure. Some parallel libraries (e.g. most OpenMP implementations) however do
not support concurrent calls to parallel code. In such a case, setting this flag
makes StarPU only create one combined worker, containing all
the CPU workers. The default value is overwritten by the
@code{STARPU_SINGLE_COMBINED_WORKER} environment variable when it is set.
@end table
@end table
  2792. @node starpu_conf_init
  2793. @subsection @code{starpu_conf_init} -- Initialize starpu_conf structure
  2794. @table @asis
@item @emph{Description}:
This function initializes the @code{starpu_conf} structure passed as argument
with the default values. In case some configuration parameters are already
specified through environment variables, @code{starpu_conf_init} initializes
the fields of the structure according to the environment variables. For
instance if @code{STARPU_CALIBRATE} is set, its value is put in the
@code{.calibrate} field of the structure passed as argument.
  2801. @item @emph{Return value}:
  2802. Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
  2803. indicates that the argument was NULL.
  2804. @item @emph{Prototype}:
  2805. @code{int starpu_conf_init(struct starpu_conf *conf);}
  2806. @end table
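For instance, here is a minimal sketch of overriding some fields before
initialization (assuming the @code{dmda} scheduling policy is available):
@smallexample
struct starpu_conf conf;

starpu_conf_init(&conf);
conf.sched_policy_name = "dmda";
conf.ncpus = 2; /* use only two CPU workers */
starpu_init(&conf);
@end smallexample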
  2807. @node starpu_shutdown
  2808. @subsection @code{starpu_shutdown} -- Terminate StarPU
  2809. @deftypefun void starpu_shutdown (void)
This is the StarPU termination method. It must be called at the end of the
  2811. application: statistics and other post-mortem debugging information are not
  2812. guaranteed to be available until this method has been called.
  2813. @end deftypefun
  2814. @node Workers' Properties
  2815. @section Workers' Properties
  2816. @menu
  2817. * starpu_worker_get_count:: Get the number of processing units
  2818. * starpu_worker_get_count_by_type:: Get the number of processing units of a given type
* starpu_cpu_worker_get_count:: Get the number of CPUs controlled by StarPU
  2820. * starpu_cuda_worker_get_count:: Get the number of CUDA devices controlled by StarPU
  2821. * starpu_opencl_worker_get_count:: Get the number of OpenCL devices controlled by StarPU
  2822. * starpu_spu_worker_get_count:: Get the number of Cell SPUs controlled by StarPU
  2823. * starpu_worker_get_id:: Get the identifier of the current worker
  2824. * starpu_worker_get_ids_by_type:: Get the list of identifiers of workers with a given type
  2825. * starpu_worker_get_devid:: Get the device identifier of a worker
  2826. * starpu_worker_get_type:: Get the type of processing unit associated to a worker
  2827. * starpu_worker_get_name:: Get the name of a worker
  2828. * starpu_worker_get_memory_node:: Get the memory node of a worker
  2829. @end menu
  2830. @node starpu_worker_get_count
  2831. @subsection @code{starpu_worker_get_count} -- Get the number of processing units
  2832. @deftypefun unsigned starpu_worker_get_count (void)
  2833. This function returns the number of workers (i.e. processing units executing
  2834. StarPU tasks). The returned value should be at most @code{STARPU_NMAXWORKERS}.
  2835. @end deftypefun
  2836. @node starpu_worker_get_count_by_type
  2837. @subsection @code{starpu_worker_get_count_by_type} -- Get the number of processing units of a given type
  2838. @deftypefun int starpu_worker_get_count_by_type ({enum starpu_archtype} @var{type})
Returns the number of workers of the type indicated by the argument. A positive
(or zero) value is returned in case of success; otherwise, @code{-EINVAL}
indicates that the type is not valid.
  2842. @end deftypefun
  2843. @node starpu_cpu_worker_get_count
@subsection @code{starpu_cpu_worker_get_count} -- Get the number of CPUs controlled by StarPU
  2845. @deftypefun unsigned starpu_cpu_worker_get_count (void)
  2846. This function returns the number of CPUs controlled by StarPU. The returned
  2847. value should be at most @code{STARPU_MAXCPUS}.
  2848. @end deftypefun
  2849. @node starpu_cuda_worker_get_count
  2850. @subsection @code{starpu_cuda_worker_get_count} -- Get the number of CUDA devices controlled by StarPU
  2851. @deftypefun unsigned starpu_cuda_worker_get_count (void)
  2852. This function returns the number of CUDA devices controlled by StarPU. The returned
  2853. value should be at most @code{STARPU_MAXCUDADEVS}.
  2854. @end deftypefun
  2855. @node starpu_opencl_worker_get_count
  2856. @subsection @code{starpu_opencl_worker_get_count} -- Get the number of OpenCL devices controlled by StarPU
  2857. @deftypefun unsigned starpu_opencl_worker_get_count (void)
  2858. This function returns the number of OpenCL devices controlled by StarPU. The returned
  2859. value should be at most @code{STARPU_MAXOPENCLDEVS}.
  2860. @end deftypefun
  2861. @node starpu_spu_worker_get_count
  2862. @subsection @code{starpu_spu_worker_get_count} -- Get the number of Cell SPUs controlled by StarPU
  2863. @deftypefun unsigned starpu_spu_worker_get_count (void)
  2864. This function returns the number of Cell SPUs controlled by StarPU.
  2865. @end deftypefun
  2866. @node starpu_worker_get_id
  2867. @subsection @code{starpu_worker_get_id} -- Get the identifier of the current worker
  2868. @deftypefun int starpu_worker_get_id (void)
  2869. This function returns the identifier of the worker associated to the calling
  2870. thread. The returned value is either -1 if the current context is not a StarPU
  2871. worker (i.e. when called from the application outside a task or a callback), or
  2872. an integer between 0 and @code{starpu_worker_get_count() - 1}.
  2873. @end deftypefun
  2874. @node starpu_worker_get_ids_by_type
  2875. @subsection @code{starpu_worker_get_ids_by_type} -- Get the list of identifiers of workers with a given type
  2876. @deftypefun int starpu_worker_get_ids_by_type ({enum starpu_archtype} @var{type}, int *@var{workerids}, int @var{maxsize})
Fill the @var{workerids} array with the identifiers of the workers that have
the type indicated in the first argument. The @var{maxsize} argument indicates
the size of the @var{workerids} array. The returned value gives the number of
identifiers that were put in the array. @code{-ERANGE} is returned if
@var{maxsize} is lower than the number of workers with the appropriate type:
in that case, the array is filled with the first @var{maxsize} elements. To
avoid such overflows, the value of @var{maxsize} can be chosen by means of the
@code{starpu_worker_get_count_by_type} function, or by passing a value greater
than or equal to @code{STARPU_NMAXWORKERS}.
  2885. @end deftypefun
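A minimal usage sketch:
@smallexample
int workerids[STARPU_NMAXWORKERS];
int n = starpu_worker_get_ids_by_type(STARPU_CUDA_WORKER,
                                      workerids, STARPU_NMAXWORKERS);
/* workerids[0..n-1] now hold the identifiers of the CUDA workers */
@end smallexample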
  2886. @node starpu_worker_get_devid
  2887. @subsection @code{starpu_worker_get_devid} -- Get the device identifier of a worker
  2888. @deftypefun int starpu_worker_get_devid (int @var{id})
This function returns the device id of the worker associated to an identifier
  2890. (as returned by the @code{starpu_worker_get_id} function). In the case of a
  2891. CUDA worker, this device identifier is the logical device identifier exposed by
  2892. CUDA (used by the @code{cudaGetDevice} function for instance). The device
  2893. identifier of a CPU worker is the logical identifier of the core on which the
  2894. worker was bound; this identifier is either provided by the OS or by the
  2895. @code{hwloc} library in case it is available.
  2896. @end deftypefun
  2897. @node starpu_worker_get_type
  2898. @subsection @code{starpu_worker_get_type} -- Get the type of processing unit associated to a worker
  2899. @deftypefun {enum starpu_archtype} starpu_worker_get_type (int @var{id})
  2900. This function returns the type of worker associated to an identifier (as
  2901. returned by the @code{starpu_worker_get_id} function). The returned value
  2902. indicates the architecture of the worker: @code{STARPU_CPU_WORKER} for a CPU
  2903. core, @code{STARPU_CUDA_WORKER} for a CUDA device,
@code{STARPU_OPENCL_WORKER} for an OpenCL device, and
  2905. @code{STARPU_GORDON_WORKER} for a Cell SPU. The value returned for an invalid
  2906. identifier is unspecified.
  2907. @end deftypefun
  2908. @node starpu_worker_get_name
  2909. @subsection @code{starpu_worker_get_name} -- Get the name of a worker
  2910. @deftypefun void starpu_worker_get_name (int @var{id}, char *@var{dst}, size_t @var{maxlen})
  2911. StarPU associates a unique human readable string to each processing unit. This
  2912. function copies at most the @var{maxlen} first bytes of the unique string
  2913. associated to a worker identified by its identifier @var{id} into the
  2914. @var{dst} buffer. The caller is responsible for ensuring that the @var{dst}
  2915. is a valid pointer to a buffer of @var{maxlen} bytes at least. Calling this
function on an invalid identifier results in unspecified behaviour.
  2917. @end deftypefun
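For instance, here is a sketch of retrieving, from within a codelet, the name
of the worker executing it:
@smallexample
char name[64];

starpu_worker_get_name(starpu_worker_get_id(), name, sizeof(name));
fprintf(stderr, "running on worker %s\n", name);
@end smallexample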
  2918. @node starpu_worker_get_memory_node
  2919. @subsection @code{starpu_worker_get_memory_node} -- Get the memory node of a worker
  2920. @deftypefun unsigned starpu_worker_get_memory_node (unsigned @var{workerid})
  2921. This function returns the identifier of the memory node associated to the
  2922. worker identified by @var{workerid}.
  2923. @end deftypefun
@node Data Library
@section Data Library
This section describes the data management facilities provided by StarPU.
We show how to use existing data interfaces in @ref{Data Interfaces}, but developers can
design their own data interfaces if required.
@menu
* starpu_malloc:: Allocate data and pin it
* starpu_access_mode:: Data access mode
* unsigned memory_node:: Memory node
* starpu_data_handle:: StarPU opaque data handle
* void *interface:: StarPU data interface
* starpu_data_register:: Register a piece of data to StarPU
* starpu_data_unregister:: Unregister a piece of data from StarPU
* starpu_data_unregister_no_coherency:: Unregister a piece of data from StarPU without coherency
* starpu_data_invalidate:: Invalidate all data replicates
* starpu_data_acquire:: Access registered data from the application
* starpu_data_acquire_cb:: Access registered data from the application asynchronously
* STARPU_DATA_ACQUIRE_CB:: Access registered data from the application asynchronously, macro
* starpu_data_release:: Release registered data from the application
* starpu_data_set_wt_mask:: Set the Write-Through mask
* starpu_data_prefetch_on_node:: Prefetch data to a given node
@end menu
@node starpu_malloc
@subsection @code{starpu_malloc} -- Allocate data and pin it
@deftypefun int starpu_malloc (void **@var{A}, size_t @var{dim})
This function allocates data of the given size in main memory. It will also try
to pin it in CUDA or OpenCL, so that data transfers from this buffer can be
asynchronous and thus overlap with computation. The allocated buffer must be
freed with the @code{starpu_free} function.
@end deftypefun
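As a minimal sketch of the intended allocation pattern (the buffer size is
arbitrary):
@cartouche
@smallexample
float *buffer;

/* Allocate 1024 floats in main memory, pinned for asynchronous transfers. */
starpu_malloc((void **)&buffer, 1024 * sizeof(float));

/* ... register the buffer, submit tasks, wait for their completion ... */

starpu_free(buffer);
@end smallexample
@end cartouche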
@node starpu_access_mode
@subsection @code{starpu_access_mode} -- Data access mode
This datatype describes a data access mode. The different available modes are:
@table @asis
@item @code{STARPU_R} read-only mode.
@item @code{STARPU_W} write-only mode.
@item @code{STARPU_RW} read-write mode. This is equivalent to @code{STARPU_R|STARPU_W}.
@item @code{STARPU_SCRATCH} scratch memory. A temporary buffer is allocated for
the task, but StarPU does not enforce data consistency, i.e. each device has
its own buffer, independently from the others (even for CPUs). This is useful
for temporary variables. For now, no behaviour is defined concerning the
relation with the @code{STARPU_R}/@code{STARPU_W} modes and the value provided
at registration, i.e. the value of the scratch buffer is undefined at entry of
the codelet function, but this is being considered for future extensions.
@item @code{STARPU_REDUX} reduction mode. TODO: document, as well as @code{starpu_data_set_reduction_methods}
@end table
@node unsigned memory_node
@subsection @code{unsigned memory_node} -- Memory node
@table @asis
@item @emph{Description}:
Every worker is associated to a memory node which is a logical abstraction of
the address space from which the processing unit gets its data. For instance,
the memory node associated to the different CPU workers represents main memory
(RAM), while the memory node associated to a GPU is the DRAM embedded on the
device. Every memory node is identified by a logical index which can be
obtained with the @code{starpu_worker_get_memory_node} function. When
registering a piece of data to StarPU, the specified memory node indicates
where the piece of data initially resides (we also call this memory node the
home node of a piece of data).
@end table
@node starpu_data_handle
@subsection @code{starpu_data_handle} -- StarPU opaque data handle
@table @asis
@item @emph{Description}:
StarPU uses @code{starpu_data_handle} as an opaque handle to manage a piece of
data. Once a piece of data has been registered to StarPU, it is associated to a
@code{starpu_data_handle} which keeps track of the state of the piece of data
over the entire machine, so that we can maintain data consistency and locate
data replicates for instance.
@end table
@node void *interface
@subsection @code{void *interface} -- StarPU data interface
@table @asis
@item @emph{Description}:
Data management is done at a high level in StarPU: rather than accessing a mere
list of contiguous buffers, the tasks may manipulate data that are described by
a high-level construct which we call data interface.
An example of data interface is the "vector" interface which describes a
contiguous data array on a specific memory node. This interface is a simple
structure containing the number of elements in the array, the size of the
elements, and the address of the array in the appropriate address space (this
address may be invalid if there is no valid copy of the array in the memory
node). More information on the data interfaces provided by StarPU is
given in @ref{Data Interfaces}.
When a piece of data managed by StarPU is used by a task, the task
implementation is given a pointer to an interface describing a valid copy of
the data that is accessible from the current processing unit.
@end table
@node starpu_data_register
@subsection @code{starpu_data_register} -- Register a piece of data to StarPU
@deftypefun void starpu_data_register (starpu_data_handle *@var{handleptr}, uint32_t @var{home_node}, void *@var{interface}, {struct starpu_data_interface_ops_t} *@var{ops})
Register a piece of data into the handle located at the @var{handleptr}
address. The @var{interface} buffer contains the initial description of the
data in the home node. The @var{ops} argument is a pointer to a structure
describing the different methods used to manipulate this type of interface. See
@ref{struct starpu_data_interface_ops_t} for more details on this structure.
If @code{home_node} is -1, StarPU will automatically
allocate the memory when it is used for the
first time in write-only mode. Once such a data handle has been automatically
allocated, it is possible to access it using any access mode.
Note that StarPU supplies a set of predefined types of interface (e.g. vector or
matrix) which can be registered by means of helper functions (e.g.
@code{starpu_vector_data_register} or @code{starpu_matrix_data_register}).
@end deftypefun
@node starpu_data_unregister
@subsection @code{starpu_data_unregister} -- Unregister a piece of data from StarPU
@deftypefun void starpu_data_unregister (starpu_data_handle @var{handle})
This function unregisters a data handle from StarPU. If the data was
automatically allocated by StarPU because the home node was -1, all
automatically allocated buffers are freed. Otherwise, a valid copy of the data
is put back into the home node, in the buffer that was initially registered.
Using a data handle that has been unregistered from StarPU results in
undefined behaviour.
@end deftypefun
@node starpu_data_unregister_no_coherency
@subsection @code{starpu_data_unregister_no_coherency} -- Unregister a piece of data from StarPU without coherency
@deftypefun void starpu_data_unregister_no_coherency (starpu_data_handle @var{handle})
This is the same as @code{starpu_data_unregister}, except that StarPU does not
put back a valid copy into the home node, in the buffer that was initially
registered.
@end deftypefun
@node starpu_data_invalidate
@subsection @code{starpu_data_invalidate} -- Invalidate all data replicates
@deftypefun void starpu_data_invalidate (starpu_data_handle @var{handle})
Destroy all replicates of the data handle. After data invalidation, the first
access to the handle must be performed in write-only mode. Accessing an
invalidated data in read mode results in undefined behaviour.
@end deftypefun
@c TODO create a specific section about user interaction with the DSM?
@node starpu_data_acquire
@subsection @code{starpu_data_acquire} -- Access registered data from the application
@deftypefun int starpu_data_acquire (starpu_data_handle @var{handle}, starpu_access_mode @var{mode})
The application must call this function prior to accessing registered data from
main memory outside tasks. StarPU ensures that the application will get an
up-to-date copy of the data in main memory located where the data was
originally registered, and that all concurrent accesses (e.g. from tasks) will
be consistent with the access mode specified in the @var{mode} argument.
@code{starpu_data_release} must be called once the application does not need to
access the piece of data anymore. Note that implicit data
dependencies are also enforced by @code{starpu_data_acquire}, i.e.
@code{starpu_data_acquire} will wait for all tasks scheduled to work on
the data, unless they have been disabled explicitly by calling
@code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
@code{starpu_data_acquire} is a blocking call, so it cannot be called from
tasks or from their callbacks (in that case, @code{starpu_data_acquire} returns
@code{-EDEADLK}). Upon successful completion, this function returns 0.
@end deftypefun
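As an illustration, here is a sketch of the blocking acquire/release pattern
around a registered variable; the variable and handle names are ours:
@cartouche
@smallexample
float var = 42.0;
starpu_data_handle var_handle;
starpu_variable_data_register(&var_handle, 0, (uintptr_t)&var, sizeof(var));

/* ... submit tasks that modify var through var_handle ... */

/* Wait for those tasks and fetch an up-to-date copy into main memory. */
starpu_data_acquire(var_handle, STARPU_R);
fprintf(stderr, "current value: %f\n", var);
starpu_data_release(var_handle);
@end smallexample
@end cartouche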
@node starpu_data_acquire_cb
@subsection @code{starpu_data_acquire_cb} -- Access registered data from the application asynchronously
@deftypefun int starpu_data_acquire_cb (starpu_data_handle @var{handle}, starpu_access_mode @var{mode}, void (*@var{callback})(void *), void *@var{arg})
@code{starpu_data_acquire_cb} is the asynchronous equivalent of
@code{starpu_data_acquire}. When the data specified in the first argument is
available in the appropriate access mode, the callback function is executed.
The application may access the requested data during the execution of this
callback. The callback function must call @code{starpu_data_release} once the
application does not need to access the piece of data anymore.
Note that implicit data dependencies are also enforced by
@code{starpu_data_acquire_cb} in case they are enabled.
Contrary to @code{starpu_data_acquire}, this function is non-blocking and may
be called from task callbacks. Upon successful completion, this function
returns 0.
@end deftypefun
@node STARPU_DATA_ACQUIRE_CB
@subsection @code{STARPU_DATA_ACQUIRE_CB} -- Access registered data from the application asynchronously, macro
@deftypefun STARPU_DATA_ACQUIRE_CB (starpu_data_handle @var{handle}, starpu_access_mode @var{mode}, code)
@code{STARPU_DATA_ACQUIRE_CB} is the same as @code{starpu_data_acquire_cb},
except that the code to be executed in a callback is directly provided as a
macro parameter, and the data handle is automatically released after it. This
makes it easy to execute code which depends on the value of some registered
data. This is non-blocking too and may be called from task callbacks.
@end deftypefun
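For instance, assuming the @code{var} and @code{var_handle} pair from the
previous sketch, the value could be printed asynchronously as follows:
@cartouche
@smallexample
/* The code fragment runs once the handle is available in STARPU_R mode;
   the handle is released automatically afterwards. */
STARPU_DATA_ACQUIRE_CB(var_handle, STARPU_R,
                       fprintf(stderr, "current value: %f\n", var));
@end smallexample
@end cartouche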
@node starpu_data_release
@subsection @code{starpu_data_release} -- Release registered data from the application
@deftypefun void starpu_data_release (starpu_data_handle @var{handle})
This function releases the piece of data acquired by the application either by
@code{starpu_data_acquire} or by @code{starpu_data_acquire_cb}.
@end deftypefun
@node starpu_data_set_wt_mask
@subsection @code{starpu_data_set_wt_mask} -- Set the Write-Through mask
@deftypefun void starpu_data_set_wt_mask (starpu_data_handle @var{handle}, uint32_t @var{wt_mask})
This function sets the write-through mask of a given data handle, i.e. a
bitmask of nodes where the data should always be replicated after modification.
@end deftypefun
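For example, assuming bit @code{i} of the mask stands for memory node @code{i},
the following sketch requests that node 0 (typically main memory) always
receive a copy after each modification:
@cartouche
@smallexample
/* Bit 0 of the mask selects memory node 0. */
starpu_data_set_wt_mask(var_handle, 1 << 0);
@end smallexample
@end cartouche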
@node starpu_data_prefetch_on_node
@subsection @code{starpu_data_prefetch_on_node} -- Prefetch data to a given node
@deftypefun int starpu_data_prefetch_on_node (starpu_data_handle @var{handle}, unsigned @var{node}, unsigned @var{async})
Issue a prefetch request for a given data to a given node, i.e.
requests that the data be replicated to the given node, so that it is available
there for tasks. If the @var{async} parameter is 0, the call blocks until
the transfer is completed; otherwise, the call returns as soon as the request is
scheduled (which may however have to wait for a task completion).
@end deftypefun
@node Data Interfaces
@section Data Interfaces
There are several ways to register a memory region so that it can be
managed by StarPU. The functions below allow the registration of
vectors, 2D matrices, and 3D matrices, as well as BCSR and CSR sparse
matrices.
@deftypefun void starpu_variable_data_register ({starpu_data_handle *}@var{handle}, uint32_t @var{home_node}, uintptr_t @var{ptr}, size_t @var{size})
Register the @var{size}-byte element pointed to by @var{ptr}, which is
typically a scalar, and initialize @var{handle} to represent this data
item.
@smallexample
float var;
starpu_data_handle var_handle;
starpu_variable_data_register(&var_handle, 0, (uintptr_t)&var, sizeof(var));
@end smallexample
@end deftypefun
@deftypefun void starpu_vector_data_register ({starpu_data_handle *}@var{handle}, uint32_t @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{count}, size_t @var{size})
Register the @var{count} @var{size}-byte elements pointed to by
@var{ptr} and initialize @var{handle} to represent it.
@example
float vector[NX];
starpu_data_handle vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end example
@end deftypefun
@deftypefun void starpu_matrix_data_register ({starpu_data_handle *}@var{handle}, uint32_t @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{ld}, uint32_t @var{nx}, uint32_t @var{ny}, size_t @var{size})
Register the @var{nx}x@var{ny} 2D matrix of @var{size}-byte elements
pointed by @var{ptr} and initialize @var{handle} to represent it.
@var{ld} specifies the leading dimension, i.e. the number of elements
between the beginnings of two consecutive rows; a value greater than
@var{nx} adds padding, which can be useful for alignment purposes.
@example
float *matrix;
starpu_data_handle matrix_handle;
matrix = (float*)malloc(width * height * sizeof(float));
starpu_matrix_data_register(&matrix_handle, 0, (uintptr_t)matrix,
                            width, width, height, sizeof(float));
@end example
@end deftypefun
@deftypefun void starpu_block_data_register ({starpu_data_handle *}@var{handle}, uint32_t @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{ldy}, uint32_t @var{ldz}, uint32_t @var{nx}, uint32_t @var{ny}, uint32_t @var{nz}, size_t @var{size})
Register the @var{nx}x@var{ny}x@var{nz} 3D matrix of @var{size}-byte
elements pointed by @var{ptr} and initialize @var{handle} to represent
it. Similarly, @var{ldy} specifies the number of elements between the
beginnings of two consecutive rows, and @var{ldz} the number of elements
between the beginnings of two consecutive z-planes.
@example
float *block;
starpu_data_handle block_handle;
block = (float*)malloc(nx*ny*nz*sizeof(float));
starpu_block_data_register(&block_handle, 0, (uintptr_t)block,
                           nx, nx*ny, nx, ny, nz, sizeof(float));
@end example
@end deftypefun
@deftypefun void starpu_bcsr_data_register (starpu_data_handle *@var{handle}, uint32_t @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, uint32_t @var{r}, uint32_t @var{c}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the BCSR (Blocked
Compressed Sparse Row Representation) sparse matrix interface.
TODO
@end deftypefun
@deftypefun void starpu_csr_data_register (starpu_data_handle *@var{handle}, uint32_t @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the CSR (Compressed
Sparse Row Representation) sparse matrix interface.
TODO
@end deftypefun
@node Data Partition
@section Data Partition
@menu
* struct starpu_data_filter:: StarPU filter structure
* starpu_data_partition:: Partition Data
* starpu_data_unpartition:: Unpartition Data
* starpu_data_get_nb_children::
* starpu_data_get_sub_data::
* Predefined filter functions::
@end menu
@node struct starpu_data_filter
@subsection @code{struct starpu_data_filter} -- StarPU filter structure
@table @asis
@item @emph{Description}:
The filter structure describes a data partitioning operation, to be given to the
@code{starpu_data_partition} function; see @ref{starpu_data_partition} for an example.
@item @emph{Fields}:
@table @asis
@item @code{filter_func}:
This function fills the @code{child_interface} structure with interface
information for the @code{id}-th child of the parent @code{father_interface} (among @code{nparts}).
@code{void (*filter_func)(void *father_interface, void* child_interface, struct starpu_data_filter *, unsigned id, unsigned nparts);}
@item @code{nchildren}:
This is the number of parts to partition the data into.
@item @code{get_nchildren}:
This returns the number of children. It can be used instead of @code{nchildren} when the number of
children depends on the actual data (e.g. the number of blocks in a sparse
matrix).
@code{unsigned (*get_nchildren)(struct starpu_data_filter *, starpu_data_handle initial_handle);}
@item @code{get_child_ops}:
In case the resulting children use a different data interface, this function
returns which interface is used by child number @code{id}.
@code{struct starpu_data_interface_ops_t *(*get_child_ops)(struct starpu_data_filter *, unsigned id);}
@item @code{filter_arg}:
Some filters take an additional parameter, but this is usually unused.
@item @code{filter_arg_ptr}:
Some filters take an additional array parameter such as the sizes of the parts, but
this is usually unused.
@end table
@end table
@node starpu_data_partition
@subsection starpu_data_partition -- Partition Data
@table @asis
@item @emph{Description}:
This requests partitioning of one StarPU data @code{initial_handle} into
several subdata according to the filter @code{f}.
@item @emph{Prototype}:
@code{void starpu_data_partition(starpu_data_handle initial_handle, struct starpu_data_filter *f);}
@item @emph{Example}:
@cartouche
@smallexample
struct starpu_data_filter f = @{
    .filter_func = starpu_vertical_block_filter_func,
    .nchildren = nslicesx,
    .get_nchildren = NULL,
    .get_child_ops = NULL
@};
starpu_data_partition(A_handle, &f);
@end smallexample
@end cartouche
@end table
@node starpu_data_unpartition
@subsection starpu_data_unpartition -- Unpartition data
@table @asis
@item @emph{Description}:
This unapplies one filter, thus unpartitioning the data. The pieces of data are
collected back into one big piece in the @code{gathering_node} (usually 0).
@item @emph{Prototype}:
@code{void starpu_data_unpartition(starpu_data_handle root_data, uint32_t gathering_node);}
@item @emph{Example}:
@cartouche
@smallexample
starpu_data_unpartition(A_handle, 0);
@end smallexample
@end cartouche
@end table
@node starpu_data_get_nb_children
@subsection starpu_data_get_nb_children
@table @asis
@item @emph{Description}:
This function returns the number of children of a partitioned data handle.
@item @emph{Return value}:
The number of children.
@item @emph{Prototype}:
@code{int starpu_data_get_nb_children(starpu_data_handle handle);}
@end table
@c starpu_data_handle starpu_data_get_child(starpu_data_handle handle, unsigned i);
@node starpu_data_get_sub_data
@subsection starpu_data_get_sub_data
@table @asis
@item @emph{Description}:
After partitioning a StarPU data by applying a filter,
@code{starpu_data_get_sub_data} can be used to get handles for each of the data
portions. @code{root_data} is the parent data that was partitioned. @code{depth}
is the number of filters to traverse (in case several filters have been applied,
to e.g. partition in row blocks, and then in column blocks), and the subsequent
parameters are the indexes.
@item @emph{Return value}:
A handle to the subdata.
@item @emph{Prototype}:
@code{starpu_data_handle starpu_data_get_sub_data(starpu_data_handle root_data, unsigned depth, ... );}
@item @emph{Example}:
@cartouche
@smallexample
h = starpu_data_get_sub_data(A_handle, 1, taskx);
@end smallexample
@end cartouche
@end table
@node Predefined filter functions
@subsection Predefined filter functions
@menu
* Partitioning BCSR Data::
* Partitioning BLAS interface::
* Partitioning Vector Data::
* Partitioning Block Data::
@end menu
This section gives a partial list of the predefined partitioning functions.
Examples on how to use them are shown in @ref{Partitioning Data}. The complete
list can be found in @code{starpu_data_filters.h}.
@node Partitioning BCSR Data
@subsubsection Partitioning BCSR Data
@deftypefun void starpu_canonical_block_filter_bcsr (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
TODO
@end deftypefun
@deftypefun void starpu_vertical_block_filter_func_csr (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
TODO
@end deftypefun
@node Partitioning BLAS interface
@subsubsection Partitioning BLAS interface
@deftypefun void starpu_block_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix into horizontal blocks.
@end deftypefun
@deftypefun void starpu_vertical_block_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix into vertical blocks.
@end deftypefun
@node Partitioning Vector Data
@subsubsection Partitioning Vector Data
@deftypefun void starpu_block_filter_func_vector (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into blocks of the same size.
@end deftypefun
@deftypefun void starpu_vector_list_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into blocks of sizes given in the @var{filter_arg_ptr}
field of @var{f}, which is assumed to point to a @code{uint32_t} array.
@end deftypefun
@deftypefun void starpu_vector_divide_in_2_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into two blocks, the first block size being given in
the @var{filter_arg} field of @var{f}.
@end deftypefun
@node Partitioning Block Data
@subsubsection Partitioning Block Data
@deftypefun void starpu_block_filter_func_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a 3D matrix along the X axis.
@end deftypefun
@node Codelets and Tasks
@section Codelets and Tasks
This section describes the interface to manipulate codelets and tasks.
@deftp {Data Type} {struct starpu_codelet}
The codelet structure describes a kernel that is possibly implemented on various
targets. For compatibility, make sure to initialize the whole structure to zero.
@table @asis
@item @code{where}
Indicates which types of processing units are able to execute the codelet.
@code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
implemented for both CPU cores and CUDA devices, while @code{STARPU_GORDON}
indicates that it is only available on Cell SPUs.
@item @code{cpu_func} (optional)
Is a function pointer to the CPU implementation of the codelet. Its prototype
must be: @code{void cpu_func(void *buffers[], void *cl_arg)}. The first
argument is the array of data managed by the data management library, and the
second argument is a pointer to the argument passed through the @code{cl_arg}
field of the @code{starpu_task} structure.
The @code{cpu_func} field is ignored if @code{STARPU_CPU} does not appear in
the @code{where} field; it must be non-null otherwise.
@item @code{cuda_func} (optional)
Is a function pointer to the CUDA implementation of the codelet. @emph{This
must be a host-function written in the CUDA runtime API}. Its prototype must
be: @code{void cuda_func(void *buffers[], void *cl_arg);}. The @code{cuda_func}
field is ignored if @code{STARPU_CUDA} does not appear in the @code{where}
field; it must be non-null otherwise.
@item @code{opencl_func} (optional)
Is a function pointer to the OpenCL implementation of the codelet. Its
prototype must be:
@code{void opencl_func(starpu_data_interface_t *descr, void *arg);}.
This pointer is ignored if @code{STARPU_OPENCL} does not appear in the
@code{where} field; it must be non-null otherwise.
@item @code{gordon_func} (optional)
This is the index of the Cell SPU implementation within the Gordon library.
See the Gordon documentation for more details on how to register a kernel and
retrieve its index.
@item @code{nbuffers}
Specifies the number of arguments taken by the codelet. These arguments are
managed by the DSM and are accessed from the @code{void *buffers[]}
array. The constant argument passed with the @code{cl_arg} field of the
@code{starpu_task} structure is not counted in this number. This value should
not be above @code{STARPU_NMAXBUFS}.
@item @code{model} (optional)
This is a pointer to the task duration performance model associated to this
codelet. This optional field is ignored when set to @code{NULL}.
TODO
@item @code{power_model} (optional)
This is a pointer to the task power consumption performance model associated
to this codelet. This optional field is ignored when set to @code{NULL}.
In the case of parallel codelets, this has to account for all processing units
involved in the parallel execution.
TODO
@end table
@end deftp
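As a sketch, a codelet with a single CPU implementation that scales a vector
might be written as follows; the @code{scal_cpu_func} name, the scaling
operation, and the use of the vector accessor macros
@code{STARPU_VECTOR_GET_NX} and @code{STARPU_VECTOR_GET_PTR} are our
assumptions, not part of the structure definition above:
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
    /* buffers[0] is the vector registered by the application. */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    float *v = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    float factor = *(float *)cl_arg;
    unsigned i;

    for (i = 0; i < n; i++)
        v[i] *= factor;
@}

struct starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = scal_cpu_func,
    .nbuffers = 1
@};
@end smallexample
@end cartouche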
@deftp {Data Type} {struct starpu_task}
The @code{starpu_task} structure describes a task that can be offloaded on the various
processing units managed by StarPU. It instantiates a codelet. It can either be
allocated dynamically with the @code{starpu_task_create} method, or declared
statically. In the latter case, the programmer has to zero the
@code{starpu_task} structure and to fill the different fields properly. The
indicated default values correspond to the configuration of a task allocated
with @code{starpu_task_create}.
@table @asis
@item @code{cl}
Is a pointer to the corresponding @code{starpu_codelet} data structure. This
describes where the kernel should be executed, and supplies the appropriate
implementations. When set to @code{NULL}, no code is executed during the task;
such empty tasks can be useful for synchronization purposes.
@item @code{buffers}
Is an array of @code{starpu_buffer_descr_t} structures. It describes the
different pieces of data accessed by the task, and how they should be accessed.
The @code{starpu_buffer_descr_t} structure is composed of two fields: the
@code{handle} field specifies the handle of the piece of data, and the
@code{mode} field is the required access mode (e.g. @code{STARPU_RW}). The number
of entries in this array must be specified in the @code{nbuffers} field of the
@code{starpu_codelet} structure, and should not exceed @code{STARPU_NMAXBUFS}.
If insufficient, this value can be increased with the @code{--enable-maxbuffers}
option when configuring StarPU.
@item @code{cl_arg} (optional; default: @code{NULL})
This pointer is passed to the codelet through the second argument
of the codelet implementation (e.g. @code{cpu_func} or @code{cuda_func}).
In the specific case of the Cell processor, see the @code{cl_arg_size}
argument.
@item @code{cl_arg_size} (optional, Cell-specific)
In the case of the Cell processor, the @code{cl_arg} pointer is not directly
given to the SPU function. A buffer of size @code{cl_arg_size} is allocated on
the SPU. This buffer is then filled with the @code{cl_arg_size} bytes starting
at address @code{cl_arg}. In this case, the argument given to the SPU codelet
is therefore not the @code{cl_arg} pointer, but the address of the buffer in
local store (LS) instead. This field is ignored for CPU, CUDA and OpenCL
codelets, where the @code{cl_arg} pointer is given as such.
@item @code{callback_func} (optional) (default: @code{NULL})
This is a function pointer of prototype @code{void (*f)(void *)} which
specifies a possible callback. If this pointer is non-null, the callback
function is executed @emph{on the host} after the execution of the task. The
callback is passed the value contained in the @code{callback_arg} field. No
callback is executed if the field is set to @code{NULL}.
@item @code{callback_arg} (optional) (default: @code{NULL})
This is the pointer passed to the callback function. This field is ignored if
@code{callback_func} is set to @code{NULL}.
@item @code{use_tag} (optional) (default: @code{0})
If set, this flag indicates that the task should be associated with the tag
contained in the @code{tag_id} field. Tags allow the application to synchronize
with the task and to express task dependencies easily.
@item @code{tag_id}
This field contains the tag associated with the task if the @code{use_tag} field
was set; it is ignored otherwise.
@item @code{synchronous}
If this flag is set, the @code{starpu_task_submit} function is blocking and
returns only when the task has been executed (or if no worker is able to
process the task). Otherwise, @code{starpu_task_submit} returns immediately.
@item @code{priority} (optional) (default: @code{STARPU_DEFAULT_PRIO})
This field indicates a level of priority for the task. This is an integer value
that must be set between the return values of the
@code{starpu_sched_get_min_priority} function for the least important tasks,
and that of the @code{starpu_sched_get_max_priority} function for the most
important tasks (included). The @code{STARPU_MIN_PRIO} and @code{STARPU_MAX_PRIO}
macros are provided for convenience and respectively return the values of
@code{starpu_sched_get_min_priority} and @code{starpu_sched_get_max_priority}.
The default priority is @code{STARPU_DEFAULT_PRIO}, which is always defined as 0
in order to allow static task initialization. Scheduling strategies that take
priorities into account can use this parameter to take better scheduling
decisions, but the scheduling policy may also ignore it.
@item @code{execute_on_a_specific_worker} (default: @code{0})
If this flag is set, StarPU will bypass the scheduler and directly assign this
task to the worker specified by the @code{workerid} field.
@item @code{workerid} (optional)
If the @code{execute_on_a_specific_worker} field is set, this field indicates
the identifier of the worker that should process this task (as
returned by @code{starpu_worker_get_id}). This field is ignored if the
@code{execute_on_a_specific_worker} field is set to 0.
@item @code{detach} (optional) (default: @code{1})
If this flag is set, it is not possible to synchronize with the task
by means of @code{starpu_task_wait} later on. If the flag is not set, internal
data structures are only guaranteed to be freed once @code{starpu_task_wait}
is called.
@item @code{destroy} (optional) (default: @code{1})
If this flag is set, the task structure will automatically be freed, either
after the execution of the callback if the task is detached, or during
@code{starpu_task_wait} otherwise. If this flag is not set, dynamically
allocated data structures will not be freed until @code{starpu_task_destroy} is
called explicitly. Setting this flag for a statically allocated task structure
will result in undefined behaviour.
@item @code{predicted} (output field)
Predicted duration of the task. This field is only set if the scheduling
strategy uses performance models.
@end table
@end deftp
@deftypefun void starpu_task_init ({struct starpu_task} *@var{task})
Initialize @var{task} with default values. This function is implicitly
called by @code{starpu_task_create}. By default, tasks initialized with
@code{starpu_task_init} must be deinitialized explicitly with
@code{starpu_task_deinit}. Tasks can also be initialized statically, using the
constant @code{STARPU_TASK_INITIALIZER}.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_create (void)
Allocate a task structure and initialize it with default values. Tasks
allocated dynamically with @code{starpu_task_create} are automatically freed when the
task is terminated. If the @code{destroy} flag is explicitly unset, the
resources used by the task have to be freed by calling
@code{starpu_task_destroy} explicitly.
@end deftypefun
@deftypefun void starpu_task_deinit ({struct starpu_task} *@var{task})
Release all the structures automatically allocated to execute @var{task}. This is
called automatically by @code{starpu_task_destroy}, but the task structure itself
is not freed. This should be used for statically allocated tasks for instance.
@end deftypefun
@deftypefun void starpu_task_destroy ({struct starpu_task} *@var{task})
Free the resources allocated during @code{starpu_task_create} and
associated with @var{task}. This function is called automatically
after the execution of a task when the @code{destroy} flag of the
@code{starpu_task} structure is set (default behaviour). Calling this function
on a statically allocated task results in undefined behaviour.
@end deftypefun
@deftypefun int starpu_task_wait ({struct starpu_task} *@var{task})
This function blocks until @var{task} has been executed. It is not possible to
synchronize with a task more than once. It is not possible to wait for
synchronous or detached tasks.
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the specified task was either synchronous or detached.
@end deftypefun
@deftypefun int starpu_task_submit ({struct starpu_task} *@var{task})
This function submits @var{task} to StarPU. Calling this function does
not mean that the task will be executed immediately, as there can be data or task
(tag) dependencies that are not fulfilled yet: StarPU will take care of
scheduling this task with respect to such dependencies.
This function returns immediately if the @code{synchronous} field of the
@code{starpu_task} structure is set to 0, and blocks until the termination of
the task otherwise. It is also possible to synchronize the application with
asynchronous tasks by means of tags, using the @code{starpu_tag_wait}
function for instance.
In case of success, this function returns 0; a return value of @code{-ENODEV}
means that there is no worker able to process this task (e.g. there is no GPU
available and this task is only implemented for CUDA devices).
@end deftypefun
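Putting the pieces together, here is a sketch of the usual create/submit
sequence, reusing the hypothetical @code{cl} codelet from the earlier sketch
and a registered @code{vector_handle}; note the @code{-ENODEV} check:
@cartouche
@smallexample
float factor = 3.14;
struct starpu_task *task = starpu_task_create();

task->cl = &cl;
task->buffers[0].handle = vector_handle;
task->buffers[0].mode = STARPU_RW;
task->cl_arg = &factor;
task->synchronous = 1;

int ret = starpu_task_submit(task);
if (ret == -ENODEV)
    fprintf(stderr, "no worker can execute this codelet\n");
@end smallexample
@end cartouche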
@deftypefun int starpu_task_wait_for_all (void)
This function blocks until all the tasks that were submitted are terminated.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_get_current_task (void)
This function returns the task currently executed by the worker, or
@code{NULL} if it is called from a thread that is not a worker or if no task
is being executed at the moment.
@end deftypefun
@deftypefun void starpu_display_codelet_stats ({struct starpu_codelet_t} *@var{cl})
Output on @code{stderr} some statistics on the codelet @var{cl}.
@end deftypefun
@c Callbacks: what can we put in callbacks?
@node Explicit Dependencies
@section Explicit Dependencies
@menu
* starpu_task_declare_deps_array:: starpu_task_declare_deps_array
* starpu_tag_t:: Task logical identifier
* starpu_tag_declare_deps:: Declare the Dependencies of a Tag
* starpu_tag_declare_deps_array:: Declare the Dependencies of a Tag
* starpu_tag_wait:: Block until a Tag is terminated
* starpu_tag_wait_array:: Block until a set of Tags is terminated
* starpu_tag_remove:: Destroy a Tag
* starpu_tag_notify_from_apps:: Feed a tag explicitly
@end menu
@node starpu_task_declare_deps_array
@subsection @code{starpu_task_declare_deps_array} -- Declare task dependencies
@deftypefun void starpu_task_declare_deps_array ({struct starpu_task} *@var{task}, unsigned @var{ndeps}, {struct starpu_task} *@var{task_array}[])
Declare task dependencies between a @var{task} and an array of tasks of length
@var{ndeps}. This function must be called prior to the submission of the task,
but it may be called after the submission or the execution of the tasks in the
array, provided the tasks are still valid (i.e. they were not automatically
destroyed). Calling this function on a task that was already submitted or with
an entry of @var{task_array} that is not a valid task anymore results in
undefined behaviour. If @var{ndeps} is 0, no dependency is added. It is
possible to call @code{starpu_task_declare_deps_array} multiple times on the
same task; in this case, the dependencies are added. It is possible to have
redundancy in the task dependencies.
@end deftypefun
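A sketch chaining two tasks (the variable names are ours):
@cartouche
@smallexample
struct starpu_task *producer = starpu_task_create();
struct starpu_task *consumer = starpu_task_create();
/* ... fill in the cl and buffers fields of both tasks ... */

/* consumer will not start before producer has completed. */
struct starpu_task *deps[] = @{ producer @};
starpu_task_declare_deps_array(consumer, 1, deps);

starpu_task_submit(producer);
starpu_task_submit(consumer);
@end smallexample
@end cartouche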
@node starpu_tag_t
@subsection @code{starpu_tag_t} -- Task logical identifier
@table @asis
@item @emph{Description}:
It is possible to associate a task with a unique ``tag'' chosen by the application, and to express
dependencies between tasks by means of those tags. To do so, fill the
@code{tag_id} field of the @code{starpu_task} structure with a tag number (which can
be arbitrary) and set the @code{use_tag} field to 1.
If @code{starpu_tag_declare_deps} is called with this tag number, the task will
not be started until the tasks which hold the declared dependency tags are
completed.
@end table
@node starpu_tag_declare_deps
@subsection @code{starpu_tag_declare_deps} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
Specify the dependencies of the task identified by tag @code{id}. The first
argument specifies the tag which is configured, the second argument gives the
number of tag(s) on which @code{id} depends. The following arguments are the
tags which have to be terminated to unlock the task.
This function must be called before the associated task is submitted to StarPU
with @code{starpu_task_submit}.
@item @emph{Remark}
Because of the variable arity of @code{starpu_tag_declare_deps}, note that the
last arguments @emph{must} be of type @code{starpu_tag_t}: constant values
typically need to be explicitly cast. Using the
@code{starpu_tag_declare_deps_array} function avoids this hazard.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps(starpu_tag_t id, unsigned ndeps, ...);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_declare_deps((starpu_tag_t)0x1,
                        2, (starpu_tag_t)0x32, (starpu_tag_t)0x52);
@end example
@end cartouche
@end table
@node starpu_tag_declare_deps_array
@subsection @code{starpu_tag_declare_deps_array} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
This function is similar to @code{starpu_tag_declare_deps}, except that it
does not take a variable number of arguments but an array of tags of size
@code{ndeps}.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps_array(starpu_tag_t id, unsigned ndeps, starpu_tag_t *array);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_t tag_array[2] = @{0x32, 0x52@};
starpu_tag_declare_deps_array((starpu_tag_t)0x1, 2, tag_array);
@end example
@end cartouche
@end table
@node starpu_tag_wait
@subsection @code{starpu_tag_wait} -- Block until a Tag is terminated
@deftypefun void starpu_tag_wait (starpu_tag_t @var{id})
This function blocks until the task associated to tag @var{id} has been
executed. This is a blocking call which must therefore not be called within
tasks or callbacks, but only from the application directly. It is possible to
synchronize with the same tag multiple times, as long as the
@code{starpu_tag_remove} function is not called. Note that it is still
possible to synchronize with a tag associated to a task whose @code{starpu_task}
data structure was freed (e.g. if the @code{destroy} flag of the
@code{starpu_task} was enabled).
@end deftypefun
@node starpu_tag_wait_array
@subsection @code{starpu_tag_wait_array} -- Block until a set of Tags is terminated
@deftypefun void starpu_tag_wait_array (unsigned @var{ntags}, starpu_tag_t *@var{id})
This function is similar to @code{starpu_tag_wait} except that it blocks until
@emph{all} the @var{ntags} tags contained in the @var{id} array are
terminated.
@end deftypefun
@node starpu_tag_remove
@subsection @code{starpu_tag_remove} -- Destroy a Tag
@deftypefun void starpu_tag_remove (starpu_tag_t @var{id})
This function releases the resources associated to tag @var{id}. It can be
called once the corresponding task has been executed and when there is
no other tag that depends on this tag anymore.
@end deftypefun
@node starpu_tag_notify_from_apps
@subsection @code{starpu_tag_notify_from_apps} -- Feed a Tag explicitly
@deftypefun void starpu_tag_notify_from_apps (starpu_tag_t @var{id})
This function explicitly unlocks tag @var{id}. It may be useful in the
case of applications which execute part of their computation outside StarPU
tasks (e.g. third-party libraries). It is also provided as a
convenient tool for the programmer, for instance to entirely construct the task
DAG before actually giving StarPU the opportunity to execute the tasks.
@end deftypefun
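As an illustration, here is a sketch in which a task depends on a tag that the
application feeds by hand once some external computation has completed (the
tag numbers are arbitrary):
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
/* ... fill in the cl and buffers fields ... */
task->use_tag = 1;
task->tag_id = (starpu_tag_t)0x1;

/* Tag 0x1 (and thus the task) waits for tag 0x42. */
starpu_tag_declare_deps((starpu_tag_t)0x1, 1, (starpu_tag_t)0x42);
starpu_task_submit(task);

/* ... computation performed outside StarPU ... */
starpu_tag_notify_from_apps((starpu_tag_t)0x42);
@end smallexample
@end cartouche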
@node Implicit Data Dependencies
@section Implicit Data Dependencies
@menu
* starpu_data_set_default_sequential_consistency_flag:: starpu_data_set_default_sequential_consistency_flag
* starpu_data_get_default_sequential_consistency_flag:: starpu_data_get_default_sequential_consistency_flag
* starpu_data_set_sequential_consistency_flag:: starpu_data_set_sequential_consistency_flag
@end menu
In this section, we describe how StarPU makes it possible to insert implicit
task dependencies in order to enforce sequential data consistency. When this
data consistency is enabled on a specific data handle, any data access will
appear as sequentially consistent from the application. For instance, if the
application submits two tasks that access the same piece of data in read-only
mode, and then a third task that accesses it in write mode, dependencies will be
added between the first two tasks and the third one. Implicit data dependencies
are also inserted in the case of data accesses from the application.
@node starpu_data_set_default_sequential_consistency_flag
@subsection @code{starpu_data_set_default_sequential_consistency_flag} -- Set default sequential consistency flag
@deftypefun void starpu_data_set_default_sequential_consistency_flag (unsigned @var{flag})
Set the default sequential consistency flag. If a non-zero value is passed,
sequential data consistency will be enforced for all handles registered after
this function call; otherwise it is disabled. By default, StarPU enables
sequential data consistency. It is also possible to select the data consistency
mode of a specific data handle with the
@code{starpu_data_set_sequential_consistency_flag} function.
@end deftypefun
@node starpu_data_get_default_sequential_consistency_flag
@subsection @code{starpu_data_get_default_sequential_consistency_flag} -- Get current default sequential consistency flag
@deftypefun unsigned starpu_data_get_default_sequential_consistency_flag (void)
This function returns the current default sequential consistency flag.
@end deftypefun
@node starpu_data_set_sequential_consistency_flag
@subsection @code{starpu_data_set_sequential_consistency_flag} -- Set data sequential consistency mode
@deftypefun void starpu_data_set_sequential_consistency_flag (starpu_data_handle @var{handle}, unsigned @var{flag})
Select the data consistency mode associated to a data handle. The consistency
mode set using this function has priority over the default mode, which can
be set with @code{starpu_data_set_default_sequential_consistency_flag}.
@end deftypefun
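For instance, a sketch disabling implicit dependencies on one handle (named
@code{scratch_handle} here) while keeping the default behaviour for all other
handles:
@cartouche
@smallexample
/* Dependencies on scratch_handle now have to be expressed explicitly. */
starpu_data_set_sequential_consistency_flag(scratch_handle, 0);
@end smallexample
@end cartouche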
@node Performance Model API
@section Performance Model API
@menu
* starpu_load_history_debug::
* starpu_perfmodel_debugfilepath::
* starpu_perfmodel_get_arch_name::
* starpu_force_bus_sampling::
@end menu
@node starpu_load_history_debug
@subsection @code{starpu_load_history_debug}
@deftypefun int starpu_load_history_debug ({const char} *@var{symbol}, {struct starpu_perfmodel_t} *@var{model})
TODO
@end deftypefun
@node starpu_perfmodel_debugfilepath
@subsection @code{starpu_perfmodel_debugfilepath}
@deftypefun void starpu_perfmodel_debugfilepath ({struct starpu_perfmodel_t} *@var{model}, {enum starpu_perf_archtype} @var{arch}, char *@var{path}, size_t @var{maxlen})
TODO
@end deftypefun
@node starpu_perfmodel_get_arch_name
@subsection @code{starpu_perfmodel_get_arch_name}
@deftypefun void starpu_perfmodel_get_arch_name ({enum starpu_perf_archtype} @var{arch}, char *@var{archname}, size_t @var{maxlen})
TODO
@end deftypefun
@node starpu_force_bus_sampling
@subsection @code{starpu_force_bus_sampling}
@deftypefun void starpu_force_bus_sampling (void)
This forces sampling of the bus performance model again.
@end deftypefun
@node Profiling API
@section Profiling API
@menu
* starpu_profiling_status_set:: starpu_profiling_status_set
* starpu_profiling_status_get:: starpu_profiling_status_get
* struct starpu_task_profiling_info:: task profiling information
* struct starpu_worker_profiling_info:: worker profiling information
* starpu_worker_get_profiling_info:: starpu_worker_get_profiling_info
* struct starpu_bus_profiling_info:: bus profiling information
* starpu_bus_get_count::
* starpu_bus_get_id::
* starpu_bus_get_src::
* starpu_bus_get_dst::
* starpu_timing_timespec_delay_us::
* starpu_timing_timespec_to_us::
* starpu_bus_profiling_helper_display_summary::
* starpu_worker_profiling_helper_display_summary::
@end menu
@node starpu_profiling_status_set
@subsection @code{starpu_profiling_status_set} -- Set current profiling status
@table @asis
@item @emph{Description}:
This function sets the profiling status. Profiling is activated by passing
@code{STARPU_PROFILING_ENABLE} in @code{status}. Passing
@code{STARPU_PROFILING_DISABLE} disables profiling. Calling this function
resets all profiling measurements. When profiling is enabled, the
@code{profiling_info} field of the @code{struct starpu_task} structure points
to a valid @code{struct starpu_task_profiling_info} structure containing
information about the execution of the task.
@item @emph{Return value}:
Negative return values indicate an error; otherwise the previous status is
returned.
@item @emph{Prototype}:
@code{int starpu_profiling_status_set(int status);}
@end table
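As a sketch, enabling profiling and reading the timing of a task afterwards;
we assume the task was submitted synchronously with its @code{destroy} flag
unset, so that the structure is still valid here, and we use the
@code{starpu_timing_timespec_delay_us} helper described below:
@cartouche
@smallexample
starpu_profiling_status_set(STARPU_PROFILING_ENABLE);

/* ... create and submit a synchronous task with task->destroy = 0 ... */

struct starpu_task_profiling_info *info = task->profiling_info;
double length = starpu_timing_timespec_delay_us(&info->start_time,
                                                &info->end_time);
fprintf(stderr, "task took %f us on worker %d\n", length, info->workerid);
@end smallexample
@end cartouche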
@node starpu_profiling_status_get
@subsection @code{starpu_profiling_status_get} -- Get current profiling status
@deftypefun int starpu_profiling_status_get (void)
Return the current profiling status or a negative value in case there was an error.
@end deftypefun
@node struct starpu_task_profiling_info
@subsection @code{struct starpu_task_profiling_info} -- Task profiling information
@table @asis
@item @emph{Description}:
This structure contains information about the execution of a task. It is
accessible from the @code{.profiling_info} field of the @code{starpu_task}
structure if profiling was enabled.
@item @emph{Fields}:
@table @asis
@item @code{submit_time}:
Date of task submission (relative to the initialization of StarPU).
@item @code{start_time}:
Date of task execution beginning (relative to the initialization of StarPU).
@item @code{end_time}:
Date of task execution termination (relative to the initialization of StarPU).
@item @code{workerid}:
Identifier of the worker which has executed the task.
@end table
@end table
@node struct starpu_worker_profiling_info
@subsection @code{struct starpu_worker_profiling_info} -- Worker profiling information
@table @asis
@item @emph{Description}:
This structure contains the profiling information associated to a worker.
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
Starting date for the reported profiling measurements.
@item @code{total_time}:
Duration of the profiling measurement interval.
@item @code{executing_time}:
Time spent by the worker to execute tasks during the profiling measurement interval.
@item @code{sleeping_time}:
Time spent idling by the worker during the profiling measurement interval.
@item @code{executed_tasks}:
Number of tasks executed by the worker during the profiling measurement interval.
@end table
@end table
@node starpu_worker_get_profiling_info
@subsection @code{starpu_worker_get_profiling_info} -- Get worker profiling info
@table @asis
@item @emph{Description}:
Get the profiling info associated to the worker identified by @code{workerid},
and reset the profiling measurements. If the @code{worker_info} argument is
@code{NULL}, only reset the counters associated to worker @code{workerid}.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, a negative
value is returned.
@item @emph{Prototype}:
@code{int starpu_worker_get_profiling_info(int workerid, struct starpu_worker_profiling_info *worker_info);}
@end table
@node struct starpu_bus_profiling_info
@subsection @code{struct starpu_bus_profiling_info} -- Bus profiling information
@table @asis
@item @emph{Description}:
TODO
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
TODO
@item @code{total_time}:
TODO
@item @code{transferred_bytes}:
TODO
@item @code{transfer_count}:
TODO
@end table
@end table
@node starpu_bus_get_count
@subsection @code{starpu_bus_get_count}
@deftypefun int starpu_bus_get_count (void)
TODO
@end deftypefun
@node starpu_bus_get_id
@subsection @code{starpu_bus_get_id}
@deftypefun int starpu_bus_get_id (int @var{src}, int @var{dst})
TODO
@end deftypefun
@node starpu_bus_get_src
@subsection @code{starpu_bus_get_src}
@deftypefun int starpu_bus_get_src (int @var{busid})
TODO
@end deftypefun
@node starpu_bus_get_dst
@subsection @code{starpu_bus_get_dst}
@deftypefun int starpu_bus_get_dst (int @var{busid})
TODO
@end deftypefun
@node starpu_timing_timespec_delay_us
@subsection @code{starpu_timing_timespec_delay_us}
@deftypefun double starpu_timing_timespec_delay_us ({struct timespec} *@var{start}, {struct timespec} *@var{end})
TODO
@end deftypefun
@node starpu_timing_timespec_to_us
@subsection @code{starpu_timing_timespec_to_us}
@deftypefun double starpu_timing_timespec_to_us ({struct timespec} *@var{ts})
TODO
@end deftypefun
@node starpu_bus_profiling_helper_display_summary
@subsection @code{starpu_bus_profiling_helper_display_summary}
@deftypefun void starpu_bus_profiling_helper_display_summary (void)
TODO
@end deftypefun
@node starpu_worker_profiling_helper_display_summary
@subsection @code{starpu_worker_profiling_helper_display_summary}
@deftypefun void starpu_worker_profiling_helper_display_summary (void)
TODO
@end deftypefun
@node CUDA extensions
@section CUDA extensions

@c void starpu_malloc(float **A, size_t dim);

@menu
* starpu_cuda_get_local_stream:: Get current worker's CUDA stream
* starpu_helper_cublas_init:: Initialize CUBLAS on every CUDA device
* starpu_helper_cublas_shutdown:: Deinitialize CUBLAS on every CUDA device
@end menu

@node starpu_cuda_get_local_stream
@subsection @code{starpu_cuda_get_local_stream} -- Get current worker's CUDA stream
@deftypefun {cudaStream_t *} starpu_cuda_get_local_stream (void)
StarPU provides a stream for every CUDA device controlled by StarPU. This
function is only provided for convenience so that programmers can easily use
asynchronous operations within codelets without having to create a stream by
hand. Note that the application is not forced to use the stream provided by
@code{starpu_cuda_get_local_stream} and may also create its own streams.
Synchronizing with @code{cudaThreadSynchronize()} is allowed, but will reduce
the likelihood of having all transfers overlapped.
@end deftypefun
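
As an illustration, a CUDA codelet implementation may launch its kernel
on the worker's stream and then wait for that stream only, so that the
transfers of other workers can still proceed. This sketch is not part
of the StarPU distribution and the @code{scal_kernel} CUDA kernel is
hypothetical:

@cartouche
@smallexample
/* Hedged sketch of a CUDA codelet launching its kernel on the
 * worker's stream; scal_kernel is assumed to be defined elsewhere. */
extern "C" void scal_cuda_func(void *buffers[], void *cl_arg)
@{
    float factor = *(float *) cl_arg;
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    float *vector = (float *) STARPU_VECTOR_GET_PTR(buffers[0]);

    /* Launch asynchronously on the stream provided by StarPU ... */
    scal_kernel<<<(n + 255) / 256, 256, 0, *starpu_cuda_get_local_stream()>>>(n, vector, factor);
    /* ... and wait for this stream only, rather than the whole device. */
    cudaStreamSynchronize(*starpu_cuda_get_local_stream());
@}
@end smallexample
@end cartouche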
@node starpu_helper_cublas_init
@subsection @code{starpu_helper_cublas_init} -- Initialize CUBLAS on every CUDA device
@deftypefun void starpu_helper_cublas_init (void)
The CUBLAS library must be initialized prior to any CUBLAS call. Calling
@code{starpu_helper_cublas_init} will initialize CUBLAS on every CUDA device
controlled by StarPU. This call blocks until CUBLAS has been properly
initialized on every device.
@end deftypefun

@node starpu_helper_cublas_shutdown
@subsection @code{starpu_helper_cublas_shutdown} -- Deinitialize CUBLAS on every CUDA device
@deftypefun void starpu_helper_cublas_shutdown (void)
This function synchronously deinitializes the CUBLAS library on every CUDA device.
@end deftypefun
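
A typical usage pattern (shown here as a minimal sketch) is to bracket
the whole task submission phase between the two helpers:

@cartouche
@smallexample
starpu_init(NULL);
starpu_helper_cublas_init();

/* ... submit tasks whose CUDA implementations call CUBLAS ... */

starpu_helper_cublas_shutdown();
starpu_shutdown();
@end smallexample
@end cartouche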
@node OpenCL extensions
@section OpenCL extensions

@menu
* Compiling OpenCL kernels:: Compiling OpenCL kernels
* Loading OpenCL kernels:: Loading OpenCL kernels
* OpenCL statistics:: Collecting statistics from OpenCL
@end menu

@node Compiling OpenCL kernels
@subsection Compiling OpenCL kernels
Source code for OpenCL kernels can be stored in a file or in a
string. StarPU provides functions to build the program executable for
each available OpenCL device as a @code{cl_program} object. This
program executable can then be loaded within a specific queue as
explained in the next section. These functions are only helpers;
applications can also fill a @code{starpu_opencl_program} array by hand
for more advanced uses (e.g. different programs on the different OpenCL
devices, for relocation purposes).
@menu
* starpu_opencl_load_opencl_from_file:: Compiling OpenCL source code
* starpu_opencl_load_opencl_from_string:: Compiling OpenCL source code
* starpu_opencl_unload_opencl:: Releasing OpenCL code
@end menu

@node starpu_opencl_load_opencl_from_file
@subsubsection @code{starpu_opencl_load_opencl_from_file} -- Compiling OpenCL source code
@deftypefun int starpu_opencl_load_opencl_from_file (char *@var{source_file_name}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
TODO
@end deftypefun

@node starpu_opencl_load_opencl_from_string
@subsubsection @code{starpu_opencl_load_opencl_from_string} -- Compiling OpenCL source code
@deftypefun int starpu_opencl_load_opencl_from_string (char *@var{opencl_program_source}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
TODO
@end deftypefun

@node starpu_opencl_unload_opencl
@subsubsection @code{starpu_opencl_unload_opencl} -- Releasing OpenCL code
@deftypefun int starpu_opencl_unload_opencl ({struct starpu_opencl_program} *@var{opencl_programs})
TODO
@end deftypefun
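
Pending the detailed documentation of these functions, here is a
possible usage sketch; the kernel file name is hypothetical, and passing
@code{NULL} build options is assumed to be accepted:

@cartouche
@smallexample
/* Hedged sketch: compile an OpenCL source file for every device at
 * initialization time, and release it at termination. */
struct starpu_opencl_program programs;

starpu_opencl_load_opencl_from_file("vector_scal_opencl_kernel.cl",
                                    &programs, NULL);

/* ... submit tasks whose OpenCL implementations use this program ... */

starpu_opencl_unload_opencl(&programs);
@end smallexample
@end cartouche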
@node Loading OpenCL kernels
@subsection Loading OpenCL kernels

@menu
* starpu_opencl_load_kernel:: Loading a kernel
* starpu_opencl_release_kernel:: Releasing a kernel
@end menu

@node starpu_opencl_load_kernel
@subsubsection @code{starpu_opencl_load_kernel} -- Loading a kernel
@deftypefun int starpu_opencl_load_kernel (cl_kernel *@var{kernel}, cl_command_queue *@var{queue}, {struct starpu_opencl_program} *@var{opencl_programs}, char *@var{kernel_name}, int @var{devid})
TODO
@end deftypefun

@node starpu_opencl_release_kernel
@subsubsection @code{starpu_opencl_release_kernel} -- Releasing a kernel
@deftypefun int starpu_opencl_release_kernel (cl_kernel @var{kernel})
TODO
@end deftypefun
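
As a hedged sketch (error checking omitted, kernel name hypothetical),
an OpenCL codelet may retrieve a kernel from the @code{programs}
structure compiled in the previous section, enqueue it on the queue
associated with the current device, and release it:

@cartouche
@smallexample
void scal_opencl_func(void *buffers[], void *cl_arg)
@{
    cl_kernel kernel;
    cl_command_queue queue;
    cl_event event;
    int devid = starpu_worker_get_devid(starpu_worker_get_id());

    starpu_opencl_load_kernel(&kernel, &queue, &programs,
                              "vector_scal", devid);

    /* ... clSetKernelArg() calls go here ... */

    size_t global = STARPU_VECTOR_GET_NX(buffers[0]);
    clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, NULL,
                           0, NULL, &event);
    clWaitForEvents(1, &event);
    clReleaseEvent(event);

    starpu_opencl_release_kernel(kernel);
@}
@end smallexample
@end cartouche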
@node OpenCL statistics
@subsection OpenCL statistics

@menu
* starpu_opencl_collect_stats:: Collect statistics on a kernel execution
@end menu

@node starpu_opencl_collect_stats
@subsubsection @code{starpu_opencl_collect_stats} -- Collect statistics on a kernel execution
@deftypefun int starpu_opencl_collect_stats (cl_event @var{event})
After termination of the kernels, the OpenCL codelet should call this function
to pass it the event returned by @code{clEnqueueNDRangeKernel}, to let StarPU
collect statistics about the kernel execution (used cycles, consumed power).
@end deftypefun
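
Within the codelet sketched in the previous section, the call would be
placed after the kernel has completed, for instance:

@cartouche
@smallexample
/* Hedged sketch: event is the cl_event returned by
 * clEnqueueNDRangeKernel. */
clWaitForEvents(1, &event);
starpu_opencl_collect_stats(event);
clReleaseEvent(event);
@end smallexample
@end cartouche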
@node Cell extensions
@section Cell extensions
Nothing yet.

@node Miscellaneous helpers
@section Miscellaneous helpers
@menu
* starpu_data_cpy:: Copy a data handle into another data handle
* starpu_execute_on_each_worker:: Execute a function on a subset of workers
@end menu

@node starpu_data_cpy
@subsection @code{starpu_data_cpy} -- Copy a data handle into another data handle
@deftypefun int starpu_data_cpy (starpu_data_handle @var{dst_handle}, starpu_data_handle @var{src_handle}, int @var{asynchronous}, void (*@var{callback_func})(void*), void *@var{callback_arg})
Copy the content of @var{src_handle} into @var{dst_handle}.
The @var{asynchronous} parameter indicates whether the function should
block or not. In the case of an asynchronous call, it is possible to
synchronize with the termination of this operation either by the means of
implicit dependencies (if enabled) or by calling
@code{starpu_task_wait_for_all()}. If @var{callback_func} is not @code{NULL},
this callback function is executed after the handle has been copied, and it is
given the @var{callback_arg} pointer as argument.
@end deftypefun
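
For instance (a minimal sketch, assuming both handles were registered
beforehand with the same data interface):

@cartouche
@smallexample
/* Asynchronously copy src_handle into dst_handle, then wait for the
 * copy (and any other pending task) to complete. */
starpu_data_cpy(dst_handle, src_handle, 1, NULL, NULL);
starpu_task_wait_for_all();
@end smallexample
@end cartouche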
@node starpu_execute_on_each_worker
@subsection @code{starpu_execute_on_each_worker} -- Execute a function on a subset of workers
@deftypefun void starpu_execute_on_each_worker (void (*@var{func})(void *), void *@var{arg}, uint32_t @var{where})
When calling this method, the offloaded function specified by the first argument is
executed by every StarPU worker that may execute the function.
The second argument is passed to the offloaded function.
The last argument specifies on which types of processing units the function
should be executed. Similarly to the @var{where} field of the
@code{starpu_codelet} structure, it is possible to specify that the function
should be executed on every CUDA device and every CPU by passing
@code{STARPU_CPU|STARPU_CUDA}.
This function blocks until the function has been executed on every appropriate
processing unit, and must therefore not be called from a callback function,
for instance.
@end deftypefun
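
A minimal sketch, with a hypothetical per-worker initialization
function:

@cartouche
@smallexample
/* Hedged sketch: run init_worker_state on every CPU and CUDA worker. */
static void init_worker_state(void *arg)
@{
    fprintf(stderr, "worker %d initializing with argument %p\n",
            starpu_worker_get_id(), arg);
@}

starpu_execute_on_each_worker(init_worker_state, NULL,
                              STARPU_CPU|STARPU_CUDA);
@end smallexample
@end cartouche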
@c ---------------------------------------------------------------------
@c Advanced Topics
@c ---------------------------------------------------------------------

@node Advanced Topics
@chapter Advanced Topics

@menu
* Defining a new data interface::
* Defining a new scheduling policy::
@end menu

@node Defining a new data interface
@section Defining a new data interface

@menu
* struct starpu_data_interface_ops_t:: Per-interface methods
* struct starpu_data_copy_methods:: Per-interface data transfer methods
* An example of data interface:: An example of data interface
@end menu

@c void *starpu_data_get_interface_on_node(starpu_data_handle handle, unsigned memory_node); TODO

@node struct starpu_data_interface_ops_t
@subsection @code{struct starpu_data_interface_ops_t} -- Per-interface methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table

@node struct starpu_data_copy_methods
@subsection @code{struct starpu_data_copy_methods} -- Per-interface data transfer methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table

@node An example of data interface
@subsection An example of data interface
@table @asis
TODO
See @code{src/datawizard/interfaces/vector_interface.c} for now.
@end table
@node Defining a new scheduling policy
@section Defining a new scheduling policy
TODO

A full example showing how to define a new scheduling policy is available in
the StarPU sources in the directory @code{examples/scheduler/}.

@menu
* struct starpu_sched_policy_s::
* starpu_worker_set_sched_condition::
* starpu_sched_set_min_priority:: Set the minimum priority level
* starpu_sched_set_max_priority:: Set the maximum priority level
* starpu_push_local_task:: Assign a task to a worker
* Source code::
@end menu
@node struct starpu_sched_policy_s
@subsection @code{struct starpu_sched_policy_s} -- Scheduler methods
@table @asis
@item @emph{Description}:
This structure contains all the methods that implement a scheduling policy. An
application may specify which scheduling strategy to use in the
@code{sched_policy} field of the @code{starpu_conf} structure passed to the
@code{starpu_init} function.
@item @emph{Fields}:
@table @asis
@item @code{init_sched}:
Initialize the scheduling policy.
@item @code{deinit_sched}:
Cleanup the scheduling policy.
@item @code{push_task}:
Insert a task into the scheduler.
@item @code{push_prio_task}:
Insert a priority task into the scheduler.
@item @code{push_prio_notify}:
Notify the scheduler that a task was pushed on the worker. This method is
called when a task that was explicitly assigned to a worker is scheduled. It
makes it possible to keep the state of the scheduler coherent even when StarPU
bypasses the scheduling strategy.
@item @code{pop_task}:
Get a task from the scheduler. The mutex associated to the worker is already
taken when this method is called. If this method is defined as @code{NULL}, the
worker will only execute tasks from its local queue. In this case, the
@code{push_task} method should use the @code{starpu_push_local_task} method to
assign tasks to the different workers.
@item @code{pop_every_task}:
Remove all available tasks from the scheduler (tasks are chained by the means
of the @code{prev} and @code{next} fields of the @code{starpu_task} structure).
The mutex associated to the worker is already taken when this method is called.
@item @code{post_exec_hook} (optional):
This method is called every time a task has been executed.
@item @code{policy_name} (optional):
Name of the policy.
@item @code{policy_description} (optional):
Description of the policy.
@end table
@end table
@node starpu_worker_set_sched_condition
@subsection @code{starpu_worker_set_sched_condition} -- Specify the condition variable associated to a worker
@deftypefun void starpu_worker_set_sched_condition (int @var{workerid}, pthread_cond_t *@var{sched_cond}, pthread_mutex_t *@var{sched_mutex})
When there is no available task for a worker, StarPU blocks this worker on a
condition variable. This function specifies which condition variable (and the
associated mutex) should be used to block (and to wake up) a worker. Note that
multiple workers may use the same condition variable. For instance, in the case
of a scheduling strategy with a single task queue, the same condition variable
would be used to block and wake up all workers.
The initialization method of a scheduling strategy (@code{init_sched}) must
call this function once per worker.
@end deftypefun
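
As a hedged sketch, the initialization method of a strategy with a
single shared task queue could register the same condition variable for
all workers; the prototype of @code{init_sched} shown here follows the
example schedulers and may differ across StarPU versions:

@cartouche
@smallexample
static pthread_cond_t sched_cond;
static pthread_mutex_t sched_mutex;

static void init_dummy_sched(struct starpu_machine_topology_s *topology,
                             struct starpu_sched_policy_s *policy)
@{
    pthread_cond_init(&sched_cond, NULL);
    pthread_mutex_init(&sched_mutex, NULL);

    /* All workers block on, and are woken up through, the same
     * condition variable. */
    unsigned worker;
    for (worker = 0; worker < starpu_worker_get_count(); worker++)
        starpu_worker_set_sched_condition((int)worker,
                                          &sched_cond, &sched_mutex);
@}
@end smallexample
@end cartouche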
@node starpu_sched_set_min_priority
@subsection @code{starpu_sched_set_min_priority}
@deftypefun void starpu_sched_set_min_priority (int @var{min_prio})
Defines the minimum priority level supported by the scheduling policy. The
default minimum priority level is the same as the default priority level, which
is 0 by convention. The application may access that value by calling the
@code{starpu_sched_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly from the application.
@end deftypefun
@node starpu_sched_set_max_priority
@subsection @code{starpu_sched_set_max_priority}
@deftypefun void starpu_sched_set_max_priority (int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly from the application.
@end deftypefun
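
For instance, a policy supporting priorities from -5 to 5 (hypothetical
bounds) would declare them from its initialization method as follows
(minimal sketch):

@cartouche
@smallexample
/* Called from the init_sched method of the policy. */
starpu_sched_set_min_priority(-5);
starpu_sched_set_max_priority(5);
@end smallexample
@end cartouche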
@node starpu_push_local_task
@subsection @code{starpu_push_local_task}
@deftypefun int starpu_push_local_task (int @var{workerid}, {struct starpu_task} *@var{task}, int @var{back})
The scheduling policy may put tasks directly into a worker's local queue, so
that it is not always necessary for the policy to create its own queue when
the local queue is sufficient. If @var{back} is not zero, the task is put at
the back of the queue, where the worker will pop tasks first. Setting
@var{back} to 0 therefore ensures a FIFO ordering.
@end deftypefun
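
As a hedged sketch, a @code{push_task} method relying solely on the
workers' local queues could dispatch tasks in a round-robin fashion; a
real policy would additionally have to check that the chosen worker is
able to execute the task:

@cartouche
@smallexample
/* Hedged sketch: round-robin dispatch to the workers' local queues.
 * The push_task prototype is assumed to be int (*)(struct starpu_task *). */
static int push_task_round_robin(struct starpu_task *task)
@{
    static unsigned next = 0;
    unsigned worker = next++ % starpu_worker_get_count();
    /* back = 0 keeps a FIFO ordering in the local queue. */
    return starpu_push_local_task((int)worker, task, 0);
@}
@end smallexample
@end cartouche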
@node Source code
@subsection Source code
@cartouche
@smallexample
static struct starpu_sched_policy_s dummy_sched_policy = @{
    .init_sched = init_dummy_sched,
    .deinit_sched = deinit_dummy_sched,
    .push_task = push_task_dummy,
    .push_prio_task = NULL,
    .pop_task = pop_task_dummy,
    .post_exec_hook = NULL,
    .pop_every_task = NULL,
    .policy_name = "dummy",
    .policy_description = "dummy scheduling strategy"
@};
@end smallexample
@end cartouche
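
The push and pop methods referenced above may look as follows; this is
only a sketch (a single shared FIFO, protected by the condition
variable registered in @code{init_dummy_sched} as shown earlier), and
the complete version ships in @code{examples/scheduler/}:

@cartouche
@smallexample
/* Hedged sketch: a single shared list of tasks, assumed to be
 * initialized with starpu_task_list_init() in init_dummy_sched. */
static struct starpu_task_list sched_list;

static int push_task_dummy(struct starpu_task *task)
@{
    pthread_mutex_lock(&sched_mutex);
    starpu_task_list_push_front(&sched_list, task);
    pthread_cond_signal(&sched_cond);
    pthread_mutex_unlock(&sched_mutex);
    return 0;
@}

static struct starpu_task *pop_task_dummy(void)
@{
    /* The mutex associated to the worker is already taken when this
     * method is called, so the list can be accessed directly. */
    return starpu_task_list_pop_back(&sched_list);
@}
@end smallexample
@end cartouche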
@c ---------------------------------------------------------------------
@c C Extensions
@c ---------------------------------------------------------------------

@include c-extensions.texi

@c ---------------------------------------------------------------------
@c Appendices
@c ---------------------------------------------------------------------

@c ---------------------------------------------------------------------
@c Full source code for the 'Scaling a Vector' example
@c ---------------------------------------------------------------------

@node Full source code for the 'Scaling a Vector' example
@appendix Full source code for the 'Scaling a Vector' example

@menu
* Main application::
* CPU Kernel::
* CUDA Kernel::
* OpenCL Kernel::
@end menu

@node Main application
@section Main application
@include vector_scal_c.texi

@node CPU Kernel
@section CPU Kernel
@include vector_scal_cpu.texi

@node CUDA Kernel
@section CUDA Kernel
@include vector_scal_cuda.texi

@node OpenCL Kernel
@section OpenCL Kernel

@menu
* Invoking the kernel::
* Source of the kernel::
@end menu

@node Invoking the kernel
@subsection Invoking the kernel
@include vector_scal_opencl.texi

@node Source of the kernel
@subsection Source of the kernel
@include vector_scal_opencl_codelet.texi

@node GNU Free Documentation License
@appendix GNU Free Documentation License
@include fdl-1.3.texi

@c
@c Indices.
@c

@node Function Index
@unnumbered Function Index
@printindex fn

@bye