
\input texinfo @c -*-texinfo-*-
@c %**start of header
@setfilename starpu.info
@settitle StarPU Handbook
@c %**end of header
@include version.texi
@copying
Copyright @copyright{} 2009--2011 Universit@'e de Bordeaux 1
@noindent
Copyright @copyright{} 2010, 2011 Centre National de la Recherche Scientifique
@noindent
Copyright @copyright{} 2011 Institut National de Recherche en Informatique et Automatique
@quotation
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3
or any later version published by the Free Software Foundation;
with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
Texts. A copy of the license is included in the section entitled ``GNU
Free Documentation License''.
@end quotation
@end copying
@setchapternewpage odd
@dircategory Development
@direntry
* StarPU: (starpu). StarPU Handbook
@end direntry
@titlepage
@title StarPU Handbook
@subtitle for StarPU @value{VERSION}
@page
@vskip 0pt plus 1fill
@insertcopying
@end titlepage
@c @summarycontents
@contents
@page
@node Top
@top Preface
This manual documents the usage of StarPU version @value{VERSION}. It
was last updated on @value{UPDATED}.
@ifnottex
@insertcopying
@end ifnottex
@comment
@comment When you add a new menu item, please keep the right hand
@comment aligned to the same column. Do not use tabs. This provides
@comment better formatting.
@comment
@menu
* Introduction::                  A basic introduction to using StarPU
* Installing StarPU::             How to configure, build and install StarPU
* Using StarPU::                  How to run StarPU applications
* Basic Examples::                Basic examples of the use of StarPU
* Performance optimization::      How to optimize performance with StarPU
* Performance feedback::          Performance debugging tools
* StarPU MPI support::            How to combine StarPU with MPI
* Configuring StarPU::            How to configure StarPU
* StarPU API::                    The API to use StarPU
* Advanced Topics::               Advanced use of StarPU
* C Extensions::                  Easier StarPU programming with GCC
* Full source code for the 'Scaling a Vector' example::
* Function Index::                Index of C functions.
* GNU Free Documentation License::  How you can copy and share this manual.
@end menu
@c ---------------------------------------------------------------------
@c Introduction to StarPU
@c ---------------------------------------------------------------------
@node Introduction
@chapter Introduction to StarPU
@menu
* Motivation::                  Why StarPU?
* StarPU in a Nutshell::        The Fundamentals of StarPU
@end menu
@node Motivation
@section Motivation
@c complex machines with heterogeneous cores/devices
The use of specialized hardware such as accelerators or coprocessors offers an
interesting approach to overcome the physical limits encountered by processor
architects. As a result, many machines are now equipped with one or several
accelerators (e.g. a GPU), in addition to the usual processor(s). While a lot of
effort has been devoted to offloading computation onto such accelerators, very
little attention has been paid to portability concerns on the one hand, and to the
possibility of having heterogeneous accelerators and processors interact on the other hand.
StarPU is a runtime system that offers support for heterogeneous multicore
architectures. It not only offers a unified view of the computational resources
(i.e. CPUs and accelerators at the same time), but also takes care of
efficiently mapping and executing tasks onto a heterogeneous machine while
transparently handling low-level issues such as data transfers in a portable
fashion.
@c this leads to a complicated distributed memory design
@c which is not (easily) manageable by hand
@c added value/benefits of StarPU
@c   - portability
@c   - scheduling, perf. portability
@node StarPU in a Nutshell
@section StarPU in a Nutshell
@menu
* Codelet and Tasks::
* StarPU Data Management Library::
* Glossary::
* Research Papers::
@end menu
From a programming point of view, StarPU is not a new language but a library
that executes tasks explicitly submitted by the application. The data that a
task manipulates are automatically transferred onto the accelerator so that the
programmer does not have to take care of complex data movements. StarPU also
takes particular care of scheduling those tasks efficiently and allows
scheduling experts to implement custom scheduling policies in a portable
fashion.
@c explain the notion of codelet and task (i.e. g(A, B)
@node Codelet and Tasks
@subsection Codelet and Tasks
One of StarPU's primary data structures is the @b{codelet}. A codelet describes a
computational kernel that can possibly be implemented on multiple architectures
such as a CPU, a CUDA device or a Cell's SPU.
@c TODO insert illustration f : f_spu, f_cpu, ...
Another important data structure is the @b{task}. Executing a StarPU task
consists in applying a codelet on a data set, on one of the architectures on
which the codelet is implemented. A task thus describes the codelet that it
uses, but also which data are accessed, and how they are
accessed during the computation (read and/or write).
StarPU tasks are asynchronous: submitting a task to StarPU is a non-blocking
operation. The task structure can also specify a @b{callback} function that is
called once StarPU has properly executed the task. It also contains optional
fields that the application may use to give hints to the scheduler (such as
priority levels).
By default, task dependencies are inferred from data dependencies (sequential
coherency) by StarPU. The application can however disable sequential coherency
for some data, in which case dependencies can be expressed by hand.
A task may be identified by a unique 64-bit number chosen by the application,
which we refer to as a @b{tag}.
Task dependencies can be enforced by hand either by the means of callback functions, by
submitting other tasks, or by expressing dependencies
between tags (which can thus correspond to tasks that have not been submitted
yet).
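As a minimal sketch (using arbitrary tag values for illustration), tag
dependencies can be declared with @code{starpu_tag_declare_deps}, and a tag can
be attached to a task before it is submitted:
@cartouche
@smallexample
/* Tag 0x42 will not be started before tags 0x32 and 0x52 are done */
starpu_tag_declare_deps((starpu_tag_t)0x42, 2,
                        (starpu_tag_t)0x32, (starpu_tag_t)0x52);
/* Attach tag 0x42 to a task at submission time */
task->use_tag = 1;
task->tag_id = 0x42;
@end smallexample
@end cartouche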
@c TODO insert illustration f(Ar, Brw, Cr) + ..
@c DSM
@node StarPU Data Management Library
@subsection StarPU Data Management Library
Because StarPU schedules tasks at runtime, data transfers have to be
done automatically and ``just-in-time'' between processing units,
relieving the application programmer from explicit data transfers.
Moreover, to avoid unnecessary transfers, StarPU keeps data
where it was last needed, even if it was modified there, and it
allows multiple copies of the same data to reside at the same time on
several processing units as long as it is not modified.
@node Glossary
@subsection Glossary
A @b{codelet} records pointers to various implementations of the same
theoretical function.
A @b{memory node} can be either the main RAM or GPU-embedded memory.
A @b{bus} is a link between memory nodes.
A @b{data handle} keeps track of replicates of the same data (@b{registered} by the
application) over various memory nodes. The data management library manages
keeping them coherent.
The @b{home} memory node of a data handle is the memory node from which the data
was registered (usually the main memory node).
A @b{task} represents a scheduled execution of a codelet on some data handles.
A @b{tag} is a rendez-vous point. Tasks typically have their own tag, and can
depend on other tags. The value is chosen by the application.
A @b{worker} executes tasks. There is typically one per CPU computation core and
one per accelerator (for which a whole CPU core is dedicated).
A @b{driver} drives a given kind of worker. There are currently CPU, CUDA,
OpenCL and Gordon drivers. They usually start several workers to actually drive
them.
A @b{performance model} is a (dynamic or static) model of the performance of a
given codelet. Codelets can have an execution time performance model as well as
a power consumption performance model.
A data @b{interface} describes the layout of the data: for a vector, a pointer
to the start, the number of elements and the size of each element; for a matrix, a
pointer to the start, the number of elements per row, the offset between rows,
and the size of each element; etc. To access their data, codelet functions are
given interfaces for the local memory node replicates of the data handles of the
scheduled task.
@b{Partitioning} data means dividing the data of a given data handle (called
@b{father}) into a series of @b{children} data handles which designate various
portions of the former.
A @b{filter} is the function which computes children data handles from a father
data handle, and thus describes how the partitioning should be done (horizontal,
vertical, etc.).
@b{Acquiring} a data handle can be done from the main application, to safely
access the data of a data handle from its home node, without having to
unregister it.
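As a minimal sketch, assuming a @code{float} array @code{vector} registered
under a handle @code{vector_handle} (names used here for illustration only),
acquiring works as follows:
@cartouche
@smallexample
/* Block until the data is available in main memory */
starpu_data_acquire(vector_handle, STARPU_R);
printf("First element is %f\n", vector[0]);
/* Let StarPU manage the data again */
starpu_data_release(vector_handle);
@end smallexample
@end cartouche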
@node Research Papers
@subsection Research Papers
Research papers about StarPU can be found at
@indicateurl{http://runtime.bordeaux.inria.fr/Publis/Keyword/STARPU.html}.
Notably, a good overview is available in the research report at
@indicateurl{http://hal.archives-ouvertes.fr/inria-00467677}.
@c ---------------------------------------------------------------------
@c Installing StarPU
@c ---------------------------------------------------------------------
@node Installing StarPU
@chapter Installing StarPU
@menu
* Downloading StarPU::
* Configuration of StarPU::
* Building and Installing StarPU::
@end menu
StarPU can be built and installed by the standard means of the GNU
autotools. The following chapter briefly explains how these tools
can be used to install StarPU.
@node Downloading StarPU
@section Downloading StarPU
@menu
* Getting Sources::
* Optional dependencies::
@end menu
@node Getting Sources
@subsection Getting Sources
The simplest way to get the StarPU sources is to download the latest official
release tarball from @indicateurl{https://gforge.inria.fr/frs/?group_id=1570},
or the latest nightly snapshot from
@indicateurl{http://starpu.gforge.inria.fr/testing/}. The following documents
how to get the very latest version from the Subversion repository itself; this
should be needed only if you require the very latest changes (i.e. less than a
day old).
The source code is managed by a Subversion server hosted by the
InriaGforge. To get the source code, you need:
@itemize
@item
To install a Subversion client if one is
not already available on your system. The software can be obtained from
@indicateurl{http://subversion.tigris.org}. If you are running
on Windows, you will probably prefer to use TortoiseSVN from
@indicateurl{http://tortoisesvn.tigris.org/}.
@item
You can check out the project's SVN repository through anonymous
access. This will provide you with read access to the
repository.
If you need write access to the StarPU project, you can also choose to
become a member of the project @code{starpu}. For this, you first need to get
an account on the gForge server. You can then send a request to join the project
(@indicateurl{https://gforge.inria.fr/project/request.php?group_id=1570}).
@item
More information on how to get a gForge account, to become a member of
a project, or on any other related task can be obtained from the
InriaGforge at @indicateurl{https://gforge.inria.fr/}. The most important
thing is to upload your public SSH key on the gForge server (see the
FAQ at @indicateurl{http://siteadmin.gforge.inria.fr/FAQ.html#Q6} for
instructions).
@end itemize
You can now check out the latest version from the Subversion server:
@itemize
@item
using the anonymous access via svn:
@example
% svn checkout svn://scm.gforge.inria.fr/svn/starpu/trunk
@end example
@item
using the anonymous access via https:
@example
% svn checkout --username anonsvn https://scm.gforge.inria.fr/svn/starpu/trunk
@end example
The password is @code{anonsvn}.
@item
using your gForge account
@example
% svn checkout svn+ssh://<login>@@scm.gforge.inria.fr/svn/starpu/trunk
@end example
@end itemize
The following step requires the availability of @code{autoconf} and
@code{automake} to generate the @code{./configure} script. This is
done by calling @code{./autogen.sh}. The required version for
@code{autoconf} is 2.60 or higher. You will also need @code{makeinfo}.
@example
% ./autogen.sh
@end example
If the autotools are not available on your machine or not recent
enough, you can choose to download the latest nightly tarball, which
is provided with a @code{configure} script.
@example
% wget http://starpu.gforge.inria.fr/testing/starpu-nightly-latest.tar.gz
@end example
@node Optional dependencies
@subsection Optional dependencies
The topology discovery library @code{hwloc} is not mandatory to use StarPU
but is strongly recommended. It makes it possible to increase performance and to
perform some topology-aware scheduling.
@code{hwloc} is available in major distributions and for most OSes and can be
downloaded from @indicateurl{http://www.open-mpi.org/software/hwloc}.
@node Configuration of StarPU
@section Configuration of StarPU
@menu
* Generating Makefiles and configuration scripts::
* Running the configuration::
@end menu
@node Generating Makefiles and configuration scripts
@subsection Generating Makefiles and configuration scripts
This step is not necessary when using the tarball releases of StarPU. If you
are using the source code from the svn repository, you first need to generate
the configure scripts and the Makefiles.
@example
% ./autogen.sh
@end example
@node Running the configuration
@subsection Running the configuration
@example
% ./configure
@end example
Details about useful options to pass to @code{./configure} are given in
@ref{Compilation configuration}.
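For instance, a common option is @code{--prefix}, which selects the
installation directory (the path used here is only an example):
@example
% ./configure --prefix=$HOME/starpu
@end example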
@node Building and Installing StarPU
@section Building and Installing StarPU
@menu
* Building::
* Sanity Checks::
* Installing::
@end menu
@node Building
@subsection Building
@example
% make
@end example
@node Sanity Checks
@subsection Sanity Checks
In order to make sure that StarPU is working properly on the system, it is also
possible to run a test suite.
@example
% make check
@end example
@node Installing
@subsection Installing
In order to install StarPU at the location that was specified during
configuration:
@example
% make install
@end example
@c ---------------------------------------------------------------------
@c Using StarPU
@c ---------------------------------------------------------------------
@node Using StarPU
@chapter Using StarPU
@menu
* Setting flags for compiling and linking applications::
* Running a basic StarPU application::
* Kernel threads started by StarPU::
* Enabling OpenCL::
@end menu
@node Setting flags for compiling and linking applications
@section Setting flags for compiling and linking applications
Compiling and linking an application against StarPU may require specific
flags or libraries (for instance @code{CUDA} or @code{libspe2}).
To this end, it is possible to use the @code{pkg-config} tool.
If StarPU was not installed at some standard location, the path of StarPU's
library must be specified in the @code{PKG_CONFIG_PATH} environment variable so
that @code{pkg-config} can find it. For example, if StarPU was installed in
@code{$prefix_dir}:
@example
% PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$prefix_dir/lib/pkgconfig
@end example
The flags required to compile or link against StarPU are then
accessible with the following commands:
@example
% pkg-config --cflags libstarpu  # options for the compiler
% pkg-config --libs libstarpu    # options for the linker
@end example
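For instance, a small program could then be compiled directly on the command
line as follows (the source file name is only an example):
@example
% cc hello_world.c $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) -o hello_world
@end example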
@node Running a basic StarPU application
@section Running a basic StarPU application
Basic examples using StarPU are built in the directory
@code{examples/basic_examples/} (and installed in
@code{$prefix_dir/lib/starpu/examples/}). You can for example run the
@code{vector_scal} example.
@example
% ./examples/basic_examples/vector_scal
BEFORE : First element was 1.000000
AFTER First element is 3.140000
%
@end example
When StarPU is used for the first time, the directory
@code{$HOME/.starpu/} is created; performance models will be stored in
that directory.
Please note that buses are benchmarked when StarPU is launched for the
first time. This may take a few minutes, or less if @code{hwloc} is
installed. This step is done only once per user and per machine.
@node Kernel threads started by StarPU
@section Kernel threads started by StarPU
StarPU automatically binds one thread per CPU core. It does not use
SMT/hyperthreading because kernels are usually already optimized for using a
full core, and using hyperthreading would make kernel calibration rather random.
Since driving GPUs is a CPU-consuming task, StarPU dedicates one core per GPU.
While StarPU tasks are executing, the application is not supposed to do
computations in the threads it starts itself; tasks should be used instead.
TODO: add a StarPU function to bind an application thread (e.g. the main thread)
to a dedicated core (and thus disable the corresponding StarPU CPU worker).
@node Enabling OpenCL
@section Enabling OpenCL
When both CUDA and OpenCL drivers are enabled, StarPU will launch an
OpenCL worker for NVIDIA GPUs only if CUDA is not already running on them.
This design choice was necessary as OpenCL and CUDA cannot run at the
same time on the same NVIDIA GPU, as there is currently no interoperability
between them.
To enable OpenCL, you need either to disable CUDA when configuring StarPU:
@example
% ./configure --disable-cuda
@end example
or when running applications:
@example
% STARPU_NCUDA=0 ./application
@end example
OpenCL will automatically be started on any device not yet used by
CUDA. So on a machine with 4 GPUs, it is possible to
enable CUDA on 2 devices and OpenCL on the other 2 devices by running:
@example
% STARPU_NCUDA=2 ./application
@end example
@c ---------------------------------------------------------------------
@c Basic Examples
@c ---------------------------------------------------------------------
@node Basic Examples
@chapter Basic Examples
@menu
* Compiling and linking options::
* Hello World::                 Submitting Tasks
* Scaling a Vector::            Manipulating Data
* Vector Scaling on an Hybrid CPU/GPU Machine::  Handling Heterogeneous Architectures
* Using multiple implementations of a codelet::
* Task and Worker Profiling::
* Partitioning Data::           Partitioning Data
* Performance model example::
* Theoretical lower bound on execution time::
* Insert Task Utility::
* More examples::               More examples shipped with StarPU
* Debugging::                   When things go wrong.
@end menu
@node Compiling and linking options
@section Compiling and linking options
Let's suppose StarPU has been installed in the directory
@code{$STARPU_DIR}. As explained in @ref{Setting flags for compiling and linking applications},
the variable @code{PKG_CONFIG_PATH} needs to be set. It is also
necessary to set the variable @code{LD_LIBRARY_PATH} to locate dynamic
libraries at runtime.
@example
% PKG_CONFIG_PATH=$STARPU_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
% LD_LIBRARY_PATH=$STARPU_DIR/lib:$LD_LIBRARY_PATH
@end example
The Makefile could for instance contain the following lines to define which
options must be given to the compiler and to the linker:
@cartouche
@example
CFLAGS  += $$(pkg-config --cflags libstarpu)
LDFLAGS += $$(pkg-config --libs libstarpu)
@end example
@end cartouche
@node Hello World
@section Hello World
@menu
* Required Headers::
* Defining a Codelet::
* Submitting a Task::
* Execution of Hello World::
@end menu
In this section, we show how to implement a simple program that submits a task to StarPU.
@node Required Headers
@subsection Required Headers
The @code{starpu.h} header should be included in any code using StarPU.
@cartouche
@smallexample
#include <starpu.h>
@end smallexample
@end cartouche
@node Defining a Codelet
@subsection Defining a Codelet
@cartouche
@smallexample
struct params @{
    int i;
    float f;
@};
void cpu_func(void *buffers[], void *cl_arg)
@{
    struct params *params = cl_arg;
    printf("Hello world (params = @{%i, %f@} )\n", params->i, params->f);
@}
starpu_codelet cl =
@{
    .where = STARPU_CPU,
    .cpu_func = cpu_func,
    .nbuffers = 0
@};
@end smallexample
@end cartouche
A codelet is a structure that represents a computational kernel. Such a codelet
may contain an implementation of the same kernel on different architectures
(e.g. CUDA, Cell's SPU, x86, ...).
The @code{nbuffers} field specifies the number of data buffers that are
manipulated by the codelet: here the codelet does not access or modify any data
that is controlled by our data management library. Note that the argument
passed to the codelet (the @code{cl_arg} field of the @code{starpu_task}
structure) does not count as a buffer since it is not managed by our data
management library, but just contains trivial parameters.
@c TODO need a crossref to the proper description of "where" see bla for more ...
We create a codelet which may only be executed on the CPUs. The @code{where}
field is a bitmask that defines where the codelet may be executed. Here, the
@code{STARPU_CPU} value means that only CPUs can execute this codelet
(@pxref{Codelets and Tasks} for more details on this field).
When a CPU core executes a codelet, it calls the @code{cpu_func} function,
which @emph{must} have the following prototype:
@code{void (*cpu_func)(void *buffers[], void *cl_arg);}
In this example, we can ignore the first argument of this function which gives a
description of the input and output buffers (e.g. the size and the location of
the matrices) since there is none.
The second argument is a pointer to a buffer passed as an
argument to the codelet by the means of the @code{cl_arg} field of the
@code{starpu_task} structure.
@c TODO rewrite so that it is a little clearer ?
Be aware that this may be a pointer to a
@emph{copy} of the actual buffer, and not the pointer given by the programmer:
if the codelet modifies this buffer, there is no guarantee that the initial
buffer will be modified as well. This implies, for instance, that the buffer
cannot be used as a synchronization medium. If synchronization is needed, data
has to be registered to StarPU, see @ref{Scaling a Vector}.
@node Submitting a Task
@subsection Submitting a Task
@cartouche
@smallexample
void callback_func(void *callback_arg)
@{
    printf("Callback function (arg %x)\n", callback_arg);
@}
int main(int argc, char **argv)
@{
    /* @b{initialize StarPU} */
    starpu_init(NULL);
    struct starpu_task *task = starpu_task_create();
    task->cl = &cl; /* @b{Pointer to the codelet defined above} */
    struct params params = @{ 1, 2.0f @};
    task->cl_arg = &params;
    task->cl_arg_size = sizeof(params);
    task->callback_func = callback_func;
    task->callback_arg = 0x42;
    /* @b{starpu_task_submit will be a blocking call} */
    task->synchronous = 1;
    /* @b{submit the task to StarPU} */
    starpu_task_submit(task);
    /* @b{terminate StarPU} */
    starpu_shutdown();
    return 0;
@}
@end smallexample
@end cartouche
Before submitting any tasks to StarPU, @code{starpu_init} must be called. The
@code{NULL} argument specifies that we use the default configuration. Tasks cannot
be submitted after the termination of StarPU by a call to
@code{starpu_shutdown}.
In the example above, a task structure is allocated by a call to
@code{starpu_task_create}. This function only allocates and fills the
corresponding structure with the default settings (@pxref{Codelets and
Tasks, starpu_task_create}), but it does not submit the task to StarPU.
@c not really clear ;)
The @code{cl} field is a pointer to the codelet which the task will
execute: in other words, the codelet structure describes which computational
kernel should be offloaded on the different architectures, and the task
structure is a wrapper containing a codelet and the piece of data on which the
codelet should operate.
The optional @code{cl_arg} field is a pointer to a buffer (of size
@code{cl_arg_size}) with some parameters for the kernel
described by the codelet. For instance, if a codelet implements a computational
kernel that multiplies its input vector by a constant, the constant could be
specified by the means of this buffer, instead of registering it as a StarPU
data. It must however be noted that StarPU avoids making copies whenever possible
and rather passes the pointer as such, so the buffer which is pointed to must be
kept allocated until the task terminates, and if several tasks are submitted
with various parameters, each of them must be given a pointer to its own
buffer.
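As a minimal sketch, reusing the @code{struct params} and the codelet @code{cl}
from this example, each task can be given its own dynamically allocated buffer
(freeing the buffers, e.g. from the callback, is omitted here):
@cartouche
@smallexample
int i;
for (i = 0; i < 10; i++) @{
    struct params *p = malloc(sizeof(*p));
    p->i = i;
    p->f = 2.0f * i;
    struct starpu_task *task = starpu_task_create();
    task->cl = &cl;
    /* each task points to its own parameter buffer */
    task->cl_arg = p;
    task->cl_arg_size = sizeof(*p);
    starpu_task_submit(task);
@}
@end smallexample
@end cartouche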
Once a task has been executed, an optional callback function is called.
While the computational kernel could be offloaded on various architectures, the
callback function is always executed on a CPU. The @code{callback_arg}
pointer is passed as an argument of the callback. The prototype of a callback
function must be:
@code{void (*callback_function)(void *);}
If the @code{synchronous} field is non-zero, task submission will be
synchronous: the @code{starpu_task_submit} function will not return until the
task has been executed. Note that the @code{starpu_shutdown} method does not
guarantee that asynchronous tasks have been executed before it returns;
@code{starpu_task_wait_for_all} can be used to that effect, or data can be
unregistered (@code{starpu_data_unregister(vector_handle);}), which will
implicitly wait for all the tasks scheduled to work on it, unless explicitly
disabled thanks to @code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
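As a minimal sketch, asynchronous submission combined with such a global
barrier could look as follows (reusing the task from the example above):
@cartouche
@smallexample
task->synchronous = 0; /* @b{submission returns immediately} */
starpu_task_submit(task);
/* ... submit more asynchronous tasks ... */
/* block until all submitted tasks have been executed */
starpu_task_wait_for_all();
@end smallexample
@end cartouche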
@node Execution of Hello World
@subsection Execution of Hello World
@smallexample
% make hello_world
cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) hello_world.c -o hello_world
% ./hello_world
Hello world (params = @{1, 2.000000@} )
Callback function (arg 42)
@end smallexample
@node Scaling a Vector
@section Manipulating Data: Scaling a Vector
The previous example has shown how to submit tasks. In this section,
we show how StarPU tasks can manipulate data. The full source code for
this example is given in @ref{Full source code for the 'Scaling a Vector' example}.
@menu
* Source code of Vector Scaling::
* Execution of Vector Scaling::
@end menu
@node Source code of Vector Scaling
@subsection Source code of Vector Scaling
Programmers can describe the data layout of their application so that StarPU is
responsible for enforcing data coherency and availability across the machine.
Instead of handling complex (and non-portable) mechanisms to perform data
movements, programmers only declare which piece of data is accessed and/or
modified by a task, and StarPU makes sure that when a computational kernel
starts somewhere (e.g. on a GPU), its data are available locally.
Before submitting those tasks, the programmer first needs to declare the
different pieces of data to StarPU using the @code{starpu_*_data_register}
functions. To ease the development of applications for StarPU, it is possible
to describe multiple types of data layout. A type of data layout is called an
@b{interface}. There are different predefined interfaces available in StarPU:
here we will consider the @b{vector interface}.
The following lines show how to declare an array of @code{NX} elements of type
@code{float} using the vector interface:
@cartouche
@smallexample
float vector[NX];
starpu_data_handle vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end smallexample
@end cartouche
The first argument, called the @b{data handle}, is an opaque pointer which
designates the array in StarPU. This is also the structure which is used to
describe which data is used by a task. The second argument is the node number
where the data originally resides. Here it is 0 since the @code{vector} array is in
the main memory. Then comes the pointer @code{vector} where the data can be found in main memory,
the number of elements in the vector and the size of each element.
The following shows how to construct a StarPU task that will manipulate the
vector and a constant factor.
@cartouche
@smallexample
float factor = 3.14;
struct starpu_task *task = starpu_task_create();
task->cl = &cl;                          /* @b{Pointer to the codelet defined below} */
task->buffers[0].handle = vector_handle; /* @b{First parameter of the codelet} */
task->buffers[0].mode = STARPU_RW;
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->synchronous = 1;
starpu_task_submit(task);
@end smallexample
@end cartouche
Since the factor is a mere constant float value parameter,
it does not need a preliminary registration, and
can just be passed through the @code{cl_arg} pointer as in the previous
example. The vector parameter is described by its handle.
There are two fields in each element of the @code{buffers} array.
@code{handle} is the handle of the data, and @code{mode} specifies how the
kernel will access the data (@code{STARPU_R} for read-only, @code{STARPU_W} for
write-only and @code{STARPU_RW} for read and write access).
The definition of the codelet can be written as follows:
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned i;
    float *factor = cl_arg;
    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* CPU copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    for (i = 0; i < n; i++)
        val[i] *= *factor;
@}
starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = scal_cpu_func,
    .nbuffers = 1
@};
@end smallexample
@end cartouche
The first argument is an array that gives
a description of all the buffers passed in the @code{task->buffers} array. The
size of this array is given by the @code{nbuffers} field of the codelet
structure. For the sake of genericity, this array contains pointers to the
different interfaces describing each buffer. In the case of the @b{vector
interface}, the location of the vector (resp. its length) is accessible in the
@code{ptr} (resp. @code{nx}) field of this interface. Since the vector is accessed in a
read-write fashion, any modification will automatically affect future accesses
to this vector made by other tasks.
The second argument of the @code{scal_cpu_func} function contains a pointer to the
parameters of the codelet (given in @code{task->cl_arg}), so that we read the
constant factor from this pointer.
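Finally, when the application no longer needs StarPU to manage the array, the
handle can be unregistered; as noted in the previous section, this implicitly
waits for the tasks scheduled to work on it (unless sequential consistency was
disabled):
@cartouche
@smallexample
/* wait for pending tasks on the handle and stop managing it */
starpu_data_unregister(vector_handle);
@end smallexample
@end cartouche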
@node Execution of Vector Scaling
@subsection Execution of Vector Scaling
@smallexample
% make vector_scal
cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) vector_scal.c -o vector_scal
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
@node Vector Scaling on an Hybrid CPU/GPU Machine
@section Vector Scaling on an Hybrid CPU/GPU Machine
Contrary to the previous examples, the task submitted in this example may not
only be executed by the CPUs, but also by a CUDA device.
@menu
* Definition of the CUDA Kernel::
* Definition of the OpenCL Kernel::
* Definition of the Main Code::
* Execution of Hybrid Vector Scaling::
@end menu
@node Definition of the CUDA Kernel
@subsection Definition of the CUDA Kernel
The CUDA implementation can be written as follows. It needs to be compiled with
a CUDA compiler such as nvcc, the NVIDIA CUDA compiler driver. It must be noted
that the vector pointer returned by @code{STARPU_VECTOR_GET_PTR} is here a pointer in GPU
memory, so that it can be passed as such to the @code{vector_mult_cuda} kernel
call.
@cartouche
@smallexample
#include <starpu.h>
#include <starpu_cuda.h>
static __global__ void vector_mult_cuda(float *val, unsigned n,
                                        float factor)
@{
    unsigned i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        val[i] *= factor;
@}
extern "C" void scal_cuda_func(void *buffers[], void *_args)
@{
    float *factor = (float *)_args;
    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* CUDA copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned threads_per_block = 64;
    unsigned nblocks = (n + threads_per_block-1) / threads_per_block;
@i{    vector_mult_cuda<<<nblocks,threads_per_block, 0, starpu_cuda_get_local_stream()>>>(val, n, *factor);}
@i{    cudaStreamSynchronize(starpu_cuda_get_local_stream());}
@}
@end smallexample
@end cartouche
@node Definition of the OpenCL Kernel
@subsection Definition of the OpenCL Kernel
The OpenCL implementation can be written as follows. StarPU provides
tools to compile an OpenCL kernel stored in a file.
@cartouche
@smallexample
__kernel void vector_mult_opencl(__global float* val, int nx, float factor)
@{
    const int i = get_global_id(0);
    if (i < nx) @{
        val[i] *= factor;
    @}
@}
@end smallexample
@end cartouche
Similarly to CUDA, the pointer returned by @code{STARPU_VECTOR_GET_PTR} is here
a device pointer, so that it is passed as such to the OpenCL kernel.

@cartouche
@smallexample
#include <starpu.h>
@i{#include <starpu_opencl.h>}

@i{extern struct starpu_opencl_program programs;}

void scal_opencl_func(void *buffers[], void *_args)
@{
    float *factor = _args;
@i{    int id, devid, err;}
@i{    cl_kernel kernel;}
@i{    cl_command_queue queue;}
@i{    cl_event event;}

    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* OpenCL copy of the vector pointer */
    cl_mem val = (cl_mem) STARPU_VECTOR_GET_PTR(buffers[0]);

@i{    id = starpu_worker_get_id();}
@i{    devid = starpu_worker_get_devid(id);}

@i{    err = starpu_opencl_load_kernel(&kernel, &queue, &programs,}
@i{                "vector_mult_opencl", devid);  /* @b{Name of the codelet defined above} */}
@i{    if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}

@i{    err = clSetKernelArg(kernel, 0, sizeof(val), &val);}
@i{    err |= clSetKernelArg(kernel, 1, sizeof(n), &n);}
@i{    err |= clSetKernelArg(kernel, 2, sizeof(*factor), factor);}
@i{    if (err) STARPU_OPENCL_REPORT_ERROR(err);}

@i{    @{}
@i{        size_t global=1;}
@i{        size_t local=1;}
@i{        err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 0, NULL, &event);}
@i{        if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}
@i{    @}}

@i{    clFinish(queue);}
@i{    starpu_opencl_collect_stats(event);}
@i{    clReleaseEvent(event);}

@i{    starpu_opencl_release_kernel(kernel);}
@}
@end smallexample
@end cartouche

@node Definition of the Main Code
@subsection Definition of the Main Code

The CPU implementation is the same as in the previous section.

Here is the source of the main application. You can notice the value of the
field @code{where} for the codelet. We specify
@code{STARPU_CPU|STARPU_CUDA|STARPU_OPENCL} to indicate to StarPU that the codelet
can be executed either on a CPU or on a CUDA or an OpenCL device.

@cartouche
@smallexample
#include <starpu.h>

#define NX 2048

extern void scal_cuda_func(void *buffers[], void *_args);
extern void scal_cpu_func(void *buffers[], void *_args);
extern void scal_opencl_func(void *buffers[], void *_args);

/* @b{Definition of the codelet} */
static starpu_codelet cl = @{
    .where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL, /* @b{It can be executed on a CPU,} */
                                    /* @b{on a CUDA device, or on an OpenCL device} */
    .cuda_func = scal_cuda_func,
    .cpu_func = scal_cpu_func,
    .opencl_func = scal_opencl_func,
    .nbuffers = 1
@};

#ifdef STARPU_USE_OPENCL
/* @b{The compiled version of the OpenCL program} */
struct starpu_opencl_program programs;
#endif

int main(int argc, char **argv)
@{
    float *vector;
    int i, ret;
    float factor=3.0;
    struct starpu_task *task;
    starpu_data_handle vector_handle;

    starpu_init(NULL);                          /* @b{Initialising StarPU} */

#ifdef STARPU_USE_OPENCL
    starpu_opencl_load_opencl_from_file(
            "examples/basic_examples/vector_scal_opencl_codelet.cl",
            &programs, NULL);
#endif

    vector = malloc(NX*sizeof(vector[0]));
    assert(vector);
    for(i=0 ; i<NX ; i++) vector[i] = i;
@end smallexample
@end cartouche

@cartouche
@smallexample
    /* @b{Registering data within StarPU} */
    starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector,
                                NX, sizeof(vector[0]));

    /* @b{Definition of the task} */
    task = starpu_task_create();
    task->cl = &cl;
    task->buffers[0].handle = vector_handle;
    task->buffers[0].mode = STARPU_RW;
    task->cl_arg = &factor;
    task->cl_arg_size = sizeof(factor);
@end smallexample
@end cartouche

@cartouche
@smallexample
    /* @b{Submitting the task} */
    ret = starpu_task_submit(task);
    if (ret == -ENODEV) @{
        fprintf(stderr, "No worker may execute this task\n");
        return 1;
    @}

@c TODO: Mmm, should rather be an unregistration with an implicit dependency, no?
    /* @b{Waiting for its termination} */
    starpu_task_wait_for_all();

    /* @b{Update the vector in RAM} */
    starpu_data_acquire(vector_handle, STARPU_R);
@end smallexample
@end cartouche

@cartouche
@smallexample
    /* @b{Access the data} */
    for(i=0 ; i<NX; i++) @{
        fprintf(stderr, "%f ", vector[i]);
    @}
    fprintf(stderr, "\n");

    /* @b{Release the RAM view of the data before unregistering it and shutting down StarPU} */
    starpu_data_release(vector_handle);
    starpu_data_unregister(vector_handle);
    starpu_shutdown();

    return 0;
@}
@end smallexample
@end cartouche

@node Execution of Hybrid Vector Scaling
@subsection Execution of Hybrid Vector Scaling

The Makefile given at the beginning of the section must be extended to
give the rules to compile the CUDA source code. Note that the source
file of the OpenCL kernel does not need to be compiled now: it will
be compiled at run-time when calling the function
@code{starpu_opencl_load_opencl_from_file()} (@pxref{starpu_opencl_load_opencl_from_file}).

@cartouche
@smallexample
CFLAGS  += $(shell pkg-config --cflags libstarpu)
LDFLAGS += $(shell pkg-config --libs libstarpu)
CC       = gcc

vector_scal: vector_scal.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o

%.o: %.cu
	nvcc $(CFLAGS) $< -c -o $@

clean:
	rm -f vector_scal *.o
@end smallexample
@end cartouche

@smallexample
% make
@end smallexample

and to execute it, with the default configuration:

@smallexample
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

or for example, by disabling CPU devices:

@smallexample
% STARPU_NCPUS=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

or by disabling CUDA devices (which may allow OpenCL devices to be used,
see @ref{Enabling OpenCL}):

@smallexample
% STARPU_NCUDA=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

@node Using multiple implementations of a codelet
@section Using multiple implementations of a codelet

One may want to write multiple implementations of a codelet for a single type of
device and let StarPU choose which one to run. As an example, we will show how
to use SSE to scale a vector. The codelet can be written as follows:

@cartouche
@smallexample
#include <xmmintrin.h>

void scal_sse_func(void *buffers[], void *cl_arg)
@{
    float *vector = (float *) STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned int n = STARPU_VECTOR_GET_NX(buffers[0]);
    unsigned int n_iterations = n/4;
    if (n % 4 != 0)
        n_iterations++;

    __m128 *VECTOR = (__m128*) vector;
    __m128 factor __attribute__((aligned(16)));
    factor = _mm_set1_ps(*(float *) cl_arg);

    unsigned int i;
    for (i = 0; i < n_iterations; i++)
        VECTOR[i] = _mm_mul_ps(factor, VECTOR[i]);
@}
@end smallexample
@end cartouche

The @code{cpu_func} field of the @code{starpu_codelet} structure has to be set
to the special value @code{STARPU_MULTIPLE_CPU_IMPLEMENTATIONS}. Note that
@code{STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS} and
@code{STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS} are also available.

@cartouche
@smallexample
starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS,
    .cpu_funcs = @{ scal_cpu_func, scal_sse_func @},
    .nbuffers = 1
@};
@end smallexample
@end cartouche

The scheduler will measure the performance of all the implementations it was
given, and pick the one that seems to be the fastest.

@node Task and Worker Profiling
@section Task and Worker Profiling

A full example showing how to use the profiling API is available in
the StarPU sources in the directory @code{examples/profiling/}.

@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->synchronous = 1;
/* We will destroy the task structure by hand so that we can
 * query the profiling info before the task is destroyed. */
task->destroy = 0;

/* Submit and wait for completion (since synchronous was set to 1) */
starpu_task_submit(task);

/* The task is finished, get profiling information */
struct starpu_task_profiling_info *info = task->profiling_info;

/* How much time did it take before the task started? */
double delay = starpu_timing_timespec_delay_us(&info->submit_time, &info->start_time);

/* How long was the task execution? */
double length = starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);

/* We no longer need the task structure */
starpu_task_destroy(task);
@end smallexample
@end cartouche

@cartouche
@smallexample
/* Display the occupancy of all workers during the test */
int worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
@{
    struct starpu_worker_profiling_info worker_info;
    int ret = starpu_worker_get_profiling_info(worker, &worker_info);
    STARPU_ASSERT(!ret);

    double total_time = starpu_timing_timespec_to_us(&worker_info.total_time);
    double executing_time = starpu_timing_timespec_to_us(&worker_info.executing_time);
    double sleeping_time = starpu_timing_timespec_to_us(&worker_info.sleeping_time);

    float executing_ratio = 100.0*executing_time/total_time;
    float sleeping_ratio = 100.0*sleeping_time/total_time;

    char workername[128];
    starpu_worker_get_name(worker, workername, 128);
    fprintf(stderr, "Worker %s:\n", workername);
    fprintf(stderr, "\ttotal time : %.2lf ms\n", total_time*1e-3);
    fprintf(stderr, "\texec time : %.2lf ms (%.2f %%)\n", executing_time*1e-3,
            executing_ratio);
    fprintf(stderr, "\tblocked time : %.2lf ms (%.2f %%)\n", sleeping_time*1e-3,
            sleeping_ratio);
@}
@end smallexample
@end cartouche

@node Partitioning Data
@section Partitioning Data

An existing piece of data can be partitioned in sub parts to be used by different tasks, for instance:

@cartouche
@smallexample
int vector[NX];
starpu_data_handle handle;

/* Declare data to StarPU */
starpu_vector_data_register(&handle, 0, (uintptr_t)vector, NX, sizeof(vector[0]));

/* Partition the vector in PARTS sub-vectors */
starpu_filter f =
@{
    .filter_func = starpu_block_filter_func_vector,
    .nchildren = PARTS
@};
starpu_data_partition(handle, &f);
@end smallexample
@end cartouche

@cartouche
@smallexample
/* Submit a task on each sub-vector */
for (i=0; i<starpu_data_get_nb_children(handle); i++) @{
    /* Get subdata number i (there is only 1 dimension) */
    starpu_data_handle sub_handle = starpu_data_get_sub_data(handle, 1, i);
    struct starpu_task *task = starpu_task_create();

    task->buffers[0].handle = sub_handle;
    task->buffers[0].mode = STARPU_RW;
    task->cl = &cl;
    task->synchronous = 1;
    task->cl_arg = &factor;
    task->cl_arg_size = sizeof(factor);

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche

Partitioning can be applied several times, see
@code{examples/basic_examples/mult.c} and @code{examples/filters/}.
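
Note that before the data can be unregistered or used as a whole again, it has
to be gathered back from the sub-parts. A minimal sketch, continuing the
example above:

@cartouche
@smallexample
/* Gather the sub-vectors back into the initial handle
 * (node 0 is the main memory node) */
starpu_data_unpartition(handle, 0);

/* The whole vector can now be unregistered as usual */
starpu_data_unregister(handle);
@end smallexample
@end cartouche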

@node Performance model example
@section Performance model example

To achieve good scheduling, StarPU scheduling policies need to be able to
estimate in advance the duration of a task. This is done by giving to codelets
a performance model, by defining a @code{starpu_perfmodel_t} structure and
providing its address in the @code{model} field of the @code{starpu_codelet}
structure. The @code{symbol} and @code{type} fields of @code{starpu_perfmodel_t}
are mandatory, to give a name to the model, and the type of the model, since
there are several kinds of performance models.

@itemize
@item
Measured at runtime (@code{STARPU_HISTORY_BASED} model type). This assumes that for a
given set of data input/output sizes, the performance will always be about the
same. This is very true for regular kernels on GPUs for instance (<0.1% error),
and just a bit less true on CPUs (~=1% error). This also assumes that there are
few different sets of data input/output sizes. StarPU will then keep record of
the average time of previous executions on the various processing units, and use
it as an estimation. History is done per task size, by using a hash of the input
and output sizes as an index.
It will also save it in @code{~/.starpu/sampling/codelets}
for further executions, and can be observed by using the
@code{starpu_perfmodel_display} command, or drawn by using
the @code{starpu_perfmodel_plot} tool. The models are indexed by machine name. To
share the models between machines (e.g. for a homogeneous cluster), use
@code{export STARPU_HOSTNAME=some_global_name}. The following is a small code
example.

If e.g. the code is recompiled with other compilation options, or several
variants of the code are used, the symbol string should be changed to reflect
that, in order to recalibrate a new model from zero. The symbol string can even
be constructed dynamically at execution time, as long as this is done before
submitting any task using it.

@cartouche
@smallexample
static struct starpu_perfmodel_t mult_perf_model = @{
    .type = STARPU_HISTORY_BASED,
    .symbol = "mult_perf_model"
@};

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = cpu_mult,
    .nbuffers = 3,
    /* for the scheduling policy to be able to use performance models */
    .model = &mult_perf_model
@};
@end smallexample
@end cartouche

@item
Measured at runtime and refined by regression (@code{STARPU_REGRESSION_*_BASED}
model type). This still assumes performance regularity, but can work
with various data input sizes, by applying regression over observed
execution times. @code{STARPU_REGRESSION_BASED} uses an a*n^b regression
form, while @code{STARPU_NL_REGRESSION_BASED} uses an a*n^b+c form (more precise than
@code{STARPU_REGRESSION_BASED}, but costs a lot more to compute). For instance,
@code{tests/perfmodels/regression_based.c} uses a regression-based performance
model for the @code{memset} operation.

@item
Provided as an estimation from the application itself (@code{STARPU_COMMON} model type and @code{cost_model} field),
see for instance
@code{examples/common/blas_model.h} and @code{examples/common/blas_model.c}.

@item
Provided explicitly by the application (@code{STARPU_PER_ARCH} model type): the
@code{.per_arch[i].cost_model} fields have to be filled with pointers to
functions which return the expected duration of the task in micro-seconds, one
per architecture.
@end itemize

How to use schedulers which can benefit from such performance models is explained
in @ref{Task scheduling policy}.

The same can be done for task power consumption estimation, by setting the
@code{power_model} field the same way as the @code{model} field. Note: for
now, the application has to give the power consumption performance model
a name which is different from the execution time performance model.
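
As a sketch, reusing the codelet above, this could look as follows (the
@code{mult_power_model} symbol and structure name are ours):

@cartouche
@smallexample
static struct starpu_perfmodel_t mult_power_model = @{
    .type = STARPU_HISTORY_BASED,
    /* must differ from the execution time model symbol */
    .symbol = "mult_power_model"
@};

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = cpu_mult,
    .nbuffers = 3,
    .model = &mult_perf_model,
    /* performance model for power consumption */
    .power_model = &mult_power_model
@};
@end smallexample
@end cartouche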

The application can request time estimations from the StarPU performance
models by filling a task structure as usual without actually submitting
it. The data handles can be created by calling @code{starpu_data_register}
functions with a @code{NULL} pointer (and need to be unregistered as usual)
and the desired data sizes. The @code{starpu_task_expected_length} and
@code{starpu_task_expected_power} functions can then be called to get an
estimation of the task duration on a given arch. @code{starpu_task_destroy}
needs to be called to destroy the dummy task afterwards. See
@code{tests/perfmodels/regression_based.c} for an example.

@node Theoretical lower bound on execution time
@section Theoretical lower bound on execution time

For kernels with history-based performance models, StarPU can very easily provide a theoretical lower
bound for the execution time of a whole set of tasks. See for
instance @code{examples/lu/lu_example.c}: before submitting tasks,
call @code{starpu_bound_start}, and after complete execution, call
@code{starpu_bound_stop}. @code{starpu_bound_print_lp} or
@code{starpu_bound_print_mps} can then be used to output a Linear Programming
problem corresponding to the schedule of your tasks. Run it through
@code{lp_solve} or any other linear programming solver, and that will give you a
lower bound for the total execution time of your tasks. If StarPU was compiled
with the glpk library installed, @code{starpu_bound_compute} can be used to
solve it immediately and get the optimized minimum. Its @code{integer}
parameter permits deciding whether integer resolution should be computed
and returned.
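
A minimal sketch of the sequence (the @code{deps} and @code{prio} flags are
described below; we write the LP problem to a file of our choosing):

@cartouche
@smallexample
FILE *f;

starpu_bound_start(1 /* deps */, 0 /* prio */);

/* ... submit all the tasks ... */
starpu_task_wait_for_all();

starpu_bound_stop();

/* Output the corresponding Linear Programming problem */
f = fopen("lower_bound.lp", "w");
starpu_bound_print_lp(f);
fclose(f);
@end smallexample
@end cartouche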

The @code{deps} parameter tells StarPU whether to take tasks and implicit data
dependencies into account. It must be understood that the linear programming
problem size is quadratic with the number of tasks, and thus the time to solve it
can be very long: it could be minutes for just a few dozen tasks. You should
probably use @code{lp_solve -timeout 1 test.lp -wmps test.mps} to convert the
problem to MPS format and then use a better solver: @code{glpsol} might be
better than @code{lp_solve} for instance (the @code{--pcost} option may be
useful), but sometimes doesn't manage to converge. @code{cbc} might look
slower, but it is parallel. Be sure to try at least all the @code{-B} options
of @code{lp_solve}. For instance, we often just use
@code{lp_solve -cc -B1 -Bb -Bg -Bp -Bf -Br -BG -Bd -Bs -BB -Bo -Bc -Bi}, and
the @code{-gr} option can also be quite useful.

Setting @code{deps} to 0 will only take into account the actual computations
on processing units. It however still properly takes into account the varying
performances of kernels and processing units, which is quite more accurate than
just comparing StarPU performances with the fastest of the kernels being used.

The @code{prio} parameter tells StarPU whether to simulate taking into account
the priorities as the StarPU scheduler would, i.e. schedule prioritized
tasks before less prioritized tasks, to check to what extent this results
in a less optimal solution. This increases even more computation time.

Note that for simplicity, all this however doesn't take into account data
transfers, which are assumed to be completely overlapped.

@node Insert Task Utility
@section Insert Task Utility

StarPU provides the wrapper function @code{starpu_insert_task} to ease
the creation and submission of tasks.

@deftypefun int starpu_insert_task (starpu_codelet *@var{cl}, ...)
Create and submit a task corresponding to @var{cl} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet can be of the following types:

@itemize
@item
@code{STARPU_R}, @code{STARPU_W}, @code{STARPU_RW}, @code{STARPU_SCRATCH}, @code{STARPU_REDUX}: an access mode followed by a data handle;
@item
@code{STARPU_VALUE} followed by a pointer to a constant value and
the size of the constant;
@item
@code{STARPU_CALLBACK} followed by a pointer to a callback function;
@item
@code{STARPU_CALLBACK_ARG} followed by a pointer to be given as an
argument to the callback function;
@item
@code{STARPU_PRIORITY} followed by an integer defining a priority level.
@end itemize

Parameters to be passed to the codelet implementation are defined
through the type @code{STARPU_VALUE}. The function
@code{starpu_unpack_cl_args} must be called within the codelet
implementation to retrieve them.
@end deftypefun

Here is the implementation of the codelet:

@smallexample
void func_cpu(void *descr[], void *_args)
@{
    int *x0 = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
    float *x1 = (float *)STARPU_VARIABLE_GET_PTR(descr[1]);
    int ifactor;
    float ffactor;

    starpu_unpack_cl_args(_args, &ifactor, &ffactor);
    *x0 = *x0 * ifactor;
    *x1 = *x1 * ffactor;
@}

starpu_codelet mycodelet = @{
    .where = STARPU_CPU,
    .cpu_func = func_cpu,
    .nbuffers = 2
@};
@end smallexample

And the call to the @code{starpu_insert_task} wrapper:

@smallexample
starpu_insert_task(&mycodelet,
                   STARPU_VALUE, &ifactor, sizeof(ifactor),
                   STARPU_VALUE, &ffactor, sizeof(ffactor),
                   STARPU_RW, data_handles[0], STARPU_RW, data_handles[1],
                   0);
@end smallexample

The call to @code{starpu_insert_task} is equivalent to the following
code:

@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &mycodelet;
task->buffers[0].handle = data_handles[0];
task->buffers[0].mode = STARPU_RW;
task->buffers[1].handle = data_handles[1];
task->buffers[1].mode = STARPU_RW;

char *arg_buffer;
size_t arg_buffer_size;
starpu_pack_cl_args(&arg_buffer, &arg_buffer_size,
                    STARPU_VALUE, &ifactor, sizeof(ifactor),
                    STARPU_VALUE, &ffactor, sizeof(ffactor),
                    0);
task->cl_arg = arg_buffer;
task->cl_arg_size = arg_buffer_size;

int ret = starpu_task_submit(task);
@end smallexample

If some part of the task insertion depends on the value of some computation,
the @code{STARPU_DATA_ACQUIRE_CB} macro can be very convenient. For
instance, assuming that the index variable @code{i} was registered as handle
@code{i_handle}:

@smallexample
/* Compute which portion we will work on, e.g. pivot */
starpu_insert_task(&which_index, STARPU_W, i_handle, 0);

/* And submit the corresponding task */
STARPU_DATA_ACQUIRE_CB(i_handle, STARPU_R, starpu_insert_task(&work, STARPU_RW, A_handle[i], 0));
@end smallexample

The @code{STARPU_DATA_ACQUIRE_CB} macro submits an asynchronous request for
acquiring data @code{i} for the main application, and will execute the code
given as third parameter when it is acquired. In other words, as soon as the
value of @code{i} computed by the @code{which_index} codelet can be read, the
portion of code passed as third parameter of @code{STARPU_DATA_ACQUIRE_CB} will
be executed, and is allowed to read from @code{i} to use it e.g. as an
index. Note that this macro is only available when compiling StarPU with
the compiler @code{gcc}.

@node Debugging
@section Debugging

StarPU provides several tools to help debugging applications. Execution traces
can be generated and displayed graphically, see @ref{Generating traces}. Some
gdb helpers are also provided to show the whole StarPU state:

@smallexample
(gdb) source tools/gdbinit
(gdb) help starpu
@end smallexample

@node More examples
@section More examples

More examples are available in the StarPU sources in the @code{examples/}
directory. Simple examples include:

@table @asis
@item @code{incrementer/}:
Trivial incrementation test.
@item @code{basic_examples/}:
Simple documented Hello world (as shown in @ref{Hello World}), vector/scalar product (as shown
in @ref{Vector Scaling on an Hybrid CPU/GPU Machine}), matrix
product examples (as shown in @ref{Performance model example}), an example using the blocked matrix data
interface, and an example using the variable data interface.
@item @code{matvecmult/}:
OpenCL example from NVidia, adapted to StarPU.
@item @code{axpy/}:
AXPY CUBLAS operation adapted to StarPU.
@item @code{fortran/}:
Example of Fortran bindings.
@end table

More advanced examples include:

@table @asis
@item @code{filters/}:
Examples using filters, as shown in @ref{Partitioning Data}.
@item @code{lu/}:
LU matrix factorization, see for instance @code{xlu_implicit.c}.
@item @code{cholesky/}:
Cholesky matrix factorization, see for instance @code{cholesky_implicit.c}.
@end table

@c ---------------------------------------------------------------------
@c Performance options
@c ---------------------------------------------------------------------

@node Performance optimization
@chapter How to optimize performance with StarPU

TODO: improve!

@menu
* Data management::
* Task submission::
* Task priorities::
* Task scheduling policy::
* Performance model calibration::
* Task distribution vs Data transfer::
* Data prefetch::
* Power-based scheduling::
* Profiling::
* CUDA-specific optimizations::
@end menu

Simply encapsulating application kernels into tasks already permits
seamlessly supporting CPUs and GPUs at the same time. To achieve good performance, a
few additional changes are needed.

@node Data management
@section Data management

When the application allocates data, whenever possible it should use the
@code{starpu_malloc} function, which will ask CUDA or
OpenCL to make the allocation itself and pin the corresponding allocated
memory. This is needed to permit asynchronous data transfers, i.e. to permit data
transfers to overlap with computations. Otherwise, the trace will show that the
@code{DriverCopyAsync} state takes a lot of time: this is because CUDA or OpenCL
then reverts to synchronous transfers.
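
A minimal sketch of such an allocation (reusing the @code{NX} vector size of
the examples above):

@cartouche
@smallexample
float *vector;

/* Allocate pinned memory so that asynchronous transfers can be used */
starpu_malloc((void **)&vector, NX*sizeof(vector[0]));

/* ... register, use and unregister the data as usual ... */

starpu_free(vector);
@end smallexample
@end cartouche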

By default, StarPU leaves replicates of data wherever they were used, in case they
will be re-used by other tasks, thus saving the data transfer time. When some
task modifies some data, all the other replicates are invalidated, and only the
processing unit which ran that task will have a valid replicate of the data. If the application knows
that this data will not be re-used by further tasks, it should advise StarPU to
immediately replicate it to a desired list of memory nodes (given through a
bitmask). This can be understood like the write-through mode of CPU caches.

@example
starpu_data_set_wt_mask(img_handle, 1<<0);
@end example

will for instance request to always automatically transfer a replicate into the
main memory (node 0), as bit 0 of the write-through bitmask is being set.

@example
starpu_data_set_wt_mask(img_handle, ~0U);
@end example

will request to always automatically broadcast the updated data to all memory
nodes.

@node Task submission
@section Task submission

To let StarPU make online optimizations, tasks should be submitted
asynchronously as much as possible. Ideally, all the tasks should be
submitted, and mere calls to @code{starpu_task_wait_for_all} or
@code{starpu_data_unregister} be done to wait for
termination. StarPU will then be able to rework the whole schedule, overlap
computation with communication, manage accelerator local memory usage, etc.
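
For instance, a minimal sketch of this pattern (the @code{tasks} array and
the @code{ntasks} count are ours):

@cartouche
@smallexample
unsigned i;

/* Submit the whole task graph asynchronously... */
for (i = 0; i < ntasks; i++)
    starpu_task_submit(tasks[i]);

/* ...and only then wait for all of them at once */
starpu_task_wait_for_all();
@end smallexample
@end cartouche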

@node Task priorities
@section Task priorities

By default, StarPU will consider the tasks in the order they are submitted by
the application. If the application programmer knows that some tasks should
be performed in priority (for instance because their output is needed by many
other tasks and may thus be a bottleneck if not executed early enough), the
@code{priority} field of the task structure should be set to transmit the
priority information to StarPU, as sketched below.
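
A minimal sketch (assuming the @code{STARPU_MAX_PRIO} macro for the highest
priority level):

@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;

/* Mark this task as the most urgent one */
task->priority = STARPU_MAX_PRIO;

starpu_task_submit(task);
@end smallexample
@end cartouche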

@node Task scheduling policy
@section Task scheduling policy

By default, StarPU uses the @code{eager} simple greedy scheduler. This is
because it provides correct load balance even if the application codelets do not
have performance models. If your application codelets have performance models
(@pxref{Performance model example} for examples showing how to do it),
you should change the scheduler thanks to the @code{STARPU_SCHED} environment
variable. For instance @code{export STARPU_SCHED=dmda}. Use @code{export STARPU_SCHED=help}
to get the list of available schedulers.

The @b{eager} scheduler uses a central task queue, from which workers draw tasks
to work on. This however does not permit prefetching data since the scheduling
decision is taken late. If a task has a non-0 priority, it is put at the front of the queue.

The @b{prio} scheduler also uses a central task queue, but sorts tasks by
priority (between -5 and 5).

The @b{random} scheduler distributes tasks randomly according to assumed worker
overall performance.

The @b{ws} (work stealing) scheduler schedules tasks on the local worker by
default. When a worker becomes idle, it steals a task from the most loaded
worker.

The @b{dm} (deque model) scheduler takes task execution performance models into account to
perform an HEFT-like scheduling strategy: it schedules tasks where their
termination time will be minimal.

The @b{dmda} (deque model data aware) scheduler is similar to dm, but it also takes
into account data transfer time.

The @b{dmdar} (deque model data aware ready) scheduler is similar to dmda, but
it also sorts tasks on per-worker queues by number of already-available data
buffers.

The @b{dmdas} (deque model data aware sorted) scheduler is similar to dmda, but it
also supports arbitrary priority values.

The @b{heft} (HEFT) scheduler is similar to dmda, but it also supports task bundles.

The @b{pheft} (parallel HEFT) scheduler is similar to heft, but it also supports
parallel tasks (still experimental).

The @b{pgreedy} (parallel greedy) scheduler is similar to eager, but it also
supports parallel tasks (still experimental).

@node Performance model calibration
@section Performance model calibration

Most schedulers are based on an estimation of codelet duration on each kind
of processing unit. For this to be possible, the application programmer needs
to configure a performance model for the codelets of the application (see
@ref{Performance model example} for instance). History-based performance models
use on-line calibration. StarPU will automatically calibrate codelets
which have never been calibrated yet, and save the result in
@code{~/.starpu/sampling/codelets}.
The models are indexed by machine name. To share the models between machines
(e.g. for a homogeneous cluster), use @code{export STARPU_HOSTNAME=some_global_name}.

To force continuing calibration, use
@code{export STARPU_CALIBRATE=1} . This may be necessary if your application
has not-so-stable performance. StarPU will force calibration (and thus ignore
the current result) until 10 (@code{STARPU_CALIBRATION_MINIMUM}) measurements have been
made on each architecture, to avoid badly scheduling tasks just because the
first measurements were not so good. Details on the current performance model status
can be obtained from the @code{starpu_perfmodel_display} command: the @code{-l}
option lists the available performance models, and the @code{-s} option permits
choosing the performance model to be displayed. The result looks like:

@example
$ starpu_perfmodel_display -s starpu_dlu_lu_model_22
performance model for cpu
# hash      size       mean          dev           n
880805ba    98304      2.731309e+02  6.010210e+01  1240
b50b6605    393216     1.469926e+03  1.088828e+02  1240
5c6c3401    1572864    1.125983e+04  3.265296e+03  1240
@end example

This shows that for the LU 22 kernel with a 1.5MiB matrix, the average
execution time on CPUs was about 12ms, with a 3ms standard deviation, over
1240 samples. It is a good idea to check this before doing actual performance
measurements.

A graph can be drawn by using the @code{starpu_perfmodel_plot} tool:

@example
$ starpu_perfmodel_plot -s starpu_dlu_lu_model_22
98304 393216 1572864
$ gnuplot starpu_starpu_dlu_lu_model_22.gp
$ gv starpu_starpu_dlu_lu_model_22.eps
@end example

If a kernel source code was modified (e.g. performance improvement), the
calibration information is stale and should be dropped, to re-calibrate from
start. This can be done by using @code{export STARPU_CALIBRATE=2}.

Note: due to CUDA limitations, to be able to measure kernel duration,
calibration mode needs to disable asynchronous data transfers. Calibration thus
disables data transfer / computation overlapping, and should thus not be used
for eventual benchmarks. Note 2: history-based performance models get calibrated
only if a performance-model-based scheduler is chosen.

@node Task distribution vs Data transfer
@section Task distribution vs Data transfer

Distributing tasks to balance the load induces data transfer penalty. StarPU
thus needs to find a balance between both. The target function that the
@code{dmda} scheduler of StarPU
tries to minimize is @code{alpha * T_execution + beta * T_data_transfer}, where
@code{T_execution} is the estimated execution time of the codelet (usually
accurate), and @code{T_data_transfer} is the estimated data transfer time. The
latter is estimated based on bus calibration before execution start,
i.e. with an idle machine, thus without contention. You can force bus re-calibration by running
@code{starpu_calibrate_bus}. The beta parameter defaults to 1, but it can be
worth trying to tweak it by using @code{export STARPU_BETA=2} for instance,
since during real application execution, contention makes transfer times bigger.
This is of course imprecise, but in practice, a rough estimation already gives
the same good results that a precise estimation would give.

@node Data prefetch
@section Data prefetch

The @code{heft}, @code{dmda} and @code{pheft} scheduling policies perform data prefetch (see @ref{STARPU_PREFETCH}):
as soon as a scheduling decision is taken for a task, requests are issued to
transfer its required data to the target processing unit, if needed, so that
when the processing unit actually starts the task, its data will hopefully be
already available and it will not have to wait for the transfer to finish.

The application may want to perform some manual prefetching, for several reasons
such as excluding initial data transfers from performance measurements, or
setting up an initial statically-computed data distribution on the machine
before submitting tasks, which will thus guide StarPU toward an initial task
distribution (since StarPU will try to avoid further transfers).

This can be achieved by giving the @code{starpu_data_prefetch_on_node} function
the handle and the desired target memory node.
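
For instance, a sketch (we assume here a variant taking an additional
asynchronous flag as last argument; check the reference manual for the exact
prototype):

@cartouche
@smallexample
/* Request that a valid replicate of the data be present on memory
 * node 1 (e.g. the first GPU); the last argument (assumed here to
 * be an asynchronous flag) makes the request non-blocking. */
starpu_data_prefetch_on_node(vector_handle, 1, 1);
@end smallexample
@end cartouche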

@node Power-based scheduling
@section Power-based scheduling

If the application can provide some power performance model (through
the @code{power_model} field of the codelet structure), StarPU will
take it into account when distributing tasks. The target function that
the @code{dmda} scheduler minimizes becomes @code{alpha * T_execution +
beta * T_data_transfer + gamma * Consumption}, where @code{Consumption}
is the estimated task consumption in Joules. To tune this parameter, use
@code{export STARPU_GAMMA=3000} for instance, to express that each Joule
(i.e. 1 kW during 1000 us) is worth 3000 us of execution time penalty. Setting
@code{alpha} and @code{beta} to zero permits only taking power consumption into account.

This is however not sufficient to correctly optimize power: the scheduler would
simply tend to run all computations on the most energy-conservative processing
unit. To account for the consumption of the whole machine (including idle
processing units), the idle power of the machine should be given by setting
@code{export STARPU_IDLE_POWER=200} for 200W, for instance. This value can often
be obtained from the machine power supplier.

The power actually consumed by the total execution can be displayed by setting
@code{export STARPU_PROFILING=1 STARPU_WORKER_STATS=1} .

@node Profiling
@section Profiling

A quick view of how many tasks each worker has executed can be obtained by setting
@code{export STARPU_WORKER_STATS=1}. This is a convenient way to check that
execution did happen on accelerators without penalizing performance with
the profiling overhead.

A quick view of how much data transfers have been issued can be obtained by setting
@code{export STARPU_BUS_STATS=1} .

More detailed profiling information can be enabled by using @code{export STARPU_PROFILING=1} or by
calling @code{starpu_profiling_status_set} from the source code.
Statistics on the execution can then be obtained by using @code{export
STARPU_BUS_STATS=1} and @code{export STARPU_WORKER_STATS=1} .
More details on performance feedback are provided by the next chapter.

@node CUDA-specific optimizations
@section CUDA-specific optimizations

Due to CUDA limitations, StarPU will have a hard time overlapping its own
communications and the codelet computations if the application does not use a
dedicated CUDA stream for its computations. StarPU provides one by the use of
@code{starpu_cuda_get_local_stream()} which should be used by all CUDA codelet
operations. For instance:

@example
func <<<grid,block,0,starpu_cuda_get_local_stream()>>> (foo, bar);
cudaStreamSynchronize(starpu_cuda_get_local_stream());
@end example

StarPU already does appropriate calls for the CUBLAS library.

Unfortunately, some CUDA libraries do not have stream variants of
kernels. That will lower the potential for overlapping.

@c ---------------------------------------------------------------------
@c Performance feedback
@c ---------------------------------------------------------------------

@node Performance feedback
@chapter Performance feedback

@menu
* On-line::              On-line performance feedback
* Off-line::             Off-line performance feedback
* Codelet performance::  Performance of codelets
@end menu

@node On-line
@section On-line performance feedback

@menu
* Enabling monitoring::  Enabling on-line performance monitoring
* Task feedback::        Per-task feedback
* Codelet feedback::     Per-codelet feedback
* Worker feedback::      Per-worker feedback
* Bus feedback::         Bus-related feedback
* StarPU-Top::           StarPU-Top interface
@end menu

@node Enabling monitoring
@subsection Enabling on-line performance monitoring

In order to enable online performance monitoring, the application can call
@code{starpu_profiling_status_set(STARPU_PROFILING_ENABLE)}. It is possible to
detect whether monitoring is already enabled or not by calling
@code{starpu_profiling_status_get()}. Enabling monitoring also reinitializes all
previously collected feedback. The @code{STARPU_PROFILING} environment variable
can also be set to 1 to achieve the same effect.

Likewise, performance monitoring is stopped by calling
@code{starpu_profiling_status_set(STARPU_PROFILING_DISABLE)}. Note that this
does not reset the performance counters so that the application may consult
them later on.
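
A minimal sketch of toggling monitoring around a region of interest:

@cartouche
@smallexample
/* Start collecting profiling information */
starpu_profiling_status_set(STARPU_PROFILING_ENABLE);

/* ... submit and wait for the tasks to be measured ... */

/* Stop monitoring; the counters are kept and can still be consulted */
starpu_profiling_status_set(STARPU_PROFILING_DISABLE);
@end smallexample
@end cartouche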

More details about the performance monitoring API are available in section
@ref{Profiling API}.

@node Task feedback
@subsection Per-task feedback

If profiling is enabled, a pointer to a @code{starpu_task_profiling_info}
structure is put in the @code{.profiling_info} field of the @code{starpu_task}
structure when a task terminates.
This structure is automatically destroyed when the task structure is destroyed,
either automatically or by calling @code{starpu_task_destroy}.

The @code{starpu_task_profiling_info} structure indicates the date when the
task was submitted (@code{submit_time}), started (@code{start_time}), and
terminated (@code{end_time}), relative to the initialization of
StarPU with @code{starpu_init}. It also specifies the identifier of the worker
that has executed the task (@code{workerid}).
These dates are stored as @code{timespec} structures which the user may convert
into micro-seconds using the @code{starpu_timing_timespec_to_us} helper
function.

It is worth noting that the application may directly access this structure from
the callback executed at the end of the task. The @code{starpu_task} structure
associated to the callback currently being executed is indeed accessible with
the @code{starpu_get_current_task()} function.
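
As a sketch, a callback could for instance print the execution time of each
task this way (assuming monitoring was enabled beforehand):

@cartouche
@smallexample
void callback_func(void *arg)
@{
    struct starpu_task *task = starpu_get_current_task();
    struct starpu_task_profiling_info *info = task->profiling_info;

    /* Execution time of the task, in micro-seconds */
    double length = starpu_timing_timespec_delay_us(&info->start_time,
                                                    &info->end_time);
    fprintf(stderr, "task ran for %f us on worker %d\n",
            length, info->workerid);
@}
@end smallexample
@end cartouche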

@node Codelet feedback
@subsection Per-codelet feedback

The @code{per_worker_stats} field of the @code{starpu_codelet_t} structure is
an array of counters. The i-th entry of the array is incremented every time a
task implementing the codelet is executed on the i-th worker.
This array is not reinitialized when profiling is enabled or disabled.

@node Worker feedback
@subsection Per-worker feedback

The second argument returned by the @code{starpu_worker_get_profiling_info}
function is a @code{starpu_worker_profiling_info} structure that gives
statistics about the specified worker. This structure specifies when StarPU
started collecting profiling information for that worker (@code{start_time}),
the duration of the profiling measurement interval (@code{total_time}), the
time spent executing kernels (@code{executing_time}), the time spent sleeping
because there is no task to execute at all (@code{sleeping_time}), and the
number of tasks that were executed while profiling was enabled.
These values give an estimation of the proportion of time spent doing real work,
and the time spent either sleeping because there are not enough executable
tasks or simply wasted in pure StarPU overhead.

Calling @code{starpu_worker_get_profiling_info} resets the profiling
information associated to a worker.

When an FxT trace is generated (see @ref{Generating traces}), it is also
possible to use the @code{starpu_top} script (described in @ref{starpu-top}) to
generate a graphic showing the evolution of these values over time, for
the different workers.

@node Bus feedback
@subsection Bus-related feedback

TODO

@c how to enable/disable performance monitoring

@c what kind of information do we get ?

The bus speed measured by StarPU can be displayed by using the
@code{starpu_machine_display} tool, for instance:

@example
StarPU has found :
        3 CUDA devices
                CUDA 0 (Tesla C2050 02:00.0)
                CUDA 1 (Tesla C2050 03:00.0)
                CUDA 2 (Tesla C2050 84:00.0)
from    to RAM          to CUDA 0       to CUDA 1       to CUDA 2
RAM     0.000000        5176.530428     5176.492994     5191.710722
CUDA 0  4523.732446     0.000000        2414.074751     2417.379201
CUDA 1  4523.718152     2414.078822     0.000000        2417.375119
CUDA 2  4534.229519     2417.069025     2417.060863     0.000000
@end example

@node StarPU-Top
@subsection StarPU-Top interface

StarPU-Top is an interface which remotely displays the on-line state of a StarPU
application and permits the user to change parameters on the fly.

Variables to be monitored can be registered by calling the
@code{starputop_add_data_boolean}, @code{starputop_add_data_integer},
@code{starputop_add_data_float} functions, e.g.:

@example
starputop_data *data = starputop_add_data_integer("mynum", 0, 100, 1);
@end example

The application should then call @code{starputop_init_and_wait} to give its name
and wait for StarPU-Top to get a start request from the user. The name is used
by StarPU-Top to quickly reload a previously-saved layout of parameter display.

@example
starputop_init_and_wait("the application");
@end example

The new values can then be provided thanks to
@code{starputop_update_data_boolean}, @code{starputop_update_data_integer},
@code{starputop_update_data_float}, e.g.:

@example
starputop_update_data_integer(data, mynum);
@end example

Updateable parameters can be registered thanks to @code{starputop_register_parameter_boolean}, @code{starputop_register_parameter_integer}, @code{starputop_register_parameter_float}, e.g.:

@example
float alpha;
starputop_register_parameter_float("alpha", &alpha, 0, 10, modif_hook);
@end example

@code{modif_hook} is a function which will be called when the parameter is being modified; it can for instance print the new value:

@example
void modif_hook(struct starputop_param_t *d) @{
    fprintf(stderr,"%s has been modified: %f\n", d->name, alpha);
@}
@end example

Task schedulers should notify StarPU-Top when they have decided when a task will be
scheduled, so that it can show it in its Gantt chart, for instance:

@example
starputop_task_prevision(task, workerid, begin, end);
@end example

Starting StarPU-Top and the application can be done in two ways:

@itemize
@item The application is started by hand on some machine (and thus already
waiting for the start event). In the Preference dialog of StarPU-Top, the SSH
checkbox should be unchecked, and the hostname and port (default is 2011) on
which the application is already running should be specified. Clicking on the
connection button will thus connect to the already-running application.
@item StarPU-Top is started first, and clicking on the connection button will
start the application itself (possibly on a remote machine). The SSH checkbox
should be checked, and a command line provided, e.g.:

@example
ssh myserver STARPU_SCHED=heft ./application
@end example

If port 2011 of the remote machine cannot be accessed directly, an ssh port bridge should be added:

@example
ssh -L 2011:localhost:2011 myserver STARPU_SCHED=heft ./application
@end example

and "localhost" should be used as IP Address to connect to.
@end itemize

@node Off-line
@section Off-line performance feedback

@menu
* Generating traces::   Generating traces with FxT
* Gantt diagram::       Creating a Gantt Diagram
* DAG::                 Creating a DAG with graphviz
* starpu-top::          Monitoring activity
@end menu

@node Generating traces
@subsection Generating traces with FxT

StarPU can use the FxT library (see
@indicateurl{https://savannah.nongnu.org/projects/fkt/}) to generate traces
with a limited runtime overhead.

You can either get a tarball:

@example
% wget http://download.savannah.gnu.org/releases/fkt/fxt-0.2.2.tar.gz
@end example

or use the FxT library from CVS (autotools are required):

@example
% cvs -d :pserver:anonymous@@cvs.sv.gnu.org:/sources/fkt co FxT
% ./bootstrap
@end example

Compiling and installing the FxT library in the @code{$FXTDIR} path is
done following the standard procedure:

@example
% ./configure --prefix=$FXTDIR
% make
% make install
@end example

In order to have StarPU generate traces, StarPU should be configured with
the @code{--with-fxt} option:

@example
$ ./configure --with-fxt=$FXTDIR
@end example

Or you can simply point the @code{PKG_CONFIG_PATH} to
@code{$FXTDIR/lib/pkgconfig} and pass @code{--with-fxt} to @code{./configure}.

When FxT is enabled, a trace is generated when StarPU is terminated by calling
@code{starpu_shutdown()}. The trace is a binary file whose name has the form
@code{prof_file_XXX_YYY} where @code{XXX} is the user name, and
@code{YYY} is the pid of the process that used StarPU. This file is saved in the
@code{/tmp/} directory by default, or in the directory specified by
the @code{STARPU_FXT_PREFIX} environment variable.

@node Gantt diagram
@subsection Creating a Gantt Diagram

When the FxT trace file @code{filename} has been generated, it is possible to
generate a trace in the Paje format by calling:

@example
% starpu_fxt_tool -i filename
@end example

Or alternatively, setting the @code{STARPU_GENERATE_TRACE} environment variable
to 1 before application execution will make StarPU do it automatically at
application shutdown.

This will create a @code{paje.trace} file in the current directory that can be
inspected with the ViTE trace visualizing open-source tool. More information
about ViTE is available at @indicateurl{http://vite.gforge.inria.fr/}. It is
possible to open the @code{paje.trace} file with ViTE by using the following
command:

@example
% vite paje.trace
@end example

@node DAG
@subsection Creating a DAG with graphviz

When the FxT trace file @code{filename} has been generated, it is possible to
generate a task graph in the DOT format by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create a @code{dag.dot} file in the current directory. This file is a
task graph described using the DOT language. It is possible to get a
graphical output of the graph by using the graphviz library:

@example
$ dot -Tpdf dag.dot -o output.pdf
@end example

@node starpu-top
@subsection Monitoring activity

When the FxT trace file @code{filename} has been generated, it is possible to
generate an activity trace by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create an @code{activity.data} file in the current
directory. A profile of the application showing the activity of StarPU
during the execution of the program can be generated:

@example
$ starpu_top activity.data
@end example

This will create a file named @code{activity.eps} in the current directory.
This picture is composed of two parts.
The first part shows the activity of the different workers. The green sections
indicate which proportion of the time was spent executing kernels on the
processing unit. The red sections indicate the proportion of time spent in
StarPU: an important overhead may indicate that the granularity may be too
low, and that bigger tasks may be appropriate to use the processing unit more
efficiently. The black sections indicate that the processing unit was blocked
because there was no task to process: this may indicate a lack of parallelism
which may be alleviated by creating more tasks when it is possible.

The second part of the @code{activity.eps} picture is a graph showing the
evolution of the number of tasks available in the system during the execution.
Ready tasks are shown in black, and tasks that are submitted but not
schedulable yet are shown in grey.

@node Codelet performance
@section Performance of codelets

The performance model of codelets can be examined by using the
@code{starpu_perfmodel_display} tool:

@example
$ starpu_perfmodel_display -l
file: <malloc_pinned.hannibal>
file: <starpu_slu_lu_model_21.hannibal>
file: <starpu_slu_lu_model_11.hannibal>
file: <starpu_slu_lu_model_22.hannibal>
file: <starpu_slu_lu_model_12.hannibal>
@end example

Here, the codelets of the lu example are available. We can examine the
performance of the 22 kernel:

@example
$ starpu_perfmodel_display -s starpu_slu_lu_model_22
performance model for cpu
# hash      size       mean          dev           n
57618ab0    19660800   2.851069e+05  1.829369e+04  109
performance model for cuda_0
# hash      size       mean          dev           n
57618ab0    19660800   1.164144e+04  1.556094e+01  315
performance model for cuda_1
# hash      size       mean          dev           n
57618ab0    19660800   1.164271e+04  1.330628e+01  360
performance model for cuda_2
# hash      size       mean          dev           n
57618ab0    19660800   1.166730e+04  3.390395e+02  456
@end example

We can see that for the given size, over a sample of a few hundred
executions, the GPUs are about 20 times faster than the CPUs (numbers are in
us). The standard deviation is extremely low for the GPUs, and less than 10% for
CPUs.

The @code{starpu_regression_display} tool does the same for regression-based
performance models. It also writes a @code{.gp} file in the current directory,
to be run with the @code{gnuplot} tool, which shows the corresponding curve.
@c ---------------------------------------------------------------------
@c MPI support
@c ---------------------------------------------------------------------

@node StarPU MPI support
@chapter StarPU MPI support

The integration of MPI transfers within task parallelism is done in a
very natural way by means of asynchronous interactions between the
application and StarPU. This is implemented in a separate libstarpumpi library
which basically provides "StarPU" equivalents of @code{MPI_*} functions, where
@code{void *} buffers are replaced with @code{starpu_data_handle}s, and all
GPU-RAM-NIC transfers are handled efficiently by StarPU-MPI. The user has to
use the usual @code{mpirun} command of the MPI implementation to start StarPU on
the different MPI nodes.
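For instance, to run such an application (here called @code{app}, a
placeholder name) on two MPI nodes:

@example
$ mpirun -np 2 ./app
@end example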
An MPI Insert Task function provides an even more seamless transition to a
distributed application, by automatically issuing all required data transfers
according to the task graph and an application-provided distribution.

@menu
* The API::
* Simple Example::
* MPI Insert Task Utility::
* MPI Collective Operations::
@end menu
@node The API
@section The API

@subsection Compilation

The flags required to compile or link against the MPI layer are
accessible with the following commands:

@example
% pkg-config --cflags libstarpumpi  # options for the compiler
% pkg-config --libs libstarpumpi    # options for the linker
@end example
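A typical compilation line for a single-file application (here called
@code{app.c}, a placeholder name) would thus be:

@example
% mpicc app.c -o app $(pkg-config --cflags libstarpumpi) \
                     $(pkg-config --libs libstarpumpi)
@end example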
@subsection Initialisation

@deftypefun int starpu_mpi_initialize (void)
Initializes the starpumpi library. This must be called between calling
@code{starpu_init} and other @code{starpu_mpi} functions. This
function does not call @code{MPI_Init}; it should be called beforehand.
@end deftypefun

@deftypefun int starpu_mpi_initialize_extended (int *@var{rank}, int *@var{world_size})
Initializes the starpumpi library. This must be called between calling
@code{starpu_init} and other @code{starpu_mpi} functions.
This function calls @code{MPI_Init}, and therefore should be preferred
to the previous one for MPI implementations which are not thread-safe.
Returns the current MPI node rank and world size.
@end deftypefun

@deftypefun int starpu_mpi_shutdown (void)
Cleans the starpumpi library. This must be called between calling
@code{starpu_mpi} functions and @code{starpu_shutdown}.
@code{MPI_Finalize} will be called if StarPU-MPI has been initialized
by calling @code{starpu_mpi_initialize_extended}.
@end deftypefun
@subsection Communication

@deftypefun int starpu_mpi_send (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Performs a standard-mode, blocking send of @var{data_handle} to the node
@var{dest} using the message tag @var{mpi_tag} within the communicator
@var{comm}.
@end deftypefun

@deftypefun int starpu_mpi_recv (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, MPI_Status *@var{status})
Performs a standard-mode, blocking receive in @var{data_handle} from the node
@var{source} using the message tag @var{mpi_tag} within the communicator
@var{comm}.
@end deftypefun

@deftypefun int starpu_mpi_isend (starpu_data_handle @var{data_handle}, starpu_mpi_req *@var{req}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a non-blocking send of @var{data_handle} to the node @var{dest} using
the message tag @var{mpi_tag} within the communicator @var{comm}. The request
is returned in @var{req}.
@end deftypefun

@deftypefun int starpu_mpi_irecv (starpu_data_handle @var{data_handle}, starpu_mpi_req *@var{req}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a non-blocking receive in @var{data_handle} from the node @var{source}
using the message tag @var{mpi_tag} within the communicator @var{comm}. The
request is returned in @var{req}.
@end deftypefun

@deftypefun int starpu_mpi_isend_detached (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a detached send of @var{data_handle} to the node @var{dest}: no request
is returned, and the @var{callback} function is called with the argument
@var{arg} when the transfer is complete.
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a detached receive in @var{data_handle} from the node @var{source}: no
request is returned, and the @var{callback} function is called with the
argument @var{arg} when the transfer is complete.
@end deftypefun

@deftypefun int starpu_mpi_wait (starpu_mpi_req *@var{req}, MPI_Status *@var{status})
Waits for the completion of the communication identified by @var{req}.
@end deftypefun

@deftypefun int starpu_mpi_test (starpu_mpi_req *@var{req}, int *@var{flag}, MPI_Status *@var{status})
Tests whether the communication identified by @var{req} has completed, setting
@var{flag} accordingly without blocking.
@end deftypefun

@deftypefun int starpu_mpi_barrier (MPI_Comm @var{comm})
Blocks the caller until all processes of the communicator @var{comm} have
reached the barrier.
@end deftypefun

@deftypefun int starpu_mpi_isend_detached_unlock_tag (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a detached send of @var{data_handle} to the node @var{dest}. When the
transfer is completed, the tag @var{tag} is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached_unlock_tag (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a detached receive in @var{data_handle} from the node @var{source}. When
the transfer is completed, the tag @var{tag} is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_isend_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle *@var{data_handle}, int *@var{dest}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Asynchronously sends an array of buffers, and unlocks the tag @var{tag} once
all of them are transmitted.
@end deftypefun

@deftypefun int starpu_mpi_irecv_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle *@var{data_handle}, int *@var{source}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Asynchronously receives an array of buffers, and unlocks the tag @var{tag}
once all of them are received.
@end deftypefun
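For illustration, here is a minimal sketch of the request-based flavour of the
API; @code{data_handle}, @code{other_rank} and @code{TAG} stand for
application-defined values:

@cartouche
@smallexample
starpu_mpi_req req;
MPI_Status status;

/* Post a non-blocking send of the registered data... */
starpu_mpi_isend(data_handle, &req, other_rank, TAG, MPI_COMM_WORLD);

/* ... possibly overlap with other work here ... */

/* ... then wait for the completion of the transfer. */
starpu_mpi_wait(&req, &status);
@end smallexample
@end cartouche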
@page
@node Simple Example
@section Simple Example

@cartouche
@smallexample
void increment_token(void)
@{
    struct starpu_task *task = starpu_task_create();

    task->cl = &increment_cl;
    task->buffers[0].handle = token_handle;
    task->buffers[0].mode = STARPU_RW;

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche
@cartouche
@smallexample
int main(int argc, char **argv)
@{
    int rank, size;

    starpu_init(NULL);
    starpu_mpi_initialize_extended(&rank, &size);

    starpu_vector_data_register(&token_handle, 0, (uintptr_t)&token,
                                1, sizeof(unsigned));

    unsigned nloops = NITER;
    unsigned loop;

    unsigned last_loop = nloops - 1;
    unsigned last_rank = size - 1;
@end smallexample
@end cartouche
@cartouche
@smallexample
    for (loop = 0; loop < nloops; loop++) @{
        int tag = loop*size + rank;

        if (loop == 0 && rank == 0)
        @{
            token = 0;
            fprintf(stdout, "Start with token value %d\n", token);
        @}
        else
        @{
            starpu_mpi_irecv_detached(token_handle, (rank+size-1)%size, tag,
                                      MPI_COMM_WORLD, NULL, NULL);
        @}

        increment_token();

        if (loop == last_loop && rank == last_rank)
        @{
            starpu_data_acquire(token_handle, STARPU_R);
            fprintf(stdout, "Finished: token value %d\n", token);
            starpu_data_release(token_handle);
        @}
        else
        @{
            starpu_mpi_isend_detached(token_handle, (rank+1)%size, tag+1,
                                      MPI_COMM_WORLD, NULL, NULL);
        @}
    @}

    starpu_task_wait_for_all();
@end smallexample
@end cartouche
@cartouche
@smallexample
    starpu_mpi_shutdown();
    starpu_shutdown();

    if (rank == last_rank)
    @{
        fprintf(stderr, "[%d] token = %d == %d * %d ?\n", rank, token, nloops, size);
        STARPU_ASSERT(token == nloops*size);
    @}
@end smallexample
@end cartouche
@page
@node MPI Insert Task Utility
@section MPI Insert Task Utility

To save the programmer from having to make all communications explicit, StarPU
provides an "MPI Insert Task Utility". The principle is that the application
decides a distribution of the data over the MPI nodes by allocating it and
notifying StarPU of that decision, i.e. it tells StarPU which MPI node "owns"
which data. All MPI nodes then process the whole task graph, and StarPU
automatically determines which node actually executes which task, as well as
the required MPI transfers.

@deftypefun int starpu_data_set_rank (starpu_data_handle @var{handle}, int @var{mpi_rank})
Tell StarPU-MPI which MPI node "owns" a given data, that is, the node which will
always keep an up-to-date value, and will by default execute tasks which write
to it.
@end deftypefun
@deftypefun void starpu_mpi_insert_task (MPI_Comm @var{comm}, starpu_codelet *@var{cl}, ...)
Create and submit a task corresponding to @var{cl} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet are of the same types as for the
function @code{starpu_insert_task} defined in @ref{Insert Task
Utility}. The extra argument @code{STARPU_EXECUTE_ON_NODE} followed by an
integer allows one to specify the MPI node on which to execute the codelet.
It is also possible to specify that the node owning a specific data will
execute the codelet, by using @code{STARPU_EXECUTE_ON_DATA} followed by a data
handle.

The internal algorithm is as follows:
@enumerate
@item Find out whether we (as an MPI node) are to execute the codelet
because we own the data to be written to. If different nodes own data
to be written to, the argument @code{STARPU_EXECUTE_ON_NODE} or
@code{STARPU_EXECUTE_ON_DATA} has to be used to specify which MPI node will
execute the task.
@item Send and receive data as requested. Nodes owning data which need to be
read by the task send them to the MPI node which will execute it. The
latter receives them.
@item Execute the codelet. This is done by the MPI node selected in the
first step of the algorithm.
@item In the case when different MPI nodes own data to be written to, send
the written data back to their owners.
@end enumerate

The algorithm also includes a cache mechanism that avoids sending
data twice to the same MPI node, unless the data has been modified.
@end deftypefun
@deftypefun void starpu_mpi_get_data_on_node (MPI_Comm @var{comm}, starpu_data_handle @var{data_handle}, int @var{node})
Transfer @var{data_handle} to MPI node @var{node}, sending it from its owner
if needed. At least the target node and the owner have to call the function.
@end deftypefun
@page
Here is a stencil example showing how to use @code{starpu_mpi_insert_task}. One
first needs to define a distribution function which specifies the
locality of the data. Note that the distribution information needs to
be given to StarPU by calling @code{starpu_data_set_rank}.
@cartouche
@smallexample
/* Returns the MPI node number where data is */
int my_distrib(int x, int y, int nb_nodes) @{
    /* Cyclic distrib */
    return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;
    // /* Linear distrib */
    // return x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * X;
@}
@end smallexample
@end cartouche
Now the data can be registered within StarPU. Data which are not
owned but will be needed for computations can be registered through
the lazy allocation mechanism, i.e. with a @code{home_node} set to -1.
StarPU will automatically allocate the memory when it is used for the
first time.

One can note an optimization here (the @code{else if} test): we only register
data which will be needed by the tasks that we will execute.
@cartouche
@smallexample
unsigned matrix[X][Y];
starpu_data_handle data_handles[X][Y];

for(x = 0; x < X; x++) @{
    for (y = 0; y < Y; y++) @{
        int mpi_rank = my_distrib(x, y, size);
        if (mpi_rank == rank)
            /* Owning data */
            starpu_variable_data_register(&data_handles[x][y], 0,
                                          (uintptr_t)&(matrix[x][y]), sizeof(unsigned));
        else if (rank == mpi_rank+1 || rank == mpi_rank-1)
            /* I don't own that index, but will need it for my computations */
            starpu_variable_data_register(&data_handles[x][y], -1,
                                          (uintptr_t)NULL, sizeof(unsigned));
        else
            /* I know it's useless to allocate anything for this */
            data_handles[x][y] = NULL;
        if (data_handles[x][y])
            starpu_data_set_rank(data_handles[x][y], mpi_rank);
    @}
@}
@end smallexample
@end cartouche
Now @code{starpu_mpi_insert_task()} can be called for the different
steps of the application.

@cartouche
@smallexample
for(loop=0 ; loop<niter; loop++)
    for (x = 1; x < X-1; x++)
        for (y = 1; y < Y-1; y++)
            starpu_mpi_insert_task(MPI_COMM_WORLD, &stencil5_cl,
                                   STARPU_RW, data_handles[x][y],
                                   STARPU_R, data_handles[x-1][y],
                                   STARPU_R, data_handles[x+1][y],
                                   STARPU_R, data_handles[x][y-1],
                                   STARPU_R, data_handles[x][y+1],
                                   0);

starpu_task_wait_for_all();
@end smallexample
@end cartouche
That is, all MPI nodes process the whole task graph, but as mentioned above,
for each task, only the MPI node which owns the data being written to (here,
@code{data_handles[x][y]}) will actually run the task. The other MPI nodes will
automatically send the required data.
@node MPI Collective Operations
@section MPI Collective Operations

@deftypefun int starpu_mpi_scatter_detached (starpu_data_handle *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm})
Scatter data among the processes of the communicator based on the ownership of
the data. For each data in the array @var{data_handles}, the
process @var{root} sends the data to the process owning this data.
Processes receiving data must have valid data handles to receive them.
@end deftypefun

@deftypefun int starpu_mpi_gather_detached (starpu_data_handle *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm})
Gather data from the different processes of the communicator onto the
process @var{root}. Each process owning a data handle in the array
@var{data_handles} will send it to the process @var{root}. The
process @var{root} must have valid data handles to receive the data.
@end deftypefun
@page
@cartouche
@smallexample
if (rank == root)
@{
    /* Allocate the vector */
    vector = malloc(nblocks * sizeof(float *));
    for(x=0 ; x<nblocks ; x++)
    @{
        starpu_malloc((void **)&vector[x], block_size*sizeof(float));
    @}
@}

/* Allocate data handles and register data to StarPU */
data_handles = malloc(nblocks*sizeof(starpu_data_handle *));
for(x = 0; x < nblocks ; x++)
@{
    int mpi_rank = my_distrib(x, nodes);
    if (rank == root) @{
        starpu_vector_data_register(&data_handles[x], 0, (uintptr_t)vector[x],
                                    block_size, sizeof(float));
    @}
    else if ((mpi_rank == rank) || ((rank == mpi_rank+1 || rank == mpi_rank-1))) @{
        /* I own that index, or I will need it for my computations */
        starpu_vector_data_register(&data_handles[x], -1, (uintptr_t)NULL,
                                    block_size, sizeof(float));
    @}
    else @{
        /* I know it's useless to allocate anything for this */
        data_handles[x] = NULL;
    @}
    if (data_handles[x]) @{
        starpu_data_set_rank(data_handles[x], mpi_rank);
    @}
@}

/* Scatter the matrix among the nodes */
starpu_mpi_scatter_detached(data_handles, nblocks, root, MPI_COMM_WORLD);

/* Calculation */
for(x = 0; x < nblocks ; x++) @{
    if (data_handles[x]) @{
        int owner = starpu_data_get_rank(data_handles[x]);
        if (owner == rank) @{
            starpu_insert_task(&cl, STARPU_RW, data_handles[x], 0);
        @}
    @}
@}

/* Gather the matrix back onto the root node */
starpu_mpi_gather_detached(data_handles, nblocks, root, MPI_COMM_WORLD);
@end smallexample
@end cartouche
@c ---------------------------------------------------------------------
@c Configuration options
@c ---------------------------------------------------------------------

@node Configuring StarPU
@chapter Configuring StarPU

@menu
* Compilation configuration::
* Execution configuration through environment variables::
@end menu

@node Compilation configuration
@section Compilation configuration

The following arguments can be given to the @code{configure} script.

@menu
* Common configuration::
* Configuring workers::
* Advanced configuration::
@end menu

@node Common configuration
@subsection Common configuration

@menu
* --enable-debug::
* --enable-fast::
* --enable-verbose::
* --enable-coverage::
@end menu
@node --enable-debug
@subsubsection @code{--enable-debug}
@table @asis
@item @emph{Description}:
Enable debugging messages.
@end table

@node --enable-fast
@subsubsection @code{--enable-fast}
@table @asis
@item @emph{Description}:
Do not enforce assertions; this saves a lot of time otherwise spent computing
them.
@end table

@node --enable-verbose
@subsubsection @code{--enable-verbose}
@table @asis
@item @emph{Description}:
Augment the verbosity of the debugging messages. This can be disabled
at runtime by setting the environment variable @code{STARPU_SILENT} to
any value.

@smallexample
% STARPU_SILENT=1 ./vector_scal
@end smallexample
@end table

@node --enable-coverage
@subsubsection @code{--enable-coverage}
@table @asis
@item @emph{Description}:
Enable flags for the @code{gcov} coverage tool.
@end table
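For instance, a debugging build with verbose messages and coverage support
would be configured with:

@example
% ./configure --enable-debug --enable-verbose --enable-coverage
@end example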
@node Configuring workers
@subsection Configuring workers

@menu
* --enable-maxcpus::
* --disable-cpu::
* --enable-maxcudadev::
* --disable-cuda::
* --with-cuda-dir::
* --with-cuda-include-dir::
* --with-cuda-lib-dir::
* --disable-cuda-memcpy-peer::
* --enable-maxopencldev::
* --disable-opencl::
* --with-opencl-dir::
* --with-opencl-include-dir::
* --with-opencl-lib-dir::
* --enable-gordon::
* --with-gordon-dir::
* --enable-maximplementations::
@end menu
@node --enable-maxcpus
@subsubsection @code{--enable-maxcpus=<number>}
@table @asis
@item @emph{Description}:
Defines the maximum number of CPU cores that StarPU will support, then
available as the @code{STARPU_MAXCPUS} macro.
@end table

@node --disable-cpu
@subsubsection @code{--disable-cpu}
@table @asis
@item @emph{Description}:
Disable the use of CPUs of the machine. Only GPUs etc. will be used.
@end table

@node --enable-maxcudadev
@subsubsection @code{--enable-maxcudadev=<number>}
@table @asis
@item @emph{Description}:
Defines the maximum number of CUDA devices that StarPU will support, then
available as the @code{STARPU_MAXCUDADEVS} macro.
@end table

@node --disable-cuda
@subsubsection @code{--disable-cuda}
@table @asis
@item @emph{Description}:
Disable the use of CUDA, even if a valid CUDA installation was detected.
@end table

@node --with-cuda-dir
@subsubsection @code{--with-cuda-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the directory where CUDA is installed. This directory should notably contain
@code{include/cuda.h}.
@end table

@node --with-cuda-include-dir
@subsubsection @code{--with-cuda-include-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the directory where CUDA headers are installed. This directory should
notably contain @code{cuda.h}. This defaults to @code{/include} appended to the
value given to @code{--with-cuda-dir}.
@end table

@node --with-cuda-lib-dir
@subsubsection @code{--with-cuda-lib-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the directory where the CUDA library is installed. This directory should
notably contain the CUDA shared libraries (e.g. @code{libcuda.so}). This defaults to
@code{/lib} appended to the value given to @code{--with-cuda-dir}.
@end table

@node --disable-cuda-memcpy-peer
@subsubsection @code{--disable-cuda-memcpy-peer}
@table @asis
@item @emph{Description}:
Explicitly disables peer transfers when using CUDA 4.0.
@end table
@node --enable-maxopencldev
@subsubsection @code{--enable-maxopencldev=<number>}
@table @asis
@item @emph{Description}:
Defines the maximum number of OpenCL devices that StarPU will support, then
available as the @code{STARPU_MAXOPENCLDEVS} macro.
@end table

@node --disable-opencl
@subsubsection @code{--disable-opencl}
@table @asis
@item @emph{Description}:
Disable the use of OpenCL, even if the SDK is detected.
@end table

@node --with-opencl-dir
@subsubsection @code{--with-opencl-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the location of the OpenCL SDK. This directory should notably contain
@code{include/CL/cl.h} (or @code{include/OpenCL/cl.h} on Mac OS).
@end table

@node --with-opencl-include-dir
@subsubsection @code{--with-opencl-include-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the location of OpenCL headers. This directory should notably contain
@code{CL/cl.h} (or @code{OpenCL/cl.h} on Mac OS). This defaults to
@code{/include} appended to the value given to @code{--with-opencl-dir}.
@end table

@node --with-opencl-lib-dir
@subsubsection @code{--with-opencl-lib-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the location of the OpenCL library. This directory should notably
contain the OpenCL shared libraries (e.g. @code{libOpenCL.so}). This defaults to
@code{/lib} appended to the value given to @code{--with-opencl-dir}.
@end table

@node --enable-gordon
@subsubsection @code{--enable-gordon}
@table @asis
@item @emph{Description}:
Enable the use of the Gordon runtime for Cell SPUs.
@c TODO: rather default to enabled when detected
@end table

@node --with-gordon-dir
@subsubsection @code{--with-gordon-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the location of the Gordon SDK.
@end table

@node --enable-maximplementations
@subsubsection @code{--enable-maximplementations=<number>}
@table @asis
@item @emph{Description}:
Defines the number of implementations that can be defined for a single kind of
device. It is then available as the @code{STARPU_MAXIMPLEMENTATIONS} macro.
@end table
@node Advanced configuration
@subsection Advanced configuration

@menu
* --enable-perf-debug::
* --enable-model-debug::
* --enable-stats::
* --enable-maxbuffers::
* --enable-allocation-cache::
* --enable-opengl-render::
* --enable-blas-lib::
* --with-magma::
* --with-fxt::
* --with-perf-model-dir::
* --with-mpicc::
* --with-goto-dir::
* --with-atlas-dir::
* --with-mkl-cflags::
* --with-mkl-ldflags::
@end menu
@node --enable-perf-debug
@subsubsection @code{--enable-perf-debug}
@table @asis
@item @emph{Description}:
Enable performance debugging through gprof.
@end table

@node --enable-model-debug
@subsubsection @code{--enable-model-debug}
@table @asis
@item @emph{Description}:
Enable performance model debugging.
@end table

@node --enable-stats
@subsubsection @code{--enable-stats}
@table @asis
@item @emph{Description}:
Enable statistics.
@end table

@node --enable-maxbuffers
@subsubsection @code{--enable-maxbuffers=<nbuffers>}
@table @asis
@item @emph{Description}:
Define the maximum number of buffers that tasks will be able to take
as parameters, then available as the @code{STARPU_NMAXBUFS} macro.
@end table

@node --enable-allocation-cache
@subsubsection @code{--enable-allocation-cache}
@table @asis
@item @emph{Description}:
Enable the use of a data allocation cache to avoid the cost of repeated
memory allocations, which is particularly high with CUDA. Still experimental.
@end table

@node --enable-opengl-render
@subsubsection @code{--enable-opengl-render}
@table @asis
@item @emph{Description}:
Enable the use of OpenGL for the rendering of some examples.
@c TODO: rather default to enabled when detected
@end table

@node --enable-blas-lib
@subsubsection @code{--enable-blas-lib=<name>}
@table @asis
@item @emph{Description}:
Specify the BLAS library to be used by some of the examples. The
library has to be 'atlas' or 'goto'.
@end table
@node --with-magma
@subsubsection @code{--with-magma=<path>}
@table @asis
@item @emph{Description}:
Specify where MAGMA is installed. This directory should notably contain
@code{include/magmablas.h}.
@end table

@node --with-fxt
@subsubsection @code{--with-fxt=<path>}
@table @asis
@item @emph{Description}:
Specify the location of FxT (for generating traces and rendering them
using ViTE). This directory should notably contain
@code{include/fxt/fxt.h}.
@c TODO add ref to other section
@end table

@node --with-perf-model-dir
@subsubsection @code{--with-perf-model-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify where performance models should be stored (instead of defaulting to the
current user's home).
@end table

@node --with-mpicc
@subsubsection @code{--with-mpicc=<path to mpicc>}
@table @asis
@item @emph{Description}:
Specify the location of the @code{mpicc} compiler to be used for starpumpi.
@end table

@node --with-goto-dir
@subsubsection @code{--with-goto-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify the location of GotoBLAS.
@end table

@node --with-atlas-dir
@subsubsection @code{--with-atlas-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify the location of ATLAS. This directory should notably contain
@code{include/cblas.h}.
@end table

@node --with-mkl-cflags
@subsubsection @code{--with-mkl-cflags=<cflags>}
@table @asis
@item @emph{Description}:
Specify the compilation flags for the MKL library.
@end table

@node --with-mkl-ldflags
@subsubsection @code{--with-mkl-ldflags=<ldflags>}
@table @asis
@item @emph{Description}:
Specify the linking flags for the MKL library. Note that the
@url{http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor/}
website provides a script to determine the linking flags.
@end table
@c ---------------------------------------------------------------------
@c Environment variables
@c ---------------------------------------------------------------------

@node Execution configuration through environment variables
@section Execution configuration through environment variables

@menu
* Workers:: Configuring workers
* Scheduling:: Configuring the Scheduling engine
* Misc:: Miscellaneous and debug
@end menu

Note: the values given in the @code{starpu_conf} structure passed when
calling @code{starpu_init} will override the values of the environment
variables.

@node Workers
@subsection Configuring workers

@menu
* STARPU_NCPUS:: Number of CPU workers
* STARPU_NCUDA:: Number of CUDA workers
* STARPU_NOPENCL:: Number of OpenCL workers
* STARPU_NGORDON:: Number of SPU workers (Cell)
* STARPU_WORKERS_CPUID:: Bind workers to specific CPUs
* STARPU_WORKERS_CUDAID:: Select specific CUDA devices
* STARPU_WORKERS_OPENCLID:: Select specific OpenCL devices
@end menu
@node STARPU_NCPUS
@subsubsection @code{STARPU_NCPUS} -- Number of CPU workers
@table @asis
@item @emph{Description}:
Specify the number of CPU workers (thus not including workers dedicated to
the control of accelerators). Note that by default, StarPU will not allocate
more CPU workers than there are physical CPUs, and that some CPUs are used to
control the accelerators.
@end table
@node STARPU_NCUDA
@subsubsection @code{STARPU_NCUDA} -- Number of CUDA workers
@table @asis
@item @emph{Description}:
Specify the number of CUDA devices that StarPU can use. If
@code{STARPU_NCUDA} is lower than the number of physical devices, it is
possible to select which CUDA devices should be used by means of the
@code{STARPU_WORKERS_CUDAID} environment variable. By default, StarPU will
create as many CUDA workers as there are CUDA devices.
@end table

@node STARPU_NOPENCL
@subsubsection @code{STARPU_NOPENCL} -- Number of OpenCL workers
@table @asis
@item @emph{Description}:
OpenCL equivalent of the @code{STARPU_NCUDA} environment variable.
@end table

@node STARPU_NGORDON
@subsubsection @code{STARPU_NGORDON} -- Number of SPU workers (Cell)
@table @asis
@item @emph{Description}:
Specify the number of SPUs that StarPU can use.
@end table
@node STARPU_WORKERS_CPUID
@subsubsection @code{STARPU_WORKERS_CPUID} -- Bind workers to specific CPUs
@table @asis
@item @emph{Description}:
Passing an array of integers (starting from 0) in @code{STARPU_WORKERS_CPUID}
specifies on which logical CPU the different workers should be
bound. For instance, if @code{STARPU_WORKERS_CPUID = "0 1 4 5"}, the first
worker will be bound to logical CPU #0, the second CPU worker will be bound to
logical CPU #1 and so on. Note that the logical ordering of the CPUs is either
determined by the OS, or provided by the @code{hwloc} library in case it is
available.

Note that the first workers correspond to the CUDA workers, then come the
OpenCL and the SPU, and finally the CPU workers. For example if
we have @code{STARPU_NCUDA=1}, @code{STARPU_NOPENCL=1}, @code{STARPU_NCPUS=2}
and @code{STARPU_WORKERS_CPUID = "0 2 1 3"}, the CUDA device will be controlled
by logical CPU #0, the OpenCL device will be controlled by logical CPU #2, and
the logical CPUs #1 and #3 will be used by the CPU workers.

If the number of workers is larger than the array given in
@code{STARPU_WORKERS_CPUID}, the workers are bound to the logical CPUs in a
round-robin fashion: if @code{STARPU_WORKERS_CPUID = "0 1"}, the first and the
third (resp. second and fourth) workers will be put on CPU #0 (resp. CPU #1).

This variable is ignored if the @code{use_explicit_workers_bindid} flag of the
@code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table
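For instance, the binding described above can be requested on the command line
as follows (reusing the @code{vector_scal} example program):

@example
% STARPU_NCUDA=1 STARPU_NOPENCL=1 STARPU_NCPUS=2 \
  STARPU_WORKERS_CPUID="0 2 1 3" ./vector_scal
@end example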
@node STARPU_WORKERS_CUDAID
@subsubsection @code{STARPU_WORKERS_CUDAID} -- Select specific CUDA devices
@table @asis
@item @emph{Description}:
Similarly to the @code{STARPU_WORKERS_CPUID} environment variable, it is
possible to select which CUDA devices should be used by StarPU. On a machine
equipped with 4 GPUs, setting @code{STARPU_WORKERS_CUDAID = "1 3"} and
@code{STARPU_NCUDA=2} specifies that 2 CUDA workers should be created, and that
they should use CUDA devices #1 and #3 (the logical ordering of the devices is
the one reported by CUDA).

This variable is ignored if the @code{use_explicit_workers_cuda_gpuid} flag of
the @code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table

@node STARPU_WORKERS_OPENCLID
@subsubsection @code{STARPU_WORKERS_OPENCLID} -- Select specific OpenCL devices
@table @asis
@item @emph{Description}:
OpenCL equivalent of the @code{STARPU_WORKERS_CUDAID} environment variable.

This variable is ignored if the @code{use_explicit_workers_opencl_gpuid} flag of
the @code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table
@node Scheduling
@subsection Configuring the Scheduling engine

@menu
* STARPU_SCHED:: Scheduling policy
* STARPU_CALIBRATE:: Calibrate performance models
* STARPU_PREFETCH:: Use data prefetch
* STARPU_SCHED_ALPHA:: Computation factor
* STARPU_SCHED_BETA:: Communication factor
@end menu

@node STARPU_SCHED
@subsubsection @code{STARPU_SCHED} -- Scheduling policy
@table @asis
@item @emph{Description}:
This chooses between the different scheduling policies proposed by StarPU:
random, work stealing, greedy, with performance models, etc.

Use @code{STARPU_SCHED=help} to get the list of available schedulers.
@end table
@node STARPU_CALIBRATE
@subsubsection @code{STARPU_CALIBRATE} -- Calibrate performance models
@table @asis
@item @emph{Description}:
If this variable is set to 1, the performance models are calibrated during
the execution. If it is set to 2, the previous values are dropped to restart
calibration from scratch. Setting this variable to 0 disables calibration,
which is the default behaviour.

Note: this currently only applies to the @code{dm}, @code{dmda} and @code{heft} scheduling policies.
@end table
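For instance, to restart calibration from scratch with the @code{dmda}
scheduler (reusing the @code{vector_scal} example program):

@example
% STARPU_CALIBRATE=2 STARPU_SCHED=dmda ./vector_scal
@end example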
@node STARPU_PREFETCH
@subsubsection @code{STARPU_PREFETCH} -- Use data prefetch
@table @asis
@item @emph{Description}:
This variable indicates whether data prefetching should be enabled (0 means
that it is disabled). If prefetching is enabled, when a task is scheduled to be
executed e.g. on a GPU, StarPU will request an asynchronous transfer in
advance, so that data is already present on the GPU when the task starts. As a
result, computation and data transfers are overlapped.
Note that prefetching is enabled by default in StarPU.
@end table

@node STARPU_SCHED_ALPHA
@subsubsection @code{STARPU_SCHED_ALPHA} -- Computation factor
@table @asis
@item @emph{Description}:
To estimate the cost of a task StarPU takes into account the estimated
computation time (obtained thanks to performance models). The alpha factor is
the coefficient to be applied to it before adding it to the communication part.
@end table

@node STARPU_SCHED_BETA
@subsubsection @code{STARPU_SCHED_BETA} -- Communication factor
@table @asis
@item @emph{Description}:
To estimate the cost of a task StarPU takes into account the estimated
data transfer time (obtained thanks to performance models). The beta factor is
the coefficient to be applied to it before adding it to the computation part.
@end table
@node Misc
@subsection Miscellaneous and debug

@menu
* STARPU_SILENT:: Disable verbose mode
* STARPU_LOGFILENAME:: Select debug file name
* STARPU_FXT_PREFIX:: FxT trace location
* STARPU_LIMIT_GPU_MEM:: Restrict memory size on the GPUs
* STARPU_GENERATE_TRACE:: Generate a Paje trace when StarPU is shut down
@end menu

@node STARPU_SILENT
@subsubsection @code{STARPU_SILENT} -- Disable verbose mode
@table @asis
@item @emph{Description}:
This variable allows one to disable verbose mode at runtime when StarPU
has been configured with the option @code{--enable-verbose}.
@end table

@node STARPU_LOGFILENAME
@subsubsection @code{STARPU_LOGFILENAME} -- Select debug file name
@table @asis
@item @emph{Description}:
This variable specifies in which file the debugging output should be saved.
@end table

@node STARPU_FXT_PREFIX
@subsubsection @code{STARPU_FXT_PREFIX} -- FxT trace location
@table @asis
@item @emph{Description}:
This variable specifies in which directory to save the trace generated if FxT
is enabled. It needs to have a trailing '/' character.
@end table
@node STARPU_LIMIT_GPU_MEM
@subsubsection @code{STARPU_LIMIT_GPU_MEM} -- Restrict memory size on the GPUs
@table @asis
@item @emph{Description}:
This variable specifies the maximum number of megabytes that should be
available to the application on each GPU. In case this value is smaller than
the size of the memory of a GPU, StarPU pre-allocates a buffer to waste the
remaining memory on the device. This variable is intended to be used for
experimental purposes as it emulates devices that have a limited amount of
memory.
@end table

@node STARPU_GENERATE_TRACE
@subsubsection @code{STARPU_GENERATE_TRACE} -- Generate a Paje trace when StarPU is shut down
@table @asis
@item @emph{Description}:
When set to 1, this variable indicates that StarPU should automatically
generate a Paje trace when @code{starpu_shutdown} is called.
@end table
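For instance (reusing the @code{vector_scal} example program):

@example
% STARPU_GENERATE_TRACE=1 ./vector_scal
@end example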
@c ---------------------------------------------------------------------
@c StarPU API
@c ---------------------------------------------------------------------

@node StarPU API
@chapter StarPU API

@menu
* Initialization and Termination:: Initialization and Termination methods
* Workers' Properties:: Methods to enumerate workers' properties
* Data Library:: Methods to manipulate data
* Data Interfaces::
* Data Partition::
* Codelets and Tasks:: Methods to construct tasks
* Explicit Dependencies:: Explicit Dependencies
* Implicit Data Dependencies:: Implicit Data Dependencies
* Performance Model API::
* Profiling API:: Profiling API
* CUDA extensions:: CUDA extensions
* OpenCL extensions:: OpenCL extensions
* Cell extensions:: Cell extensions
* Miscellaneous helpers::
@end menu

@node Initialization and Termination
@section Initialization and Termination

@menu
* starpu_init:: Initialize StarPU
* struct starpu_conf:: StarPU runtime configuration
* starpu_conf_init:: Initialize starpu_conf structure
* starpu_shutdown:: Terminate StarPU
@end menu
@node starpu_init
@subsection @code{starpu_init} -- Initialize StarPU
@table @asis
@item @emph{Description}:
This is the StarPU initialization method, which must be called prior to any
other StarPU call. It is possible to specify StarPU's configuration (e.g.
scheduling policy, number of cores, ...) by passing a non-null argument. The
default configuration is used if the passed argument is @code{NULL}.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, @code{-ENODEV}
indicates that no worker was available (so that StarPU was not initialized).
@item @emph{Prototype}:
@code{int starpu_init(struct starpu_conf *conf);}
@end table
@node struct starpu_conf
@subsection @code{struct starpu_conf} -- StarPU runtime configuration
@table @asis
@item @emph{Description}:
This structure is passed to the @code{starpu_init} function in order
to configure StarPU.
When the default value is used, StarPU automatically selects the number
of processing units and takes the default scheduling policy. This parameter
overwrites the equivalent environment variables.
@item @emph{Fields}:
@table @asis
@item @code{sched_policy_name} (default = NULL):
This is the name of the scheduling policy. This can also be specified with the
@code{STARPU_SCHED} environment variable.
@item @code{sched_policy} (default = NULL):
This is the definition of the scheduling policy. This field is ignored
if @code{sched_policy_name} is set.
@item @code{ncpus} (default = -1):
This is the number of CPU cores that StarPU can use. This can also be
specified with the @code{STARPU_NCPUS} environment variable.
@item @code{ncuda} (default = -1):
This is the number of CUDA devices that StarPU can use. This can also be
specified with the @code{STARPU_NCUDA} environment variable.
@item @code{nopencl} (default = -1):
This is the number of OpenCL devices that StarPU can use. This can also be
specified with the @code{STARPU_NOPENCL} environment variable.
@item @code{nspus} (default = -1):
This is the number of Cell SPUs that StarPU can use. This can also be
specified with the @code{STARPU_NGORDON} environment variable.
@item @code{use_explicit_workers_bindid} (default = 0):
If this flag is set, the @code{workers_bindid} array indicates where the
different workers are bound, otherwise StarPU automatically selects where to
bind the different workers unless the @code{STARPU_WORKERS_CPUID} environment
variable is set. The @code{STARPU_WORKERS_CPUID} environment variable is
ignored if the @code{use_explicit_workers_bindid} flag is set.
@item @code{workers_bindid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_bindid} flag is set, this array indicates
where to bind the different workers. The i-th entry of the
@code{workers_bindid} array indicates the logical identifier of the processor
which should execute the i-th worker. Note that the logical ordering of the
CPUs is either determined by the OS, or provided by the @code{hwloc} library in
case it is available.
When this flag is set, the @ref{STARPU_WORKERS_CPUID} environment variable is
ignored.
@item @code{use_explicit_workers_cuda_gpuid} (default = 0):
If this flag is set, the CUDA workers will be attached to the CUDA devices
specified in the @code{workers_cuda_gpuid} array. Otherwise, StarPU assigns the
CUDA devices in a round-robin fashion.
When this flag is set, the @ref{STARPU_WORKERS_CUDAID} environment variable is
ignored.
@item @code{workers_cuda_gpuid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_cuda_gpuid} flag is set, this array contains
the logical identifiers of the CUDA devices (as used by @code{cudaGetDevice}).
@item @code{use_explicit_workers_opencl_gpuid} (default = 0):
If this flag is set, the OpenCL workers will be attached to the OpenCL devices
specified in the @code{workers_opencl_gpuid} array. Otherwise, StarPU assigns
the OpenCL devices in a round-robin fashion.
@item @code{workers_opencl_gpuid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_opencl_gpuid} flag is set, this array
contains the logical identifiers of the OpenCL devices.
@item @code{calibrate} (default = 0):
If this flag is set, StarPU will calibrate the performance models when
executing tasks. If this value is equal to -1, the default value is used. The
default value is overwritten by the @code{STARPU_CALIBRATE} environment
variable when it is set.
@item @code{single_combined_worker} (default = 0):
By default, StarPU creates various combined workers according to the machine
structure. Some parallel libraries (e.g. most OpenMP implementations) however do
not support concurrent calls to parallel code. In such case, setting this flag
makes StarPU only create one combined worker, containing all
the CPU workers. The default value is overwritten by the
@code{STARPU_SINGLE_COMBINED_WORKER} environment variable when it is set.
@end table
@end table
@node starpu_conf_init
@subsection @code{starpu_conf_init} -- Initialize starpu_conf structure
@table @asis
@item @emph{Description}:
This function initializes the @code{starpu_conf} structure passed as argument
with the default values. In case some configuration parameters are already
specified through environment variables, @code{starpu_conf_init} initializes
the fields of the structure according to the environment variables. For
instance if @code{STARPU_CALIBRATE} is set, its value is put in the
@code{.calibrate} field of the structure passed as argument.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the argument was NULL.
@item @emph{Prototype}:
@code{int starpu_conf_init(struct starpu_conf *conf);}
@end table
@node starpu_shutdown
@subsection @code{starpu_shutdown} -- Terminate StarPU
@deftypefun void starpu_shutdown (void)
This is the StarPU termination method. It must be called at the end of the
application: statistics and other post-mortem debugging information are not
guaranteed to be available until this method has been called.
@end deftypefun
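Putting these functions together, a minimal lifecycle looks like the following
sketch; the @code{dmda} policy and the limit of two CPU workers are arbitrary
choices for the sake of the example:

@cartouche
@smallexample
struct starpu_conf conf;

starpu_conf_init(&conf);
conf.sched_policy_name = "dmda"; /* see the STARPU_SCHED section */
conf.ncpus = 2;                  /* see the STARPU_NCPUS section */

if (starpu_init(&conf) == -ENODEV)
    return 77; /* no worker is available */

/* ... submit tasks ... */

starpu_shutdown();
@end smallexample
@end cartouche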
@node Workers' Properties
@section Workers' Properties

@menu
* starpu_worker_get_count:: Get the number of processing units
* starpu_worker_get_count_by_type:: Get the number of processing units of a given type
* starpu_cpu_worker_get_count:: Get the number of CPUs controlled by StarPU
* starpu_cuda_worker_get_count:: Get the number of CUDA devices controlled by StarPU
* starpu_opencl_worker_get_count:: Get the number of OpenCL devices controlled by StarPU
* starpu_spu_worker_get_count:: Get the number of Cell SPUs controlled by StarPU
* starpu_worker_get_id:: Get the identifier of the current worker
* starpu_worker_get_ids_by_type:: Get the list of identifiers of workers with a given type
* starpu_worker_get_devid:: Get the device identifier of a worker
* starpu_worker_get_type:: Get the type of processing unit associated to a worker
* starpu_worker_get_name:: Get the name of a worker
* starpu_worker_get_memory_node:: Get the memory node of a worker
@end menu
@node starpu_worker_get_count
@subsection @code{starpu_worker_get_count} -- Get the number of processing units
@deftypefun unsigned starpu_worker_get_count (void)
This function returns the number of workers (i.e. processing units executing
StarPU tasks). The returned value should be at most @code{STARPU_NMAXWORKERS}.
@end deftypefun

@node starpu_worker_get_count_by_type
@subsection @code{starpu_worker_get_count_by_type} -- Get the number of processing units of a given type
@deftypefun int starpu_worker_get_count_by_type ({enum starpu_archtype} @var{type})
Returns the number of workers of the type indicated by the argument. A positive
(or zero) value is returned in case of success; otherwise @code{-EINVAL}
indicates that the type is not valid.
@end deftypefun
@node starpu_cpu_worker_get_count
@subsection @code{starpu_cpu_worker_get_count} -- Get the number of CPUs controlled by StarPU
@deftypefun unsigned starpu_cpu_worker_get_count (void)
This function returns the number of CPUs controlled by StarPU. The returned
value should be at most @code{STARPU_MAXCPUS}.
@end deftypefun

@node starpu_cuda_worker_get_count
@subsection @code{starpu_cuda_worker_get_count} -- Get the number of CUDA devices controlled by StarPU
@deftypefun unsigned starpu_cuda_worker_get_count (void)
This function returns the number of CUDA devices controlled by StarPU. The
returned value should be at most @code{STARPU_MAXCUDADEVS}.
@end deftypefun

@node starpu_opencl_worker_get_count
@subsection @code{starpu_opencl_worker_get_count} -- Get the number of OpenCL devices controlled by StarPU
@deftypefun unsigned starpu_opencl_worker_get_count (void)
This function returns the number of OpenCL devices controlled by StarPU. The
returned value should be at most @code{STARPU_MAXOPENCLDEVS}.
@end deftypefun

@node starpu_spu_worker_get_count
@subsection @code{starpu_spu_worker_get_count} -- Get the number of Cell SPUs controlled by StarPU
@deftypefun unsigned starpu_spu_worker_get_count (void)
This function returns the number of Cell SPUs controlled by StarPU.
@end deftypefun

@node starpu_worker_get_id
@subsection @code{starpu_worker_get_id} -- Get the identifier of the current worker
@deftypefun int starpu_worker_get_id (void)
This function returns the identifier of the worker associated to the calling
thread. The returned value is either -1 if the current context is not a StarPU
worker (i.e. when called from the application outside a task or a callback), or
an integer between 0 and @code{starpu_worker_get_count() - 1}.
@end deftypefun

@node starpu_worker_get_ids_by_type
@subsection @code{starpu_worker_get_ids_by_type} -- Get the list of identifiers of workers with a given type
@deftypefun int starpu_worker_get_ids_by_type ({enum starpu_archtype} @var{type}, int *@var{workerids}, int @var{maxsize})
Fill the @var{workerids} array with the identifiers of the workers that have
the type indicated in the first argument. The @var{maxsize} argument indicates
the size of the @var{workerids} array. The returned value gives the number of
identifiers that were put in the array. @code{-ERANGE} is returned if
@var{maxsize} is lower than the number of workers with the appropriate type: in
that case, the array is filled with the first @var{maxsize} elements. To avoid
such overflows, the value of @var{maxsize} can be chosen by means of the
@code{starpu_worker_get_count_by_type} function, or by passing a value greater
than or equal to @code{STARPU_NMAXWORKERS}.
@end deftypefun
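For instance, the following sketch retrieves the identifiers of all CUDA
workers and prints their names:

@cartouche
@smallexample
int workerids[STARPU_NMAXWORKERS];
int nworkers = starpu_worker_get_ids_by_type(STARPU_CUDA_WORKER,
                                             workerids, STARPU_NMAXWORKERS);
int i;
for (i = 0; i < nworkers; i++)
@{
    char name[64];
    starpu_worker_get_name(workerids[i], name, sizeof(name));
    fprintf(stdout, "CUDA worker %d: %s\n", workerids[i], name);
@}
@end smallexample
@end cartouche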
@node starpu_worker_get_devid
@subsection @code{starpu_worker_get_devid} -- Get the device identifier of a worker
@deftypefun int starpu_worker_get_devid (int @var{id})
This function returns the device id of the worker associated to an identifier
(as returned by the @code{starpu_worker_get_id} function). In the case of a
CUDA worker, this device identifier is the logical device identifier exposed by
CUDA (used by the @code{cudaGetDevice} function for instance). The device
identifier of a CPU worker is the logical identifier of the core on which the
worker was bound; this identifier is either provided by the OS or by the
@code{hwloc} library in case it is available.
@end deftypefun
@node starpu_worker_get_type
@subsection @code{starpu_worker_get_type} -- Get the type of processing unit associated to a worker
@deftypefun {enum starpu_archtype} starpu_worker_get_type (int @var{id})
This function returns the type of worker associated to an identifier (as
returned by the @code{starpu_worker_get_id} function). The returned value
indicates the architecture of the worker: @code{STARPU_CPU_WORKER} for a CPU
core, @code{STARPU_CUDA_WORKER} for a CUDA device,
@code{STARPU_OPENCL_WORKER} for an OpenCL device, and
@code{STARPU_GORDON_WORKER} for a Cell SPU. The value returned for an invalid
identifier is unspecified.
@end deftypefun

@node starpu_worker_get_name
@subsection @code{starpu_worker_get_name} -- Get the name of a worker
@deftypefun void starpu_worker_get_name (int @var{id}, char *@var{dst}, size_t @var{maxlen})
StarPU associates a unique human readable string to each processing unit. This
function copies at most the first @var{maxlen} bytes of the unique string
associated to a worker identified by its identifier @var{id} into the
@var{dst} buffer. The caller is responsible for ensuring that @var{dst}
is a valid pointer to a buffer of at least @var{maxlen} bytes. Calling this
function on an invalid identifier results in unspecified behaviour.
@end deftypefun

@node starpu_worker_get_memory_node
@subsection @code{starpu_worker_get_memory_node} -- Get the memory node of a worker
@deftypefun unsigned starpu_worker_get_memory_node (unsigned @var{workerid})
This function returns the identifier of the memory node associated to the
worker identified by @var{workerid}.
@end deftypefun
@node Data Library
@section Data Library

This section describes the data management facilities provided by StarPU.
We show how to use existing data interfaces in @ref{Data Interfaces}, but
developers can design their own data interfaces if required.

@menu
* starpu_malloc:: Allocate data and pin it
* starpu_access_mode:: Data access mode
* unsigned memory_node:: Memory node
* starpu_data_handle:: StarPU opaque data handle
* void *interface:: StarPU data interface
* starpu_data_register:: Register a piece of data to StarPU
* starpu_data_unregister:: Unregister a piece of data from StarPU
* starpu_data_unregister_no_coherency:: Unregister a piece of data from StarPU without coherency
* starpu_data_invalidate:: Invalidate all data replicates
* starpu_data_acquire:: Access registered data from the application
* starpu_data_acquire_cb:: Access registered data from the application asynchronously
* STARPU_DATA_ACQUIRE_CB:: Access registered data from the application asynchronously, macro
* starpu_data_release:: Release registered data from the application
* starpu_data_set_wt_mask:: Set the Write-Through mask
* starpu_data_prefetch_on_node:: Prefetch data to a given node
@end menu
@node starpu_malloc
@subsection @code{starpu_malloc} -- Allocate data and pin it
@deftypefun int starpu_malloc (void **@var{A}, size_t @var{dim})
This function allocates data of the given size in main memory. It will also
try to pin it for CUDA or OpenCL, so that data transfers from this buffer can
be asynchronous, and thus permit data transfer and computation overlapping.
The allocated buffer must be freed thanks to the @code{starpu_free} function.
@end deftypefun
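For instance, the following sketch allocates a pinned buffer of @code{NX}
floats (@code{NX} standing for an application-defined size) and registers it
as a vector:

@cartouche
@smallexample
float *buffer;
starpu_data_handle handle;

/* Allocate and pin the buffer, so transfers can be asynchronous. */
starpu_malloc((void **)&buffer, NX * sizeof(float));

starpu_vector_data_register(&handle, 0, (uintptr_t)buffer,
                            NX, sizeof(float));

/* ... use the handle in tasks ... */

starpu_data_unregister(handle);
starpu_free(buffer);
@end smallexample
@end cartouche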
@node starpu_access_mode
@subsection @code{starpu_access_mode} -- Data access mode
This datatype describes a data access mode. The different available modes are:
@table @asis
@item @code{STARPU_R} read-only mode.
@item @code{STARPU_W} write-only mode.
@item @code{STARPU_RW} read-write mode. This is equivalent to @code{STARPU_R|STARPU_W}.
@item @code{STARPU_SCRATCH} scratch memory. A temporary buffer is allocated for the task, but StarPU does not enforce data consistency, i.e. each device has its own buffer, independently of the others (even for CPUs). This is useful for temporary variables. For now, no behaviour is defined concerning the relation with the STARPU_R/W modes and the value provided at registration, i.e. the value of the scratch buffer is undefined at entry of the codelet function, but this is being considered for future extensions.
@item @code{STARPU_REDUX} reduction mode. TODO: document, as well as @code{starpu_data_set_reduction_methods}
@end table
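An access mode is typically specified when describing the buffers of a task
(see the @code{buffers} field of the @code{starpu_task} structure). For
instance, assuming a previously created task and a registered handle:
@cartouche
@smallexample
task->buffers[0].handle = vector_handle;
task->buffers[0].mode = STARPU_RW;  /* same as STARPU_R|STARPU_W */
@end smallexample
@end cartouche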
@node unsigned memory_node
@subsection @code{unsigned memory_node} -- Memory node
@table @asis
@item @emph{Description}:
Every worker is associated to a memory node which is a logical abstraction of
the address space from which the processing unit gets its data. For instance,
the memory node associated to the different CPU workers represents main memory
(RAM), while the memory node associated to a GPU is the DRAM embedded on the
device. Every memory node is identified by a logical index which can be
obtained with the @code{starpu_worker_get_memory_node} function. When
registering a piece of data to StarPU, the specified memory node indicates
where the piece of data initially resides (we also call this memory node the
home node of a piece of data).
@end table
@node starpu_data_handle
@subsection @code{starpu_data_handle} -- StarPU opaque data handle
@table @asis
@item @emph{Description}:
StarPU uses @code{starpu_data_handle} as an opaque handle to manage a piece of
data. Once a piece of data has been registered to StarPU, it is associated to a
@code{starpu_data_handle} which keeps track of the state of the piece of data
over the entire machine, so that we can maintain data consistency and locate
data replicates, for instance.
@end table
@node void *interface
@subsection @code{void *interface} -- StarPU data interface
@table @asis
@item @emph{Description}:
Data management is done at a high level in StarPU: rather than accessing a mere
list of contiguous buffers, the tasks may manipulate data that are described by
a high-level construct which we call data interface.
An example of data interface is the "vector" interface which describes a
contiguous data array on a specific memory node. This interface is a simple
structure containing the number of elements in the array, the size of the
elements, and the address of the array in the appropriate address space (this
address may be invalid if there is no valid copy of the array in the memory
node). More information on the data interfaces provided by StarPU is
given in @ref{Data Interfaces}.
When a piece of data managed by StarPU is used by a task, the task
implementation is given a pointer to an interface describing a valid copy of
the data that is accessible from the current processing unit.
@end table
@node starpu_data_register
@subsection @code{starpu_data_register} -- Register a piece of data to StarPU
@deftypefun void starpu_data_register (starpu_data_handle *@var{handleptr}, uint32_t @var{home_node}, void *@var{interface}, {struct starpu_data_interface_ops_t} *@var{ops})
Register a piece of data into the handle located at the @var{handleptr}
address. The @var{interface} buffer contains the initial description of the
data in the home node. The @var{ops} argument is a pointer to a structure
describing the different methods used to manipulate this type of interface. See
@ref{struct starpu_data_interface_ops_t} for more details on this structure.
If @code{home_node} is -1, StarPU will automatically
allocate the memory when it is used for the
first time in write-only mode. Once such a data handle has been automatically
allocated, it is possible to access it using any access mode.
Note that StarPU supplies a set of predefined types of interface (e.g. vector or
matrix) which can be registered by means of helper functions (e.g.
@code{starpu_vector_data_register} or @code{starpu_matrix_data_register}).
@end deftypefun
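For instance, a temporary vector that StarPU should allocate lazily on its
first write-only use can be registered without providing an initial buffer (a
sketch; @code{NX} is an arbitrary size):
@cartouche
@smallexample
starpu_data_handle tmp_handle;
/* home_node is -1: no initial copy, StarPU allocates on first W access */
starpu_vector_data_register(&tmp_handle, -1, (uintptr_t)NULL,
                            NX, sizeof(float));
@end smallexample
@end cartouche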
@node starpu_data_unregister
@subsection @code{starpu_data_unregister} -- Unregister a piece of data from StarPU
@deftypefun void starpu_data_unregister (starpu_data_handle @var{handle})
This function unregisters a data handle from StarPU. If the data was
automatically allocated by StarPU because the home node was -1, all
automatically allocated buffers are freed. Otherwise, a valid copy of the data
is put back into the home node in the buffer that was initially registered.
Using a data handle that has been unregistered from StarPU results in
undefined behaviour.
@end deftypefun
@node starpu_data_unregister_no_coherency
@subsection @code{starpu_data_unregister_no_coherency} -- Unregister a piece of data from StarPU without coherency
@deftypefun void starpu_data_unregister_no_coherency (starpu_data_handle @var{handle})
This is the same as @code{starpu_data_unregister}, except that StarPU does not
put back a valid copy into the home node, in the buffer that was initially
registered.
@end deftypefun
@node starpu_data_invalidate
@subsection @code{starpu_data_invalidate} -- Invalidate all data replicates
@deftypefun void starpu_data_invalidate (starpu_data_handle @var{handle})
Destroy all replicates of the data handle. After data invalidation, the first
access to the handle must be performed in write-only mode. Accessing
invalidated data in read mode results in undefined behaviour.
@end deftypefun
@c TODO create a specific section about user interaction with the DSM?
@node starpu_data_acquire
@subsection @code{starpu_data_acquire} -- Access registered data from the application
@deftypefun int starpu_data_acquire (starpu_data_handle @var{handle}, starpu_access_mode @var{mode})
The application must call this function prior to accessing registered data from
main memory outside tasks. StarPU ensures that the application will get an
up-to-date copy of the data in main memory located where the data was
originally registered, and that all concurrent accesses (e.g. from tasks) will
be consistent with the access mode specified in the @var{mode} argument.
@code{starpu_data_release} must be called once the application does not need to
access the piece of data anymore. Note that implicit data
dependencies are also enforced by @code{starpu_data_acquire}, i.e.
@code{starpu_data_acquire} will wait for all tasks scheduled to work on
the data, unless they have been disabled explicitly by calling
@code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
@code{starpu_data_acquire} is a blocking call, so it must not be called from
tasks or from their callbacks (in that case, @code{starpu_data_acquire} returns
@code{-EDEADLK}). Upon successful completion, this function returns 0.
@end deftypefun
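A typical use reads data back once the tasks working on it have been submitted
(a sketch, assuming @code{vector} and @code{vector_handle} registered as in
@ref{Vector Interface}):
@cartouche
@smallexample
/* blocks until all submitted tasks working on the data complete */
starpu_data_acquire(vector_handle, STARPU_R);
fprintf(stderr, "First element: %f\n", vector[0]);
starpu_data_release(vector_handle);
@end smallexample
@end cartouche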
@node starpu_data_acquire_cb
@subsection @code{starpu_data_acquire_cb} -- Access registered data from the application asynchronously
@deftypefun int starpu_data_acquire_cb (starpu_data_handle @var{handle}, starpu_access_mode @var{mode}, void (*@var{callback})(void *), void *@var{arg})
@code{starpu_data_acquire_cb} is the asynchronous equivalent of
@code{starpu_data_acquire}. When the data specified in the first argument is
available in the appropriate access mode, the callback function is executed.
The application may access the requested data during the execution of this
callback. The callback function must call @code{starpu_data_release} once the
application does not need to access the piece of data anymore.
Note that implicit data dependencies are also enforced by
@code{starpu_data_acquire_cb} in case they are enabled.
Contrary to @code{starpu_data_acquire}, this function is non-blocking and may
be called from task callbacks. Upon successful completion, this function
returns 0.
@end deftypefun
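A sketch of an asynchronous read, with a hypothetical callback that releases
the handle passed through @var{arg}:
@cartouche
@smallexample
void print_and_release(void *arg)
@{
    starpu_data_handle handle = (starpu_data_handle)arg;
    /* the data may be accessed here, in the requested mode */
    starpu_data_release(handle);
@}

starpu_data_acquire_cb(vector_handle, STARPU_R,
                       print_and_release, vector_handle);
@end smallexample
@end cartouche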
@node STARPU_DATA_ACQUIRE_CB
@subsection @code{STARPU_DATA_ACQUIRE_CB} -- Access registered data from the application asynchronously, macro
@deftypefun STARPU_DATA_ACQUIRE_CB (starpu_data_handle @var{handle}, starpu_access_mode @var{mode}, @var{code})
@code{STARPU_DATA_ACQUIRE_CB} is the same as @code{starpu_data_acquire_cb},
except that the code to be executed in a callback is directly provided as a
macro parameter, and the data handle is automatically released after it. This
makes it easy to execute code which depends on the value of some registered
data. This macro is also non-blocking and may be called from task callbacks.
@end deftypefun
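For instance, printing a value once it is available can be written as follows
(same assumptions as in the previous examples):
@cartouche
@smallexample
STARPU_DATA_ACQUIRE_CB(vector_handle, STARPU_R,
                       fprintf(stderr, "First element: %f\n", vector[0]));
@end smallexample
@end cartouche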
@node starpu_data_release
@subsection @code{starpu_data_release} -- Release registered data from the application
@deftypefun void starpu_data_release (starpu_data_handle @var{handle})
This function releases the piece of data acquired by the application either by
@code{starpu_data_acquire} or by @code{starpu_data_acquire_cb}.
@end deftypefun
@node starpu_data_set_wt_mask
@subsection @code{starpu_data_set_wt_mask} -- Set the Write-Through mask
@deftypefun void starpu_data_set_wt_mask (starpu_data_handle @var{handle}, uint32_t @var{wt_mask})
This function sets the write-through mask of a given data, i.e. a bitmask of
nodes where the data should always be replicated after modification.
@end deftypefun
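For instance, to request that a valid copy be written back to main memory
(memory node 0) after each modification (a sketch):
@cartouche
@smallexample
/* bit 0 set: always keep an up-to-date copy on memory node 0 (RAM) */
starpu_data_set_wt_mask(vector_handle, 1<<0);
@end smallexample
@end cartouche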
@node starpu_data_prefetch_on_node
@subsection @code{starpu_data_prefetch_on_node} -- Prefetch data to a given node
@deftypefun int starpu_data_prefetch_on_node (starpu_data_handle @var{handle}, unsigned @var{node}, unsigned @var{async})
Issue a prefetch request for a given data to a given node, i.e.
requests that the data be replicated to the given node, so that it is available
there for tasks. If the @var{async} parameter is 0, the call blocks until
the transfer is completed; otherwise, the call returns as soon as the request is
scheduled (which may however have to wait for a task completion).
@end deftypefun
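Combined with @code{starpu_worker_get_memory_node}, this can be used to stage
data asynchronously where a given worker will execute (a sketch;
@code{workerid} is assumed to be a valid worker identifier):
@cartouche
@smallexample
unsigned node = starpu_worker_get_memory_node(workerid);
/* non-blocking: the transfer proceeds in the background */
starpu_data_prefetch_on_node(vector_handle, node, 1);
@end smallexample
@end cartouche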
@node Data Interfaces
@section Data Interfaces
@menu
* Variable Interface::
* Vector Interface::
* Matrix Interface::
* 3D Matrix Interface::
* BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)::
* CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)::
@end menu
@node Variable Interface
@subsection Variable Interface
@table @asis
@item @emph{Description}:
This variant of @code{starpu_data_register} uses the variable interface,
i.e. for a single variable. @code{ptr} is the address of the variable,
and @code{elemsize} is the size of the variable.
@item @emph{Prototype}:
@code{void starpu_variable_data_register(starpu_data_handle *handle,
uint32_t home_node,
uintptr_t ptr, size_t elemsize);}
@item @emph{Example}:
@cartouche
@smallexample
float var;
starpu_data_handle var_handle;
starpu_variable_data_register(&var_handle, 0, (uintptr_t)&var, sizeof(var));
@end smallexample
@end cartouche
@end table
@node Vector Interface
@subsection Vector Interface
@table @asis
@item @emph{Description}:
This variant of @code{starpu_data_register} uses the vector interface,
i.e. for simple arrays of elements. @code{ptr} is the address of the first
element in the home node. @code{nx} is the number of elements in the vector.
@code{elemsize} is the size of each element.
@item @emph{Prototype}:
@code{void starpu_vector_data_register(starpu_data_handle *handle, uint32_t home_node,
uintptr_t ptr, uint32_t nx, size_t elemsize);}
@item @emph{Example}:
@cartouche
@smallexample
float vector[NX];
starpu_data_handle vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end smallexample
@end cartouche
@end table
@node Matrix Interface
@subsection Matrix Interface
@table @asis
@item @emph{Description}:
This variant of @code{starpu_data_register} uses the matrix interface, i.e. for
matrices of elements. @code{ptr} is the address of the first element in the home
node. @code{ld} is the number of elements between rows. @code{nx} is the number
of elements in a row (this can be different from @code{ld} if there are extra
elements for alignment for instance). @code{ny} is the number of rows.
@code{elemsize} is the size of each element.
@item @emph{Prototype}:
@code{void starpu_matrix_data_register(starpu_data_handle *handle, uint32_t home_node,
uintptr_t ptr, uint32_t ld, uint32_t nx,
uint32_t ny, size_t elemsize);}
@item @emph{Example}:
@cartouche
@smallexample
float *matrix;
starpu_data_handle matrix_handle;
matrix = (float*)malloc(width * height * sizeof(float));
starpu_matrix_data_register(&matrix_handle, 0, (uintptr_t)matrix,
                            width, width, height, sizeof(float));
@end smallexample
@end cartouche
@end table
@node 3D Matrix Interface
@subsection 3D Matrix Interface
@table @asis
@item @emph{Description}:
This variant of @code{starpu_data_register} uses the 3D matrix interface.
@code{ptr} is the address of the first element in the home node.
@code{ldy} is the number of elements between rows. @code{ldz} is the number
of rows between z planes. @code{nx} is the number of elements in a row (this
can be different from @code{ldy} if there are extra elements for alignment
for instance). @code{ny} is the number of rows in a z plane (likewise with
@code{ldz}). @code{nz} is the number of z planes. @code{elemsize} is the size of
each element.
@item @emph{Prototype}:
@code{void starpu_block_data_register(starpu_data_handle *handle, uint32_t home_node,
uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t nx,
uint32_t ny, uint32_t nz, size_t elemsize);}
@item @emph{Example}:
@cartouche
@smallexample
float *block;
starpu_data_handle block_handle;
block = (float*)malloc(nx*ny*nz*sizeof(float));
starpu_block_data_register(&block_handle, 0, (uintptr_t)block,
                           nx, nx*ny, nx, ny, nz, sizeof(float));
@end smallexample
@end cartouche
@end table
@node BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)
@subsection BCSR Interface for Sparse Matrices (Blocked Compressed Sparse Row Representation)
@deftypefun void starpu_bcsr_data_register (starpu_data_handle *@var{handle}, uint32_t @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, uint32_t @var{r}, uint32_t @var{c}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the BCSR sparse matrix interface.
TODO
@end deftypefun
@node CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)
@subsection CSR Interface for Sparse Matrices (Compressed Sparse Row Representation)
@deftypefun void starpu_csr_data_register (starpu_data_handle *@var{handle}, uint32_t @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the CSR sparse matrix interface.
TODO
@end deftypefun
@node Data Partition
@section Data Partition
@menu
* struct starpu_data_filter:: StarPU filter structure
* starpu_data_partition:: Partition Data
* starpu_data_unpartition:: Unpartition Data
* starpu_data_get_nb_children::
* starpu_data_get_sub_data::
* Predefined filter functions::
@end menu
@node struct starpu_data_filter
@subsection @code{struct starpu_data_filter} -- StarPU filter structure
@table @asis
@item @emph{Description}:
The filter structure describes a data partitioning operation, to be given to the
@code{starpu_data_partition} function, see @ref{starpu_data_partition} for an example.
@item @emph{Fields}:
@table @asis
@item @code{filter_func}:
This function fills the @code{child_interface} structure with interface
information for the @code{id}-th child of the parent @code{father_interface} (among @code{nparts}).
@code{void (*filter_func)(void *father_interface, void* child_interface, struct starpu_data_filter *, unsigned id, unsigned nparts);}
@item @code{nchildren}:
This is the number of parts to partition the data into.
@item @code{get_nchildren}:
This returns the number of children. This can be used instead of @code{nchildren} when the number of
children depends on the actual data (e.g. the number of blocks in a sparse
matrix).
@code{unsigned (*get_nchildren)(struct starpu_data_filter *, starpu_data_handle initial_handle);}
@item @code{get_child_ops}:
In case the resulting children use a different data interface, this function
returns which interface is used by child number @code{id}.
@code{struct starpu_data_interface_ops_t *(*get_child_ops)(struct starpu_data_filter *, unsigned id);}
@item @code{filter_arg}:
Some filters take an additional parameter, but this is usually unused.
@item @code{filter_arg_ptr}:
Some filters take an additional array parameter like the sizes of the parts, but
this is usually unused.
@end table
@end table
@node starpu_data_partition
@subsection starpu_data_partition -- Partition Data
@table @asis
@item @emph{Description}:
This requests partitioning of one piece of StarPU data @code{initial_handle}
into several subdata according to the filter @code{f}.
@item @emph{Prototype}:
@code{void starpu_data_partition(starpu_data_handle initial_handle, struct starpu_data_filter *f);}
@item @emph{Example}:
@cartouche
@smallexample
struct starpu_data_filter f = @{
    .filter_func = starpu_vertical_block_filter_func,
    .nchildren = nslicesx,
    .get_nchildren = NULL,
    .get_child_ops = NULL
@};
starpu_data_partition(A_handle, &f);
@end smallexample
@end cartouche
@end table
@node starpu_data_unpartition
@subsection starpu_data_unpartition -- Unpartition Data
@table @asis
@item @emph{Description}:
This unapplies one filter, thus unpartitioning the data. The pieces of data are
collected back into one big piece in the @code{gathering_node} (usually 0).
@item @emph{Prototype}:
@code{void starpu_data_unpartition(starpu_data_handle root_data, uint32_t gathering_node);}
@item @emph{Example}:
@cartouche
@smallexample
starpu_data_unpartition(A_handle, 0);
@end smallexample
@end cartouche
@end table
@node starpu_data_get_nb_children
@subsection starpu_data_get_nb_children
@table @asis
@item @emph{Description}:
This function returns the number of children of a partitioned data handle.
@item @emph{Return value}:
The number of children.
@item @emph{Prototype}:
@code{int starpu_data_get_nb_children(starpu_data_handle handle);}
@end table
@c starpu_data_handle starpu_data_get_child(starpu_data_handle handle, unsigned i);
@node starpu_data_get_sub_data
@subsection starpu_data_get_sub_data
@table @asis
@item @emph{Description}:
After partitioning a StarPU data by applying a filter,
@code{starpu_data_get_sub_data} can be used to get handles for each of the data
portions. @code{root_data} is the parent data that was partitioned. @code{depth}
is the number of filters to traverse (in case several filters have been applied,
to e.g. partition in row blocks, and then in column blocks), and the subsequent
parameters are the indexes.
@item @emph{Return value}:
A handle to the subdata.
@item @emph{Prototype}:
@code{starpu_data_handle starpu_data_get_sub_data(starpu_data_handle root_data, unsigned depth, ... );}
@item @emph{Example}:
@cartouche
@smallexample
h = starpu_data_get_sub_data(A_handle, 1, taskx);
@end smallexample
@end cartouche
@end table
@node Predefined filter functions
@subsection Predefined filter functions
@menu
* Partitioning BCSR Data::
* Partitioning BLAS interface::
* Partitioning Vector Data::
* Partitioning Block Data::
@end menu
This section gives a partial list of the predefined partitioning functions.
Examples of how to use them are shown in @ref{Partitioning Data}. The complete
list can be found in @code{starpu_data_filters.h}.
@node Partitioning BCSR Data
@subsubsection Partitioning BCSR Data
@deftypefun void starpu_canonical_block_filter_bcsr (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
TODO
@end deftypefun
@deftypefun void starpu_vertical_block_filter_func_csr (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
TODO
@end deftypefun
@node Partitioning BLAS interface
@subsubsection Partitioning BLAS interface
@deftypefun void starpu_block_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix into horizontal blocks.
@end deftypefun
@deftypefun void starpu_vertical_block_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix into vertical blocks.
@end deftypefun
@node Partitioning Vector Data
@subsubsection Partitioning Vector Data
@deftypefun void starpu_block_filter_func_vector (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into blocks of the same size.
@end deftypefun
@deftypefun void starpu_vector_list_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into blocks of sizes given in the @var{filter_arg_ptr}
field of @var{f}, which is assumed to point to an array of @code{uint32_t}.
@end deftypefun
@deftypefun void starpu_vector_divide_in_2_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into two blocks, the size of the first block being
given in the @var{filter_arg} field of @var{f}.
@end deftypefun
@node Partitioning Block Data
@subsubsection Partitioning Block Data
@deftypefun void starpu_block_filter_func_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a 3D matrix along the X axis.
@end deftypefun
@node Codelets and Tasks
@section Codelets and Tasks
This section describes the interface to manipulate codelets and tasks.
@deftp {Data Type} {struct starpu_codelet}
The codelet structure describes a kernel that is possibly implemented on various
targets. For compatibility, make sure to initialize the whole structure to zero.
@table @asis
@item @code{where}
Indicates which types of processing units are able to execute the codelet.
@code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
implemented for both CPU cores and CUDA devices, while @code{STARPU_GORDON}
indicates that it is only available on Cell SPUs.
@item @code{cpu_func} (optional)
Is a function pointer to the CPU implementation of the codelet. Its prototype
must be: @code{void cpu_func(void *buffers[], void *cl_arg)}. The first
argument is the array of data buffers managed by the data management library,
and the second argument is a pointer to the argument passed from the
@code{cl_arg} field of the @code{starpu_task} structure.
The @code{cpu_func} field is ignored if @code{STARPU_CPU} does not appear in
the @code{where} field; it must be non-null otherwise.
@item @code{cuda_func} (optional)
Is a function pointer to the CUDA implementation of the codelet. @emph{This
must be a host-function written in the CUDA runtime API}. Its prototype must
be: @code{void cuda_func(void *buffers[], void *cl_arg);}. The @code{cuda_func}
field is ignored if @code{STARPU_CUDA} does not appear in the @code{where}
field; it must be non-null otherwise.
@item @code{opencl_func} (optional)
Is a function pointer to the OpenCL implementation of the codelet. Its
prototype must be:
@code{void opencl_func(starpu_data_interface_t *descr, void *arg);}.
This pointer is ignored if @code{STARPU_OPENCL} does not appear in the
@code{where} field; it must be non-null otherwise.
@item @code{gordon_func} (optional)
This is the index of the Cell SPU implementation within the Gordon library.
See Gordon documentation for more details on how to register a kernel and
retrieve its index.
@item @code{nbuffers}
Specifies the number of arguments taken by the codelet. These arguments are
managed by the DSM and are accessed from the @code{void *buffers[]}
array. The constant argument passed with the @code{cl_arg} field of the
@code{starpu_task} structure is not counted in this number. This value should
not be above @code{STARPU_NMAXBUFS}.
@item @code{model} (optional)
This is a pointer to the task duration performance model associated to this
codelet. This optional field is ignored when set to @code{NULL}.
TODO
@item @code{power_model} (optional)
This is a pointer to the task power consumption performance model associated
to this codelet. This optional field is ignored when set to @code{NULL}.
In the case of parallel codelets, this has to account for all processing units
involved in the parallel execution.
TODO
@end table
@end deftp
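As an illustration, a codelet with a CPU and a CUDA implementation working on a
single buffer could be declared as follows (a sketch; the implementation
functions and their names are hypothetical and assumed to be defined
elsewhere):
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg);
void scal_cuda_func(void *buffers[], void *cl_arg);

struct starpu_codelet scal_cl = @{
    .where = STARPU_CPU|STARPU_CUDA,
    .cpu_func = scal_cpu_func,
    .cuda_func = scal_cuda_func,
    .nbuffers = 1   /* remaining fields are implicitly zeroed */
@};
@end smallexample
@end cartouche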
@deftp {Data Type} {struct starpu_task}
The @code{starpu_task} structure describes a task that can be offloaded on the various
processing units managed by StarPU. It instantiates a codelet. It can either be
allocated dynamically with the @code{starpu_task_create} method, or declared
statically. In the latter case, the programmer has to zero the
@code{starpu_task} structure and to fill the different fields properly. The
indicated default values correspond to the configuration of a task allocated
with @code{starpu_task_create}.
@table @asis
@item @code{cl}
Is a pointer to the corresponding @code{starpu_codelet} data structure. This
describes where the kernel should be executed, and supplies the appropriate
implementations. When set to @code{NULL}, no code is executed during the task;
such empty tasks can be useful for synchronization purposes.
@item @code{buffers}
Is an array of @code{starpu_buffer_descr_t} structures. It describes the
different pieces of data accessed by the task, and how they should be accessed.
The @code{starpu_buffer_descr_t} structure is composed of two fields: the
@code{handle} field specifies the handle of the piece of data, and the
@code{mode} field is the required access mode (e.g. @code{STARPU_RW}). The number
of entries in this array must be specified in the @code{nbuffers} field of the
@code{starpu_codelet} structure, and should not exceed @code{STARPU_NMAXBUFS}.
If insufficient, this value can be increased with the @code{--enable-maxbuffers}
option when configuring StarPU.
@item @code{cl_arg} (optional; default: @code{NULL})
This pointer is passed to the codelet through the second argument
of the codelet implementation (e.g. @code{cpu_func} or @code{cuda_func}).
In the specific case of the Cell processor, see the @code{cl_arg_size}
argument.
@item @code{cl_arg_size} (optional, Cell-specific)
In the case of the Cell processor, the @code{cl_arg} pointer is not directly
given to the SPU function. A buffer of size @code{cl_arg_size} is allocated on
the SPU. This buffer is then filled with the @code{cl_arg_size} bytes starting
at address @code{cl_arg}. In this case, the argument given to the SPU codelet
is therefore not the @code{cl_arg} pointer, but the address of the buffer in
local store (LS) instead. This field is ignored for CPU, CUDA and OpenCL
codelets, where the @code{cl_arg} pointer is given as such.
@item @code{callback_func} (optional) (default: @code{NULL})
This is a function pointer of prototype @code{void (*f)(void *)} which
specifies a possible callback. If this pointer is non-null, the callback
function is executed @emph{on the host} after the execution of the task. The
callback is passed the value contained in the @code{callback_arg} field. No
callback is executed if the field is set to @code{NULL}.
@item @code{callback_arg} (optional) (default: @code{NULL})
This is the pointer passed to the callback function. This field is ignored if
the @code{callback_func} field is set to @code{NULL}.
@item @code{use_tag} (optional) (default: @code{0})
If set, this flag indicates that the task should be associated with the tag
contained in the @code{tag_id} field. Tags allow the application to synchronize
with the task and to express task dependencies easily.
@item @code{tag_id}
This field contains the tag associated to the task if the @code{use_tag} field
was set; it is ignored otherwise.
@item @code{synchronous}
If this flag is set, the @code{starpu_task_submit} function is blocking and
returns only when the task has been executed (or if no worker is able to
process the task). Otherwise, @code{starpu_task_submit} returns immediately.
@item @code{priority} (optional) (default: @code{STARPU_DEFAULT_PRIO})
This field indicates a level of priority for the task. This is an integer value
that must be set between the return value of the
@code{starpu_sched_get_min_priority} function for the least important tasks,
and that of the @code{starpu_sched_get_max_priority} function for the most
important tasks (inclusive). The @code{STARPU_MIN_PRIO} and
@code{STARPU_MAX_PRIO} macros are provided for convenience and respectively
evaluate to the return values of @code{starpu_sched_get_min_priority} and
@code{starpu_sched_get_max_priority}.
Default priority is @code{STARPU_DEFAULT_PRIO}, which is always defined as 0 in
order to allow static task initialization. Scheduling strategies that take
priorities into account can use this parameter to take better scheduling
decisions, but the scheduling policy may also ignore it.
@item @code{execute_on_a_specific_worker} (default: @code{0})
If this flag is set, StarPU will bypass the scheduler and directly assign this
task to the worker specified by the @code{workerid} field.
@item @code{workerid} (optional)
If the @code{execute_on_a_specific_worker} field is set, this field indicates
the identifier of the worker that should process this task (as
returned by @code{starpu_worker_get_id}). This field is ignored if the
@code{execute_on_a_specific_worker} field is set to 0.
@item @code{detach} (optional) (default: @code{1})
If this flag is set, it is not possible to synchronize with the task
by means of @code{starpu_task_wait} later on. If the flag is not set,
internal data structures are only guaranteed to be freed once
@code{starpu_task_wait} is called.
@item @code{destroy} (optional) (default: @code{1})
If this flag is set, the task structure will automatically be freed, either
after the execution of the callback if the task is detached, or during
@code{starpu_task_wait} otherwise. If this flag is not set, dynamically
allocated data structures will not be freed until @code{starpu_task_destroy} is
called explicitly. Setting this flag for a statically allocated task structure
will result in undefined behaviour.
@item @code{predicted} (output field)
Predicted duration of the task. This field is only set if the scheduling
strategy uses performance models.
@end table
@end deftp
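Putting the previous pieces together, a task using the (hypothetical)
@code{scal_cl} codelet shown above could be created and submitted as follows (a
sketch; @code{vector_handle} is assumed to be a registered handle):
@cartouche
@smallexample
float factor = 3.14f;
struct starpu_task *task = starpu_task_create();

task->cl = &scal_cl;
task->buffers[0].handle = vector_handle;
task->buffers[0].mode = STARPU_RW;
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->synchronous = 1;   /* starpu_task_submit will block */

starpu_task_submit(task);
@end smallexample
@end cartouche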
@deftypefun void starpu_task_init ({struct starpu_task} *@var{task})
Initialize @var{task} with default values. This function is implicitly
called by @code{starpu_task_create}. By default, tasks initialized with
@code{starpu_task_init} must be deinitialized explicitly with
@code{starpu_task_deinit}. Tasks can also be initialized statically, using the
constant @code{STARPU_TASK_INITIALIZER}.
@end deftypefun
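For instance, a statically allocated task might be set up as follows (a sketch;
the synchronous flag ensures the task has completed before it is
deinitialized):
@cartouche
@smallexample
struct starpu_task task = STARPU_TASK_INITIALIZER;
task.cl = &scal_cl;
task.buffers[0].handle = vector_handle;
task.buffers[0].mode = STARPU_RW;
task.synchronous = 1;

starpu_task_submit(&task);
starpu_task_deinit(&task);
@end smallexample
@end cartouche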
@deftypefun {struct starpu_task *} starpu_task_create (void)
Allocate a task structure and initialize it with default values. Tasks
allocated dynamically with @code{starpu_task_create} are automatically freed when the
task is terminated. If the destroy flag is explicitly unset, the resources used
by the task must be freed by calling
@code{starpu_task_destroy}.
@end deftypefun
@deftypefun void starpu_task_deinit ({struct starpu_task} *@var{task})
Release all the structures automatically allocated to execute @var{task}. This is
called automatically by @code{starpu_task_destroy}, but the task structure itself is not
freed. This should be used for statically allocated tasks for instance.
@end deftypefun
@deftypefun void starpu_task_destroy ({struct starpu_task} *@var{task})
Free the resources allocated during @code{starpu_task_create} and
associated with @var{task}. This function can be called automatically
after the execution of a task by setting the @code{destroy} flag of the
@code{starpu_task} structure (default behaviour). Calling this function
on a statically allocated task results in undefined behaviour.
@end deftypefun
@deftypefun int starpu_task_wait ({struct starpu_task} *@var{task})
This function blocks until @var{task} has been executed. It is not possible to
synchronize with a task more than once. It is not possible to wait for
synchronous or detached tasks.
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the specified task was either synchronous or detached.
@end deftypefun
@deftypefun int starpu_task_submit ({struct starpu_task} *@var{task})
This function submits @var{task} to StarPU. Calling this function does
not mean that the task will be executed immediately as there can be data or task
(tag) dependencies that are not fulfilled yet: StarPU will take care of
scheduling this task with respect to such dependencies.
This function returns immediately if the @code{synchronous} field of the
@code{starpu_task} structure was set to 0, and blocks until the termination of
the task otherwise. It is also possible to synchronize the application with
asynchronous tasks by means of tags, using the @code{starpu_tag_wait}
function for instance.
In case of success, this function returns 0; a return value of @code{-ENODEV}
means that there is no worker able to process this task (e.g. there is no GPU
available and this task is only implemented for CUDA devices).
@end deftypefun
@deftypefun int starpu_task_wait_for_all (void)
This function blocks until all the tasks that were submitted are terminated.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_get_current_task (void)
This function returns the task currently executed by the worker, or
@code{NULL} if it is called either from a thread that is not executing a task,
or simply because there is no task being executed at the moment.
@end deftypefun
@deftypefun void starpu_display_codelet_stats ({struct starpu_codelet_t} *@var{cl})
Output on @code{stderr} some statistics on the codelet @var{cl}.
@end deftypefun
@c Callbacks: what can we put in callbacks?
@node Explicit Dependencies
@section Explicit Dependencies
@menu
* starpu_task_declare_deps_array:: starpu_task_declare_deps_array
* starpu_tag_t:: Task logical identifier
* starpu_tag_declare_deps:: Declare the Dependencies of a Tag
* starpu_tag_declare_deps_array:: Declare the Dependencies of a Tag
* starpu_tag_wait:: Block until a Tag is terminated
* starpu_tag_wait_array:: Block until a set of Tags is terminated
* starpu_tag_remove:: Destroy a Tag
* starpu_tag_notify_from_apps:: Feed a tag explicitly
@end menu
@node starpu_task_declare_deps_array
@subsection @code{starpu_task_declare_deps_array} -- Declare task dependencies
@deftypefun void starpu_task_declare_deps_array ({struct starpu_task} *@var{task}, unsigned @var{ndeps}, {struct starpu_task} *@var{task_array}[])
Declare task dependencies between a @var{task} and an array of tasks of length
@var{ndeps}. This function must be called prior to the submission of the task,
but it may be called after the submission or the execution of the tasks in the
array, provided the tasks are still valid (i.e. they were not automatically
destroyed). Calling this function on a task that was already submitted or with
an entry of @var{task_array} that is not a valid task anymore results in
undefined behaviour. If @var{ndeps} is 0, no dependency is added. It is
possible to call @code{starpu_task_declare_deps_array} multiple times on the
same task; in this case, the dependencies are added. It is possible to have
redundancy in the task dependencies.
@end deftypefun
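For instance, to make a (hypothetical) @code{task_c} depend on @code{task_a}
and @code{task_b}:
@cartouche
@smallexample
struct starpu_task *deps[2] = @{task_a, task_b@};
/* task_c will not start before task_a and task_b have completed */
starpu_task_declare_deps_array(task_c, 2, deps);
starpu_task_submit(task_c);
@end smallexample
@end cartouche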
@node starpu_tag_t
@subsection @code{starpu_tag_t} -- Task logical identifier
@table @asis
@item @emph{Description}:
It is possible to associate a task with a unique ``tag'' chosen by the application, and to express
dependencies between tasks by means of those tags. To do so, fill the
@code{tag_id} field of the @code{starpu_task} structure with a tag number (which can
be arbitrary) and set the @code{use_tag} field to 1.
If @code{starpu_tag_declare_deps} is called with this tag number, the task will
not be started until the tasks which hold the declared dependency tags are
completed.
@end table
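For instance, to associate a task with tag @code{0x42} (an arbitrary number
chosen for the illustration):
@cartouche
@smallexample
task->use_tag = 1;
task->tag_id = (starpu_tag_t)0x42;
@end smallexample
@end cartouche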
@node starpu_tag_declare_deps
@subsection @code{starpu_tag_declare_deps} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
Specify the dependencies of the task identified by tag @code{id}. The first
argument specifies the tag which is configured, the second argument gives the
number of tag(s) on which @code{id} depends. The following arguments are the
tags which have to be terminated to unlock the task.
This function must be called before the associated task is submitted to StarPU
with @code{starpu_task_submit}.
@item @emph{Remark}
Because of the variable arity of @code{starpu_tag_declare_deps}, note that the
last arguments @emph{must} be of type @code{starpu_tag_t}: constant values
typically need to be explicitly cast. Using the
@code{starpu_tag_declare_deps_array} function avoids this hazard.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps(starpu_tag_t id, unsigned ndeps, ...);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_declare_deps((starpu_tag_t)0x1,
                        2, (starpu_tag_t)0x32, (starpu_tag_t)0x52);
@end example
@end cartouche
@end table
@node starpu_tag_declare_deps_array
@subsection @code{starpu_tag_declare_deps_array} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
This function is similar to @code{starpu_tag_declare_deps}, except that it
does not take a variable number of arguments but an array of tags of size
@code{ndeps}.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps_array(starpu_tag_t id, unsigned ndeps, starpu_tag_t *array);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_t tag_array[2] = @{0x32, 0x52@};
starpu_tag_declare_deps_array((starpu_tag_t)0x1, 2, tag_array);
@end example
@end cartouche
@end table
@node starpu_tag_wait
@subsection @code{starpu_tag_wait} -- Block until a Tag is terminated
@deftypefun void starpu_tag_wait (starpu_tag_t @var{id})
This function blocks until the task associated to tag @var{id} has been
executed. This is a blocking call which must therefore not be called within
tasks or callbacks, but only from the application directly. It is possible to
synchronize with the same tag multiple times, as long as the
@code{starpu_tag_remove} function is not called. Note that it is still
possible to synchronize with a tag associated to a task whose @code{starpu_task}
data structure was freed (e.g. if the @code{destroy} flag of the
@code{starpu_task} was enabled).
@end deftypefun
@node starpu_tag_wait_array
@subsection @code{starpu_tag_wait_array} -- Block until a set of Tags is terminated
@deftypefun void starpu_tag_wait_array (unsigned @var{ntags}, starpu_tag_t *@var{id})
This function is similar to @code{starpu_tag_wait} except that it blocks until
@emph{all} the @var{ntags} tags contained in the @var{id} array are
terminated.
@end deftypefun
@node starpu_tag_remove
@subsection @code{starpu_tag_remove} -- Destroy a Tag
@deftypefun void starpu_tag_remove (starpu_tag_t @var{id})
This function releases the resources associated to tag @var{id}. It can be
called once the corresponding task has been executed and when there is
no other tag that depends on this tag anymore.
@end deftypefun
@node starpu_tag_notify_from_apps
@subsection @code{starpu_tag_notify_from_apps} -- Feed a Tag explicitly
@deftypefun void starpu_tag_notify_from_apps (starpu_tag_t @var{id})
This function explicitly unlocks tag @var{id}. It may be useful in the
case of applications which execute part of their computation outside StarPU
tasks (e.g. third-party libraries). It is also provided as a
convenient tool for the programmer, for instance to entirely construct the task
DAG before actually giving StarPU the opportunity to execute the tasks.
@end deftypefun
@node Implicit Data Dependencies
@section Implicit Data Dependencies
@menu
* starpu_data_set_default_sequential_consistency_flag:: starpu_data_set_default_sequential_consistency_flag
* starpu_data_get_default_sequential_consistency_flag:: starpu_data_get_default_sequential_consistency_flag
* starpu_data_set_sequential_consistency_flag:: starpu_data_set_sequential_consistency_flag
@end menu
In this section, we describe how StarPU makes it possible to insert implicit
task dependencies in order to enforce sequential data consistency. When this
data consistency is enabled on a specific data handle, any data access will
appear sequentially consistent from the application's point of view. For
instance, if the application submits two tasks that access the same piece of
data in read-only mode, and then a third task that accesses it in write mode,
dependencies will be added between the first two tasks and the third one.
Implicit data dependencies are also inserted in the case of data accesses from
the application.
@node starpu_data_set_default_sequential_consistency_flag
@subsection @code{starpu_data_set_default_sequential_consistency_flag} -- Set default sequential consistency flag
@deftypefun void starpu_data_set_default_sequential_consistency_flag (unsigned @var{flag})
Set the default sequential consistency flag. If a non-zero value is passed,
sequential data consistency will be enforced for all handles registered after
this function call, otherwise it is disabled. By default, StarPU enables
sequential data consistency. It is also possible to select the data consistency
mode of a specific data handle with the
@code{starpu_data_set_sequential_consistency_flag} function.
@end deftypefun
@node starpu_data_get_default_sequential_consistency_flag
@subsection @code{starpu_data_get_default_sequential_consistency_flag} -- Get current default sequential consistency flag
@deftypefun unsigned starpu_data_get_default_sequential_consistency_flag (void)
This function returns the current default sequential consistency flag.
@end deftypefun
@node starpu_data_set_sequential_consistency_flag
@subsection @code{starpu_data_set_sequential_consistency_flag} -- Set data sequential consistency mode
@deftypefun void starpu_data_set_sequential_consistency_flag (starpu_data_handle @var{handle}, unsigned @var{flag})
Select the data consistency mode associated to a data handle. The consistency
mode set using this function takes precedence over the default mode which can
be set with @code{starpu_data_set_default_sequential_consistency_flag}.
@end deftypefun
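For instance, to let the application order the accesses to a given handle by
itself rather than relying on implicit dependencies:
@cartouche
@smallexample
/* disable sequential consistency for this handle only */
starpu_data_set_sequential_consistency_flag(vector_handle, 0);
@end smallexample
@end cartouche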
@node Performance Model API
@section Performance Model API
@menu
* starpu_load_history_debug::
* starpu_perfmodel_debugfilepath::
* starpu_perfmodel_get_arch_name::
* starpu_force_bus_sampling::
@end menu
@node starpu_load_history_debug
@subsection @code{starpu_load_history_debug}
@deftypefun int starpu_load_history_debug ({const char} *@var{symbol}, {struct starpu_perfmodel_t} *@var{model})
TODO
@end deftypefun
@node starpu_perfmodel_debugfilepath
@subsection @code{starpu_perfmodel_debugfilepath}
@deftypefun void starpu_perfmodel_debugfilepath ({struct starpu_perfmodel_t} *@var{model}, {enum starpu_perf_archtype} @var{arch}, char *@var{path}, size_t @var{maxlen})
TODO
@end deftypefun
@node starpu_perfmodel_get_arch_name
@subsection @code{starpu_perfmodel_get_arch_name}
@deftypefun void starpu_perfmodel_get_arch_name ({enum starpu_perf_archtype} @var{arch}, char *@var{archname}, size_t @var{maxlen})
TODO
@end deftypefun
@node starpu_force_bus_sampling
@subsection @code{starpu_force_bus_sampling}
@deftypefun void starpu_force_bus_sampling (void)
This forces sampling the bus performance model again.
@end deftypefun
@node Profiling API
@section Profiling API
@menu
* starpu_profiling_status_set:: starpu_profiling_status_set
* starpu_profiling_status_get:: starpu_profiling_status_get
* struct starpu_task_profiling_info:: task profiling information
* struct starpu_worker_profiling_info:: worker profiling information
* starpu_worker_get_profiling_info:: starpu_worker_get_profiling_info
* struct starpu_bus_profiling_info:: bus profiling information
* starpu_bus_get_count::
* starpu_bus_get_id::
* starpu_bus_get_src::
* starpu_bus_get_dst::
* starpu_timing_timespec_delay_us::
* starpu_timing_timespec_to_us::
* starpu_bus_profiling_helper_display_summary::
* starpu_worker_profiling_helper_display_summary::
@end menu
@node starpu_profiling_status_set
@subsection @code{starpu_profiling_status_set} -- Set current profiling status
@table @asis
@item @emph{Description}:
This function sets the profiling status. Profiling is activated by passing
@code{STARPU_PROFILING_ENABLE} in @code{status}. Passing
@code{STARPU_PROFILING_DISABLE} disables profiling. Calling this function
resets all profiling measurements. When profiling is enabled, the
@code{profiling_info} field of the @code{struct starpu_task} structure points
to a valid @code{struct starpu_task_profiling_info} structure containing
information about the execution of the task.
@item @emph{Return value}:
Negative return values indicate an error, otherwise the previous status is
returned.
@item @emph{Prototype}:
@code{int starpu_profiling_status_set(int status);}
@end table
@node starpu_profiling_status_get
@subsection @code{starpu_profiling_status_get} -- Get current profiling status
@deftypefun int starpu_profiling_status_get (void)
Return the current profiling status or a negative value in case there was an error.
@end deftypefun
@node struct starpu_task_profiling_info
@subsection @code{struct starpu_task_profiling_info} -- Task profiling information
@table @asis
@item @emph{Description}:
This structure contains information about the execution of a task. It is
accessible from the @code{profiling_info} field of the @code{starpu_task}
structure if profiling was enabled.
@item @emph{Fields}:
@table @asis
@item @code{submit_time}:
Date of task submission (relative to the initialization of StarPU).
@item @code{start_time}:
Date of task execution beginning (relative to the initialization of StarPU).
@item @code{end_time}:
Date of task execution termination (relative to the initialization of StarPU).
@item @code{workerid}:
Identifier of the worker which has executed the task.
@end table
@end table
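As an illustration, the duration of a task can be computed from these fields
with @code{starpu_timing_timespec_delay_us} (a sketch; the task structure is
assumed to still be valid after execution, e.g. its @code{destroy} flag is
unset):
@cartouche
@smallexample
starpu_profiling_status_set(STARPU_PROFILING_ENABLE);
/* ... submit the task and wait for its termination ... */
struct starpu_task_profiling_info *info = task->profiling_info;
double length = starpu_timing_timespec_delay_us(&info->start_time,
                                                &info->end_time);
fprintf(stderr, "Task took %.2f us on worker %d\n", length, info->workerid);
@end smallexample
@end cartouche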
@node struct starpu_worker_profiling_info
@subsection @code{struct starpu_worker_profiling_info} -- Worker profiling information
@table @asis
@item @emph{Description}:
This structure contains the profiling information associated to a worker.
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
Starting date for the reported profiling measurements.
@item @code{total_time}:
Duration of the profiling measurement interval.
@item @code{executing_time}:
Time spent by the worker to execute tasks during the profiling measurement interval.
@item @code{sleeping_time}:
Time spent idling by the worker during the profiling measurement interval.
@item @code{executed_tasks}:
Number of tasks executed by the worker during the profiling measurement interval.
@end table
@end table
@node starpu_worker_get_profiling_info
@subsection @code{starpu_worker_get_profiling_info} -- Get worker profiling info
@table @asis
@item @emph{Description}:
Get the profiling info associated to the worker identified by @code{workerid},
and reset the profiling measurements. If the @code{worker_info} argument is
@code{NULL}, only reset the counters associated to worker @code{workerid}.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, a negative
value is returned.
@item @emph{Prototype}:
@code{int starpu_worker_get_profiling_info(int workerid, struct starpu_worker_profiling_info *worker_info);}
@end table
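A sketch of how the activity ratio of a worker could be computed from this
information (assuming the time fields are @code{struct timespec} values,
convertible with @code{starpu_timing_timespec_to_us}):
@cartouche
@smallexample
struct starpu_worker_profiling_info info;
starpu_worker_get_profiling_info(workerid, &info);
/* assumption: total_time and executing_time are struct timespec */
double total = starpu_timing_timespec_to_us(&info.total_time);
double busy = starpu_timing_timespec_to_us(&info.executing_time);
fprintf(stderr, "Worker %d was busy %.1f%% of the time\n",
        workerid, 100.0 * busy / total);
@end smallexample
@end cartouche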
@node struct starpu_bus_profiling_info
@subsection @code{struct starpu_bus_profiling_info} -- Bus profiling information
@table @asis
@item @emph{Description}:
TODO
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
TODO
@item @code{total_time}:
TODO
@item @code{transferred_bytes}:
TODO
@item @code{transfer_count}:
TODO
@end table
@end table
@node starpu_bus_get_count
@subsection @code{starpu_bus_get_count}
@deftypefun int starpu_bus_get_count (void)
TODO
@end deftypefun
@node starpu_bus_get_id
@subsection @code{starpu_bus_get_id}
@deftypefun int starpu_bus_get_id (int @var{src}, int @var{dst})
TODO
@end deftypefun
@node starpu_bus_get_src
@subsection @code{starpu_bus_get_src}
@deftypefun int starpu_bus_get_src (int @var{busid})
TODO
@end deftypefun
@node starpu_bus_get_dst
@subsection @code{starpu_bus_get_dst}
@deftypefun int starpu_bus_get_dst (int @var{busid})
TODO
@end deftypefun
@node starpu_timing_timespec_delay_us
@subsection @code{starpu_timing_timespec_delay_us}
@deftypefun double starpu_timing_timespec_delay_us ({struct timespec} *@var{start}, {struct timespec} *@var{end})
TODO
@end deftypefun
@node starpu_timing_timespec_to_us
@subsection @code{starpu_timing_timespec_to_us}
@deftypefun double starpu_timing_timespec_to_us ({struct timespec} *@var{ts})
TODO
@end deftypefun
@node starpu_bus_profiling_helper_display_summary
@subsection @code{starpu_bus_profiling_helper_display_summary}
@deftypefun void starpu_bus_profiling_helper_display_summary (void)
TODO
@end deftypefun
@node starpu_worker_profiling_helper_display_summary
@subsection @code{starpu_worker_profiling_helper_display_summary}
@deftypefun void starpu_worker_profiling_helper_display_summary (void)
TODO
@end deftypefun
@node CUDA extensions
@section CUDA extensions
@c void starpu_malloc(float **A, size_t dim);
@menu
* starpu_cuda_get_local_stream:: Get current worker's CUDA stream
* starpu_helper_cublas_init:: Initialize CUBLAS on every CUDA device
* starpu_helper_cublas_shutdown:: Deinitialize CUBLAS on every CUDA device
@end menu
@node starpu_cuda_get_local_stream
@subsection @code{starpu_cuda_get_local_stream} -- Get current worker's CUDA stream
@deftypefun {cudaStream_t *} starpu_cuda_get_local_stream (void)
StarPU provides a stream for every CUDA device controlled by StarPU. This
function is only provided for convenience so that programmers can easily use
asynchronous operations within codelets without having to create a stream by
hand. Note that the application is not forced to use the stream provided by
@code{starpu_cuda_get_local_stream} and may also create its own streams.
Synchronizing with @code{cudaThreadSynchronize()} is allowed, but will reduce
the likelihood of having all transfers overlapped.
@end deftypefun
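As an illustration, here is a sketch of a CUDA implementation of a
vector-scaling codelet that launches its kernel on the worker's local stream
and synchronizes with that stream only. The kernel and function names are
illustrative, not part of the StarPU API.
@cartouche
@smallexample
static __global__ void vector_mult_cuda(unsigned n, float *v, float factor)
@{
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        v[i] *= factor;
@}

extern "C" void scal_cuda_func(void *buffers[], void *cl_arg)
@{
    float *factor = (float *) cl_arg;
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    float *v = (float *) STARPU_VECTOR_GET_PTR(buffers[0]);

    /* Launch on the local stream so that data transfers can overlap. */
    vector_mult_cuda<<<(n+255)/256, 256, 0, *starpu_cuda_get_local_stream()>>>
        (n, v, *factor);
    /* Wait for this stream only, not for the whole device. */
    cudaStreamSynchronize(*starpu_cuda_get_local_stream());
@}
@end smallexample
@end cartouche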
@node starpu_helper_cublas_init
@subsection @code{starpu_helper_cublas_init} -- Initialize CUBLAS on every CUDA device
@deftypefun void starpu_helper_cublas_init (void)
The CUBLAS library must be initialized prior to any CUBLAS call. Calling
@code{starpu_helper_cublas_init} will initialize CUBLAS on every CUDA device
controlled by StarPU. This call blocks until CUBLAS has been properly
initialized on every device.
@end deftypefun
@node starpu_helper_cublas_shutdown
@subsection @code{starpu_helper_cublas_shutdown} -- Deinitialize CUBLAS on every CUDA device
@deftypefun void starpu_helper_cublas_shutdown (void)
This function synchronously deinitializes the CUBLAS library on every CUDA device.
@end deftypefun
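A typical application initializes CUBLAS right after StarPU and shuts it down
just before @code{starpu_shutdown}, as in the following sketch:
@cartouche
@smallexample
int main(int argc, char **argv)
@{
    starpu_init(NULL);
    /* Initialize CUBLAS on every CUDA device controlled by StarPU. */
    starpu_helper_cublas_init();

    /* ... submit tasks whose CUDA implementations may call CUBLAS ... */
    starpu_task_wait_for_all();

    /* Release CUBLAS before shutting StarPU down. */
    starpu_helper_cublas_shutdown();
    starpu_shutdown();
    return 0;
@}
@end smallexample
@end cartouche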
@node OpenCL extensions
@section OpenCL extensions
@menu
* Compiling OpenCL kernels:: Compiling OpenCL kernels
* Loading OpenCL kernels:: Loading OpenCL kernels
* OpenCL statistics:: Collecting statistics from OpenCL
@end menu
@node Compiling OpenCL kernels
@subsection Compiling OpenCL kernels
Source code for OpenCL kernels can be stored in a file or in a
string. StarPU provides functions to build the program executable for
each available OpenCL device as a @code{cl_program} object. This
program executable can then be loaded within a specific queue as
explained in the next section. These are only helpers; applications
can also fill a @code{starpu_opencl_program} array by hand for more advanced
uses (e.g. different programs on the different OpenCL devices, for
relocation purposes).
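As an illustration, here is a sketch of the typical usage: compile the
kernels once at application start-up and release them before shutdown. The
file name and the @code{NULL} build options are illustrative.
@cartouche
@smallexample
struct starpu_opencl_program programs;

/* Build "kernel.cl" for every available OpenCL device. */
if (starpu_opencl_load_opencl_from_file("kernel.cl", &programs, NULL) != 0)
    fprintf(stderr, "could not build the OpenCL kernels\n");

/* ... submit tasks whose OpenCL implementations load kernels
 * from 'programs' ... */
starpu_task_wait_for_all();

/* Release the compiled programs. */
starpu_opencl_unload_opencl(&programs);
@end smallexample
@end cartouche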
@menu
* starpu_opencl_load_opencl_from_file:: Compiling OpenCL source code
* starpu_opencl_load_opencl_from_string:: Compiling OpenCL source code
* starpu_opencl_unload_opencl:: Releasing OpenCL code
@end menu
@node starpu_opencl_load_opencl_from_file
@subsubsection @code{starpu_opencl_load_opencl_from_file} -- Compiling OpenCL source code
@deftypefun int starpu_opencl_load_opencl_from_file (char *@var{source_file_name}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
Compile the OpenCL source code stored in the file @var{source_file_name} for
every OpenCL device, passing @var{build_options} to the OpenCL compiler, and
store the resulting program executables in @var{opencl_programs}.
@end deftypefun
@node starpu_opencl_load_opencl_from_string
@subsubsection @code{starpu_opencl_load_opencl_from_string} -- Compiling OpenCL source code
@deftypefun int starpu_opencl_load_opencl_from_string (char *@var{opencl_program_source}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
Compile the OpenCL source code stored in the string @var{opencl_program_source}
for every OpenCL device, passing @var{build_options} to the OpenCL compiler,
and store the resulting program executables in @var{opencl_programs}.
@end deftypefun
@node starpu_opencl_unload_opencl
@subsubsection @code{starpu_opencl_unload_opencl} -- Releasing OpenCL code
@deftypefun int starpu_opencl_unload_opencl ({struct starpu_opencl_program} *@var{opencl_programs})
Release the program executables stored in @var{opencl_programs}.
@end deftypefun
@node Loading OpenCL kernels
@subsection Loading OpenCL kernels
@menu
* starpu_opencl_load_kernel:: Loading a kernel
* starpu_opencl_release_kernel:: Releasing a kernel
@end menu
@node starpu_opencl_load_kernel
@subsubsection @code{starpu_opencl_load_kernel} -- Loading a kernel
@deftypefun int starpu_opencl_load_kernel (cl_kernel *@var{kernel}, cl_command_queue *@var{queue}, {struct starpu_opencl_program} *@var{opencl_programs}, char *@var{kernel_name}, int @var{devid})
Create the kernel named @var{kernel_name} from the program that was compiled
for device @var{devid} in @var{opencl_programs}, and return it in @var{kernel},
along with the command queue of the device in @var{queue}.
@end deftypefun
@node starpu_opencl_release_kernel
@subsubsection @code{starpu_opencl_release_kernel} -- Releasing a kernel
@deftypefun int starpu_opencl_release_kernel (cl_kernel @var{kernel})
Release the given @var{kernel}, to be called after kernel execution.
@end deftypefun
@node OpenCL statistics
@subsection OpenCL statistics
@menu
* starpu_opencl_collect_stats:: Collect statistics on a kernel execution
@end menu
@node starpu_opencl_collect_stats
@subsubsection @code{starpu_opencl_collect_stats} -- Collect statistics on a kernel execution
@deftypefun int starpu_opencl_collect_stats (cl_event @var{event})
After termination of the kernel, the OpenCL codelet should call this function
and pass it the event returned by @code{clEnqueueNDRangeKernel}, to let StarPU
collect statistics about the kernel execution (cycles used, power consumed).
@end deftypefun
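Putting the pieces together, here is a sketch of an OpenCL implementation of
a vector-scaling codelet. The @code{programs} variable is assumed to have been
filled beforehand by @code{starpu_opencl_load_opencl_from_file}, the kernel
name is illustrative, and error checking is omitted for brevity.
@cartouche
@smallexample
extern struct starpu_opencl_program programs;

void scal_opencl_func(void *buffers[], void *cl_arg)
@{
    float *factor = (float *) cl_arg;
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    cl_mem v = (cl_mem) STARPU_VECTOR_GET_PTR(buffers[0]);

    int devid = starpu_worker_get_devid(starpu_worker_get_id());
    cl_kernel kernel;
    cl_command_queue queue;
    cl_event event;

    starpu_opencl_load_kernel(&kernel, &queue, &programs,
                              "vector_mult_opencl", devid);

    clSetKernelArg(kernel, 0, sizeof(v), &v);
    clSetKernelArg(kernel, 1, sizeof(n), &n);
    clSetKernelArg(kernel, 2, sizeof(*factor), factor);

    size_t global = n;
    clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, NULL,
                           0, NULL, &event);

    /* Wait for the kernel, then let StarPU gather its statistics. */
    clFinish(queue);
    starpu_opencl_collect_stats(event);
    clReleaseEvent(event);

    starpu_opencl_release_kernel(kernel);
@}
@end smallexample
@end cartouche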
@node Cell extensions
@section Cell extensions
Nothing yet.
@node Miscellaneous helpers
@section Miscellaneous helpers
@menu
* starpu_data_cpy:: Copy a data handle into another data handle
* starpu_execute_on_each_worker:: Execute a function on a subset of workers
@end menu
@node starpu_data_cpy
@subsection @code{starpu_data_cpy} -- Copy a data handle into another data handle
@deftypefun int starpu_data_cpy (starpu_data_handle @var{dst_handle}, starpu_data_handle @var{src_handle}, int @var{asynchronous}, void (*@var{callback_func})(void*), void *@var{callback_arg})
Copy the content of @var{src_handle} into @var{dst_handle}.
The @var{asynchronous} parameter indicates whether the function should
block or not. In the case of an asynchronous call, it is possible to
synchronize with the termination of this operation either by means of
implicit dependencies (if enabled) or by calling
@code{starpu_task_wait_for_all()}. If @var{callback_func} is not @code{NULL},
this callback function is executed after the handle has been copied, and it is
given the @var{callback_arg} pointer as argument.
@end deftypefun
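For instance, given two previously registered handles @code{dst_handle} and
@code{src_handle}, the following sketch asynchronously duplicates one into
the other and prints a message upon completion; @code{copy_done} is a
hypothetical callback.
@cartouche
@smallexample
static void copy_done(void *arg)
@{
    fprintf(stderr, "finished copying '%s'\n", (char *) arg);
@}

/* Start an asynchronous copy of src_handle into dst_handle. */
starpu_data_cpy(dst_handle, src_handle, 1, copy_done, "my vector");

/* ... submit more work ... */

/* Wait for the copy (and everything else) to complete. */
starpu_task_wait_for_all();
@end smallexample
@end cartouche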
@node starpu_execute_on_each_worker
@subsection @code{starpu_execute_on_each_worker} -- Execute a function on a subset of workers
@deftypefun void starpu_execute_on_each_worker (void (*@var{func})(void *), void *@var{arg}, uint32_t @var{where})
When calling this method, the offloaded function specified by the first
argument is executed by every StarPU worker that may execute the function.
The second argument is passed to the offloaded function.
The last argument specifies on which types of processing units the function
should be executed. Similarly to the @var{where} field of the
@code{starpu_codelet} structure, it is possible to specify that the function
should be executed on every CUDA device and every CPU by passing
@code{STARPU_CPU|STARPU_CUDA}.
This function blocks until the function has been executed on every appropriate
processing unit, so it may not be called from a callback function, for
instance.
@end deftypefun
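As an illustration, here is a sketch that runs a per-worker initialization
function on every CPU and CUDA worker; @code{init_worker} and its argument
are hypothetical.
@cartouche
@smallexample
static void init_worker(void *arg)
@{
    int verbose = *(int *) arg;
    if (verbose)
        fprintf(stderr, "initializing worker %d\n", starpu_worker_get_id());
    /* ... per-worker initialization (library contexts, buffers, ...) ... */
@}

int verbose = 1;
/* Blocks until init_worker has run on every CPU and CUDA worker. */
starpu_execute_on_each_worker(init_worker, &verbose, STARPU_CPU|STARPU_CUDA);
@end smallexample
@end cartouche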
@c ---------------------------------------------------------------------
@c Advanced Topics
@c ---------------------------------------------------------------------
@node Advanced Topics
@chapter Advanced Topics
@menu
* Defining a new data interface::
* Defining a new scheduling policy::
@end menu
@node Defining a new data interface
@section Defining a new data interface
@menu
* struct starpu_data_interface_ops_t:: Per-interface methods
* struct starpu_data_copy_methods:: Per-interface data transfer methods
* An example of data interface:: An example of data interface
@end menu
@c void *starpu_data_get_interface_on_node(starpu_data_handle handle, unsigned memory_node); TODO
@node struct starpu_data_interface_ops_t
@subsection @code{struct starpu_data_interface_ops_t} -- Per-interface methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table
@node struct starpu_data_copy_methods
@subsection @code{struct starpu_data_copy_methods} -- Per-interface data transfer methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table
@node An example of data interface
@subsection An example of data interface
@table @asis
TODO
See @code{src/datawizard/interfaces/vector_interface.c} for now.
@end table
@node Defining a new scheduling policy
@section Defining a new scheduling policy
TODO
A full example showing how to define a new scheduling policy is available in
the StarPU sources in the directory @code{examples/scheduler/}.
@menu
* struct starpu_sched_policy_s::
* starpu_worker_set_sched_condition::
* starpu_sched_set_min_priority:: Set the minimum priority level
* starpu_sched_set_max_priority:: Set the maximum priority level
* starpu_push_local_task:: Assign a task to a worker
* Source code::
@end menu
@node struct starpu_sched_policy_s
@subsection @code{struct starpu_sched_policy_s} -- Scheduler methods
@table @asis
@item @emph{Description}:
This structure contains all the methods that implement a scheduling policy. An
application may specify which scheduling strategy to use in the
@code{sched_policy} field of the @code{starpu_conf} structure passed to the
@code{starpu_init} function.
@item @emph{Fields}:
@table @asis
@item @code{init_sched}:
Initialize the scheduling policy.
@item @code{deinit_sched}:
Cleanup the scheduling policy.
@item @code{push_task}:
Insert a task into the scheduler.
@item @code{push_prio_task}:
Insert a priority task into the scheduler.
@item @code{push_prio_notify}:
Notify the scheduler that a task was pushed on the worker. This method is
called when a task that was explicitly assigned to a worker is scheduled. It
therefore makes it possible to keep the state of the scheduler coherent even
when StarPU bypasses the scheduling strategy.
@item @code{pop_task}:
Get a task from the scheduler. The mutex associated to the worker is already
taken when this method is called. If this method is defined as @code{NULL}, the
worker will only execute tasks from its local queue. In this case, the
@code{push_task} method should use the @code{starpu_push_local_task} method to
assign tasks to the different workers.
@item @code{pop_every_task}:
Remove all available tasks from the scheduler (tasks are chained by means of
the @code{prev} and @code{next} fields of the @code{starpu_task} structure).
The mutex associated to the worker is already taken when this method is
called.
@item @code{post_exec_hook} (optional):
This method is called every time a task has been executed.
@item @code{policy_name}:
Name of the policy (optional).
@item @code{policy_description}:
Description of the policy (optional).
@end table
@end table
@node starpu_worker_set_sched_condition
@subsection @code{starpu_worker_set_sched_condition} -- Specify the condition variable associated to a worker
@deftypefun void starpu_worker_set_sched_condition (int @var{workerid}, pthread_cond_t *@var{sched_cond}, pthread_mutex_t *@var{sched_mutex})
When there is no available task for a worker, StarPU blocks this worker on a
condition variable. This function specifies which condition variable (and the
associated mutex) should be used to block (and to wake up) a worker. Note that
multiple workers may use the same condition variable. For instance, in the case
of a scheduling strategy with a single task queue, the same condition variable
would be used to block and wake up all workers.
The initialization method of a scheduling strategy (@code{init_sched}) must
call this function once per worker.
@end deftypefun
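As an illustration, here is a sketch of the per-worker registration that the
@code{init_sched} method of a single-queue strategy would perform; the helper
name is illustrative.
@cartouche
@smallexample
static pthread_cond_t sched_cond;
static pthread_mutex_t sched_mutex;

/* Called from init_sched: all workers share the same queue, so they
 * all block on the same condition variable. */
static void register_all_workers(void)
@{
    pthread_cond_init(&sched_cond, NULL);
    pthread_mutex_init(&sched_mutex, NULL);

    unsigned workerid;
    for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
        starpu_worker_set_sched_condition(workerid,
                                          &sched_cond, &sched_mutex);
@}
@end smallexample
@end cartouche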
@node starpu_sched_set_min_priority
@subsection @code{starpu_sched_set_min_priority}
@deftypefun void starpu_sched_set_min_priority (int @var{min_prio})
Defines the minimum priority level supported by the scheduling policy. The
default minimum priority level is the same as the default priority level,
which is 0 by convention. The application may access that value by calling the
@code{starpu_sched_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly from the application.
@end deftypefun
@node starpu_sched_set_max_priority
@subsection @code{starpu_sched_set_max_priority}
@deftypefun void starpu_sched_set_max_priority (int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly from the application.
@end deftypefun
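For instance, a policy supporting priorities from -5 to 5 would declare the
range from its initialization method as follows (the bounds are arbitrary):
@cartouche
@smallexample
/* In the init_sched method of the policy: */
starpu_sched_set_min_priority(-5);
starpu_sched_set_max_priority(5);

/* The application can then query the supported range: */
int min = starpu_sched_get_min_priority();
int max = starpu_sched_get_max_priority();
@end smallexample
@end cartouche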
@node starpu_push_local_task
@subsection @code{starpu_push_local_task}
@deftypefun int starpu_push_local_task (int @var{workerid}, {struct starpu_task} *@var{task}, int @var{back})
The scheduling policy may put tasks directly into a worker's local queue so
that it is not always necessary to create its own queue when the local queue
is sufficient. If @var{back} is not null, the task is put at the back of the
queue where the worker will pop tasks first. Setting @var{back} to 0 therefore
ensures a FIFO ordering.
@end deftypefun
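As an illustration, here is a sketch of a @code{push_task} method that
distributes incoming tasks round-robin over the workers' local queues,
assuming the method receives the task as its sole argument. A real policy
would also check that the chosen worker is able to execute the task, and
would protect the counter against concurrent pushes.
@cartouche
@smallexample
static int push_task_round_robin(struct starpu_task *task)
@{
    static unsigned next_worker = 0;
    int workerid = next_worker++ % starpu_worker_get_count();

    /* back = 0: append to the local queue, i.e. FIFO ordering. */
    return starpu_push_local_task(workerid, task, 0);
@}
@end smallexample
@end cartouche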
@node Source code
@subsection Source code
@cartouche
@smallexample
static struct starpu_sched_policy_s dummy_sched_policy = @{
    .init_sched = init_dummy_sched,
    .deinit_sched = deinit_dummy_sched,
    .push_task = push_task_dummy,
    .push_prio_task = NULL,
    .pop_task = pop_task_dummy,
    .post_exec_hook = NULL,
    .pop_every_task = NULL,
    .policy_name = "dummy",
    .policy_description = "dummy scheduling strategy"
@};
@end smallexample
@end cartouche
@c ---------------------------------------------------------------------
@c C Extensions
@c ---------------------------------------------------------------------
@include c-extensions.texi
@c ---------------------------------------------------------------------
@c Appendices
@c ---------------------------------------------------------------------
@c ---------------------------------------------------------------------
@c Full source code for the 'Scaling a Vector' example
@c ---------------------------------------------------------------------
@node Full source code for the 'Scaling a Vector' example
@appendix Full source code for the 'Scaling a Vector' example
@menu
* Main application::
* CPU Kernel::
* CUDA Kernel::
* OpenCL Kernel::
@end menu
@node Main application
@section Main application
@include vector_scal_c.texi
@node CPU Kernel
@section CPU Kernel
@include vector_scal_cpu.texi
@node CUDA Kernel
@section CUDA Kernel
@include vector_scal_cuda.texi
@node OpenCL Kernel
@section OpenCL Kernel
@menu
* Invoking the kernel::
* Source of the kernel::
@end menu
@node Invoking the kernel
@subsection Invoking the kernel
@include vector_scal_opencl.texi
@node Source of the kernel
@subsection Source of the kernel
@include vector_scal_opencl_codelet.texi
@node GNU Free Documentation License
@appendix GNU Free Documentation License
@include fdl-1.3.texi
@c
@c Indices.
@c
@node Function Index
@unnumbered Function Index
@printindex fn
@bye