\input texinfo @c -*-texinfo-*-
@c %**start of header
@setfilename starpu.info
@settitle StarPU Handbook
@c %**end of header
@include version.texi
@copying
Copyright @copyright{} 2009--2011 Universit@'e de Bordeaux 1
@noindent
Copyright @copyright{} 2010, 2011 Centre National de la Recherche Scientifique
@noindent
Copyright @copyright{} 2011 Institut National de Recherche en Informatique et Automatique
@quotation
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3
or any later version published by the Free Software Foundation;
with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
Texts. A copy of the license is included in the section entitled ``GNU
Free Documentation License''.
@end quotation
@end copying
@setchapternewpage odd
@dircategory Development
@direntry
* StarPU: (starpu).             StarPU Handbook
@end direntry
@titlepage
@title StarPU Handbook
@subtitle for StarPU @value{VERSION}
@page
@vskip 0pt plus 1fill
@insertcopying
@end titlepage
@c @summarycontents
@contents
@page
@node Top
@top Preface
This manual documents the usage of StarPU version @value{VERSION}. It
was last updated on @value{UPDATED}.
@ifnottex
@insertcopying
@end ifnottex
@comment
@comment  When you add a new menu item, please keep the right hand
@comment  aligned to the same column. Do not use tabs. This provides
@comment  better formatting.
@comment
@menu
* Introduction::                A basic introduction to using StarPU
* Installing StarPU::           How to configure, build and install StarPU
* Using StarPU::                How to run StarPU applications
* Basic Examples::              Basic examples of the use of StarPU
* Performance optimization::    How to optimize performance with StarPU
* Performance feedback::        Performance debugging tools
* StarPU MPI support::          How to combine StarPU with MPI
* Tips and Tricks::             Tips and tricks to know about
* Configuring StarPU::          How to configure StarPU
* StarPU API::                  The API to use StarPU
* Advanced Topics::             Advanced use of StarPU
* C Extensions::                Easier StarPU programming with GCC
* Full source code for the 'Scaling a Vector' example::
* Function Index::              Index of C functions.
* GNU Free Documentation License::  How you can copy and share this manual.
@end menu
@c ---------------------------------------------------------------------
@c Introduction to StarPU
@c ---------------------------------------------------------------------
@node Introduction
@chapter Introduction to StarPU
@menu
* Motivation::                  Why StarPU?
* StarPU in a Nutshell::        The Fundamentals of StarPU
@end menu
@node Motivation
@section Motivation
@c complex machines with heterogeneous cores/devices
The use of specialized hardware such as accelerators or coprocessors offers an
interesting approach to overcome the physical limits encountered by processor
architects. As a result, many machines are now equipped with one or several
accelerators (e.g. a GPU), in addition to the usual processor(s). While a lot of
effort has been devoted to offloading computation onto such accelerators, very
little attention has been paid to portability concerns on the one hand, and to the
possibility of having heterogeneous accelerators and processors interact on the other hand.
StarPU is a runtime system that offers support for heterogeneous multicore
architectures. It not only offers a unified view of the computational resources
(i.e. CPUs and accelerators at the same time), but also takes care of
efficiently mapping and executing tasks onto a heterogeneous machine while
transparently handling low-level issues such as data transfers in a portable
fashion.
@c this leads to a complicated distributed memory design
@c which is not (easily) manageable by hand
@c added value/benefits of StarPU
@c   - portability
@c   - scheduling, perf. portability
@node StarPU in a Nutshell
@section StarPU in a Nutshell
@menu
* Codelet and Tasks::
* StarPU Data Management Library::
* Glossary::
* Research Papers::
@end menu
From a programming point of view, StarPU is not a new language but a library
that executes tasks explicitly submitted by the application. The data that a
task manipulates are automatically transferred onto the accelerator so that the
programmer does not have to take care of complex data movements. StarPU also
takes particular care of scheduling those tasks efficiently and allows
scheduling experts to implement custom scheduling policies in a portable
fashion.
@c explain the notion of codelet and task (i.e. g(A, B)
@node Codelet and Tasks
@subsection Codelet and Tasks
One of StarPU's primary data structures is the @b{codelet}. A codelet describes a
computational kernel that can possibly be implemented on multiple architectures
such as a CPU, a CUDA device or a Cell's SPU.
@c TODO insert illustration f : f_spu, f_cpu, ...
Another important data structure is the @b{task}. Executing a StarPU task
consists in applying a codelet on a data set, on one of the architectures on
which the codelet is implemented. A task thus describes the codelet that it
uses, but also which data are accessed, and how they are
accessed during the computation (read and/or write).
StarPU tasks are asynchronous: submitting a task to StarPU is a non-blocking
operation. The task structure can also specify a @b{callback} function that is
called once StarPU has properly executed the task. It also contains optional
fields that the application may use to give hints to the scheduler (such as
priority levels).
By default, task dependencies are inferred from data dependencies (sequential
coherence) by StarPU. The application can however disable sequential coherence
for some data, and dependencies can then be expressed by hand.
A task may be identified by a unique 64-bit number chosen by the application,
which we refer to as a @b{tag}.
Task dependencies can be enforced by hand either by the means of callback functions, by
submitting other tasks, or by expressing dependencies
between tags (which can thus correspond to tasks that have not been submitted
yet).
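As an illustration, here is a minimal sketch of a tag dependency, assuming a
codelet @code{cl} has already been defined (as in the Hello World example
below): task B may be submitted first, but it will not start before task A has
completed.
@cartouche
@smallexample
struct starpu_task *task_a = starpu_task_create();
task_a->cl = &cl;
task_a->use_tag = 1;
task_a->tag_id = 0x1;

struct starpu_task *task_b = starpu_task_create();
task_b->cl = &cl;
task_b->use_tag = 1;
task_b->tag_id = 0x2;

/* @b{tag 0x2 depends on one other tag: 0x1} */
starpu_tag_declare_deps((starpu_tag_t)0x2, 1, (starpu_tag_t)0x1);

starpu_task_submit(task_b);  /* @b{held back until tag 0x1 is released} */
starpu_task_submit(task_a);
@end smallexample
@end cartouche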
@c TODO insert illustration f(Ar, Brw, Cr) + ..
@c DSM
@node StarPU Data Management Library
@subsection StarPU Data Management Library
Because StarPU schedules tasks at runtime, data transfers have to be
done automatically and ``just-in-time'' between processing units,
relieving the application programmer from explicit data transfers.
Moreover, to avoid unnecessary transfers, StarPU keeps data
where it was last needed, even if it was modified there, and it
allows multiple copies of the same data to reside at the same time on
several processing units as long as it is not modified.
@node Glossary
@subsection Glossary
A @b{codelet} records pointers to various implementations of the same
theoretical function.
A @b{memory node} can be either the main RAM or GPU-embedded memory.
A @b{bus} is a link between memory nodes.
A @b{data handle} keeps track of replicates of the same data (@b{registered} by the
application) over various memory nodes. The data management library manages
keeping them coherent.
The @b{home} memory node of a data handle is the memory node from which the data
was registered (usually the main memory node).
A @b{task} represents a scheduled execution of a codelet on some data handles.
A @b{tag} is a rendez-vous point. Tasks typically have their own tag, and can
depend on other tags. The value is chosen by the application.
A @b{worker} executes tasks. There is typically one per CPU computation core and
one per accelerator (for which a whole CPU core is dedicated).
A @b{driver} drives a given kind of worker. There are currently CPU, CUDA,
OpenCL and Gordon drivers. They usually start several workers to actually drive
them.
A @b{performance model} is a (dynamic or static) model of the performance of a
given codelet. Codelets can have an execution time performance model as well as
a power consumption performance model.
A data @b{interface} describes the layout of the data: for a vector, a pointer
to the start, the number of elements and the size of each element; for a matrix, a
pointer to the start, the number of elements per row, the offset between rows,
and the size of each element; etc. To access their data, codelet functions are
given interfaces for the local memory node replicates of the data handles of the
scheduled task.
@b{Partitioning} data means dividing the data of a given data handle (called
@b{father}) into a series of @b{children} data handles which designate various
portions of the former.
A @b{filter} is the function which computes children data handles from a father
data handle, and thus describes how the partitioning should be done (horizontal,
vertical, etc.), as sketched below.
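As a minimal sketch, partitioning a vector handle into 4 children could look
as follows; the exact name of the vector block filter (here assumed to be
@code{starpu_block_filter_func_vector}) depends on the StarPU release, and the
@ref{Partitioning Data} section gives a complete example.
@cartouche
@smallexample
struct starpu_data_filter f = @{
    .filter_func = starpu_block_filter_func_vector,
    .nchildren = 4
@};
starpu_data_partition(vector_handle, &f);

/* @b{children are designated by their depth and index in the tree} */
starpu_data_handle sub_handle = starpu_data_get_sub_data(vector_handle, 1, 0);

/* @b{... submit tasks working on the children ...} */

starpu_data_unpartition(vector_handle, 0);  /* @b{gather back on memory node 0} */
@end smallexample
@end cartouche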
@b{Acquiring} a data handle can be done from the main application, to safely
access the data of a data handle from its home node, without having to
unregister it.
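For example, assuming a @code{vector} array registered under
@code{vector_handle} (as in the vector example below), reading it safely from
the application can be sketched as:
@cartouche
@smallexample
starpu_data_acquire(vector_handle, STARPU_R);
/* @b{StarPU guarantees an up-to-date copy in main memory here} */
printf("first element: %f\n", vector[0]);
starpu_data_release(vector_handle);
@end smallexample
@end cartouche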
@node Research Papers
@subsection Research Papers
Research papers about StarPU can be found at
@indicateurl{http://runtime.bordeaux.inria.fr/Publis/Keyword/STARPU.html}.
Notably, a good overview is given in the research report
@indicateurl{http://hal.archives-ouvertes.fr/inria-00467677}.
@c ---------------------------------------------------------------------
@c Installing StarPU
@c ---------------------------------------------------------------------
@node Installing StarPU
@chapter Installing StarPU
@menu
* Downloading StarPU::
* Configuration of StarPU::
* Building and Installing StarPU::
@end menu
StarPU can be built and installed by the standard means of the GNU
autotools. The following chapter briefly reminds the reader how these tools
can be used to install StarPU.
@node Downloading StarPU
@section Downloading StarPU
@menu
* Getting Sources::
* Optional dependencies::
@end menu
@node Getting Sources
@subsection Getting Sources
The simplest way to get the StarPU sources is to download the latest official
release tarball from @indicateurl{https://gforge.inria.fr/frs/?group_id=1570},
or the latest nightly snapshot from
@indicateurl{http://starpu.gforge.inria.fr/testing/}. The following documents
how to get the very latest version from the Subversion repository itself; this
should be needed only if you need the very latest changes (i.e. less than a
day old!).
The source code is managed by a Subversion server hosted by the
InriaGforge. To get the source code, you need:
@itemize
@item
To install the client side of the software Subversion if it is
not already available on your system. The software can be obtained from
@indicateurl{http://subversion.tigris.org}. If you are running
on Windows, you will probably prefer to use TortoiseSVN from
@indicateurl{http://tortoisesvn.tigris.org/}.
@item
You can check out the project's SVN repository through anonymous
access. This will provide you with read access to the
repository.
If you need write access to the StarPU project, you can also choose to
become a member of the project @code{starpu}. For this, you first need to get
an account on the gForge server. You can then send a request to join the project
(@indicateurl{https://gforge.inria.fr/project/request.php?group_id=1570}).
@item
More information on how to get a gForge account, to become a member of
a project, or on any other related task can be obtained from the
InriaGforge at @indicateurl{https://gforge.inria.fr/}. The most important
thing is to upload your public SSH key on the gForge server (see the
FAQ at @indicateurl{http://siteadmin.gforge.inria.fr/FAQ.html#Q6} for
instructions).
@end itemize
You can now check out the latest version from the Subversion server:
@itemize
@item
using the anonymous access via svn:
@example
% svn checkout svn://scm.gforge.inria.fr/svn/starpu/trunk
@end example
@item
using the anonymous access via https:
@example
% svn checkout --username anonsvn https://scm.gforge.inria.fr/svn/starpu/trunk
@end example
The password is @code{anonsvn}.
@item
using your gForge account:
@example
% svn checkout svn+ssh://<login>@@scm.gforge.inria.fr/svn/starpu/trunk
@end example
@end itemize
The following step requires the availability of @code{autoconf} and
@code{automake} to generate the @code{./configure} script. This is
done by calling @code{./autogen.sh}. The required version for
@code{autoconf} is 2.60 or higher. You will also need @code{makeinfo}.
@example
% ./autogen.sh
@end example
If the autotools are not available on your machine or not recent
enough, you can choose to download the latest nightly tarball, which
is provided with a @code{configure} script.
@example
% wget http://starpu.gforge.inria.fr/testing/starpu-nightly-latest.tar.gz
@end example
@node Optional dependencies
@subsection Optional dependencies
The topology discovery library, @code{hwloc}, is not mandatory to use StarPU
but strongly recommended. It allows StarPU to increase performance and to
perform some topology-aware scheduling.
@code{hwloc} is available in major distributions and for most OSes and can be
downloaded from @indicateurl{http://www.open-mpi.org/software/hwloc}.
@node Configuration of StarPU
@section Configuration of StarPU
@menu
* Generating Makefiles and configuration scripts::
* Running the configuration::
@end menu
@node Generating Makefiles and configuration scripts
@subsection Generating Makefiles and configuration scripts
This step is not necessary when using the tarball releases of StarPU. If you
are using the source code from the svn repository, you first need to generate
the configure scripts and the Makefiles.
@example
% ./autogen.sh
@end example
@node Running the configuration
@subsection Running the configuration
@example
% ./configure
@end example
Details about options that are useful to give to @code{./configure} are given in
@ref{Compilation configuration}.
@node Building and Installing StarPU
@section Building and Installing StarPU
@menu
* Building::
* Sanity Checks::
* Installing::
@end menu
@node Building
@subsection Building
@example
% make
@end example
@node Sanity Checks
@subsection Sanity Checks
In order to make sure that StarPU is working properly on the system, it is also
possible to run a test suite.
@example
% make check
@end example
@node Installing
@subsection Installing
In order to install StarPU at the location that was specified during
configuration:
@example
% make install
@end example
@c ---------------------------------------------------------------------
@c Using StarPU
@c ---------------------------------------------------------------------
@node Using StarPU
@chapter Using StarPU
@menu
* Setting flags for compiling and linking applications::
* Running a basic StarPU application::
* Kernel threads started by StarPU::
* Enabling OpenCL::
@end menu
@node Setting flags for compiling and linking applications
@section Setting flags for compiling and linking applications
Compiling and linking an application against StarPU may require specific
flags or libraries (for instance @code{CUDA} or @code{libspe2}).
To this end, it is possible to use the @code{pkg-config} tool.
If StarPU was not installed at some standard location, the path of StarPU's
library must be specified in the @code{PKG_CONFIG_PATH} environment variable so
that @code{pkg-config} can find it. For example, if StarPU was installed in
@code{$prefix_dir}:
@example
% PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$prefix_dir/lib/pkgconfig
@end example
The flags required to compile or link against StarPU are then
accessible with the following commands:
@example
% pkg-config --cflags libstarpu  # options for the compiler
% pkg-config --libs libstarpu    # options for the linker
@end example
@node Running a basic StarPU application
@section Running a basic StarPU application
Basic examples using StarPU are built in the directory
@code{examples/basic_examples/} (and installed in
@code{$prefix_dir/lib/starpu/examples/}). You can for example run the example
@code{vector_scal}.
@example
% ./examples/basic_examples/vector_scal
BEFORE : First element was 1.000000
AFTER First element is 3.140000
%
@end example
When StarPU is used for the first time, the directory
@code{$HOME/.starpu/} is created; performance models will be stored in
that directory.
Please note that buses are benchmarked when StarPU is launched for the
first time. This may take a few minutes, or less if @code{hwloc} is
installed. This step is done only once per user and per machine.
@node Kernel threads started by StarPU
@section Kernel threads started by StarPU
StarPU automatically binds one thread per CPU core. It does not use
SMT/hyperthreading because kernels are usually already optimized for using a
full core, and using hyperthreading would make kernel calibration rather random.
Since driving GPUs is a CPU-consuming task, StarPU dedicates one core per GPU.
While StarPU tasks are executing, the application is not supposed to do
computations in the threads it starts itself; tasks should be used instead.
TODO: add a StarPU function to bind an application thread (e.g. the main thread)
to a dedicated core (and thus disable the corresponding StarPU CPU worker).
@node Enabling OpenCL
@section Enabling OpenCL
When both CUDA and OpenCL drivers are enabled, StarPU will launch an
OpenCL worker for NVIDIA GPUs only if CUDA is not already running on them.
This design choice was necessary as OpenCL and CUDA cannot run at the
same time on the same NVIDIA GPU, as there is currently no interoperability
between them.
To enable OpenCL, you need either to disable CUDA when configuring StarPU:
@example
% ./configure --disable-cuda
@end example
or when running applications:
@example
% STARPU_NCUDA=0 ./application
@end example
OpenCL will automatically be started on any device not yet used by
CUDA. So, on a machine with 4 GPUs, it is possible to
enable CUDA on 2 devices and OpenCL on the other 2 devices by running:
@example
% STARPU_NCUDA=2 ./application
@end example
@c ---------------------------------------------------------------------
@c Basic Examples
@c ---------------------------------------------------------------------
@node Basic Examples
@chapter Basic Examples
@menu
* Compiling and linking options::
* Hello World::                 Submitting Tasks
* Scaling a Vector::            Manipulating Data
* Vector Scaling on an Hybrid CPU/GPU Machine::  Handling Heterogeneous Architectures
* Using multiple implementations of a codelet::
* Task and Worker Profiling::
* Partitioning Data::           Partitioning Data
* Performance model example::
* Theoretical lower bound on execution time::
* Insert Task Utility::
* More examples::               More examples shipped with StarPU
* Debugging::                   When things go wrong.
@end menu
@node Compiling and linking options
@section Compiling and linking options
Let's suppose StarPU has been installed in the directory
@code{$STARPU_DIR}. As explained in @ref{Setting flags for compiling and linking applications},
the variable @code{PKG_CONFIG_PATH} needs to be set. It is also
necessary to set the variable @code{LD_LIBRARY_PATH} to locate dynamic
libraries at runtime.
@example
% PKG_CONFIG_PATH=$STARPU_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
% LD_LIBRARY_PATH=$STARPU_DIR/lib:$LD_LIBRARY_PATH
@end example
The Makefile could for instance contain the following lines to define which
options must be given to the compiler and to the linker:
@cartouche
@example
CFLAGS  += $$(pkg-config --cflags libstarpu)
LDFLAGS += $$(pkg-config --libs libstarpu)
@end example
@end cartouche
@node Hello World
@section Hello World
@menu
* Required Headers::
* Defining a Codelet::
* Submitting a Task::
* Execution of Hello World::
@end menu
In this section, we show how to implement a simple program that submits a task to StarPU.
@node Required Headers
@subsection Required Headers
The @code{starpu.h} header should be included in any code using StarPU.
@cartouche
@smallexample
#include <starpu.h>
@end smallexample
@end cartouche
@node Defining a Codelet
@subsection Defining a Codelet
@cartouche
@smallexample
struct params @{
    int i;
    float f;
@};

void cpu_func(void *buffers[], void *cl_arg)
@{
    struct params *params = cl_arg;

    printf("Hello world (params = @{%i, %f@} )\n", params->i, params->f);
@}

starpu_codelet cl =
@{
    .where = STARPU_CPU,
    .cpu_func = cpu_func,
    .nbuffers = 0
@};
@end smallexample
@end cartouche
A codelet is a structure that represents a computational kernel. Such a codelet
may contain an implementation of the same kernel on different architectures
(e.g. CUDA, Cell's SPU, x86, ...).
The @code{nbuffers} field specifies the number of data buffers that are
manipulated by the codelet: here the codelet does not access or modify any data
that is controlled by our data management library. Note that the argument
passed to the codelet (the @code{cl_arg} field of the @code{starpu_task}
structure) does not count as a buffer since it is not managed by our data
management library, but just contains trivial parameters.
@c TODO need a crossref to the proper description of "where" see bla for more ...
We create a codelet which may only be executed on the CPUs. The @code{where}
field is a bitmask that defines where the codelet may be executed. Here, the
@code{STARPU_CPU} value means that only CPUs can execute this codelet
(@pxref{Codelets and Tasks} for more details on this field).
When a CPU core executes a codelet, it calls the @code{cpu_func} function,
which @emph{must} have the following prototype:
@code{void (*cpu_func)(void *buffers[], void *cl_arg);}
In this example, we can ignore the first argument of this function, which gives a
description of the input and output buffers (e.g. the size and the location of
the matrices), since there is none.
The second argument is a pointer to a buffer passed as an
argument to the codelet by the means of the @code{cl_arg} field of the
@code{starpu_task} structure.
@c TODO rewrite so that it is a little clearer ?
Be aware that this may be a pointer to a
@emph{copy} of the actual buffer, and not the pointer given by the programmer:
if the codelet modifies this buffer, there is no guarantee that the initial
buffer will be modified as well. This for instance implies that the buffer
cannot be used as a synchronization medium. If synchronization is needed, data
has to be registered to StarPU, see @ref{Scaling a Vector}.
@node Submitting a Task
@subsection Submitting a Task
@cartouche
@smallexample
void callback_func(void *callback_arg)
@{
    printf("Callback function (arg %x)\n", (unsigned)(uintptr_t)callback_arg);
@}

int main(int argc, char **argv)
@{
    /* @b{initialize StarPU} */
    starpu_init(NULL);

    struct starpu_task *task = starpu_task_create();

    task->cl = &cl; /* @b{Pointer to the codelet defined above} */

    struct params params = @{ 1, 2.0f @};
    task->cl_arg = &params;
    task->cl_arg_size = sizeof(params);

    task->callback_func = callback_func;
    task->callback_arg = (void *)0x42;

    /* @b{starpu_task_submit will be a blocking call} */
    task->synchronous = 1;

    /* @b{submit the task to StarPU} */
    starpu_task_submit(task);

    /* @b{terminate StarPU} */
    starpu_shutdown();

    return 0;
@}
@end smallexample
@end cartouche
Before submitting any tasks to StarPU, @code{starpu_init} must be called. The
@code{NULL} argument specifies that we use the default configuration. Tasks cannot
be submitted after the termination of StarPU by a call to
@code{starpu_shutdown}.
In the example above, a task structure is allocated by a call to
@code{starpu_task_create}. This function only allocates and fills the
corresponding structure with the default settings (@pxref{Codelets and
Tasks, starpu_task_create}), but it does not submit the task to StarPU.
@c not really clear ;)
The @code{cl} field is a pointer to the codelet which the task will
execute: in other words, the codelet structure describes which computational
kernel should be offloaded on the different architectures, and the task
structure is a wrapper containing a codelet and the piece of data on which the
codelet should operate.
The optional @code{cl_arg} field is a pointer to a buffer (of size
@code{cl_arg_size}) with some parameters for the kernel
described by the codelet. For instance, if a codelet implements a computational
kernel that multiplies its input vector by a constant, the constant could be
specified by the means of this buffer, instead of registering it as a StarPU
data. It must however be noted that StarPU avoids making copies whenever possible
and rather passes the pointer as such, so the buffer which is pointed to must be
kept allocated until the task terminates, and if several tasks are submitted
with various parameters, each of them must be given a pointer to its own
buffer, as sketched below.
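Here is a minimal sketch of this pattern: each submitted task is given its own
heap-allocated copy of the parameter, which remains valid until the task
terminates.
@cartouche
@smallexample
float *factor = malloc(sizeof(*factor));  /* @b{one buffer per task} */
*factor = 3.14f;

task->cl_arg = factor;        /* @b{must remain valid until the task terminates} */
task->cl_arg_size = sizeof(*factor);

/* @b{free(factor) can then be done e.g. from the callback function} */
@end smallexample
@end cartouche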
Once a task has been executed, an optional callback function is called.
While the computational kernel could be offloaded on various architectures, the
callback function is always executed on a CPU. The @code{callback_arg}
pointer is passed as an argument of the callback. The prototype of a callback
function must be:
@code{void (*callback_function)(void *);}
If the @code{synchronous} field is non-zero, task submission will be
synchronous: the @code{starpu_task_submit} function will not return until the
task has been executed. Note that the @code{starpu_shutdown} method does not
guarantee that asynchronous tasks have been executed before it returns,
@code{starpu_task_wait_for_all} can be used to that effect, or data can be
unregistered (@code{starpu_data_unregister(vector_handle);}), which will
implicitly wait for all the tasks scheduled to work on it, unless explicitly
disabled thanks to @code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
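As an illustration, here is a minimal sketch of the asynchronous pattern,
reusing the codelet @code{cl} defined above: tasks are submitted without
blocking, and @code{starpu_task_wait_for_all} acts as a barrier before
shutting StarPU down.
@cartouche
@smallexample
unsigned i;
for (i = 0; i < 42; i++)
@{
    struct starpu_task *task = starpu_task_create();
    task->cl = &cl;
    task->synchronous = 0;  /* @b{this is the default: submission returns at once} */
    starpu_task_submit(task);
@}

/* @b{block until all submitted tasks have been executed} */
starpu_task_wait_for_all();
starpu_shutdown();
@end smallexample
@end cartouche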
@node Execution of Hello World
@subsection Execution of Hello World
@smallexample
% make hello_world
cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) hello_world.c -o hello_world
% ./hello_world
Hello world (params = @{1, 2.000000@} )
Callback function (arg 42)
@end smallexample
@node Scaling a Vector
@section Manipulating Data: Scaling a Vector
The previous example has shown how to submit tasks. In this section,
we show how StarPU tasks can manipulate data. The full source code for
this example is given in @ref{Full source code for the 'Scaling a Vector' example}.
@menu
* Source code of Vector Scaling::
* Execution of Vector Scaling::
@end menu
@node Source code of Vector Scaling
@subsection Source code of Vector Scaling
Programmers can describe the data layout of their application so that StarPU is
responsible for enforcing data coherency and availability across the machine.
Instead of handling complex (and non-portable) mechanisms to perform data
movements, programmers only declare which piece of data is accessed and/or
modified by a task, and StarPU makes sure that when a computational kernel
starts somewhere (e.g. on a GPU), its data are available locally.
Before submitting those tasks, the programmer first needs to declare the
different pieces of data to StarPU using the @code{starpu_*_data_register}
functions. To ease the development of applications for StarPU, it is possible
to describe multiple types of data layout. A type of data layout is called an
@b{interface}. There are different predefined interfaces available in StarPU:
here we will consider the @b{vector interface}.
The following lines show how to declare an array of @code{NX} elements of type
@code{float} using the vector interface:
@cartouche
@smallexample
float vector[NX];

starpu_data_handle vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end smallexample
@end cartouche
The first argument, called the @b{data handle}, is an opaque pointer which
designates the array in StarPU. This is also the structure which is used to
describe which data is used by a task. The second argument is the node number
where the data originally resides. Here it is 0 since the @code{vector} array is in
main memory. Then comes the pointer @code{vector} where the data can be found in main memory,
the number of elements in the vector and the size of each element.
The following shows how to construct a StarPU task that will manipulate the
vector and a constant factor.
@cartouche
@smallexample
float factor = 3.14;
struct starpu_task *task = starpu_task_create();

task->cl = &cl;                          /* @b{Pointer to the codelet defined below} */
task->buffers[0].handle = vector_handle; /* @b{First parameter of the codelet} */
task->buffers[0].mode = STARPU_RW;
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->synchronous = 1;

starpu_task_submit(task);
@end smallexample
@end cartouche
Since the factor is a mere constant float value parameter,
it does not need a preliminary registration, and
can just be passed through the @code{cl_arg} pointer like in the previous
example. The vector parameter is described by its handle.
There are two fields in each element of the @code{buffers} array.
@code{handle} is the handle of the data, and @code{mode} specifies how the
kernel will access the data (@code{STARPU_R} for read-only, @code{STARPU_W} for
write-only and @code{STARPU_RW} for read and write access).
The definition of the codelet can be written as follows:
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned i;
    float *factor = cl_arg;

    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* CPU copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);

    for (i = 0; i < n; i++)
        val[i] *= *factor;
@}

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = scal_cpu_func,
    .nbuffers = 1
@};
@end smallexample
@end cartouche
The first argument is an array that gives
a description of all the buffers passed in the @code{task->buffers} array. The
size of this array is given by the @code{nbuffers} field of the codelet
structure. For the sake of genericity, this array contains pointers to the
different interfaces describing each buffer. In the case of the @b{vector
interface}, the location of the vector (resp. its length) is accessible through the
@code{ptr} (resp. @code{nx}) field of this interface. Since the vector is accessed in a
read-write fashion, any modification will automatically affect future accesses
to this vector made by other tasks.
The second argument of the @code{scal_cpu_func} function contains a pointer to the
parameters of the codelet (given in @code{task->cl_arg}), so that we read the
constant factor from this pointer.
@node Execution of Vector Scaling
@subsection Execution of Vector Scaling
@smallexample
% make vector_scal
cc $(pkg-config --cflags libstarpu) $(pkg-config --libs libstarpu) vector_scal.c -o vector_scal
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
@node Vector Scaling on an Hybrid CPU/GPU Machine
@section Vector Scaling on an Hybrid CPU/GPU Machine
Contrary to the previous examples, the task submitted in this example may not
only be executed by the CPUs, but also by a CUDA device.
@menu
* Definition of the CUDA Kernel::
* Definition of the OpenCL Kernel::
* Definition of the Main Code::
* Execution of Hybrid Vector Scaling::
@end menu
@node Definition of the CUDA Kernel
@subsection Definition of the CUDA Kernel
The CUDA implementation can be written as follows. It needs to be compiled with
a CUDA compiler such as nvcc, the NVIDIA CUDA compiler driver. It must be noted
that the vector pointer returned by @code{STARPU_VECTOR_GET_PTR} is here a pointer in GPU
memory, so that it can be passed as such to the @code{vector_mult_cuda} kernel
call.
@cartouche
@smallexample
#include <starpu.h>
#include <starpu_cuda.h>

static __global__ void vector_mult_cuda(float *val, unsigned n,
                                        float factor)
@{
    unsigned i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        val[i] *= factor;
@}

extern "C" void scal_cuda_func(void *buffers[], void *_args)
@{
    float *factor = (float *)_args;

    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* CUDA copy of the vector pointer */
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned threads_per_block = 64;
    unsigned nblocks = (n + threads_per_block-1) / threads_per_block;

@i{    vector_mult_cuda<<<nblocks,threads_per_block, 0, starpu_cuda_get_local_stream()>>>(val, n, *factor);}

@i{    cudaStreamSynchronize(starpu_cuda_get_local_stream());}
@}
@end smallexample
@end cartouche
@node Definition of the OpenCL Kernel
@subsection Definition of the OpenCL Kernel

The OpenCL implementation can be written as follows. StarPU provides
tools to compile an OpenCL kernel stored in a file.

@cartouche
@smallexample
__kernel void vector_mult_opencl(__global float* val, int nx, float factor)
@{
    const int i = get_global_id(0);
    if (i < nx) @{
        val[i] *= factor;
    @}
@}
@end smallexample
@end cartouche

Similarly to CUDA, the pointer returned by @code{STARPU_VECTOR_GET_PTR} is here
a device pointer, so that it is passed as such to the OpenCL kernel.
@cartouche
@smallexample
#include <starpu.h>
@i{#include <starpu_opencl.h>}

@i{extern struct starpu_opencl_program programs;}

void scal_opencl_func(void *buffers[], void *_args)
@{
    float *factor = _args;
@i{    int id, devid, err;}
@i{    cl_kernel kernel;}
@i{    cl_command_queue queue;}
@i{    cl_event event;}

    /* length of the vector */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    /* OpenCL copy of the vector pointer */
    cl_mem val = (cl_mem) STARPU_VECTOR_GET_PTR(buffers[0]);

@i{    id = starpu_worker_get_id();}
@i{    devid = starpu_worker_get_devid(id);}

@i{    err = starpu_opencl_load_kernel(&kernel, &queue, &programs,}
@i{                    "vector_mult_opencl", devid);   /* @b{Name of the kernel defined above} */}
@i{    if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}

@i{    err = clSetKernelArg(kernel, 0, sizeof(val), &val);}
@i{    err |= clSetKernelArg(kernel, 1, sizeof(n), &n);}
@i{    err |= clSetKernelArg(kernel, 2, sizeof(*factor), factor);}
@i{    if (err) STARPU_OPENCL_REPORT_ERROR(err);}

@i{    @{}
@i{        size_t global=1;}
@i{        size_t local=1;}
@i{        err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 0, NULL, &event);}
@i{        if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);}
@i{    @}}

@i{    clFinish(queue);}
@i{    starpu_opencl_collect_stats(event);}
@i{    clReleaseEvent(event);}

@i{    starpu_opencl_release_kernel(kernel);}
@}
@end smallexample
@end cartouche
@node Definition of the Main Code
@subsection Definition of the Main Code

The CPU implementation is the same as in the previous section.

Here is the source of the main application. You can notice the value of the
field @code{where} for the codelet. We specify
@code{STARPU_CPU|STARPU_CUDA|STARPU_OPENCL} to indicate to StarPU that the codelet
can be executed either on a CPU or on a CUDA or an OpenCL device.

@cartouche
@smallexample
#include <starpu.h>

#define NX 2048

extern void scal_cuda_func(void *buffers[], void *_args);
extern void scal_cpu_func(void *buffers[], void *_args);
extern void scal_opencl_func(void *buffers[], void *_args);

/* @b{Definition of the codelet} */
static starpu_codelet cl = @{
    .where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL, /* @b{It can be executed on a CPU,} */
                                    /* @b{on a CUDA device, or on an OpenCL device} */
    .cuda_func = scal_cuda_func,
    .cpu_func = scal_cpu_func,
    .opencl_func = scal_opencl_func,
    .nbuffers = 1
@};

#ifdef STARPU_USE_OPENCL
/* @b{The compiled version of the OpenCL program} */
struct starpu_opencl_program programs;
#endif

int main(int argc, char **argv)
@{
    float *vector;
    int i, ret;
    float factor=3.0;
    struct starpu_task *task;
    starpu_data_handle vector_handle;

    starpu_init(NULL);                        /* @b{Initialising StarPU} */

#ifdef STARPU_USE_OPENCL
    starpu_opencl_load_opencl_from_file(
            "examples/basic_examples/vector_scal_opencl_codelet.cl",
            &programs, NULL);
#endif

    vector = malloc(NX*sizeof(vector[0]));
    assert(vector);
    for(i=0 ; i<NX ; i++) vector[i] = i;
@end smallexample
@end cartouche
@cartouche
@smallexample
    /* @b{Registering data within StarPU} */
    starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector,
                                NX, sizeof(vector[0]));

    /* @b{Definition of the task} */
    task = starpu_task_create();
    task->cl = &cl;
    task->buffers[0].handle = vector_handle;
    task->buffers[0].mode = STARPU_RW;
    task->cl_arg = &factor;
    task->cl_arg_size = sizeof(factor);
@end smallexample
@end cartouche

@cartouche
@smallexample
    /* @b{Submitting the task} */
    ret = starpu_task_submit(task);
    if (ret == -ENODEV) @{
        fprintf(stderr, "No worker may execute this task\n");
        return 1;
    @}

@c TODO: Mmm, should rather be an unregistration with an implicit dependency, no?
    /* @b{Waiting for its termination} */
    starpu_task_wait_for_all();

    /* @b{Update the vector in RAM} */
    starpu_data_acquire(vector_handle, STARPU_R);
@end smallexample
@end cartouche

@cartouche
@smallexample
    /* @b{Access the data} */
    for(i=0 ; i<NX; i++) @{
        fprintf(stderr, "%f ", vector[i]);
    @}
    fprintf(stderr, "\n");

    /* @b{Release the RAM view of the data before unregistering it and shutting down StarPU} */
    starpu_data_release(vector_handle);
    starpu_data_unregister(vector_handle);
    starpu_shutdown();

    return 0;
@}
@end smallexample
@end cartouche
@node Execution of Hybrid Vector Scaling
@subsection Execution of Hybrid Vector Scaling

The Makefile given at the beginning of the section must be extended to
give the rules to compile the CUDA source code. Note that the source
file of the OpenCL kernel does not need to be compiled now, it will
be compiled at run-time when calling the function
@code{starpu_opencl_load_opencl_from_file()} (@pxref{starpu_opencl_load_opencl_from_file}).

@cartouche
@smallexample
CFLAGS  += $(shell pkg-config --cflags libstarpu)
LDFLAGS += $(shell pkg-config --libs libstarpu)
CC       = gcc

vector_scal: vector_scal.o vector_scal_cpu.o vector_scal_cuda.o vector_scal_opencl.o

%.o: %.cu
	nvcc $(CFLAGS) $< -c -o $@@

clean:
	rm -f vector_scal *.o
@end smallexample
@end cartouche

@smallexample
% make
@end smallexample

and to execute it, with the default configuration:

@smallexample
% ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

or for example, by disabling CPU devices:

@smallexample
% STARPU_NCPUS=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample

or by disabling CUDA devices (which may then enable the use of OpenCL,
see @ref{Enabling OpenCL}):

@smallexample
% STARPU_NCUDA=0 ./vector_scal
0.000000 3.000000 6.000000 9.000000 12.000000
@end smallexample
@node Using multiple implementations of a codelet
@section Using multiple implementations of a codelet

One may want to write multiple implementations of a codelet for a single type of
device and let StarPU choose which one to run. As an example, we will show how
to use SSE to scale a vector. The codelet can be written as follows:

@cartouche
@smallexample
#include <xmmintrin.h>

void scal_sse_func(void *buffers[], void *cl_arg)
@{
    float *vector = (float *) STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned int n = STARPU_VECTOR_GET_NX(buffers[0]);
    unsigned int n_iterations = n/4;
    if (n % 4 != 0)
        n_iterations++;

    __m128 *VECTOR = (__m128*) vector;
    __m128 factor __attribute__((aligned(16)));
    factor = _mm_set1_ps(*(float *) cl_arg);

    unsigned int i;
    for (i = 0; i < n_iterations; i++)
        VECTOR[i] = _mm_mul_ps(factor, VECTOR[i]);
@}
@end smallexample
@end cartouche
The @code{cpu_func} field of the @code{starpu_codelet} structure has to be set
to the special value @code{STARPU_MULTIPLE_CPU_IMPLEMENTATIONS}. Note that
@code{STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS} and
@code{STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS} are also available.

@cartouche
@smallexample
starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS,
    .cpu_funcs = @{ scal_cpu_func, scal_sse_func @},
    .nbuffers = 1
@};
@end smallexample
@end cartouche

The scheduler will measure the performance of all the implementations it was
given, and pick the one that seems to be the fastest.
@node Task and Worker Profiling
@section Task and Worker Profiling

A full example showing how to use the profiling API is available in
the StarPU sources in the directory @code{examples/profiling/}.

@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
task->synchronous = 1;
/* We will destroy the task structure by hand so that we can
 * query the profiling info before the task is destroyed. */
task->destroy = 0;

/* Submit and wait for completion (since synchronous was set to 1) */
starpu_task_submit(task);

/* The task is finished, get profiling information */
struct starpu_task_profiling_info *info = task->profiling_info;

/* How much time did it take before the task started? */
double delay = starpu_timing_timespec_delay_us(&info->submit_time, &info->start_time);

/* How long was the task execution? */
double length = starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);

/* We don't need the task structure anymore */
starpu_task_destroy(task);
@end smallexample
@end cartouche
@cartouche
@smallexample
/* Display the occupancy of all workers during the test */
int worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
@{
    struct starpu_worker_profiling_info worker_info;
    int ret = starpu_worker_get_profiling_info(worker, &worker_info);
    STARPU_ASSERT(!ret);

    double total_time = starpu_timing_timespec_to_us(&worker_info.total_time);
    double executing_time = starpu_timing_timespec_to_us(&worker_info.executing_time);
    double sleeping_time = starpu_timing_timespec_to_us(&worker_info.sleeping_time);

    float executing_ratio = 100.0*executing_time/total_time;
    float sleeping_ratio = 100.0*sleeping_time/total_time;

    char workername[128];
    starpu_worker_get_name(worker, workername, 128);
    fprintf(stderr, "Worker %s:\n", workername);
    fprintf(stderr, "\ttotal time: %.2lf ms\n", total_time*1e-3);
    fprintf(stderr, "\texec time: %.2lf ms (%.2f %%)\n", executing_time*1e-3,
            executing_ratio);
    fprintf(stderr, "\tblocked time: %.2lf ms (%.2f %%)\n", sleeping_time*1e-3,
            sleeping_ratio);
@}
@end smallexample
@end cartouche
@node Partitioning Data
@section Partitioning Data

An existing piece of data can be partitioned in sub parts to be used by different tasks, for instance:

@cartouche
@smallexample
int vector[NX];
starpu_data_handle handle;

/* Declare data to StarPU */
starpu_vector_data_register(&handle, 0, (uintptr_t)vector, NX, sizeof(vector[0]));

/* Partition the vector in PARTS sub-vectors */
starpu_filter f =
@{
    .filter_func = starpu_block_filter_func_vector,
    .nchildren = PARTS
@};
starpu_data_partition(handle, &f);
@end smallexample
@end cartouche
@cartouche
@smallexample
/* Submit a task on each sub-vector */
for (i=0; i<starpu_data_get_nb_children(handle); i++) @{
    /* Get subdata number i (there is only 1 dimension) */
    starpu_data_handle sub_handle = starpu_data_get_sub_data(handle, 1, i);
    struct starpu_task *task = starpu_task_create();

    task->buffers[0].handle = sub_handle;
    task->buffers[0].mode = STARPU_RW;
    task->cl = &cl;
    task->synchronous = 1;
    task->cl_arg = &factor;
    task->cl_arg_size = sizeof(factor);

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche

Partitioning can be applied several times, see
@code{examples/basic_examples/mult.c} and @code{examples/filters/}.
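
For instance, a matrix can be partitioned along both of its dimensions by
applying two filters at once with @code{starpu_data_map_filters}. The following
is a minimal sketch, not taken verbatim from the examples above: the filter
functions and the 2x2 layout are illustrative choices, assuming
@code{starpu_block_filter_func} and @code{starpu_vertical_block_filter_func}
split a matrix along each dimension as in
@code{examples/basic_examples/mult.c}.

@cartouche
@smallexample
/* Partition a (hypothetical) matrix handle in 2 along each dimension */
starpu_filter horiz = @{
    .filter_func = starpu_block_filter_func,
    .nchildren = 2
@};
starpu_filter vert = @{
    .filter_func = starpu_vertical_block_filter_func,
    .nchildren = 2
@};
/* Two filters are applied, one after the other */
starpu_data_map_filters(handle, 2, &horiz, &vert);

/* Retrieve the (i,j) sub-matrix: 2 dimensions, then one index per dimension */
starpu_data_handle sub_handle = starpu_data_get_sub_data(handle, 2, i, j);
@end smallexample
@end cartouche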
@node Performance model example
@section Performance model example

To achieve good scheduling, StarPU scheduling policies need to be able to
estimate in advance the duration of a task. This is done by giving to codelets
a performance model, by defining a @code{starpu_perfmodel_t} structure and
providing its address in the @code{model} field of the @code{starpu_codelet}
structure. The @code{symbol} and @code{type} fields of @code{starpu_perfmodel_t}
are mandatory, to give a name to the model, and the type of the model, since
there are several kinds of performance models.

@itemize
@item
Measured at runtime (@code{STARPU_HISTORY_BASED} model type). This assumes that for a
given set of data input/output sizes, the performance will always be about the
same. This holds very well for regular kernels on GPUs for instance (<0.1% error),
and a bit less well on CPUs (~=1% error). This also assumes that there are
few different sets of data input/output sizes. StarPU will then keep record of
the average time of previous executions on the various processing units, and use
it as an estimation. History is done per task size, by using a hash of the input
and output sizes as an index.
It will also save it in @code{~/.starpu/sampling/codelets}
for further executions, and it can be observed by using the
@code{starpu_perfmodel_display} command, or drawn by using
the @code{starpu_perfmodel_plot} tool. The models are indexed by machine name. To
share the models between machines (e.g. for a homogeneous cluster), use
@code{export STARPU_HOSTNAME=some_global_name}. The following is a small code
example.

If e.g. the code is recompiled with other compilation options, or several
variants of the code are used, the symbol string should be changed to reflect
that, in order to recalibrate a new model from zero. The symbol string can even
be constructed dynamically at execution time, as long as this is done before
submitting any task using it.
@cartouche
@smallexample
static struct starpu_perfmodel_t mult_perf_model = @{
    .type = STARPU_HISTORY_BASED,
    .symbol = "mult_perf_model"
@};

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = cpu_mult,
    .nbuffers = 3,
    /* for the scheduling policy to be able to use performance models */
    .model = &mult_perf_model
@};
@end smallexample
@end cartouche
@item
Measured at runtime and refined by regression (@code{STARPU_REGRESSION_*_BASED}
model type). This still assumes performance regularity, but can work
with various data input sizes, by applying regression over observed
execution times. @code{STARPU_REGRESSION_BASED} uses an a*n^b regression
form, while @code{STARPU_NL_REGRESSION_BASED} uses an a*n^b+c form (more precise than
@code{STARPU_REGRESSION_BASED}, but which costs a lot more to compute). For instance,
@code{tests/perfmodels/regression_based.c} uses a regression-based performance
model for the @code{memset} operation; a minimal sketch of such a model is
shown after this list.

@item
Provided as an estimation from the application itself (@code{STARPU_COMMON} model type and @code{cost_model} field),
see for instance
@code{examples/common/blas_model.h} and @code{examples/common/blas_model.c}.

@item
Provided explicitly by the application (@code{STARPU_PER_ARCH} model type): the
@code{.per_arch[i].cost_model} fields have to be filled with pointers to
functions which return the expected duration of the task in micro-seconds, one
per architecture.
@end itemize
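
As an illustration, declaring a regression-based model only differs from the
history-based example above by the @code{type} field. This is a minimal sketch;
the symbol name and the @code{memset_cpu_func} kernel are hypothetical:

@cartouche
@smallexample
static struct starpu_perfmodel_t memset_perf_model = @{
    .type = STARPU_REGRESSION_BASED,
    .symbol = "my_memset_model"    /* arbitrary name, must stay stable */
@};

starpu_codelet memset_cl = @{
    .where = STARPU_CPU,
    .cpu_func = memset_cpu_func,   /* hypothetical kernel function */
    .nbuffers = 1,
    .model = &memset_perf_model
@};
@end smallexample
@end cartouche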
How to use schedulers which can benefit from such performance models is explained
in @ref{Task scheduling policy}.

The same can be done for task power consumption estimation, by setting the
@code{power_model} field the same way as the @code{model} field. Note: for
now, the application has to give the power consumption performance model
a name which is different from the execution time performance model.
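
For instance, a codelet can carry both models at once. The sketch below simply
reuses the history-based type with two distinct symbols (the names are
arbitrary):

@cartouche
@smallexample
static struct starpu_perfmodel_t mult_time_model = @{
    .type = STARPU_HISTORY_BASED,
    .symbol = "mult_time"
@};

static struct starpu_perfmodel_t mult_power_model = @{
    .type = STARPU_HISTORY_BASED,
    .symbol = "mult_power"   /* must differ from the time model's name */
@};

starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = cpu_mult,
    .nbuffers = 3,
    .model = &mult_time_model,
    .power_model = &mult_power_model
@};
@end smallexample
@end cartouche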
The application can request time estimations from the StarPU performance
models by filling a task structure as usual without actually submitting
it. The data handles can be created by calling @code{starpu_data_register}
functions with a @code{NULL} pointer (and need to be unregistered as usual)
and the desired data sizes. The @code{starpu_task_expected_length} and
@code{starpu_task_expected_power} functions can then be called to get an
estimation of the task duration on a given arch. @code{starpu_task_destroy}
needs to be called to destroy the dummy task afterwards. See
@code{tests/perfmodels/regression_based.c} for an example.
@node Theoretical lower bound on execution time
@section Theoretical lower bound on execution time

For kernels with history-based performance models, StarPU can very easily provide a theoretical lower
bound for the execution time of a whole set of tasks. See for
instance @code{examples/lu/lu_example.c}: before submitting tasks,
call @code{starpu_bound_start}, and after complete execution, call
@code{starpu_bound_stop}. @code{starpu_bound_print_lp} or
@code{starpu_bound_print_mps} can then be used to output a Linear Programming
problem corresponding to the schedule of your tasks. Run it through
@code{lp_solve} or any other linear programming solver, and that will give you a
lower bound for the total execution time of your tasks. If StarPU was compiled
with the glpk library installed, @code{starpu_bound_compute} can be used to
solve it immediately and get the optimized minimum, in ms. Its @code{integer}
parameter allows deciding whether integer resolution should be computed
and returned too.

The @code{deps} parameter tells StarPU whether to take tasks and implicit data
dependencies into account. It must be understood that the linear programming
problem size is quadratic with the number of tasks, and thus the time to solve it
will be very long; it could be minutes for just a few dozen tasks. You should
probably use @code{lp_solve -timeout 1 test.lp -wmps test.mps} to convert the
problem to MPS format and then use a better solver; @code{glpsol} might be
better than @code{lp_solve} for instance (the @code{--pcost} option may be
useful), but sometimes doesn't manage to converge. @code{cbc} might look
slower, but it is parallel. Be sure to try at least all the @code{-B} options
of @code{lp_solve}. For instance, we often just use
@code{lp_solve -cc -B1 -Bb -Bg -Bp -Bf -Br -BG -Bd -Bs -BB -Bo -Bc -Bi}, and
the @code{-gr} option can also be quite useful.

Setting @code{deps} to 0 will only take into account the actual computations
on processing units. It however still properly takes into account the varying
performances of kernels and processing units, which is quite more accurate than
just comparing StarPU performances with the fastest of the kernels being used.

The @code{prio} parameter tells StarPU whether to simulate taking into account
the priorities as the StarPU scheduler would, i.e. schedule prioritized
tasks before less prioritized tasks, to check to what extent this results
in a less optimal solution. This increases even more computation time.

Note that for simplicity, all this however doesn't take into account data
transfers, which are assumed to be completely overlapped.
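
The overall usage pattern could look like the following sketch. It assumes that
@code{starpu_bound_start} takes the @code{deps} and @code{prio} flags described
above and that @code{starpu_bound_print_lp} writes to a stdio stream; the exact
signatures should be checked against the reference manual. The
@code{submit_all_tasks} helper is hypothetical.

@cartouche
@smallexample
/* Start recording tasks, without dependencies nor priorities */
starpu_bound_start(0, 0);

submit_all_tasks();              /* hypothetical application function */
starpu_task_wait_for_all();

starpu_bound_stop();

/* Dump the corresponding Linear Programming problem */
FILE *f = fopen("test.lp", "w");
starpu_bound_print_lp(f);
fclose(f);
@end smallexample
@end cartouche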
@node Insert Task Utility
@section Insert Task Utility

StarPU provides the wrapper function @code{starpu_insert_task} to ease
the creation and submission of tasks.

@deftypefun int starpu_insert_task (starpu_codelet *@var{cl}, ...)
Create and submit a task corresponding to @var{cl} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet can be of the following types:

@itemize
@item
@code{STARPU_R}, @code{STARPU_W}, @code{STARPU_RW}, @code{STARPU_SCRATCH}, @code{STARPU_REDUX}: an access mode followed by a data handle;
@item
@code{STARPU_VALUE} followed by a pointer to a constant value and
the size of the constant;
@item
@code{STARPU_CALLBACK} followed by a pointer to a callback function;
@item
@code{STARPU_CALLBACK_ARG} followed by a pointer to be given as an
argument to the callback function;
@item
@code{STARPU_CALLBACK_WITH_ARG} followed by two pointers: one to a callback
function, and the other to be given as an argument to the callback
function; this is equivalent to using both @code{STARPU_CALLBACK} and
@code{STARPU_CALLBACK_ARG};
@item
@code{STARPU_PRIORITY} followed by an integer defining a priority level.
@end itemize

Parameters to be passed to the codelet implementation are defined
through the type @code{STARPU_VALUE}. The function
@code{starpu_unpack_cl_args} must be called within the codelet
implementation to retrieve them.
@end deftypefun
Here is the implementation of the codelet:

@smallexample
void func_cpu(void *descr[], void *_args)
@{
    int *x0 = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
    float *x1 = (float *)STARPU_VARIABLE_GET_PTR(descr[1]);
    int ifactor;
    float ffactor;

    starpu_unpack_cl_args(_args, &ifactor, &ffactor);
    *x0 = *x0 * ifactor;
    *x1 = *x1 * ffactor;
@}

starpu_codelet mycodelet = @{
    .where = STARPU_CPU,
    .cpu_func = func_cpu,
    .nbuffers = 2
@};
@end smallexample

And the call to the @code{starpu_insert_task} wrapper:

@smallexample
starpu_insert_task(&mycodelet,
                   STARPU_VALUE, &ifactor, sizeof(ifactor),
                   STARPU_VALUE, &ffactor, sizeof(ffactor),
                   STARPU_RW, data_handles[0], STARPU_RW, data_handles[1],
                   0);
@end smallexample

The call to @code{starpu_insert_task} is equivalent to the following
code:

@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &mycodelet;
task->buffers[0].handle = data_handles[0];
task->buffers[0].mode = STARPU_RW;
task->buffers[1].handle = data_handles[1];
task->buffers[1].mode = STARPU_RW;

char *arg_buffer;
size_t arg_buffer_size;
starpu_pack_cl_args(&arg_buffer, &arg_buffer_size,
                    STARPU_VALUE, &ifactor, sizeof(ifactor),
                    STARPU_VALUE, &ffactor, sizeof(ffactor),
                    0);
task->cl_arg = arg_buffer;
task->cl_arg_size = arg_buffer_size;

int ret = starpu_task_submit(task);
@end smallexample
If some part of the task insertion depends on the value of some computation,
the @code{STARPU_DATA_ACQUIRE_CB} macro can be very convenient. For
instance, assuming that the index variable @code{i} was registered as handle
@code{i_handle}:

@smallexample
/* Compute which portion we will work on, e.g. pivot */
starpu_insert_task(&which_index, STARPU_W, i_handle, 0);

/* And submit the corresponding task */
STARPU_DATA_ACQUIRE_CB(i_handle, STARPU_R, starpu_insert_task(&work, STARPU_RW, A_handle[i], 0));
@end smallexample

The @code{STARPU_DATA_ACQUIRE_CB} macro submits an asynchronous request for
acquiring data @code{i} for the main application, and will execute the code
given as third parameter when it is acquired. In other words, as soon as the
value of @code{i} computed by the @code{which_index} codelet can be read, the
portion of code passed as third parameter of @code{STARPU_DATA_ACQUIRE_CB} will
be executed, and is allowed to read from @code{i} to use it e.g. as an
index. Note that this macro is only available when compiling StarPU with
the compiler @code{gcc}.
@node Debugging
@section Debugging

StarPU provides several tools to help debugging applications. Execution traces
can be generated and displayed graphically, see @ref{Generating traces}. Some
gdb helpers are also provided to show the whole StarPU state:

@smallexample
(gdb) source tools/gdbinit
(gdb) help starpu
@end smallexample
@node More examples
@section More examples

More examples are available in the StarPU sources in the @code{examples/}
directory. Simple examples include:

@table @asis
@item @code{incrementer/}:
Trivial incrementation test.
@item @code{basic_examples/}:
Simple documented Hello world (as shown in @ref{Hello World}), vector/scalar product (as shown
in @ref{Vector Scaling on an Hybrid CPU/GPU Machine}), matrix
product examples (as shown in @ref{Performance model example}), an example using the blocked matrix data
interface, and an example using the variable data interface.
@item @code{matvecmult/}:
OpenCL example from NVidia, adapted to StarPU.
@item @code{axpy/}:
AXPY CUBLAS operation adapted to StarPU.
@item @code{fortran/}:
Example of Fortran bindings.
@end table

More advanced examples include:

@table @asis
@item @code{filters/}:
Examples using filters, as shown in @ref{Partitioning Data}.
@item @code{lu/}:
LU matrix factorization, see for instance @code{xlu_implicit.c}.
@item @code{cholesky/}:
Cholesky matrix factorization, see for instance @code{cholesky_implicit.c}.
@end table
@c ---------------------------------------------------------------------
@c Performance options
@c ---------------------------------------------------------------------

@node Performance optimization
@chapter How to optimize performance with StarPU

TODO: improve!

@menu
* Data management::
* Task submission::
* Task priorities::
* Task scheduling policy::
* Performance model calibration::
* Task distribution vs Data transfer::
* Data prefetch::
* Power-based scheduling::
* Profiling::
* CUDA-specific optimizations::
@end menu

Simply encapsulating application kernels into tasks already permits
seamless support of CPUs and GPUs at the same time. To achieve good performance,
a few additional changes are needed.
@node Data management
@section Data management

When the application allocates data, whenever possible it should use the
@code{starpu_malloc} function, which will ask CUDA or
OpenCL to make the allocation itself and pin the corresponding allocated
memory. This is needed to permit asynchronous data transfer, i.e. to permit data
transfer to overlap with computations. Otherwise, the trace will show that the
@code{DriverCopyAsync} state takes a lot of time; this is because CUDA or OpenCL
then reverts to synchronous transfers.
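
For instance, allocating a pinned vector could look like this (a minimal
sketch; error handling is omitted):

@cartouche
@smallexample
float *vector;
/* Allocate pinned memory so that transfers can be asynchronous */
starpu_malloc((void **)&vector, NX*sizeof(vector[0]));
@end smallexample
@end cartouche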
By default, StarPU leaves replicates of data wherever they were used, in case they
will be re-used by other tasks, thus saving the data transfer time. When some
task modifies some data, all the other replicates are invalidated, and only the
processing unit which ran that task will have a valid replicate of the data. If the application knows
that this data will not be re-used by further tasks, it should advise StarPU to
immediately replicate it to a desired list of memory nodes (given through a
bitmask). This can be understood like the write-through mode of CPU caches.

@example
starpu_data_set_wt_mask(img_handle, 1<<0);
@end example

will for instance request to always automatically transfer a replicate into the
main memory (node 0), as bit 0 of the write-through bitmask is set.

@example
starpu_data_set_wt_mask(img_handle, ~0U);
@end example

will request to always automatically broadcast the updated data to all memory
nodes.
@node Task submission
@section Task submission

To let StarPU make online optimizations, tasks should be submitted
asynchronously as much as possible. Ideally, all the tasks should be
submitted, and mere calls to @code{starpu_task_wait_for_all} or
@code{starpu_data_unregister} be done to wait for
termination. StarPU will then be able to rework the whole schedule, overlap
computation with communication, manage accelerator local memory usage, etc.
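
In practice, this means not setting the @code{synchronous} field, and waiting
only once at the end. The following is a minimal sketch; the @code{create_task}
helper and the number of tasks are hypothetical:

@cartouche
@smallexample
unsigned i;
for (i = 0; i < ntasks; i++) @{
    struct starpu_task *task = create_task(i); /* hypothetical helper */
    task->synchronous = 0;  /* asynchronous submission (the default) */
    starpu_task_submit(task);
@}
/* Wait only once, after everything has been submitted */
starpu_task_wait_for_all();
@end smallexample
@end cartouche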
@node Task priorities
@section Task priorities

By default, StarPU will consider the tasks in the order they are submitted by
the application. If the application programmer knows that some tasks should
be performed in priority (for instance because their output is needed by many
other tasks and may thus be a bottleneck if not executed early enough), the
@code{priority} field of the task structure should be set to transmit the
priority information to StarPU.
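
For instance (a sketch; @code{STARPU_MAX_PRIO} is assumed to denote the highest
priority level supported by the scheduler):

@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->cl = &cl;
/* This task is on the critical path, schedule it as early as possible */
task->priority = STARPU_MAX_PRIO;
starpu_task_submit(task);
@end smallexample
@end cartouche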
@node Task scheduling policy
@section Task scheduling policy

By default, StarPU uses the @code{eager} simple greedy scheduler. This is
because it provides correct load balance even if the application codelets do not
have performance models. If your application codelets have performance models
(@pxref{Performance model example} for examples showing how to do it),
you should change the scheduler thanks to the @code{STARPU_SCHED} environment
variable. For instance, @code{export STARPU_SCHED=dmda}. Setting it to
@code{help} lists the available schedulers.

The @b{eager} scheduler uses a central task queue, from which workers draw tasks
to work on. This however does not permit prefetching data, since the scheduling
decision is taken late. If a task has a non-0 priority, it is put at the front of the queue.

The @b{prio} scheduler also uses a central task queue, but sorts tasks by
priority (between -5 and 5).

The @b{random} scheduler distributes tasks randomly according to assumed worker
overall performance.

The @b{ws} (work stealing) scheduler schedules tasks on the local worker by
default. When a worker becomes idle, it steals a task from the most loaded
worker.

The @b{dm} (deque model) scheduler takes task execution performance models into account to
perform a HEFT-like scheduling strategy: it schedules tasks where their
termination time will be minimal.

The @b{dmda} (deque model data aware) scheduler is similar to dm, but it also takes
into account data transfer time.

The @b{dmdar} (deque model data aware ready) scheduler is similar to dmda, but
it also sorts tasks on per-worker queues by number of already-available data
buffers.

The @b{dmdas} (deque model data aware sorted) scheduler is similar to dmda, but it
also supports arbitrary priority values.

The @b{heft} (HEFT) scheduler is similar to dmda, but it also supports task bundles.

The @b{pheft} (parallel HEFT) scheduler is similar to heft, but it also supports
parallel tasks (still experimental).

The @b{pgreedy} (parallel greedy) scheduler is similar to greedy, but it also
supports parallel tasks (still experimental).
@node Performance model calibration
@section Performance model calibration

Most schedulers are based on an estimation of codelet duration on each kind
of processing unit. For this to be possible, the application programmer needs
to configure a performance model for the codelets of the application (see
@ref{Performance model example} for instance). History-based performance models
use on-line calibration. StarPU will automatically calibrate codelets
which have never been calibrated yet, and save the result in
@code{~/.starpu/sampling/codelets}.
The models are indexed by machine name. To share the models between machines
(e.g. for a homogeneous cluster), use @code{export STARPU_HOSTNAME=some_global_name}.

To force continuing calibration, use
@code{export STARPU_CALIBRATE=1}. This may be necessary if your application
has not-so-stable performance. StarPU will force calibration (and thus ignore
the current result) until 10 (@code{STARPU_CALIBRATION_MINIMUM}) measurements have been
made on each architecture, to avoid badly scheduling tasks just because the
first measurements were not so good. Details on the current performance model status
can be obtained from the @code{starpu_perfmodel_display} command: the @code{-l}
option lists the available performance models, and the @code{-s} option permits
choosing the performance model to be displayed. The result looks like:

@example
$ starpu_perfmodel_display -s starpu_dlu_lu_model_22
performance model for cpu
# hash      size       mean          dev           n
880805ba    98304      2.731309e+02  6.010210e+01  1240
b50b6605    393216     1.469926e+03  1.088828e+02  1240
5c6c3401    1572864    1.125983e+04  3.265296e+03  1240
@end example

which shows that for the LU 22 kernel with a 1.5MiB matrix, the average
execution time on CPUs was about 11ms, with a 3ms standard deviation, over
1240 samples. It is a good idea to check this before doing actual performance
measurements.

A graph can be drawn by using the @code{starpu_perfmodel_plot} tool:

@example
$ starpu_perfmodel_plot -s starpu_dlu_lu_model_22
98304 393216 1572864
$ gnuplot starpu_starpu_dlu_lu_model_22.gp
$ gv starpu_starpu_dlu_lu_model_22.eps
@end example

If a kernel source code was modified (e.g. performance improvement), the
calibration information is stale and should be dropped, to re-calibrate from
scratch. This can be done by using @code{export STARPU_CALIBRATE=2}.

Note: due to CUDA limitations, to be able to measure kernel duration,
calibration mode needs to disable asynchronous data transfers. Calibration thus
disables data transfer / computation overlapping, and should thus not be used
for eventual benchmarks. Note 2: history-based performance models get calibrated
only if a performance-model-based scheduler is chosen.
@node Task distribution vs Data transfer
@section Task distribution vs Data transfer

Distributing tasks to balance the load induces data transfer penalty. StarPU
thus needs to find a balance between both. The target function that the
@code{dmda} scheduler of StarPU
tries to minimize is @code{alpha * T_execution + beta * T_data_transfer}, where
@code{T_execution} is the estimated execution time of the codelet (usually
accurate), and @code{T_data_transfer} is the estimated data transfer time. The
latter is estimated based on bus calibration before execution start,
i.e. with an idle machine, thus without contention. You can force bus re-calibration by running
@code{starpu_calibrate_bus}. The beta parameter defaults to 1, but it can be
worth trying to tweak it by using @code{export STARPU_BETA=2} for instance,
since during real application execution, contention makes transfer times bigger.
This is of course imprecise, but in practice, a rough estimation already gives
the good results that a precise estimation would give.
@node Data prefetch
@section Data prefetch

The @code{heft}, @code{dmda} and @code{pheft} scheduling policies perform data prefetch (see @ref{STARPU_PREFETCH}):
as soon as a scheduling decision is taken for a task, requests are issued to
transfer its required data to the target processing unit, if needed, so that
when the processing unit actually starts the task, its data will hopefully be
already available and it will not have to wait for the transfer to finish.

The application may want to perform some manual prefetching, for several reasons
such as excluding initial data transfers from performance measurements, or
setting up an initial statically-computed data distribution on the machine
before submitting tasks, which will thus guide StarPU toward an initial task
distribution (since StarPU will try to avoid further transfers).

This can be achieved by giving the @code{starpu_data_prefetch_on_node} function
the handle and the desired target memory node.
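
For instance (a sketch; the memory node number and the trailing asynchronous
flag follow the usual @code{starpu_data_prefetch_on_node} calling convention,
which should be checked against the reference manual):

@cartouche
@smallexample
/* Request an asynchronous prefetch of vector_handle to memory node 1
 * (e.g. the memory of the first GPU) */
starpu_data_prefetch_on_node(vector_handle, 1, 1);
@end smallexample
@end cartouche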
@node Power-based scheduling
@section Power-based scheduling

If the application can provide some power performance model (through
the @code{power_model} field of the codelet structure), StarPU will
take it into account when distributing tasks. The target function that
the @code{dmda} scheduler minimizes becomes @code{alpha * T_execution +
beta * T_data_transfer + gamma * Consumption}, where @code{Consumption}
is the estimated task consumption in Joules. To tune this parameter, use
@code{export STARPU_GAMMA=3000} for instance, to express that each Joule
(i.e. kW during 1000us) is worth 3000us execution time penalty. Setting
@code{alpha} and @code{beta} to zero permits taking only power consumption into account.

This is however not sufficient to correctly optimize power: the scheduler would
simply tend to run all computations on the most energy-conservative processing
unit. To account for the consumption of the whole machine (including idle
processing units), the idle power of the machine should be given by setting
@code{export STARPU_IDLE_POWER=200} for 200W, for instance. This value can often
be obtained from the machine power supplier.

The power actually consumed by the total execution can be displayed by setting
@code{export STARPU_PROFILING=1 STARPU_WORKER_STATS=1}.
@node Profiling
@section Profiling

A quick view of how many tasks each worker has executed can be obtained by setting
@code{export STARPU_WORKER_STATS=1}. This is a convenient way to check that
execution did happen on accelerators without penalizing performance with
the profiling overhead.

A quick view of how much data transfer has been issued can be obtained by setting
@code{export STARPU_BUS_STATS=1}.

More detailed profiling information can be enabled by using @code{export STARPU_PROFILING=1} or by
calling @code{starpu_profiling_status_set} from the source code.
Statistics on the execution can then be obtained by using @code{export
STARPU_BUS_STATS=1} and @code{export STARPU_WORKER_STATS=1}.

More details on performance feedback are provided by the next chapter.
@node CUDA-specific optimizations
@section CUDA-specific optimizations

Due to CUDA limitations, StarPU will have a hard time overlapping its own
communications and the codelet computations if the application does not use a
dedicated CUDA stream for its computations. StarPU provides one by the use of
@code{starpu_cuda_get_local_stream()}, which should be used by all CUDA codelet
operations. For instance:

@example
func <<<grid,block,0,starpu_cuda_get_local_stream()>>> (foo, bar);
cudaStreamSynchronize(starpu_cuda_get_local_stream());
@end example

StarPU already does appropriate calls for the CUBLAS library.

Unfortunately, some CUDA libraries do not have stream variants of
kernels. That will lower the potential for overlapping.
@c ---------------------------------------------------------------------
@c Performance feedback
@c ---------------------------------------------------------------------

@node Performance feedback
@chapter Performance feedback

@menu
* On-line:: On-line performance feedback
* Off-line:: Off-line performance feedback
* Codelet performance:: Performance of codelets
@end menu

@node On-line
@section On-line performance feedback

@menu
* Enabling monitoring:: Enabling on-line performance monitoring
* Task feedback:: Per-task feedback
* Codelet feedback:: Per-codelet feedback
* Worker feedback:: Per-worker feedback
* Bus feedback:: Bus-related feedback
* StarPU-Top:: StarPU-Top interface
@end menu
@node Enabling monitoring
@subsection Enabling on-line performance monitoring

In order to enable online performance monitoring, the application can call
@code{starpu_profiling_status_set(STARPU_PROFILING_ENABLE)}. It is possible to
detect whether monitoring is already enabled or not by calling
@code{starpu_profiling_status_get()}. Enabling monitoring also reinitializes all
previously collected feedback. The @code{STARPU_PROFILING} environment variable
can also be set to 1 to achieve the same effect.

Likewise, performance monitoring is stopped by calling
@code{starpu_profiling_status_set(STARPU_PROFILING_DISABLE)}. Note that this
does not reset the performance counters, so that the application may consult
them later on.

More details about the performance monitoring API are available in section
@ref{Profiling API}.
@node Task feedback
@subsection Per-task feedback

If profiling is enabled, a pointer to a @code{starpu_task_profiling_info}
structure is put in the @code{.profiling_info} field of the @code{starpu_task}
structure when a task terminates.
This structure is automatically destroyed when the task structure is destroyed,
either automatically or by calling @code{starpu_task_destroy}.

The @code{starpu_task_profiling_info} structure indicates the date when the
task was submitted (@code{submit_time}), started (@code{start_time}), and
terminated (@code{end_time}), relative to the initialization of
StarPU with @code{starpu_init}. It also specifies the identifier of the worker
that has executed the task (@code{workerid}).
These dates are stored as @code{timespec} structures which the user may convert
into micro-seconds using the @code{starpu_timing_timespec_to_us} helper
function.

It is worth noting that the application may directly access this structure from
the callback executed at the end of the task. The @code{starpu_task} structure
associated to the callback currently being executed is indeed accessible with
the @code{starpu_get_current_task()} function.
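
For instance, a callback could compute the task duration on the spot. This is a
sketch combining the functions mentioned above:

@cartouche
@smallexample
void my_callback(void *arg)
@{
    struct starpu_task *task = starpu_get_current_task();
    struct starpu_task_profiling_info *info = task->profiling_info;

    /* Elapsed time between start and termination, in micro-seconds */
    double length = starpu_timing_timespec_delay_us(&info->start_time,
                                                    &info->end_time);
    fprintf(stderr, "task took %.2lf us\n", length);
@}
@end smallexample
@end cartouche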
@node Codelet feedback
@subsection Per-codelet feedback

The @code{per_worker_stats} field of the @code{starpu_codelet_t} structure is
an array of counters. The i-th entry of the array is incremented every time a
task implementing the codelet is executed on the i-th worker.
This array is not reinitialized when profiling is enabled or disabled.
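
A sketch of how these counters could be displayed, assuming the array is
indexed by worker identifier up to @code{starpu_worker_get_count()} and that
@code{cl} is the codelet structure:

@cartouche
@smallexample
unsigned worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
    fprintf(stderr, "codelet was executed %lu times on worker %u\n",
            (unsigned long) cl.per_worker_stats[worker], worker);
@end smallexample
@end cartouche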
@node Worker feedback
@subsection Per-worker feedback

The second argument returned by the @code{starpu_worker_get_profiling_info}
function is a @code{starpu_worker_profiling_info} structure that gives
statistics about the specified worker. This structure specifies when StarPU
started collecting profiling information for that worker (@code{start_time}),
the duration of the profiling measurement interval (@code{total_time}), the
time spent executing kernels (@code{executing_time}), the time spent sleeping
because there is no task to execute at all (@code{sleeping_time}), and the
number of tasks that were executed while profiling was enabled.
These values give an estimation of the proportion of time spent doing real work,
and the time spent either sleeping because there are not enough executable
tasks or simply wasted in pure StarPU overhead.

Calling @code{starpu_worker_get_profiling_info} resets the profiling
information associated to a worker.

When an FxT trace is generated (see @ref{Generating traces}), it is also
possible to use the @code{starpu_top} script (described in @ref{starpu-top}) to
generate a graphic showing the evolution of these values over time, for
the different workers.
@node Bus feedback
@subsection Bus-related feedback

TODO
@c how to enable/disable performance monitoring
@c what kind of information do we get ?

The bus speed measured by StarPU can be displayed by using the
@code{starpu_machine_display} tool, for instance:

@example
StarPU has found :
        3 CUDA devices
                CUDA 0 (Tesla C2050 02:00.0)
                CUDA 1 (Tesla C2050 03:00.0)
                CUDA 2 (Tesla C2050 84:00.0)
from    to RAM          to CUDA 0       to CUDA 1       to CUDA 2
RAM     0.000000        5176.530428     5176.492994     5191.710722
CUDA 0  4523.732446     0.000000        2414.074751     2417.379201
CUDA 1  4523.718152     2414.078822     0.000000        2417.375119
CUDA 2  4534.229519     2417.069025     2417.060863     0.000000
@end example
@node StarPU-Top
@subsection StarPU-Top interface

StarPU-Top is an interface which remotely displays the on-line state of a StarPU
application and permits the user to change parameters on the fly.

Variables to be monitored can be registered by calling the
@code{starputop_add_data_boolean}, @code{starputop_add_data_integer},
@code{starputop_add_data_float} functions, e.g.:

@example
starputop_data *data = starputop_add_data_integer("mynum", 0, 100, 1);
@end example

The application should then call @code{starputop_init_and_wait} to give its name
and wait for StarPU-Top to get a start request from the user. The name is used
by StarPU-Top to quickly reload a previously-saved layout of parameter display.

@example
starputop_init_and_wait("the application");
@end example

The new values can then be provided thanks to
@code{starputop_update_data_boolean}, @code{starputop_update_data_integer},
@code{starputop_update_data_float}, e.g.:

@example
starputop_update_data_integer(data, mynum);
@end example

Updateable parameters can be registered thanks to
@code{starputop_register_parameter_boolean}, @code{starputop_register_parameter_integer},
@code{starputop_register_parameter_float}, e.g.:

@example
float alpha;
starputop_register_parameter_float("alpha", &alpha, 0, 10, modif_hook);
@end example

@code{modif_hook} is a function which will be called when the parameter is
modified; it can for instance print the new value:

@example
void modif_hook(struct starputop_param_t *d) @{
    fprintf(stderr,"%s has been modified: %f\n", d->name, alpha);
@}
@end example

Task schedulers should notify StarPU-Top when they have decided when a task will be
scheduled, so that it can show it in its Gantt chart, for instance:

@example
starputop_task_prevision(task, workerid, begin, end);
@end example

Starting StarPU-Top and the application can be done in two ways:

@itemize
@item The application is started by hand on some machine (and thus already
waiting for the start event). In the Preference dialog of StarPU-Top, the SSH
checkbox should be unchecked, and the hostname and port (default is 2011) on
which the application is already running should be specified. Clicking on the
connection button will thus connect to the already-running application.
@item StarPU-Top is started first, and clicking on the connection button will
start the application itself (possibly on a remote machine). The SSH checkbox
should be checked, and a command line provided, e.g.:

@example
ssh myserver STARPU_SCHED=heft ./application
@end example

If port 2011 of the remote machine cannot be accessed directly, an ssh port bridge should be added:

@example
ssh -L 2011:localhost:2011 myserver STARPU_SCHED=heft ./application
@end example

and "localhost" should be used as IP Address to connect to.
@end itemize
@node Off-line
@section Off-line performance feedback

@menu
* Generating traces:: Generating traces with FxT
* Gantt diagram:: Creating a Gantt Diagram
* DAG:: Creating a DAG with graphviz
* starpu-top:: Monitoring activity
@end menu

@node Generating traces
@subsection Generating traces with FxT

StarPU can use the FxT library (see
@indicateurl{https://savannah.nongnu.org/projects/fkt/}) to generate traces
with a limited runtime overhead.

You can either get a tarball:
@example
% wget http://download.savannah.gnu.org/releases/fkt/fxt-0.2.2.tar.gz
@end example

or use the FxT library from CVS (autotools are required):
@example
% cvs -d :pserver:anonymous@@cvs.sv.gnu.org:/sources/fkt co FxT
% ./bootstrap
@end example

Compiling and installing the FxT library in the @code{$FXTDIR} path is
done following the standard procedure:

@example
% ./configure --prefix=$FXTDIR
% make
% make install
@end example

In order to have StarPU generate traces, StarPU should be configured with
the @code{--with-fxt} option:

@example
$ ./configure --with-fxt=$FXTDIR
@end example

Or you can simply point the @code{PKG_CONFIG_PATH} to
@code{$FXTDIR/lib/pkgconfig} and pass @code{--with-fxt} to @code{./configure}.

When FxT is enabled, a trace is generated when StarPU is terminated by calling
@code{starpu_shutdown()}. The trace is a binary file whose name has the form
@code{prof_file_XXX_YYY} where @code{XXX} is the user name, and
@code{YYY} is the pid of the process that used StarPU. This file is saved in the
@code{/tmp/} directory by default, or in the directory specified by
the @code{STARPU_FXT_PREFIX} environment variable.
@node Gantt diagram
@subsection Creating a Gantt Diagram

When the FxT trace file @code{filename} has been generated, it is possible to
generate a trace in the Paje format by calling:

@example
% starpu_fxt_tool -i filename
@end example

Or alternatively, setting the @code{STARPU_GENERATE_TRACE} environment variable
to 1 before application execution will make StarPU do it automatically at
application shutdown.

This will create a @code{paje.trace} file in the current directory that can be
inspected with the ViTE trace visualizing open-source tool. More information
about ViTE is available at @indicateurl{http://vite.gforge.inria.fr/}. It is
possible to open the @code{paje.trace} file with ViTE by using the following
command:

@example
% vite paje.trace
@end example
@node DAG
@subsection Creating a DAG with graphviz

When the FxT trace file @code{filename} has been generated, it is possible to
generate a task graph in the DOT format by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create a @code{dag.dot} file in the current directory. This file is a
task graph described using the DOT language. It is possible to get a
graphical output of the graph by using the graphviz library:

@example
$ dot -Tpdf dag.dot -o output.pdf
@end example
@node starpu-top
@subsection Monitoring activity

When the FxT trace file @code{filename} has been generated, it is possible to
generate an activity trace by calling:

@example
$ starpu_fxt_tool -i filename
@end example

This will create an @code{activity.data} file in the current
directory. A profile of the application showing the activity of StarPU
during the execution of the program can be generated:

@example
$ starpu_top activity.data
@end example

This will create a file named @code{activity.eps} in the current directory.
This picture is composed of two parts.

The first part shows the activity of the different workers. The green sections
indicate which proportion of the time was spent executing kernels on the
processing unit. The red sections indicate the proportion of time spent in
StarPU: an important overhead may indicate that the granularity may be too
low, and that bigger tasks may be appropriate to use the processing unit more
efficiently. The black sections indicate that the processing unit was blocked
because there was no task to process: this may indicate a lack of parallelism
which may be alleviated by creating more tasks when it is possible.

The second part of the @code{activity.eps} picture is a graph showing the
evolution of the number of tasks available in the system during the execution.
Ready tasks are shown in black, and tasks that are submitted but not
schedulable yet are shown in grey.
  1765. @node Codelet performance
  1766. @section Performance of codelets
  1767. The performance model of codelets can be examined by using the
  1768. @code{starpu_perfmodel_display} tool:
  1769. @example
  1770. $ starpu_perfmodel_display -l
  1771. file: <malloc_pinned.hannibal>
  1772. file: <starpu_slu_lu_model_21.hannibal>
  1773. file: <starpu_slu_lu_model_11.hannibal>
  1774. file: <starpu_slu_lu_model_22.hannibal>
  1775. file: <starpu_slu_lu_model_12.hannibal>
  1776. @end example
  1777. Here, the codelets of the lu example are available. We can examine the
  1778. performance of the 22 kernel:
  1779. @example
  1780. $ starpu_perfmodel_display -s starpu_slu_lu_model_22
  1781. performance model for cpu
  1782. # hash size mean dev n
  1783. 57618ab0 19660800 2.851069e+05 1.829369e+04 109
  1784. performance model for cuda_0
  1785. # hash size mean dev n
  1786. 57618ab0 19660800 1.164144e+04 1.556094e+01 315
  1787. performance model for cuda_1
  1788. # hash size mean dev n
  1789. 57618ab0 19660800 1.164271e+04 1.330628e+01 360
  1790. performance model for cuda_2
  1791. # hash size mean dev n
  1792. 57618ab0 19660800 1.166730e+04 3.390395e+02 456
  1793. @end example

We can see that for the given size, over a sample of a few hundred
executions, the GPUs are about 20 times faster than the CPUs (numbers are in
us). The standard deviation is extremely low for the GPUs, and less than 10% for
the CPUs.

The @code{starpu_regression_display} tool does the same for regression-based
performance models. It also writes a @code{.gp} file in the current directory,
to be run with the @code{gnuplot} tool, which shows the corresponding curve.
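
For instance, a plausible invocation (the exact @code{.gp} file name is
generated by the tool and is shown here only for illustration) could be:

@example
$ starpu_regression_display -s starpu_slu_lu_model_22
$ gnuplot starpu_slu_lu_model_22.gp
@end example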

@c ---------------------------------------------------------------------
@c MPI support
@c ---------------------------------------------------------------------

@node StarPU MPI support
@chapter StarPU MPI support

The integration of MPI transfers within task parallelism is done in a
very natural way by means of asynchronous interactions between the
application and StarPU. This is implemented in a separate libstarpumpi library
which basically provides "StarPU" equivalents of @code{MPI_*} functions, where
@code{void *} buffers are replaced with @code{starpu_data_handle}s, and all
GPU-RAM-NIC transfers are handled efficiently by StarPU-MPI. The user has to
use the usual @code{mpirun} command of the MPI implementation to start StarPU on
the different MPI nodes.

An MPI Insert Task function provides an even more seamless transition to a
distributed application, by automatically issuing all required data transfers
according to the task graph and an application-provided data distribution.

@menu
* The API::
* Simple Example::
* MPI Insert Task Utility::
* MPI Collective Operations::
@end menu

@node The API
@section The API

@subsection Compilation

The flags required to compile or link against the MPI layer are
accessible with the following commands:

@example
% pkg-config --cflags libstarpumpi  # options for the compiler
% pkg-config --libs libstarpumpi    # options for the linker
@end example
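
For instance, a program @code{ring.c} using StarPU-MPI could be built as
follows (a sketch only; the source file name and the use of @code{mpicc} are
assumptions about the local setup):

@example
% mpicc ring.c -o ring $(pkg-config --cflags libstarpumpi) \
               $(pkg-config --libs libstarpumpi)
@end example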

@subsection Initialisation

@deftypefun int starpu_mpi_initialize (void)
Initializes the starpumpi library. This must be called between calling
@code{starpu_init} and other @code{starpu_mpi} functions. This
function does not call @code{MPI_Init}; it should be called beforehand.
@end deftypefun

@deftypefun int starpu_mpi_initialize_extended (int *@var{rank}, int *@var{world_size})
Initializes the starpumpi library. This must be called between calling
@code{starpu_init} and other @code{starpu_mpi} functions.
This function calls @code{MPI_Init}, and should therefore be preferred
to the previous one for MPI implementations which are not thread-safe.
Returns the current MPI node rank and world size.
@end deftypefun

@deftypefun int starpu_mpi_shutdown (void)
Cleans up the starpumpi library. This must be called between calling
@code{starpu_mpi} functions and @code{starpu_shutdown}.
@code{MPI_Finalize} will be called if StarPU-MPI has been initialized
by calling @code{starpu_mpi_initialize_extended}.
@end deftypefun
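
A typical program skeleton therefore looks as follows (a minimal sketch,
using @code{starpu_mpi_initialize_extended} so that @code{MPI_Init} and
@code{MPI_Finalize} are handled by StarPU-MPI):

@cartouche
@smallexample
int rank, world_size;

starpu_init(NULL);
starpu_mpi_initialize_extended(&rank, &world_size);

/* ... register data, submit tasks, communicate ... */

starpu_mpi_shutdown();
starpu_shutdown();
@end smallexample
@end cartouche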

@subsection Communication

@deftypefun int starpu_mpi_send (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
@end deftypefun

@deftypefun int starpu_mpi_recv (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, MPI_Status *@var{status})
@end deftypefun

@deftypefun int starpu_mpi_isend (starpu_data_handle @var{data_handle}, starpu_mpi_req *@var{req}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
@end deftypefun

@deftypefun int starpu_mpi_irecv (starpu_data_handle @var{data_handle}, starpu_mpi_req *@var{req}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm})
@end deftypefun

@deftypefun int starpu_mpi_isend_detached (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
@end deftypefun

@deftypefun int starpu_mpi_wait (starpu_mpi_req *@var{req}, MPI_Status *@var{status})
@end deftypefun

@deftypefun int starpu_mpi_test (starpu_mpi_req *@var{req}, int *@var{flag}, MPI_Status *@var{status})
@end deftypefun

@deftypefun int starpu_mpi_barrier (MPI_Comm @var{comm})
@end deftypefun
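
These functions mirror their MPI counterparts, but operate on registered data
handles instead of raw buffers. As a minimal sketch (assuming a previously
registered handle @code{handle}, the current rank in @code{rank}, and at least
two MPI processes), the blocking variants can be used like this:

@cartouche
@smallexample
MPI_Status status;

if (rank == 0)
    starpu_mpi_send(handle, 1, 42, MPI_COMM_WORLD);
else if (rank == 1)
    starpu_mpi_recv(handle, 0, 42, MPI_COMM_WORLD, &status);
@end smallexample
@end cartouche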

@deftypefun int starpu_mpi_isend_detached_unlock_tag (starpu_data_handle @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
When the transfer is completed, the tag is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_irecv_detached_unlock_tag (starpu_data_handle @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
When the transfer is completed, the tag is unlocked.
@end deftypefun

@deftypefun int starpu_mpi_isend_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle *@var{data_handle}, int *@var{dest}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Asynchronously sends an array of buffers, and unlocks the tag once all
of them have been transmitted.
@end deftypefun

@deftypefun int starpu_mpi_irecv_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle *@var{data_handle}, int *@var{source}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
@end deftypefun
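
The unlocked tag can then be used to synchronize with the end of the transfer,
for instance through StarPU's explicit tag dependencies. A minimal sketch
(assuming a registered handle @code{handle}; the tag value is arbitrary):

@cartouche
@smallexample
starpu_tag_t tag = 0x42;

starpu_mpi_irecv_detached_unlock_tag(handle, /* source */ 0, /* mpi_tag */ 42,
                                     MPI_COMM_WORLD, tag);

/* Blocks until the reception has completed and unlocked the tag. */
starpu_tag_wait(tag);
@end smallexample
@end cartouche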

@page
@node Simple Example
@section Simple Example

@cartouche
@smallexample
void increment_token(void)
@{
    struct starpu_task *task = starpu_task_create();

    task->cl = &increment_cl;
    task->buffers[0].handle = token_handle;
    task->buffers[0].mode = STARPU_RW;

    starpu_task_submit(task);
@}
@end smallexample
@end cartouche

@cartouche
@smallexample
int main(int argc, char **argv)
@{
    int rank, size;

    starpu_init(NULL);
    starpu_mpi_initialize_extended(&rank, &size);

    starpu_vector_data_register(&token_handle, 0, (uintptr_t)&token, 1, sizeof(unsigned));

    unsigned nloops = NITER;
    unsigned loop;

    unsigned last_loop = nloops - 1;
    unsigned last_rank = size - 1;
@end smallexample
@end cartouche

@cartouche
@smallexample
    for (loop = 0; loop < nloops; loop++) @{
        int tag = loop*size + rank;

        if (loop == 0 && rank == 0)
        @{
            token = 0;
            fprintf(stdout, "Start with token value %d\n", token);
        @}
        else
        @{
            starpu_mpi_irecv_detached(token_handle, (rank+size-1)%size, tag,
                                      MPI_COMM_WORLD, NULL, NULL);
        @}

        increment_token();

        if (loop == last_loop && rank == last_rank)
        @{
            starpu_data_acquire(token_handle, STARPU_R);
            fprintf(stdout, "Finished : token value %d\n", token);
            starpu_data_release(token_handle);
        @}
        else
        @{
            starpu_mpi_isend_detached(token_handle, (rank+1)%size, tag+1,
                                      MPI_COMM_WORLD, NULL, NULL);
        @}
    @}

    starpu_task_wait_for_all();
@end smallexample
@end cartouche

@cartouche
@smallexample
    starpu_mpi_shutdown();
    starpu_shutdown();

    if (rank == last_rank)
    @{
        fprintf(stderr, "[%d] token = %d == %d * %d ?\n", rank, token, nloops, size);
        STARPU_ASSERT(token == nloops*size);
    @}
@end smallexample
@end cartouche

@page
@node MPI Insert Task Utility
@section MPI Insert Task Utility

To save the programmer from having to make all communications explicit, StarPU
provides an "MPI Insert Task Utility". The principle is that the application
decides a distribution of the data over the MPI nodes by allocating it and
notifying StarPU of that decision, i.e. telling StarPU which MPI node "owns"
which data. All MPI nodes then process the whole task graph, and StarPU
automatically determines which node actually executes which task, as well as
the required MPI transfers.

@deftypefun int starpu_data_set_rank (starpu_data_handle @var{handle}, int @var{mpi_rank})
Tell StarPU-MPI which MPI node "owns" a given data, that is, the node which will
always keep an up-to-date value, and will by default execute tasks which write
to it.
@end deftypefun

@deftypefun int starpu_data_get_rank (starpu_data_handle @var{handle})
Returns the last value set by @code{starpu_data_set_rank}.
@end deftypefun

@deftypefun void starpu_mpi_insert_task (MPI_Comm @var{comm}, starpu_codelet *@var{cl}, ...)
Create and submit a task corresponding to @var{cl} with the following
arguments. The argument list must be zero-terminated.

The arguments following the codelet are of the same types as for the
function @code{starpu_insert_task} defined in @ref{Insert Task
Utility}. The extra argument @code{STARPU_EXECUTE_ON_NODE} followed by an
integer makes it possible to specify the MPI node to execute the codelet on. It
is also possible to specify that the node owning a specific data will execute
the codelet, by using @code{STARPU_EXECUTE_ON_DATA} followed by a data
handle.

The internal algorithm is as follows:

@enumerate
@item Find out whether we (as an MPI node) are to execute the codelet
because we own the data to be written to. If different nodes own data
to be written to, the argument @code{STARPU_EXECUTE_ON_NODE} or
@code{STARPU_EXECUTE_ON_DATA} has to be used to specify which MPI node will
execute the task.
@item Send and receive data as requested. Nodes owning data which needs to be
read by the task send it to the MPI node which will execute the task, and the
latter receives it.
@item Execute the codelet. This is done by the MPI node selected in the
first step of the algorithm.
@item In the case when different MPI nodes own data to be written to, send
the written data back to their owners.
@end enumerate

The algorithm also includes a cache mechanism that avoids sending
data twice to the same MPI node, unless the data has been modified in the
meantime.
@end deftypefun
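
As an illustration (a sketch only; @code{my_cl} and @code{handle} are
hypothetical names), forcing a task to run on MPI node 1 regardless of data
ownership could be written:

@cartouche
@smallexample
starpu_mpi_insert_task(MPI_COMM_WORLD, &my_cl,
                       STARPU_RW, handle,
                       STARPU_EXECUTE_ON_NODE, 1,
                       0);
@end smallexample
@end cartouche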

@deftypefun void starpu_mpi_get_data_on_node (MPI_Comm @var{comm}, starpu_data_handle @var{data_handle}, int @var{node})
@end deftypefun

@page

Here is a stencil example showing how to use @code{starpu_mpi_insert_task}. One
first needs to define a distribution function which specifies the
locality of the data. Note that the distribution information needs to
be given to StarPU by calling @code{starpu_data_set_rank}.

@cartouche
@smallexample
/* Returns the MPI node number where data is */
int my_distrib(int x, int y, int nb_nodes) @{
    /* Block distrib */
    return ((int)(x / sqrt(nb_nodes) + (y / sqrt(nb_nodes)) * sqrt(nb_nodes))) % nb_nodes;

    // /* Other examples useful for other kinds of computations */
    // /* / distrib */
    // return (x+y) % nb_nodes;

    // /* Block cyclic distrib */
    // unsigned side = sqrt(nb_nodes);
    // return x % side + (y % side) * side;
@}
@end smallexample
@end cartouche

Now the data can be registered within StarPU. Data which are not
owned but will be needed for computations can be registered through
the lazy allocation mechanism, i.e. with a @code{home_node} set to -1.
StarPU will automatically allocate the memory when it is used for the
first time.

One can note an optimization here (the @code{else if} test): we only register
data which will be needed by the tasks that we will execute.

@cartouche
@smallexample
unsigned matrix[X][Y];
starpu_data_handle data_handles[X][Y];

for(x = 0; x < X; x++) @{
    for (y = 0; y < Y; y++) @{
        int mpi_rank = my_distrib(x, y, size);
        if (mpi_rank == my_rank)
            /* Owning data */
            starpu_variable_data_register(&data_handles[x][y], 0,
                                          (uintptr_t)&(matrix[x][y]), sizeof(unsigned));
        else if (my_rank == my_distrib(x+1, y, size) || my_rank == my_distrib(x-1, y, size)
              || my_rank == my_distrib(x, y+1, size) || my_rank == my_distrib(x, y-1, size))
            /* I don't own that index, but will need it for my computations */
            starpu_variable_data_register(&data_handles[x][y], -1,
                                          (uintptr_t)NULL, sizeof(unsigned));
        else
            /* I know it's useless to allocate anything for this */
            data_handles[x][y] = NULL;
        if (data_handles[x][y])
            starpu_data_set_rank(data_handles[x][y], mpi_rank);
    @}
@}
@end smallexample
@end cartouche

Now @code{starpu_mpi_insert_task()} can be called for the different
steps of the application.

@cartouche
@smallexample
for(loop=0 ; loop<niter; loop++)
    for (x = 1; x < X-1; x++)
        for (y = 1; y < Y-1; y++)
            starpu_mpi_insert_task(MPI_COMM_WORLD, &stencil5_cl,
                                   STARPU_RW, data_handles[x][y],
                                   STARPU_R, data_handles[x-1][y],
                                   STARPU_R, data_handles[x+1][y],
                                   STARPU_R, data_handles[x][y-1],
                                   STARPU_R, data_handles[x][y+1],
                                   0);

starpu_task_wait_for_all();
@end smallexample
@end cartouche

In other words, all MPI nodes process the whole task graph, but as mentioned
above, for each task, only the MPI node which owns the data being written to
(here, @code{data_handles[x][y]}) will actually run the task. The other MPI
nodes will automatically send the required data.

@node MPI Collective Operations
@section MPI Collective Operations

@deftypefun int starpu_mpi_scatter_detached (starpu_data_handle *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm})
Scatter data among processes of the communicator based on the ownership of
the data. For each data of the array @var{data_handles}, the
process @var{root} sends the data to the process owning this data.
Processes receiving data must have valid data handles to receive them.
@end deftypefun

@deftypefun int starpu_mpi_gather_detached (starpu_data_handle *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm})
Gather data from the different processes of the communicator onto the
process @var{root}. Each process owning a data handle in the array
@var{data_handles} will send it to the process @var{root}. The
process @var{root} must have valid data handles to receive the data.
@end deftypefun

@page
@cartouche
@smallexample
if (rank == root)
@{
    /* Allocate the vector */
    vector = malloc(nblocks * sizeof(float *));
    for(x=0 ; x<nblocks ; x++)
    @{
        starpu_malloc((void **)&vector[x], block_size*sizeof(float));
    @}
@}

/* Allocate data handles and register data to StarPU */
data_handles = malloc(nblocks*sizeof(starpu_data_handle));
for(x = 0; x < nblocks ; x++)
@{
    int mpi_rank = my_distrib(x, nodes);
    if (rank == root) @{
        starpu_vector_data_register(&data_handles[x], 0, (uintptr_t)vector[x],
                                    block_size, sizeof(float));
    @}
    else if ((mpi_rank == rank) || (rank == mpi_rank+1 || rank == mpi_rank-1)) @{
        /* I own that index, or I will need it for my computations */
        starpu_vector_data_register(&data_handles[x], -1, (uintptr_t)NULL,
                                    block_size, sizeof(float));
    @}
    else @{
        /* I know it's useless to allocate anything for this */
        data_handles[x] = NULL;
    @}
    if (data_handles[x]) @{
        starpu_data_set_rank(data_handles[x], mpi_rank);
    @}
@}

/* Scatter the vector among the nodes */
starpu_mpi_scatter_detached(data_handles, nblocks, root, MPI_COMM_WORLD);

/* Calculation */
for(x = 0; x < nblocks ; x++) @{
    if (data_handles[x]) @{
        int owner = starpu_data_get_rank(data_handles[x]);
        if (owner == rank) @{
            starpu_insert_task(&cl, STARPU_RW, data_handles[x], 0);
        @}
    @}
@}

/* Gather the vector back onto the root node */
starpu_mpi_gather_detached(data_handles, nblocks, root, MPI_COMM_WORLD);
@end smallexample
@end cartouche

@c ---------------------------------------------------------------------
@c Tips and Tricks
@c ---------------------------------------------------------------------

@node Tips and Tricks
@chapter Tips and Tricks to know about

@menu
* Per-worker library initialization:: How to initialize a computation library once for each worker?
@end menu

@node Per-worker library initialization
@section How to initialize a computation library once for each worker?

Some libraries need to be initialized once for each concurrent instance that
may run on the machine. For instance, a C++ computation class may not be
thread-safe by itself, while several instantiated objects of that class
can be used concurrently. This can be handled in StarPU by initializing one
such object per worker. For instance, the libstarpufft example does the
following to be able to use FFTW.

A global array stores the instantiated objects:

@smallexample
fftw_plan plan_cpu[STARPU_NMAXWORKERS];
@end smallexample

At initialisation time of libstarpu, the objects are initialized:

@smallexample
int workerid;
for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) @{
    switch (starpu_worker_get_type(workerid)) @{
        case STARPU_CPU_WORKER:
            plan_cpu[workerid] = fftw_plan(...);
            break;
    @}
@}
@end smallexample

And in the codelet body, they are used:

@smallexample
static void fft(void *descr[], void *_args)
@{
    int workerid = starpu_worker_get_id();
    fftw_plan plan = plan_cpu[workerid];
    ...

    fftw_execute(plan, ...);
@}
@end smallexample

To also deal with the CUDA CUFFT implementation, the @code{fftw_plan} type can
be replaced with a union of @code{fftw_plan} and @code{cufftHandle}, and the
@code{switch} statement extended with a @code{STARPU_CUDA_WORKER} case.
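
A minimal sketch of that extension (the union layout and the CUFFT plan
creation shown here are illustrative assumptions, not the actual libstarpufft
code):

@smallexample
union plan @{
    fftw_plan   fftw;
    cufftHandle cuda;
@} plans[STARPU_NMAXWORKERS];

for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) @{
    switch (starpu_worker_get_type(workerid)) @{
        case STARPU_CPU_WORKER:
            plans[workerid].fftw = fftw_plan(...);
            break;
        case STARPU_CUDA_WORKER:
            cufftPlan1d(&plans[workerid].cuda, ...);
            break;
    @}
@}
@end smallexample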

@c ---------------------------------------------------------------------
@c Configuration options
@c ---------------------------------------------------------------------

@node Configuring StarPU
@chapter Configuring StarPU

@menu
* Compilation configuration::
* Execution configuration through environment variables::
@end menu

@node Compilation configuration
@section Compilation configuration

The following arguments can be given to the @code{configure} script.

@menu
* Common configuration::
* Configuring workers::
* Advanced configuration::
@end menu

@node Common configuration
@subsection Common configuration

@menu
* --enable-debug::
* --enable-fast::
* --enable-verbose::
* --enable-coverage::
@end menu

@node --enable-debug
@subsubsection @code{--enable-debug}
@table @asis
@item @emph{Description}:
Enable debugging messages.
@end table

@node --enable-fast
@subsubsection @code{--enable-fast}
@table @asis
@item @emph{Description}:
Do not enforce assertions; this saves a lot of time otherwise spent computing them.
@end table

@node --enable-verbose
@subsubsection @code{--enable-verbose}
@table @asis
@item @emph{Description}:
Augment the verbosity of the debugging messages. This can be disabled
at runtime by setting the environment variable @code{STARPU_SILENT} to
any value.

@smallexample
% STARPU_SILENT=1 ./vector_scal
@end smallexample
@end table

@node --enable-coverage
@subsubsection @code{--enable-coverage}
@table @asis
@item @emph{Description}:
Enable flags for the @code{gcov} coverage tool.
@end table

@node Configuring workers
@subsection Configuring workers

@menu
* --enable-maxcpus::
* --disable-cpu::
* --enable-maxcudadev::
* --disable-cuda::
* --with-cuda-dir::
* --with-cuda-include-dir::
* --with-cuda-lib-dir::
* --disable-cuda-memcpy-peer::
* --enable-maxopencldev::
* --disable-opencl::
* --with-opencl-dir::
* --with-opencl-include-dir::
* --with-opencl-lib-dir::
* --enable-gordon::
* --with-gordon-dir::
* --enable-maximplementations::
@end menu

@node --enable-maxcpus
@subsubsection @code{--enable-maxcpus=<number>}
@table @asis
@item @emph{Description}:
Defines the maximum number of CPU cores that StarPU will support, then
available as the @code{STARPU_MAXCPUS} macro.
@end table

@node --disable-cpu
@subsubsection @code{--disable-cpu}
@table @asis
@item @emph{Description}:
Disable the use of CPUs of the machine. Only GPUs etc. will be used.
@end table

@node --enable-maxcudadev
@subsubsection @code{--enable-maxcudadev=<number>}
@table @asis
@item @emph{Description}:
Defines the maximum number of CUDA devices that StarPU will support, then
available as the @code{STARPU_MAXCUDADEVS} macro.
@end table

@node --disable-cuda
@subsubsection @code{--disable-cuda}
@table @asis
@item @emph{Description}:
Disable the use of CUDA, even if a valid CUDA installation was detected.
@end table

@node --with-cuda-dir
@subsubsection @code{--with-cuda-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the directory where CUDA is installed. This directory should notably contain
@code{include/cuda.h}.
@end table

@node --with-cuda-include-dir
@subsubsection @code{--with-cuda-include-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the directory where CUDA headers are installed. This directory should
notably contain @code{cuda.h}. This defaults to @code{/include} appended to the
value given to @code{--with-cuda-dir}.
@end table

@node --with-cuda-lib-dir
@subsubsection @code{--with-cuda-lib-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the directory where the CUDA library is installed. This directory should
notably contain the CUDA shared libraries (e.g. @code{libcuda.so}). This defaults to
@code{/lib} appended to the value given to @code{--with-cuda-dir}.
@end table

@node --disable-cuda-memcpy-peer
@subsubsection @code{--disable-cuda-memcpy-peer}
@table @asis
@item @emph{Description}:
Explicitly disables peer transfers when using CUDA 4.0.
@end table

@node --enable-maxopencldev
@subsubsection @code{--enable-maxopencldev=<number>}
@table @asis
@item @emph{Description}:
Defines the maximum number of OpenCL devices that StarPU will support, then
available as the @code{STARPU_MAXOPENCLDEVS} macro.
@end table

@node --disable-opencl
@subsubsection @code{--disable-opencl}
@table @asis
@item @emph{Description}:
Disable the use of OpenCL, even if the SDK is detected.
@end table

@node --with-opencl-dir
@subsubsection @code{--with-opencl-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the location of the OpenCL SDK. This directory should notably contain
@code{include/CL/cl.h} (or @code{include/OpenCL/cl.h} on Mac OS).
@end table

@node --with-opencl-include-dir
@subsubsection @code{--with-opencl-include-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the location of OpenCL headers. This directory should notably contain
@code{CL/cl.h} (or @code{OpenCL/cl.h} on Mac OS). This defaults to
@code{/include} appended to the value given to @code{--with-opencl-dir}.
@end table

@node --with-opencl-lib-dir
@subsubsection @code{--with-opencl-lib-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the location of the OpenCL library. This directory should notably
contain the OpenCL shared libraries (e.g. @code{libOpenCL.so}). This defaults to
@code{/lib} appended to the value given to @code{--with-opencl-dir}.
@end table

@node --enable-gordon
@subsubsection @code{--enable-gordon}
@table @asis
@item @emph{Description}:
Enable the use of the Gordon runtime for Cell SPUs.
@c TODO: rather default to enabled when detected
@end table

@node --with-gordon-dir
@subsubsection @code{--with-gordon-dir=<path>}
@table @asis
@item @emph{Description}:
Specify the location of the Gordon SDK.
@end table

@node --enable-maximplementations
@subsubsection @code{--enable-maximplementations=<number>}
@table @asis
@item @emph{Description}:
Defines the maximum number of implementations that can be provided for a
single kind of device. It is then available as the
@code{STARPU_MAXIMPLEMENTATIONS} macro.
@end table

@node Advanced configuration
@subsection Advanced configuration

@menu
* --enable-perf-debug::
* --enable-model-debug::
* --enable-stats::
* --enable-maxbuffers::
* --enable-allocation-cache::
* --enable-opengl-render::
* --enable-blas-lib::
* --with-magma::
* --with-fxt::
* --with-perf-model-dir::
* --with-mpicc::
* --with-goto-dir::
* --with-atlas-dir::
* --with-mkl-cflags::
* --with-mkl-ldflags::
@end menu

@node --enable-perf-debug
@subsubsection @code{--enable-perf-debug}
@table @asis
@item @emph{Description}:
Enable performance debugging through gprof.
@end table

@node --enable-model-debug
@subsubsection @code{--enable-model-debug}
@table @asis
@item @emph{Description}:
Enable performance model debugging.
@end table

@node --enable-stats
@subsubsection @code{--enable-stats}
@table @asis
@item @emph{Description}:
Enable statistics.
@end table

@node --enable-maxbuffers
@subsubsection @code{--enable-maxbuffers=<nbuffers>}
@table @asis
@item @emph{Description}:
Define the maximum number of buffers that tasks will be able to take
as parameters, then available as the @code{STARPU_NMAXBUFS} macro.
@end table

@node --enable-allocation-cache
@subsubsection @code{--enable-allocation-cache}
@table @asis
@item @emph{Description}:
Enable the use of a data allocation cache to avoid the cost of repeated
allocations with CUDA. Still experimental.
@end table

@node --enable-opengl-render
@subsubsection @code{--enable-opengl-render}
@table @asis
@item @emph{Description}:
Enable the use of OpenGL for the rendering of some examples.
@c TODO: rather default to enabled when detected
@end table

@node --enable-blas-lib
@subsubsection @code{--enable-blas-lib=<name>}
@table @asis
@item @emph{Description}:
Specify the BLAS library to be used by some of the examples. The
library has to be 'atlas' or 'goto'.
@end table

@node --with-magma
@subsubsection @code{--with-magma=<path>}
@table @asis
@item @emph{Description}:
Specify where MAGMA is installed. This directory should notably contain
@code{include/magmablas.h}.
@end table

@node --with-fxt
@subsubsection @code{--with-fxt=<path>}
@table @asis
@item @emph{Description}:
Specify the location of FxT (for generating traces and rendering them
using ViTE). This directory should notably contain
@code{include/fxt/fxt.h}.
@c TODO add ref to other section
@end table

@node --with-perf-model-dir
@subsubsection @code{--with-perf-model-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify where performance models should be stored (instead of defaulting to the
current user's home).
@end table

@node --with-mpicc
@subsubsection @code{--with-mpicc=<path to mpicc>}
@table @asis
@item @emph{Description}:
Specify the location of the @code{mpicc} compiler to be used for starpumpi.
@end table

@node --with-goto-dir
@subsubsection @code{--with-goto-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify the location of GotoBLAS.
@end table

@node --with-atlas-dir
@subsubsection @code{--with-atlas-dir=<dir>}
@table @asis
@item @emph{Description}:
Specify the location of ATLAS. This directory should notably contain
@code{include/cblas.h}.
@end table

@node --with-mkl-cflags
@subsubsection @code{--with-mkl-cflags=<cflags>}
@table @asis
@item @emph{Description}:
Specify the compilation flags for the MKL Library.
@end table

@node --with-mkl-ldflags
@subsubsection @code{--with-mkl-ldflags=<ldflags>}
@table @asis
@item @emph{Description}:
Specify the linking flags for the MKL Library. Note that the
@url{http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor/}
website provides a script to determine the linking flags.
@end table

@c ---------------------------------------------------------------------
@c Environment variables
@c ---------------------------------------------------------------------

@node Execution configuration through environment variables
@section Execution configuration through environment variables

@menu
* Workers:: Configuring workers
* Scheduling:: Configuring the Scheduling engine
* Misc:: Miscellaneous and debug
@end menu

Note: the values given in the @code{starpu_conf} structure passed when
calling @code{starpu_init} will override the values of the environment
variables.

@node Workers
@subsection Configuring workers

@menu
* STARPU_NCPUS:: Number of CPU workers
* STARPU_NCUDA:: Number of CUDA workers
* STARPU_NOPENCL:: Number of OpenCL workers
* STARPU_NGORDON:: Number of SPU workers (Cell)
* STARPU_WORKERS_CPUID:: Bind workers to specific CPUs
* STARPU_WORKERS_CUDAID:: Select specific CUDA devices
* STARPU_WORKERS_OPENCLID:: Select specific OpenCL devices
@end menu

@node STARPU_NCPUS
@subsubsection @code{STARPU_NCPUS} -- Number of CPU workers
@table @asis
@item @emph{Description}:
Specify the number of CPU workers (thus not including workers dedicated to
controlling accelerators). Note that by default, StarPU will not allocate more
CPU workers than there are physical CPUs, and that some CPUs are used to
control the accelerators.
@end table

@node STARPU_NCUDA
@subsubsection @code{STARPU_NCUDA} -- Number of CUDA workers
@table @asis
@item @emph{Description}:
Specify the number of CUDA devices that StarPU can use. If
@code{STARPU_NCUDA} is lower than the number of physical devices, it is
possible to select which CUDA devices should be used by the means of the
@code{STARPU_WORKERS_CUDAID} environment variable. By default, StarPU will
create as many CUDA workers as there are CUDA devices.
@end table

@node STARPU_NOPENCL
@subsubsection @code{STARPU_NOPENCL} -- Number of OpenCL workers
@table @asis
@item @emph{Description}:
OpenCL equivalent of the @code{STARPU_NCUDA} environment variable.
@end table

@node STARPU_NGORDON
@subsubsection @code{STARPU_NGORDON} -- Number of SPU workers (Cell)
@table @asis
@item @emph{Description}:
Specify the number of SPUs that StarPU can use.
@end table

@node STARPU_WORKERS_CPUID
@subsubsection @code{STARPU_WORKERS_CPUID} -- Bind workers to specific CPUs
@table @asis
@item @emph{Description}:
Passing an array of integers (starting from 0) in @code{STARPU_WORKERS_CPUID}
specifies on which logical CPU the different workers should be
bound. For instance, if @code{STARPU_WORKERS_CPUID = "0 1 4 5"}, the first
worker will be bound to logical CPU #0, the second CPU worker will be bound to
logical CPU #1 and so on. Note that the logical ordering of the CPUs is either
determined by the OS, or provided by the @code{hwloc} library in case it is
available.

Note that the first workers correspond to the CUDA workers, then come the
OpenCL and the SPU, and finally the CPU workers. For example if
we have @code{STARPU_NCUDA=1}, @code{STARPU_NOPENCL=1}, @code{STARPU_NCPUS=2}
and @code{STARPU_WORKERS_CPUID = "0 2 1 3"}, the CUDA device will be controlled
by logical CPU #0, the OpenCL device will be controlled by logical CPU #2, and
the logical CPUs #1 and #3 will be used by the CPU workers.

If the number of workers is larger than the array given in
@code{STARPU_WORKERS_CPUID}, the workers are bound to the logical CPUs in a
round-robin fashion: if @code{STARPU_WORKERS_CPUID = "0 1"}, the first and the
third (resp. second and fourth) workers will be put on CPU #0 (resp. CPU #1).

This variable is ignored if the @code{use_explicit_workers_bindid} flag of the
@code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table
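
For instance, the binding described in the example above could be requested on
the command line as follows (@code{./my_app} being a placeholder for the
actual application binary):

@smallexample
% STARPU_NCUDA=1 STARPU_NOPENCL=1 STARPU_NCPUS=2 \
  STARPU_WORKERS_CPUID="0 2 1 3" ./my_app
@end smallexample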

@node STARPU_WORKERS_CUDAID
@subsubsection @code{STARPU_WORKERS_CUDAID} -- Select specific CUDA devices
@table @asis
@item @emph{Description}:
Similarly to the @code{STARPU_WORKERS_CPUID} environment variable, it is
possible to select which CUDA devices should be used by StarPU. On a machine
equipped with 4 GPUs, setting @code{STARPU_WORKERS_CUDAID = "1 3"} and
@code{STARPU_NCUDA=2} specifies that 2 CUDA workers should be created, and that
they should use CUDA devices #1 and #3 (the logical ordering of the devices is
the one reported by CUDA).

This variable is ignored if the @code{use_explicit_workers_cuda_gpuid} flag of
the @code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table

@node STARPU_WORKERS_OPENCLID
@subsubsection @code{STARPU_WORKERS_OPENCLID} -- Select specific OpenCL devices
@table @asis
@item @emph{Description}:
OpenCL equivalent of the @code{STARPU_WORKERS_CUDAID} environment variable.

This variable is ignored if the @code{use_explicit_workers_opencl_gpuid} flag of
the @code{starpu_conf} structure passed to @code{starpu_init} is set.
@end table

@node Scheduling
@subsection Configuring the Scheduling engine

@menu
* STARPU_SCHED:: Scheduling policy
* STARPU_CALIBRATE:: Calibrate performance models
* STARPU_PREFETCH:: Use data prefetch
* STARPU_SCHED_ALPHA:: Computation factor
* STARPU_SCHED_BETA:: Communication factor
@end menu

@node STARPU_SCHED
@subsubsection @code{STARPU_SCHED} -- Scheduling policy
@table @asis
@item @emph{Description}:
This chooses between the different scheduling policies proposed by StarPU:
work stealing, random, greedy, with performance models, etc.

Use @code{STARPU_SCHED=help} to get the list of available schedulers.
@end table

@node STARPU_CALIBRATE
@subsubsection @code{STARPU_CALIBRATE} -- Calibrate performance models
@table @asis
@item @emph{Description}:
If this variable is set to 1, the performance models are calibrated during
the execution. If it is set to 2, the previous values are dropped to restart
calibration from scratch. Setting this variable to 0 disables calibration,
which is the default behaviour.

Note: this currently only applies to the @code{dm}, @code{dmda} and @code{heft} scheduling policies.
@end table
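
For instance, to recalibrate the performance models from scratch with the
@code{heft} scheduler (@code{./my_app} being a placeholder for the actual
binary):

@smallexample
% STARPU_SCHED=heft STARPU_CALIBRATE=2 ./my_app
@end smallexample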

@node STARPU_PREFETCH
@subsubsection @code{STARPU_PREFETCH} -- Use data prefetch
@table @asis
@item @emph{Description}:
This variable indicates whether data prefetching should be enabled (0 means
that it is disabled). If prefetching is enabled, when a task is scheduled to be
executed e.g. on a GPU, StarPU will request an asynchronous transfer in
advance, so that data is already present on the GPU when the task starts. As a
result, computation and data transfers are overlapped.
Note that prefetching is enabled by default in StarPU.
@end table

@node STARPU_SCHED_ALPHA
@subsubsection @code{STARPU_SCHED_ALPHA} -- Computation factor
@table @asis
@item @emph{Description}:
To estimate the cost of a task StarPU takes into account the estimated
computation time (obtained thanks to performance models). The alpha factor is
the coefficient to be applied to it before adding it to the communication part.
@end table

@node STARPU_SCHED_BETA
@subsubsection @code{STARPU_SCHED_BETA} -- Communication factor
@table @asis
@item @emph{Description}:
To estimate the cost of a task StarPU takes into account the estimated
data transfer time (obtained thanks to performance models). The beta factor is
the coefficient to be applied to it before adding it to the computation part.
@end table

@node Misc
@subsection Miscellaneous and debug

@menu
* STARPU_SILENT:: Disable verbose mode
* STARPU_LOGFILENAME:: Select debug file name
* STARPU_FXT_PREFIX:: FxT trace location
* STARPU_LIMIT_GPU_MEM:: Restrict memory size on the GPUs
* STARPU_GENERATE_TRACE:: Generate a Paje trace when StarPU is shut down
@end menu

@node STARPU_SILENT
@subsubsection @code{STARPU_SILENT} -- Disable verbose mode
@table @asis
@item @emph{Description}:
This variable makes it possible to disable verbose mode at runtime when StarPU
has been configured with the option @code{--enable-verbose}.
@end table

@node STARPU_LOGFILENAME
@subsubsection @code{STARPU_LOGFILENAME} -- Select debug file name
@table @asis
@item @emph{Description}:
This variable specifies the file in which the debugging output should be saved.
@end table

@node STARPU_FXT_PREFIX
@subsubsection @code{STARPU_FXT_PREFIX} -- FxT trace location
@table @asis
@item @emph{Description}:
This variable specifies in which directory to save the trace generated if FxT
is enabled. It needs to have a trailing '/' character.
@end table

@node STARPU_LIMIT_GPU_MEM
@subsubsection @code{STARPU_LIMIT_GPU_MEM} -- Restrict memory size on the GPUs
@table @asis
@item @emph{Description}:
This variable specifies the maximum number of megabytes that should be
available to the application on each GPU. If this value is smaller than
the size of the memory of a GPU, StarPU pre-allocates a buffer on the device to
make the remaining memory unusable. This variable is intended to be used for
experimental purposes, as it emulates devices that have a limited amount of
memory.
@end table

@node STARPU_GENERATE_TRACE
@subsubsection @code{STARPU_GENERATE_TRACE} -- Generate a Paje trace when StarPU is shut down
@table @asis
@item @emph{Description}:
When set to 1, this variable indicates that StarPU should automatically
generate a Paje trace when @code{starpu_shutdown} is called.
@end table

@c ---------------------------------------------------------------------
@c StarPU API
@c ---------------------------------------------------------------------

@node StarPU API
@chapter StarPU API

@menu
* Initialization and Termination:: Initialization and Termination methods
* Workers' Properties:: Methods to enumerate workers' properties
* Data Library:: Methods to manipulate data
* Data Interfaces::
* Data Partition::
* Codelets and Tasks:: Methods to construct tasks
* Explicit Dependencies:: Explicit Dependencies
* Implicit Data Dependencies:: Implicit Data Dependencies
* Performance Model API::
* Profiling API:: Profiling API
* CUDA extensions:: CUDA extensions
* OpenCL extensions:: OpenCL extensions
* Cell extensions:: Cell extensions
* Miscellaneous helpers::
@end menu

@node Initialization and Termination
@section Initialization and Termination

@menu
* starpu_init:: Initialize StarPU
* struct starpu_conf:: StarPU runtime configuration
* starpu_conf_init:: Initialize starpu_conf structure
* starpu_shutdown:: Terminate StarPU
@end menu

@node starpu_init
@subsection @code{starpu_init} -- Initialize StarPU
@table @asis
@item @emph{Description}:
This is the StarPU initialization method, which must be called prior to any other
StarPU call. It is possible to specify StarPU's configuration (e.g. scheduling
policy, number of cores, ...) by passing a non-null argument. The default
configuration is used if the passed argument is @code{NULL}.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, @code{-ENODEV}
indicates that no worker was available (so that StarPU was not initialized).
@item @emph{Prototype}:
@code{int starpu_init(struct starpu_conf *conf);}
@end table

@node struct starpu_conf
@subsection @code{struct starpu_conf} -- StarPU runtime configuration
@table @asis
@item @emph{Description}:
This structure is passed to the @code{starpu_init} function in order
to configure StarPU.
When the default value is used, StarPU automatically selects the number
of processing units and takes the default scheduling policy. This parameter
overwrites the equivalent environment variables.
@item @emph{Fields}:
@table @asis
@item @code{sched_policy_name} (default = NULL):
This is the name of the scheduling policy. This can also be specified with the
@code{STARPU_SCHED} environment variable.
@item @code{sched_policy} (default = NULL):
This is the definition of the scheduling policy. This field is ignored
if @code{sched_policy_name} is set.
@item @code{ncpus} (default = -1):
This is the number of CPU cores that StarPU can use. This can also be
specified with the @code{STARPU_NCPUS} environment variable.
@item @code{ncuda} (default = -1):
This is the number of CUDA devices that StarPU can use. This can also be
specified with the @code{STARPU_NCUDA} environment variable.
@item @code{nopencl} (default = -1):
This is the number of OpenCL devices that StarPU can use. This can also be
specified with the @code{STARPU_NOPENCL} environment variable.
@item @code{nspus} (default = -1):
This is the number of Cell SPUs that StarPU can use. This can also be
specified with the @code{STARPU_NGORDON} environment variable.
@item @code{use_explicit_workers_bindid} (default = 0):
If this flag is set, the @code{workers_bindid} array indicates where the
different workers are bound, otherwise StarPU automatically selects where to
bind the different workers unless the @code{STARPU_WORKERS_CPUID} environment
variable is set. The @code{STARPU_WORKERS_CPUID} environment variable is
ignored if the @code{use_explicit_workers_bindid} flag is set.
@item @code{workers_bindid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_bindid} flag is set, this array indicates
where to bind the different workers. The i-th entry of the
@code{workers_bindid} indicates the logical identifier of the processor which
should execute the i-th worker. Note that the logical ordering of the CPUs is
either determined by the OS, or provided by the @code{hwloc} library in case it
is available.
When this flag is set, the @ref{STARPU_WORKERS_CPUID} environment variable is
ignored.
@item @code{use_explicit_workers_cuda_gpuid} (default = 0):
If this flag is set, the CUDA workers will be attached to the CUDA devices
specified in the @code{workers_cuda_gpuid} array. Otherwise, StarPU assigns the
CUDA devices in a round-robin fashion.
When this flag is set, the @ref{STARPU_WORKERS_CUDAID} environment variable is
ignored.
@item @code{workers_cuda_gpuid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_cuda_gpuid} flag is set, this array contains
the logical identifiers of the CUDA devices (as used by @code{cudaGetDevice}).
@item @code{use_explicit_workers_opencl_gpuid} (default = 0):
If this flag is set, the OpenCL workers will be attached to the OpenCL devices
specified in the @code{workers_opencl_gpuid} array. Otherwise, StarPU assigns the
OpenCL devices in a round-robin fashion.
@item @code{workers_opencl_gpuid[STARPU_NMAXWORKERS]}:
If the @code{use_explicit_workers_opencl_gpuid} flag is set, this array contains
the logical identifiers of the OpenCL devices.
@item @code{calibrate} (default = 0):
If this flag is set, StarPU will calibrate the performance models when
executing tasks. If this value is equal to -1, the default value is used. The
default value is overwritten by the @code{STARPU_CALIBRATE} environment
variable when it is set.
@item @code{single_combined_worker} (default = 0):
By default, StarPU creates various combined workers according to the machine
structure. Some parallel libraries (e.g. most OpenMP implementations) however do
not support concurrent calls to parallel code. In such a case, setting this flag
makes StarPU create only one combined worker, containing all
the CPU workers. The default value is overwritten by the
@code{STARPU_SINGLE_COMBINED_WORKER} environment variable when it is set.
@end table
@end table

@node starpu_conf_init
@subsection @code{starpu_conf_init} -- Initialize starpu_conf structure
@table @asis
@item @emph{Description}:
This function initializes the @code{starpu_conf} structure passed as argument
with the default values. In case some configuration parameters are already
specified through environment variables, @code{starpu_conf_init} initializes
the fields of the structure according to the environment variables. For
instance if @code{STARPU_CALIBRATE} is set, its value is put in the
@code{.calibrate} field of the structure passed as argument.
@item @emph{Return value}:
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the argument was NULL.
@item @emph{Prototype}:
@code{int starpu_conf_init(struct starpu_conf *conf);}
@end table
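
A typical use is therefore (a minimal sketch; the policy name shown is just
one of the available schedulers):

@cartouche
@smallexample
struct starpu_conf conf;

starpu_conf_init(&conf);
conf.sched_policy_name = "heft";
conf.calibrate = 1;

if (starpu_init(&conf) == -ENODEV)
    return 77; /* no worker was available */
@end smallexample
@end cartouche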

@node starpu_shutdown
@subsection @code{starpu_shutdown} -- Terminate StarPU
@deftypefun void starpu_shutdown (void)
This is the StarPU termination method. It must be called at the end of the
application: statistics and other post-mortem debugging information are not
guaranteed to be available until this method has been called.
@end deftypefun

@node Workers' Properties
@section Workers' Properties

@menu
* starpu_worker_get_count:: Get the number of processing units
* starpu_worker_get_count_by_type:: Get the number of processing units of a given type
* starpu_cpu_worker_get_count:: Get the number of CPUs controlled by StarPU
* starpu_cuda_worker_get_count:: Get the number of CUDA devices controlled by StarPU
* starpu_opencl_worker_get_count:: Get the number of OpenCL devices controlled by StarPU
* starpu_spu_worker_get_count:: Get the number of Cell SPUs controlled by StarPU
* starpu_worker_get_id:: Get the identifier of the current worker
* starpu_worker_get_ids_by_type:: Get the list of identifiers of workers with a given type
* starpu_worker_get_devid:: Get the device identifier of a worker
* starpu_worker_get_type:: Get the type of processing unit associated to a worker
* starpu_worker_get_name:: Get the name of a worker
* starpu_worker_get_memory_node:: Get the memory node of a worker
@end menu

@node starpu_worker_get_count
@subsection @code{starpu_worker_get_count} -- Get the number of processing units
@deftypefun unsigned starpu_worker_get_count (void)
This function returns the number of workers (i.e. processing units executing
StarPU tasks). The returned value should be at most @code{STARPU_NMAXWORKERS}.
@end deftypefun

@node starpu_worker_get_count_by_type
@subsection @code{starpu_worker_get_count_by_type} -- Get the number of processing units of a given type
@deftypefun int starpu_worker_get_count_by_type ({enum starpu_archtype} @var{type})
Returns the number of workers of the type indicated by the argument. A positive
(or zero) value is returned in case of success; otherwise @code{-EINVAL}
indicates that the type is not valid.
@end deftypefun

@node starpu_cpu_worker_get_count
@subsection @code{starpu_cpu_worker_get_count} -- Get the number of CPUs controlled by StarPU
@deftypefun unsigned starpu_cpu_worker_get_count (void)
This function returns the number of CPUs controlled by StarPU. The returned
value should be at most @code{STARPU_MAXCPUS}.
@end deftypefun

@node starpu_cuda_worker_get_count
@subsection @code{starpu_cuda_worker_get_count} -- Get the number of CUDA devices controlled by StarPU
@deftypefun unsigned starpu_cuda_worker_get_count (void)
This function returns the number of CUDA devices controlled by StarPU. The returned
value should be at most @code{STARPU_MAXCUDADEVS}.
@end deftypefun

@node starpu_opencl_worker_get_count
@subsection @code{starpu_opencl_worker_get_count} -- Get the number of OpenCL devices controlled by StarPU
@deftypefun unsigned starpu_opencl_worker_get_count (void)
This function returns the number of OpenCL devices controlled by StarPU. The returned
value should be at most @code{STARPU_MAXOPENCLDEVS}.
@end deftypefun

@node starpu_spu_worker_get_count
@subsection @code{starpu_spu_worker_get_count} -- Get the number of Cell SPUs controlled by StarPU
@deftypefun unsigned starpu_spu_worker_get_count (void)
This function returns the number of Cell SPUs controlled by StarPU.
@end deftypefun

@node starpu_worker_get_id
@subsection @code{starpu_worker_get_id} -- Get the identifier of the current worker
@deftypefun int starpu_worker_get_id (void)
This function returns the identifier of the worker associated to the calling
thread. The returned value is either -1 if the current context is not a StarPU
worker (i.e. when called from the application outside a task or a callback), or
an integer between 0 and @code{starpu_worker_get_count() - 1}.
@end deftypefun

@node starpu_worker_get_ids_by_type
@subsection @code{starpu_worker_get_ids_by_type} -- Get the list of identifiers of workers with a given type
@deftypefun int starpu_worker_get_ids_by_type ({enum starpu_archtype} @var{type}, int *@var{workerids}, int @var{maxsize})
Fill the @var{workerids} array with the identifiers of the workers that have the type
indicated in the first argument. The @var{maxsize} argument indicates the size of the
@var{workerids} array. The returned value gives the number of identifiers that were put
in the array. @code{-ERANGE} is returned if @var{maxsize} is lower than the number of
workers with the appropriate type: in that case, the array is filled with the
@var{maxsize} first elements. To avoid such overflows, the value of @var{maxsize} can be
chosen by means of the @code{starpu_worker_get_count_by_type} function, or
by passing a value greater than or equal to @code{STARPU_NMAXWORKERS}.
@end deftypefun
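
For instance, enumerating all CUDA workers could look like this (a minimal
sketch):

@cartouche
@smallexample
int workerids[STARPU_NMAXWORKERS];
int nworkers = starpu_worker_get_ids_by_type(STARPU_CUDA_WORKER,
                                             workerids, STARPU_NMAXWORKERS);
int i;

for (i = 0; i < nworkers; i++)
    /* workerids[i] is the identifier of a CUDA worker */;
@end smallexample
@end cartouche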

@node starpu_worker_get_devid
@subsection @code{starpu_worker_get_devid} -- Get the device identifier of a worker
@deftypefun int starpu_worker_get_devid (int @var{id})
This function returns the device id of the worker associated to an identifier
(as returned by the @code{starpu_worker_get_id} function). In the case of a
CUDA worker, this device identifier is the logical device identifier exposed by
CUDA (used by the @code{cudaGetDevice} function for instance). The device
identifier of a CPU worker is the logical identifier of the core on which the
worker was bound; this identifier is either provided by the OS or by the
@code{hwloc} library in case it is available.
@end deftypefun

@node starpu_worker_get_type
@subsection @code{starpu_worker_get_type} -- Get the type of processing unit associated to a worker
@deftypefun {enum starpu_archtype} starpu_worker_get_type (int @var{id})
This function returns the type of worker associated to an identifier (as
returned by the @code{starpu_worker_get_id} function). The returned value
indicates the architecture of the worker: @code{STARPU_CPU_WORKER} for a CPU
core, @code{STARPU_CUDA_WORKER} for a CUDA device,
@code{STARPU_OPENCL_WORKER} for an OpenCL device, and
@code{STARPU_GORDON_WORKER} for a Cell SPU. The value returned for an invalid
identifier is unspecified.
@end deftypefun
  2913. @node starpu_worker_get_name
  2914. @subsection @code{starpu_worker_get_name} -- Get the name of a worker
  2915. @deftypefun void starpu_worker_get_name (int @var{id}, char *@var{dst}, size_t @var{maxlen})
  2916. StarPU associates a unique human readable string to each processing unit. This
  2917. function copies at most the @var{maxlen} first bytes of the unique string
  2918. associated to a worker identified by its identifier @var{id} into the
  2919. @var{dst} buffer. The caller is responsible for ensuring that the @var{dst}
  2920. is a valid pointer to a buffer of @var{maxlen} bytes at least. Calling this
  2921. function on an invalid identifier results in an unspecified behaviour.
@end deftypefun
@node starpu_worker_get_memory_node
@subsection @code{starpu_worker_get_memory_node} -- Get the memory node of a worker
@deftypefun unsigned starpu_worker_get_memory_node (unsigned @var{workerid})
This function returns the identifier of the memory node associated to the
worker identified by @var{workerid}.
@end deftypefun
@node Data Library
@section Data Library
This section describes the data management facilities provided by StarPU.
We show how to use existing data interfaces in @ref{Data Interfaces}, but developers can
design their own data interfaces if required.
@menu
* starpu_malloc:: Allocate data and pin it
* starpu_access_mode:: Data access mode
* unsigned memory_node:: Memory node
* starpu_data_handle:: StarPU opaque data handle
* void *interface:: StarPU data interface
* starpu_data_register:: Register a piece of data to StarPU
* starpu_data_unregister:: Unregister a piece of data from StarPU
* starpu_data_unregister_no_coherency:: Unregister a piece of data from StarPU without coherency
* starpu_data_invalidate:: Invalidate all data replicates
* starpu_data_acquire:: Access registered data from the application
* starpu_data_acquire_cb:: Access registered data from the application asynchronously
* STARPU_DATA_ACQUIRE_CB:: Access registered data from the application asynchronously, macro
* starpu_data_release:: Release registered data from the application
* starpu_data_set_wt_mask:: Set the Write-Through mask
* starpu_data_prefetch_on_node:: Prefetch data to a given node
@end menu
@node starpu_malloc
@subsection @code{starpu_malloc} -- Allocate data and pin it
@deftypefun int starpu_malloc (void **@var{A}, size_t @var{dim})
This function allocates data of the given size @var{dim} in main memory. It
will also try to pin it in CUDA or OpenCL, so that data transfers from this
buffer can be asynchronous, and thus permit overlapping data transfers with
computation. The allocated buffer must be freed with the @code{starpu_free}
function.
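For instance, a 1024-element float buffer suitable for asynchronous transfers
can be allocated and released as in the following sketch:
@smallexample
float *buffer;
starpu_malloc((void **)&buffer, 1024 * sizeof(float));
/* ... register the buffer, submit tasks working on it ... */
starpu_free(buffer);
@end smallexample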
@end deftypefun
@node starpu_access_mode
@subsection @code{starpu_access_mode} -- Data access mode
This datatype describes a data access mode. The different available modes are:
@table @asis
@item @code{STARPU_R} read-only mode.
@item @code{STARPU_W} write-only mode.
@item @code{STARPU_RW} read-write mode. This is equivalent to @code{STARPU_R|STARPU_W}.
@item @code{STARPU_SCRATCH} scratch memory. A temporary buffer is allocated for the task, but StarPU does not enforce data consistency, i.e. each device has its own buffer, independently of the others (even for CPUs). This is useful for temporary variables. For now, no behaviour is defined concerning the relation with the @code{STARPU_R/W} modes and the value provided at registration, i.e. the value of the scratch buffer is undefined at entry of the codelet function, but this is being considered for future extensions.
@item @code{STARPU_REDUX} reduction mode. TODO: document, as well as @code{starpu_data_set_reduction_methods}
@end table
@node unsigned memory_node
@subsection @code{unsigned memory_node} -- Memory node
@table @asis
@item @emph{Description}:
Every worker is associated to a memory node which is a logical abstraction of
the address space from which the processing unit gets its data. For instance,
the memory node associated to the different CPU workers represents main memory
(RAM), while the memory node associated to a GPU is the DRAM embedded on the
device. Every memory node is identified by a logical index which can be
obtained from the @code{starpu_worker_get_memory_node} function. When
registering a piece of data to StarPU, the specified memory node indicates
where the piece of data initially resides (we also call this memory node the
home node of a piece of data).
@end table
@node starpu_data_handle
@subsection @code{starpu_data_handle} -- StarPU opaque data handle
@table @asis
@item @emph{Description}:
StarPU uses @code{starpu_data_handle} as an opaque handle to manage a piece of
data. Once a piece of data has been registered to StarPU, it is associated to a
@code{starpu_data_handle} which keeps track of the state of the piece of data
over the entire machine, so that we can maintain data consistency and locate
data replicates for instance.
@end table
@node void *interface
@subsection @code{void *interface} -- StarPU data interface
@table @asis
@item @emph{Description}:
Data management is done at a high level in StarPU: rather than accessing a mere
list of contiguous buffers, the tasks may manipulate data that are described by
a high-level construct which we call data interface.
An example of data interface is the "vector" interface which describes a
contiguous data array on a specific memory node. This interface is a simple
structure containing the number of elements in the array, the size of the
elements, and the address of the array in the appropriate address space (this
address may be invalid if there is no valid copy of the array in the memory
node). More information on the data interfaces provided by StarPU is
given in @ref{Data Interfaces}.
When a piece of data managed by StarPU is used by a task, the task
implementation is given a pointer to an interface describing a valid copy of
the data that is accessible from the current processing unit.
@end table
@node starpu_data_register
@subsection @code{starpu_data_register} -- Register a piece of data to StarPU
@deftypefun void starpu_data_register (starpu_data_handle *@var{handleptr}, uint32_t @var{home_node}, void *@var{interface}, {struct starpu_data_interface_ops_t} *@var{ops})
Register a piece of data into the handle located at the @var{handleptr}
address. The @var{interface} buffer contains the initial description of the
data in the home node. The @var{ops} argument is a pointer to a structure
describing the different methods used to manipulate this type of interface. See
@ref{struct starpu_data_interface_ops_t} for more details on this structure.
If @code{home_node} is -1, StarPU will automatically
allocate the memory when it is used for the
first time in write-only mode. Once such a data handle has been automatically
allocated, it is possible to access it using any access mode.
Note that StarPU supplies a set of predefined types of interface (e.g. vector or
matrix) which can be registered by the means of helper functions (e.g.
@code{starpu_vector_data_register} or @code{starpu_matrix_data_register}).
@end deftypefun
@node starpu_data_unregister
@subsection @code{starpu_data_unregister} -- Unregister a piece of data from StarPU
@deftypefun void starpu_data_unregister (starpu_data_handle @var{handle})
This function unregisters a data handle from StarPU. If the data was
automatically allocated by StarPU because the home node was -1, all
automatically allocated buffers are freed. Otherwise, a valid copy of the data
is put back into the home node in the buffer that was initially registered.
Using a data handle that has been unregistered from StarPU results in
undefined behaviour.
@end deftypefun
@node starpu_data_unregister_no_coherency
@subsection @code{starpu_data_unregister_no_coherency} -- Unregister a piece of data from StarPU without coherency
@deftypefun void starpu_data_unregister_no_coherency (starpu_data_handle @var{handle})
This is the same as @code{starpu_data_unregister}, except that StarPU does not
put back a valid copy into the home node, in the buffer that was initially
registered.
@end deftypefun
@node starpu_data_invalidate
@subsection @code{starpu_data_invalidate} -- Invalidate all data replicates
@deftypefun void starpu_data_invalidate (starpu_data_handle @var{handle})
Destroy all replicates of the data handle. After data invalidation, the first
access to the handle must be performed in write-only mode. Accessing an
invalidated data in read mode results in undefined behaviour.
@end deftypefun
@c TODO create a specific section about user interaction with the DSM ?
@node starpu_data_acquire
@subsection @code{starpu_data_acquire} -- Access registered data from the application
@deftypefun int starpu_data_acquire (starpu_data_handle @var{handle}, starpu_access_mode @var{mode})
The application must call this function prior to accessing registered data from
main memory outside tasks. StarPU ensures that the application will get an
up-to-date copy of the data in main memory located where the data was
originally registered, and that all concurrent accesses (e.g. from tasks) will
be consistent with the access mode specified in the @var{mode} argument.
@code{starpu_data_release} must be called once the application does not need to
access the piece of data anymore. Note that implicit data
dependencies are also enforced by @code{starpu_data_acquire}, i.e.
@code{starpu_data_acquire} will wait for all tasks scheduled to work on
the data, unless they have been explicitly disabled by calling
@code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
@code{starpu_data_acquire} is a blocking call, so it cannot be called from
tasks or from their callbacks (in that case, @code{starpu_data_acquire} returns
@code{-EDEADLK}). Upon successful completion, this function returns 0.
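For instance, the application can consistently read a registered variable as
in the following sketch; @code{var} and @code{var_handle} are assumed to have
been registered beforehand with @code{starpu_variable_data_register}:
@smallexample
starpu_data_acquire(var_handle, STARPU_R);
/* The variable can now safely be read from main memory. */
fprintf(stderr, "var = %f\n", var);
starpu_data_release(var_handle);
@end smallexample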
@end deftypefun
@node starpu_data_acquire_cb
@subsection @code{starpu_data_acquire_cb} -- Access registered data from the application asynchronously
@deftypefun int starpu_data_acquire_cb (starpu_data_handle @var{handle}, starpu_access_mode @var{mode}, void (*@var{callback})(void *), void *@var{arg})
@code{starpu_data_acquire_cb} is the asynchronous equivalent of
@code{starpu_data_acquire}. When the data specified in the first argument is
available in the appropriate access mode, the callback function is executed.
The application may access the requested data during the execution of this
callback. The callback function must call @code{starpu_data_release} once the
application does not need to access the piece of data anymore.
Note that implicit data dependencies are also enforced by
@code{starpu_data_acquire_cb} in case they are enabled.
Contrary to @code{starpu_data_acquire}, this function is non-blocking and may
be called from task callbacks. Upon successful completion, this function
returns 0.
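A possible use is sketched below; @code{var} and @code{var_handle} are assumed
to be global variables registered beforehand with
@code{starpu_variable_data_register}, and the hypothetical @code{display_cb}
callback releases the handle once the data has been read:
@smallexample
float var;
starpu_data_handle var_handle;

void display_cb(void *arg)
@{
    /* The variable can safely be read from main memory here. */
    fprintf(stderr, "var = %f\n", var);
    starpu_data_release(var_handle);
@}

starpu_data_acquire_cb(var_handle, STARPU_R, display_cb, NULL);
@end smallexample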
@end deftypefun
@node STARPU_DATA_ACQUIRE_CB
@subsection @code{STARPU_DATA_ACQUIRE_CB} -- Access registered data from the application asynchronously, macro
@deftypefun STARPU_DATA_ACQUIRE_CB (starpu_data_handle @var{handle}, starpu_access_mode @var{mode}, code)
@code{STARPU_DATA_ACQUIRE_CB} is the same as @code{starpu_data_acquire_cb},
except that the code to be executed in a callback is directly provided as a
macro parameter, and the data handle is automatically released after it. This
makes it easy to execute code which depends on the value of some registered
data. This is non-blocking too and may be called from task callbacks.
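For instance, a registered variable can be displayed once it is available,
without blocking the caller; @code{var} and @code{var_handle} are assumed to
have been registered beforehand:
@smallexample
STARPU_DATA_ACQUIRE_CB(var_handle, STARPU_R,
                       fprintf(stderr, "var = %f\n", var));
@end smallexample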
@end deftypefun
@node starpu_data_release
@subsection @code{starpu_data_release} -- Release registered data from the application
@deftypefun void starpu_data_release (starpu_data_handle @var{handle})
This function releases the piece of data acquired by the application either by
@code{starpu_data_acquire} or by @code{starpu_data_acquire_cb}.
@end deftypefun
@node starpu_data_set_wt_mask
@subsection @code{starpu_data_set_wt_mask} -- Set the Write-Through mask
@deftypefun void starpu_data_set_wt_mask (starpu_data_handle @var{handle}, uint32_t @var{wt_mask})
This function sets the write-through mask of a given data, i.e. a bitmask of
nodes where the data should always be replicated after modification.
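For instance, the following sketch requests that any modification of
@code{handle} be immediately propagated back to memory node 0 (main memory):
@smallexample
starpu_data_set_wt_mask(handle, 1<<0);
@end smallexample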
@end deftypefun
@node starpu_data_prefetch_on_node
@subsection @code{starpu_data_prefetch_on_node} -- Prefetch data to a given node
@deftypefun int starpu_data_prefetch_on_node (starpu_data_handle @var{handle}, unsigned @var{node}, unsigned @var{async})
Issue a prefetch request for a given data to a given node, i.e.
requests that the data be replicated to the given node, so that it is available
there for tasks. If the @var{async} parameter is 0, the call will block until
the transfer is complete, otherwise the call will return as soon as the request
is scheduled (which may however have to wait for a task completion).
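For instance, the following sketch asynchronously prefetches
@code{vector_handle} (a hypothetical registered handle) to the memory node of
a given worker, so that the data is likely to be available when a task runs on
that worker:
@smallexample
unsigned node = starpu_worker_get_memory_node(workerid);
starpu_data_prefetch_on_node(vector_handle, node, 1);
@end smallexample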
@end deftypefun
@node Data Interfaces
@section Data Interfaces
There are several ways to register a memory region so that it can be
managed by StarPU. The functions below allow the registration of
vectors, 2D matrices, and 3D matrices, as well as BCSR and CSR sparse
matrices.
@deftypefun void starpu_variable_data_register ({starpu_data_handle *}@var{handle}, uint32_t @var{home_node}, uintptr_t @var{ptr}, size_t @var{size})
Register the @var{size}-byte element pointed to by @var{ptr}, which is
typically a scalar, and initialize @var{handle} to represent this data
item.
@smallexample
float var;
starpu_data_handle var_handle;
starpu_variable_data_register(&var_handle, 0, (uintptr_t)&var, sizeof(var));
@end smallexample
@end deftypefun
@deftypefun void starpu_vector_data_register ({starpu_data_handle *}@var{handle}, uint32_t @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{count}, size_t @var{size})
Register the @var{count} @var{size}-byte elements pointed to by
@var{ptr} and initialize @var{handle} to represent it.
@example
float vector[NX];
starpu_data_handle vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end example
@end deftypefun
@deftypefun void starpu_matrix_data_register ({starpu_data_handle *}@var{handle}, uint32_t @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{ld}, uint32_t @var{nx}, uint32_t @var{ny}, size_t @var{size})
Register the @var{nx}x@var{ny} 2D matrix of @var{size}-byte elements
pointed by @var{ptr} and initialize @var{handle} to represent it.
@var{ld} specifies the number of elements between the beginnings of two
consecutive rows (the leading dimension); when @var{ld} is greater than
@var{nx}, the extra elements at the end of each row are padding, which
can be useful for alignment purposes.
@example
float *matrix;
starpu_data_handle matrix_handle;
matrix = (float*)malloc(width * height * sizeof(float));
starpu_matrix_data_register(&matrix_handle, 0, (uintptr_t)matrix,
                            width, width, height, sizeof(float));
@end example
@end deftypefun
@deftypefun void starpu_block_data_register ({starpu_data_handle *}@var{handle}, uint32_t @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{ldy}, uint32_t @var{ldz}, uint32_t @var{nx}, uint32_t @var{ny}, uint32_t @var{nz}, size_t @var{size})
Register the @var{nx}x@var{ny}x@var{nz} 3D matrix of @var{size}-byte
elements pointed by @var{ptr} and initialize @var{handle} to represent
it. Similarly, @var{ldy} specifies the number of elements between the
beginnings of two consecutive rows, and @var{ldz} the number of elements
between the beginnings of two consecutive z planes; any extra elements
are padding.
@example
float *block;
starpu_data_handle block_handle;
block = (float*)malloc(nx*ny*nz*sizeof(float));
starpu_block_data_register(&block_handle, 0, (uintptr_t)block,
                           nx, nx*ny, nx, ny, nz, sizeof(float));
@end example
@end deftypefun
@deftypefun void starpu_bcsr_data_register (starpu_data_handle *@var{handle}, uint32_t @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, uint32_t @var{r}, uint32_t @var{c}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the BCSR (Blocked
Compressed Sparse Row Representation) sparse matrix interface.
TODO
@end deftypefun
@deftypefun void starpu_csr_data_register (starpu_data_handle *@var{handle}, uint32_t @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the CSR (Compressed
Sparse Row Representation) sparse matrix interface.
TODO
@end deftypefun
@node Data Partition
@section Data Partition
@menu
* struct starpu_data_filter:: StarPU filter structure
* starpu_data_partition:: Partition Data
* starpu_data_unpartition:: Unpartition Data
* starpu_data_get_nb_children::
* starpu_data_get_sub_data::
* Predefined filter functions::
@end menu
@node struct starpu_data_filter
@subsection @code{struct starpu_data_filter} -- StarPU filter structure
@table @asis
@item @emph{Description}:
The filter structure describes a data partitioning operation, to be given to the
@code{starpu_data_partition} function, see @ref{starpu_data_partition} for an example.
@item @emph{Fields}:
@table @asis
@item @code{filter_func}:
This function fills the @code{child_interface} structure with interface
information for the @code{id}-th child of the parent @code{father_interface} (among @code{nparts}).
@code{void (*filter_func)(void *father_interface, void* child_interface, struct starpu_data_filter *, unsigned id, unsigned nparts);}
@item @code{nchildren}:
This is the number of parts to partition the data into.
@item @code{get_nchildren}:
This returns the number of children. This can be used instead of @code{nchildren} when the number of
children depends on the actual data (e.g. the number of blocks in a sparse
matrix).
@code{unsigned (*get_nchildren)(struct starpu_data_filter *, starpu_data_handle initial_handle);}
@item @code{get_child_ops}:
In case the resulting children use a different data interface, this function
returns which interface is used by child number @code{id}.
@code{struct starpu_data_interface_ops_t *(*get_child_ops)(struct starpu_data_filter *, unsigned id);}
@item @code{filter_arg}:
Some filters take an additional parameter, but this is usually unused.
@item @code{filter_arg_ptr}:
Some filters take an additional array parameter like the sizes of the parts, but
this is usually unused.
@end table
@end table
@node starpu_data_partition
@subsection starpu_data_partition -- Partition Data
@table @asis
@item @emph{Description}:
This requests the partitioning of the StarPU data @code{initial_handle} into
several pieces of subdata, according to the filter @code{f}.
@item @emph{Prototype}:
@code{void starpu_data_partition(starpu_data_handle initial_handle, struct starpu_data_filter *f);}
@item @emph{Example}:
@cartouche
@smallexample
struct starpu_data_filter f = @{
    .filter_func = starpu_vertical_block_filter_func,
    .nchildren = nslicesx,
    .get_nchildren = NULL,
    .get_child_ops = NULL
@};
starpu_data_partition(A_handle, &f);
@end smallexample
@end cartouche
@end table
@node starpu_data_unpartition
@subsection starpu_data_unpartition -- Unpartition data
@table @asis
@item @emph{Description}:
This unapplies one filter, thus unpartitioning the data. The pieces of data are
collected back into one big piece in the @code{gathering_node} (usually 0).
@item @emph{Prototype}:
@code{void starpu_data_unpartition(starpu_data_handle root_data, uint32_t gathering_node);}
@item @emph{Example}:
@cartouche
@smallexample
starpu_data_unpartition(A_handle, 0);
@end smallexample
@end cartouche
@end table
@node starpu_data_get_nb_children
@subsection starpu_data_get_nb_children
@table @asis
@item @emph{Description}:
This function returns the number of children @code{handle} has been
partitioned into.
@item @emph{Return value}:
The number of children.
@item @emph{Prototype}:
@code{int starpu_data_get_nb_children(starpu_data_handle handle);}
@end table
@c starpu_data_handle starpu_data_get_child(starpu_data_handle handle, unsigned i);
@node starpu_data_get_sub_data
@subsection starpu_data_get_sub_data
@table @asis
@item @emph{Description}:
After partitioning a StarPU data by applying a filter,
@code{starpu_data_get_sub_data} can be used to get handles for each of the data
portions. @code{root_data} is the parent data that was partitioned. @code{depth}
is the number of filters to traverse (in case several filters have been applied,
to e.g. partition in row blocks, and then in column blocks), and the subsequent
parameters are the indexes.
@item @emph{Return value}:
A handle to the subdata.
@item @emph{Prototype}:
@code{starpu_data_handle starpu_data_get_sub_data(starpu_data_handle root_data, unsigned depth, ... );}
@item @emph{Example}:
@cartouche
@smallexample
h = starpu_data_get_sub_data(A_handle, 1, taskx);
@end smallexample
@end cartouche
@end table
@node Predefined filter functions
@subsection Predefined filter functions
@menu
* Partitioning BCSR Data::
* Partitioning BLAS interface::
* Partitioning Vector Data::
* Partitioning Block Data::
@end menu
This section gives a partial list of the predefined partitioning functions.
Examples on how to use them are shown in @ref{Partitioning Data}. The complete
list can be found in @code{starpu_data_filters.h}.
@node Partitioning BCSR Data
@subsubsection Partitioning BCSR Data
@deftypefun void starpu_canonical_block_filter_bcsr (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
TODO
@end deftypefun
@deftypefun void starpu_vertical_block_filter_func_csr (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
TODO
@end deftypefun
@node Partitioning BLAS interface
@subsubsection Partitioning BLAS interface
@deftypefun void starpu_block_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix into horizontal blocks.
@end deftypefun
@deftypefun void starpu_vertical_block_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix into vertical blocks.
@end deftypefun
@node Partitioning Vector Data
@subsubsection Partitioning Vector Data
@deftypefun void starpu_block_filter_func_vector (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into blocks of the same size.
@end deftypefun
@deftypefun void starpu_vector_list_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into blocks of sizes given in the @var{filter_arg_ptr}
field of @var{f}, which is assumed to point to a @code{uint32_t} array.
@end deftypefun
@deftypefun void starpu_vector_divide_in_2_filter_func (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a vector into two blocks, the first block size being given in
the @var{filter_arg} field of @var{f}.
@end deftypefun
@node Partitioning Block Data
@subsubsection Partitioning Block Data
@deftypefun void starpu_block_filter_func_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a 3D matrix along the X axis.
@end deftypefun
@node Codelets and Tasks
@section Codelets and Tasks
This section describes the interface to manipulate codelets and tasks.
@deftp {Data Type} {struct starpu_codelet}
The codelet structure describes a kernel that is possibly implemented on various
targets. For compatibility, make sure to initialize the whole structure to zero.
@table @asis
@item @code{where}
Indicates which types of processing units are able to execute the codelet.
@code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
implemented for both CPU cores and CUDA devices, while @code{STARPU_GORDON}
indicates that it is only available on Cell SPUs.
@item @code{cpu_func} (optional)
Is a function pointer to the CPU implementation of the codelet. Its prototype
must be: @code{void cpu_func(void *buffers[], void *cl_arg)}. The first
argument is the array of data buffers managed by the data management library,
and the second argument is a pointer to the argument passed from the
@code{cl_arg} field of the @code{starpu_task} structure.
The @code{cpu_func} field is ignored if @code{STARPU_CPU} does not appear in
the @code{where} field; it must be non-null otherwise. When multiple CPU
implementations are used, this field must be set to
@code{STARPU_MULTIPLE_CPU_IMPLEMENTATIONS}.
@item @code{cpu_funcs} (optional)
Is an array of function pointers to the CPU implementations of the codelet. This
field is ignored unless the @code{cpu_func} field is set to
@code{STARPU_MULTIPLE_CPU_IMPLEMENTATIONS}. Otherwise, it should contain at
least one function pointer, and at most @code{STARPU_MAXIMPLEMENTATIONS}.
@item @code{cuda_func} (optional)
Is a function pointer to the CUDA implementation of the codelet. @emph{This
must be a host function written in the CUDA runtime API}. Its prototype must
be: @code{void cuda_func(void *buffers[], void *cl_arg);}. The @code{cuda_func}
field is ignored if @code{STARPU_CUDA} does not appear in the @code{where}
field; it must be non-null otherwise. When multiple CUDA implementations are
used, this field must be set to @code{STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS}.
@item @code{cuda_funcs} (optional)
Is an array of function pointers to the CUDA implementations of the codelet.
This field is ignored unless the @code{cuda_func} field is set to
@code{STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS}. Otherwise, it should contain at
least one function pointer, and at most @code{STARPU_MAXIMPLEMENTATIONS}.
@item @code{opencl_func} (optional)
Is a function pointer to the OpenCL implementation of the codelet. Its
prototype must be:
@code{void opencl_func(starpu_data_interface_t *descr, void *arg);}.
This pointer is ignored if @code{STARPU_OPENCL} does not appear in the
@code{where} field; it must be non-null otherwise. When multiple OpenCL
implementations are used, this field must be set to
@code{STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS}.
@item @code{opencl_funcs} (optional)
Is an array of function pointers to the OpenCL implementations of the codelet.
This field is ignored unless the @code{opencl_func} field is set to
@code{STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS}. Otherwise, it should contain
at least one function pointer, and at most @code{STARPU_MAXIMPLEMENTATIONS}.
@item @code{gordon_func} (optional)
This is the index of the Cell SPU implementation within the Gordon library.
See the Gordon documentation for more details on how to register a kernel and
retrieve its index.
@item @code{nbuffers}
Specifies the number of arguments taken by the codelet. These arguments are
managed by the DSM and are accessed from the @code{void *buffers[]}
array. The constant argument passed with the @code{cl_arg} field of the
@code{starpu_task} structure is not counted in this number. This value should
not be above @code{STARPU_NMAXBUFS}.
@item @code{model} (optional)
This is a pointer to the task duration performance model associated to this
codelet. This optional field is ignored when set to @code{NULL}.
TODO
@item @code{power_model} (optional)
This is a pointer to the task power consumption performance model associated
to this codelet. This optional field is ignored when set to @code{NULL}.
In the case of parallel codelets, this has to account for all processing units
involved in the parallel execution.
TODO
@end table
@end deftp
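As an illustration, a minimal codelet for a single-buffer CPU kernel could be
defined as in the following sketch. The @code{scal_cpu_func} implementation is
hypothetical, and the vector-interface accessor macros
@code{STARPU_VECTOR_GET_NX} and @code{STARPU_VECTOR_GET_PTR} are assumed to be
available:
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
    /* Hypothetical CPU implementation: scale a vector in place. */
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    float *factor = cl_arg;
    unsigned i;
    for (i = 0; i < n; i++)
        val[i] *= *factor;
@}

struct starpu_codelet cl = @{
    .where = STARPU_CPU,
    .cpu_func = scal_cpu_func,
    .nbuffers = 1
@};
@end smallexample
@end cartouche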
@deftp {Data Type} {struct starpu_task}
The @code{starpu_task} structure describes a task that can be offloaded on the various
processing units managed by StarPU. It instantiates a codelet. It can either be
allocated dynamically with the @code{starpu_task_create} method, or declared
statically. In the latter case, the programmer has to zero the
@code{starpu_task} structure and to fill the different fields properly. The
indicated default values correspond to the configuration of a task allocated
with @code{starpu_task_create}.
@table @asis
@item @code{cl}
Is a pointer to the corresponding @code{starpu_codelet} data structure. This
describes where the kernel should be executed, and supplies the appropriate
implementations. When set to @code{NULL}, no code is executed during the task;
such empty tasks can be useful for synchronization purposes.
@item @code{buffers}
Is an array of @code{starpu_buffer_descr_t} structures. It describes the
different pieces of data accessed by the task, and how they should be accessed.
The @code{starpu_buffer_descr_t} structure is composed of two fields: the
@code{handle} field specifies the handle of the piece of data, and the
@code{mode} field is the required access mode (e.g. @code{STARPU_RW}). The number
of entries in this array must be specified in the @code{nbuffers} field of the
@code{starpu_codelet} structure, and should not exceed @code{STARPU_NMAXBUFS}.
If insufficient, this value can be increased with the @code{--enable-maxbuffers}
option when configuring StarPU.
@item @code{cl_arg} (optional; default: @code{NULL})
This pointer is passed to the codelet through the second argument
of the codelet implementation (e.g. @code{cpu_func} or @code{cuda_func}).
In the specific case of the Cell processor, see the @code{cl_arg_size}
argument.
@item @code{cl_arg_size} (optional, Cell-specific)
In the case of the Cell processor, the @code{cl_arg} pointer is not directly
given to the SPU function. A buffer of size @code{cl_arg_size} is allocated on
the SPU. This buffer is then filled with the @code{cl_arg_size} bytes starting
at address @code{cl_arg}. In this case, the argument given to the SPU codelet
is therefore not the @code{cl_arg} pointer, but the address of the buffer in
local store (LS) instead. This field is ignored for CPU, CUDA and OpenCL
codelets, where the @code{cl_arg} pointer is given as such.
@item @code{callback_func} (optional) (default: @code{NULL})
This is a function pointer of prototype @code{void (*f)(void *)} which
specifies a possible callback. If this pointer is non-null, the callback
function is executed @emph{on the host} after the execution of the task. The
callback is passed the value contained in the @code{callback_arg} field. No
callback is executed if the field is set to @code{NULL}.
@item @code{callback_arg} (optional) (default: @code{NULL})
This is the pointer passed to the callback function. This field is ignored if
the @code{callback_func} field is set to @code{NULL}.
@item @code{use_tag} (optional) (default: @code{0})
If set, this flag indicates that the task should be associated with the tag
contained in the @code{tag_id} field. Tags allow the application to synchronize
with the task and to express task dependencies easily.
@item @code{tag_id}
This field contains the tag associated to the task if the @code{use_tag} field
was set; it is ignored otherwise.
@item @code{synchronous}
If this flag is set, the @code{starpu_task_submit} function is blocking and
returns only when the task has been executed (or if no worker is able to
process the task). Otherwise, @code{starpu_task_submit} returns immediately.
@item @code{priority} (optional) (default: @code{STARPU_DEFAULT_PRIO})
This field indicates a level of priority for the task. This is an integer value
that must be set between the return values of the
@code{starpu_sched_get_min_priority} function for the least important tasks,
and that of the @code{starpu_sched_get_max_priority} function for the most
important tasks (included). The @code{STARPU_MIN_PRIO} and @code{STARPU_MAX_PRIO}
macros are provided for convenience and respectively return the values of
@code{starpu_sched_get_min_priority} and @code{starpu_sched_get_max_priority}.
The default priority is @code{STARPU_DEFAULT_PRIO}, which is always defined as 0
in order to allow static task initialization. Scheduling strategies that take
priorities into account can use this parameter to take better scheduling
decisions, but the scheduling policy may also ignore it.
@item @code{execute_on_a_specific_worker} (default: @code{0})
If this flag is set, StarPU will bypass the scheduler and directly assign this
task to the worker specified by the @code{workerid} field.
@item @code{workerid} (optional)
If the @code{execute_on_a_specific_worker} field is set, this field indicates
the identifier of the worker that should process this task (as
returned by @code{starpu_worker_get_id}). This field is ignored if the
@code{execute_on_a_specific_worker} field is set to 0.
@item @code{detach} (optional) (default: @code{1})
If this flag is set, it is not possible to synchronize with the task
by the means of @code{starpu_task_wait} later on. Internal data structures
are only guaranteed to be freed once @code{starpu_task_wait} is called if the
flag is not set.
@item @code{destroy} (optional) (default: @code{1})
If this flag is set, the task structure will automatically be freed, either
after the execution of the callback if the task is detached, or during
@code{starpu_task_wait} otherwise. If this flag is not set, dynamically
allocated data structures will not be freed until @code{starpu_task_destroy} is
called explicitly. Setting this flag for a statically allocated task structure
will result in undefined behaviour.
@item @code{predicted} (output field)
Predicted duration of the task. This field is only set if the scheduling
strategy uses performance models.
@end table
@end deftp
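Putting it together, a task using the codelet sketched above could be built
and submitted as follows (error checking omitted); @code{vector_handle} is
assumed to be a previously registered data handle:
@cartouche
@smallexample
float factor = 3.14;
struct starpu_task *task = starpu_task_create();

task->cl = &cl;
task->buffers[0].handle = vector_handle;
task->buffers[0].mode = STARPU_RW;
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->synchronous = 1;

starpu_task_submit(task);
@end smallexample
@end cartouche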
@deftypefun void starpu_task_init ({struct starpu_task} *@var{task})
Initialize @var{task} with default values. This function is implicitly
called by @code{starpu_task_create}. By default, tasks initialized with
@code{starpu_task_init} must be deinitialized explicitly with
@code{starpu_task_deinit}. Tasks can also be initialized statically, using the
constant @code{STARPU_TASK_INITIALIZER}.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_create (void)
Allocate a task structure and initialize it with default values. Tasks
allocated dynamically with @code{starpu_task_create} are automatically freed when the
task is terminated. If the destroy flag is explicitly unset, the resources used
by the task must be freed by calling
@code{starpu_task_destroy}.
@end deftypefun
@deftypefun void starpu_task_deinit ({struct starpu_task} *@var{task})
Release all the structures automatically allocated to execute @var{task}. This is
called automatically by @code{starpu_task_destroy}, but the task structure itself is not
freed. This should be used for statically allocated tasks for instance.
@end deftypefun
@deftypefun void starpu_task_destroy ({struct starpu_task} *@var{task})
Free the resources allocated during @code{starpu_task_create} and
associated with @var{task}. This function can be called automatically
after the execution of a task by setting the @code{destroy} flag of the
@code{starpu_task} structure (default behaviour). Calling this function
on a statically allocated task results in undefined behaviour.
@end deftypefun
@deftypefun int starpu_task_wait ({struct starpu_task} *@var{task})
This function blocks until @var{task} has been executed. It is not possible to
synchronize with a task more than once. It is not possible to wait for
synchronous or detached tasks.
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the specified task was either synchronous or detached.
@end deftypefun
@deftypefun int starpu_task_submit ({struct starpu_task} *@var{task})
This function submits @var{task} to StarPU. Calling this function does
not mean that the task will be executed immediately as there can be data or task
(tag) dependencies that are not fulfilled yet: StarPU will take care of
scheduling this task with respect to such dependencies.
This function returns immediately if the @code{synchronous} field of the
@code{starpu_task} structure was set to 0, and blocks until the termination of
the task otherwise. It is also possible to synchronize the application with
asynchronous tasks by the means of tags, using the @code{starpu_tag_wait}
function for instance.
In case of success, this function returns 0; a return value of @code{-ENODEV}
means that there is no worker able to process this task (e.g. there is no GPU
available and this task is only implemented for CUDA devices).
@end deftypefun
@deftypefun int starpu_task_wait_for_all (void)
This function blocks until all the tasks that were submitted are terminated.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_get_current_task (void)
This function returns the task currently executed by the worker, or
NULL if it is called either from a thread that is not a StarPU worker or
simply because there is no task being executed at the moment.
@end deftypefun
@deftypefun void starpu_display_codelet_stats ({struct starpu_codelet_t} *@var{cl})
Output on @code{stderr} some statistics on the codelet @var{cl}.
@end deftypefun
@c Callbacks : what can we put in callbacks ?
@node Explicit Dependencies
@section Explicit Dependencies
@menu
* starpu_task_declare_deps_array:: starpu_task_declare_deps_array
* starpu_tag_t:: Task logical identifier
* starpu_tag_declare_deps:: Declare the Dependencies of a Tag
* starpu_tag_declare_deps_array:: Declare the Dependencies of a Tag
* starpu_tag_wait:: Block until a Tag is terminated
* starpu_tag_wait_array:: Block until a set of Tags is terminated
* starpu_tag_remove:: Destroy a Tag
* starpu_tag_notify_from_apps:: Feed a tag explicitly
@end menu
@node starpu_task_declare_deps_array
@subsection @code{starpu_task_declare_deps_array} -- Declare task dependencies
@deftypefun void starpu_task_declare_deps_array ({struct starpu_task} *@var{task}, unsigned @var{ndeps}, {struct starpu_task} *@var{task_array}[])
Declare task dependencies between a @var{task} and an array of tasks of length
@var{ndeps}. This function must be called prior to the submission of the task,
but it may be called after the submission or the execution of the tasks in the
array, provided the tasks are still valid (i.e. they were not automatically
destroyed). Calling this function on a task that was already submitted or with
an entry of @var{task_array} that is not a valid task anymore results in
undefined behaviour. If @var{ndeps} is 0, no dependency is added. It is
possible to call @code{starpu_task_declare_deps_array} multiple times on the
same task; in this case, the dependencies are added. It is possible to have
redundancy in the task dependencies.
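For instance, the following sketch makes @code{task_c} (not yet submitted)
depend on the completion of @code{task_a} and @code{task_b}, two hypothetical
task pointers:
@smallexample
struct starpu_task *deps[2] = @{task_a, task_b@};
starpu_task_declare_deps_array(task_c, 2, deps);
@end smallexample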
@end deftypefun
@node starpu_tag_t
@subsection @code{starpu_tag_t} -- Task logical identifier
@table @asis
@item @emph{Description}:
It is possible to associate a task with a unique ``tag'' chosen by the application, and to express
dependencies between tasks by the means of those tags. To do so, fill the
@code{tag_id} field of the @code{starpu_task} structure with a tag number (can
be arbitrary) and set the @code{use_tag} field to 1.
If @code{starpu_tag_declare_deps} is called with this tag number, the task will
not be started until the tasks which hold the declared dependency tags have
completed.
@end table
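For instance, a task can be associated with tag @code{0x42} as in the
following sketch:
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();
task->use_tag = 1;
task->tag_id = 0x42;
@end smallexample
@end cartouche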
@node starpu_tag_declare_deps
@subsection @code{starpu_tag_declare_deps} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
Specify the dependencies of the task identified by tag @code{id}. The first
argument specifies the tag which is configured, the second argument gives the
number of tag(s) on which @code{id} depends. The following arguments are the
tags which have to be terminated to unlock the task.
This function must be called before the associated task is submitted to StarPU
with @code{starpu_task_submit}.
@item @emph{Remark}
Because of the variable arity of @code{starpu_tag_declare_deps}, note that the
last arguments @emph{must} be of type @code{starpu_tag_t}: constant values
typically need to be explicitly cast. Using the
@code{starpu_tag_declare_deps_array} function avoids this hazard.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps(starpu_tag_t id, unsigned ndeps, ...);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_declare_deps((starpu_tag_t)0x1,
        2, (starpu_tag_t)0x32, (starpu_tag_t)0x52);
@end example
@end cartouche
@end table
@node starpu_tag_declare_deps_array
@subsection @code{starpu_tag_declare_deps_array} -- Declare the Dependencies of a Tag
@table @asis
@item @emph{Description}:
This function is similar to @code{starpu_tag_declare_deps}, except that it
does not take a variable number of arguments but an array of tags of size
@code{ndeps}.
@item @emph{Prototype}:
@code{void starpu_tag_declare_deps_array(starpu_tag_t id, unsigned ndeps, starpu_tag_t *array);}
@item @emph{Example}:
@cartouche
@example
/* Tag 0x1 depends on tags 0x32 and 0x52 */
starpu_tag_t tag_array[2] = @{0x32, 0x52@};
starpu_tag_declare_deps_array((starpu_tag_t)0x1, 2, tag_array);
@end example
@end cartouche
@end table
@node starpu_tag_wait
@subsection @code{starpu_tag_wait} -- Block until a Tag is terminated
@deftypefun void starpu_tag_wait (starpu_tag_t @var{id})
This function blocks until the task associated to tag @var{id} has been
executed. This is a blocking call which must therefore not be called within
tasks or callbacks, but only from the application directly. It is possible to
synchronize with the same tag multiple times, as long as the
@code{starpu_tag_remove} function is not called. Note that it is still
possible to synchronize with a tag associated to a task whose @code{starpu_task}
data structure was freed (e.g. if the @code{destroy} flag of the
@code{starpu_task} was enabled).
@end deftypefun
@node starpu_tag_wait_array
@subsection @code{starpu_tag_wait_array} -- Block until a set of Tags is terminated
@deftypefun void starpu_tag_wait_array (unsigned @var{ntags}, starpu_tag_t *@var{id})
This function is similar to @code{starpu_tag_wait} except that it blocks until
@emph{all} the @var{ntags} tags contained in the @var{id} array are
terminated.
@end deftypefun
@node starpu_tag_remove
@subsection @code{starpu_tag_remove} -- Destroy a Tag
@deftypefun void starpu_tag_remove (starpu_tag_t @var{id})
This function releases the resources associated to tag @var{id}. It can be
called once the corresponding task has been executed and when there is
no other tag that depends on this tag anymore.
@end deftypefun
@node starpu_tag_notify_from_apps
@subsection @code{starpu_tag_notify_from_apps} -- Feed a Tag explicitly
@deftypefun void starpu_tag_notify_from_apps (starpu_tag_t @var{id})
This function explicitly unlocks tag @var{id}. It may be useful in the
case of applications which execute part of their computation outside StarPU
tasks (e.g. third-party libraries). It is also provided as a
convenient tool for the programmer, for instance to entirely construct the task
DAG before actually giving StarPU the opportunity to execute the tasks.
@end deftypefun
@node Implicit Data Dependencies
@section Implicit Data Dependencies
@menu
* starpu_data_set_default_sequential_consistency_flag:: starpu_data_set_default_sequential_consistency_flag
* starpu_data_get_default_sequential_consistency_flag:: starpu_data_get_default_sequential_consistency_flag
* starpu_data_set_sequential_consistency_flag:: starpu_data_set_sequential_consistency_flag
@end menu
In this section, we describe how StarPU makes it possible to insert implicit
task dependencies in order to enforce sequential data consistency. When this
data consistency is enabled on a specific data handle, any data access will
appear as sequentially consistent from the application. For instance, if the
application submits two tasks that access the same piece of data in read-only
mode, and then a third task that accesses it in write mode, dependencies will be
added between the first two tasks and the third one. Implicit data dependencies
are also inserted in the case of data accesses from the application.
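The following sketch illustrates this with the low-level task API;
@code{cl_read} and @code{cl_write} are hypothetical one-buffer codelets. The
two read-only tasks may execute concurrently, and StarPU automatically makes
the writing task depend on both:
@cartouche
@smallexample
struct starpu_task *t1 = starpu_task_create();
t1->cl = &cl_read;
t1->buffers[0].handle = handle;
t1->buffers[0].mode = STARPU_R;
starpu_task_submit(t1);

struct starpu_task *t2 = starpu_task_create();
t2->cl = &cl_read;
t2->buffers[0].handle = handle;
t2->buffers[0].mode = STARPU_R;
starpu_task_submit(t2);

/* t3 writes the data: StarPU makes it wait for t1 and t2. */
struct starpu_task *t3 = starpu_task_create();
t3->cl = &cl_write;
t3->buffers[0].handle = handle;
t3->buffers[0].mode = STARPU_W;
starpu_task_submit(t3);
@end smallexample
@end cartouche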
@node starpu_data_set_default_sequential_consistency_flag
@subsection @code{starpu_data_set_default_sequential_consistency_flag} -- Set default sequential consistency flag
@deftypefun void starpu_data_set_default_sequential_consistency_flag (unsigned @var{flag})
Set the default sequential consistency flag. If a non-zero value is passed,
sequential data consistency will be enforced for all handles registered after
this function call; otherwise it is disabled. By default, StarPU enables
sequential data consistency. It is also possible to select the data consistency
mode of a specific data handle with the
@code{starpu_data_set_sequential_consistency_flag} function.
@end deftypefun
@node starpu_data_get_default_sequential_consistency_flag
@subsection @code{starpu_data_get_default_sequential_consistency_flag} -- Get current default sequential consistency flag
@deftypefun unsigned starpu_data_get_default_sequential_consistency_flag (void)
This function returns the current default sequential consistency flag.
@end deftypefun
@node starpu_data_set_sequential_consistency_flag
@subsection @code{starpu_data_set_sequential_consistency_flag} -- Set data sequential consistency mode
@deftypefun void starpu_data_set_sequential_consistency_flag (starpu_data_handle @var{handle}, unsigned @var{flag})
Select the data consistency mode associated to a data handle. The consistency
mode set using this function has priority over the default mode which can
be set with @code{starpu_data_set_default_sequential_consistency_flag}.
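For instance, implicit dependencies can be disabled for a given handle as in
the following sketch, typically because the application manages its
dependencies explicitly through tags:
@smallexample
starpu_data_set_sequential_consistency_flag(handle, 0);
@end smallexample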
@end deftypefun
@node Performance Model API
@section Performance Model API
@menu
* starpu_load_history_debug::
* starpu_perfmodel_debugfilepath::
* starpu_perfmodel_get_arch_name::
* starpu_force_bus_sampling::
@end menu
@node starpu_load_history_debug
@subsection @code{starpu_load_history_debug}
@deftypefun int starpu_load_history_debug ({const char} *@var{symbol}, {struct starpu_perfmodel_t} *@var{model})
TODO
@end deftypefun
@node starpu_perfmodel_debugfilepath
@subsection @code{starpu_perfmodel_debugfilepath}
@deftypefun void starpu_perfmodel_debugfilepath ({struct starpu_perfmodel_t} *@var{model}, {enum starpu_perf_archtype} @var{arch}, char *@var{path}, size_t @var{maxlen})
TODO
@end deftypefun
@node starpu_perfmodel_get_arch_name
@subsection @code{starpu_perfmodel_get_arch_name}
@deftypefun void starpu_perfmodel_get_arch_name ({enum starpu_perf_archtype} @var{arch}, char *@var{archname}, size_t @var{maxlen})
TODO
@end deftypefun
@node starpu_force_bus_sampling
@subsection @code{starpu_force_bus_sampling}
@deftypefun void starpu_force_bus_sampling (void)
This forces sampling the bus performance model again.
@end deftypefun
@node Profiling API
@section Profiling API
@menu
* starpu_profiling_status_set:: starpu_profiling_status_set
* starpu_profiling_status_get:: starpu_profiling_status_get
* struct starpu_task_profiling_info:: task profiling information
* struct starpu_worker_profiling_info:: worker profiling information
* starpu_worker_get_profiling_info:: starpu_worker_get_profiling_info
* struct starpu_bus_profiling_info:: bus profiling information
* starpu_bus_get_count::
* starpu_bus_get_id::
* starpu_bus_get_src::
* starpu_bus_get_dst::
* starpu_timing_timespec_delay_us::
* starpu_timing_timespec_to_us::
* starpu_bus_profiling_helper_display_summary::
* starpu_worker_profiling_helper_display_summary::
@end menu
@node starpu_profiling_status_set
@subsection @code{starpu_profiling_status_set} -- Set current profiling status
@table @asis
@item @emph{Description}:
This function sets the profiling status. Profiling is activated by passing
@code{STARPU_PROFILING_ENABLE} in @code{status}. Passing
@code{STARPU_PROFILING_DISABLE} disables profiling. Calling this function
resets all profiling measurements. When profiling is enabled, the
@code{profiling_info} field of the @code{struct starpu_task} structure points
to a valid @code{struct starpu_task_profiling_info} structure containing
information about the execution of the task.
@item @emph{Return value}:
Negative return values indicate an error, otherwise the previous status is
returned.
@item @emph{Prototype}:
@code{int starpu_profiling_status_set(int status);}
@end table
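For instance, profiling may be enabled right after StarPU initialization as in
the following sketch:
@cartouche
@smallexample
int previous = starpu_profiling_status_set(STARPU_PROFILING_ENABLE);
if (previous < 0)
    fprintf(stderr, "could not enable profiling\n");
@end smallexample
@end cartouche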
@node starpu_profiling_status_get
@subsection @code{starpu_profiling_status_get} -- Get current profiling status
@deftypefun int starpu_profiling_status_get (void)
Return the current profiling status or a negative value in case there was an error.
@end deftypefun
@node struct starpu_task_profiling_info
@subsection @code{struct starpu_task_profiling_info} -- Task profiling information
@table @asis
@item @emph{Description}:
This structure contains information about the execution of a task. It is
accessible from the @code{.profiling_info} field of the @code{starpu_task}
structure if profiling was enabled.
@item @emph{Fields}:
@table @asis
@item @code{submit_time}:
Date of task submission (relative to the initialization of StarPU).
@item @code{start_time}:
Date of task execution beginning (relative to the initialization of StarPU).
@item @code{end_time}:
Date of task execution termination (relative to the initialization of StarPU).
@item @code{workerid}:
Identifier of the worker which has executed the task.
@end table
@end table
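For instance, the duration of a terminated task can be computed from these
fields as in the following sketch, using the
@code{starpu_timing_timespec_delay_us} helper described below; the fields are
assumed to hold @code{struct timespec} values:
@cartouche
@smallexample
struct starpu_task_profiling_info *info = task->profiling_info;
double length_us = starpu_timing_timespec_delay_us(&info->start_time,
                                                   &info->end_time);
fprintf(stderr, "task ran for %.2lf us on worker %d\n",
        length_us, info->workerid);
@end smallexample
@end cartouche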
@node struct starpu_worker_profiling_info
@subsection @code{struct starpu_worker_profiling_info} -- Worker profiling information
@table @asis
@item @emph{Description}:
This structure contains the profiling information associated to a worker.
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
Starting date for the reported profiling measurements.
@item @code{total_time}:
Duration of the profiling measurement interval.
@item @code{executing_time}:
Time spent by the worker to execute tasks during the profiling measurement interval.
@item @code{sleeping_time}:
Time spent idling by the worker during the profiling measurement interval.
@item @code{executed_tasks}:
Number of tasks executed by the worker during the profiling measurement interval.
@end table
@end table
  3820. @node starpu_worker_get_profiling_info
  3821. @subsection @code{starpu_worker_get_profiling_info} -- Get worker profiling info
  3822. @table @asis
  3823. @item @emph{Description}:
  3824. Get the profiling info associated to the worker identified by @code{workerid},
  3825. and reset the profiling measurements. If the @code{worker_info} argument is
  3826. NULL, only reset the counters associated to worker @code{workerid}.
  3827. @item @emph{Return value}:
  3828. Upon successful completion, this function returns 0. Otherwise, a negative
  3829. value is returned.
  3830. @item @emph{Prototype}:
  3831. @code{int starpu_worker_get_profiling_info(int workerid, struct starpu_worker_profiling_info *worker_info);}
  3832. @end table
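
As a hedged sketch (assuming @code{starpu_worker_get_count} to enumerate the
workers, and that the time fields are @code{struct timespec} values), the
per-worker activity could be displayed as follows:

@cartouche
@smallexample
int worker;
for (worker = 0; worker < starpu_worker_get_count(); worker++)
@{
    struct starpu_worker_profiling_info info;
    if (starpu_worker_get_profiling_info(worker, &info) == 0)
    @{
        double total = starpu_timing_timespec_to_us(&info.total_time);
        double exec = starpu_timing_timespec_to_us(&info.executing_time);
        fprintf(stderr, "worker %d: busy %.0f us out of %.0f us\n",
                worker, exec, total);
    @}
@}
@end smallexample
@end cartouche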
@node struct starpu_bus_profiling_info
@subsection @code{struct starpu_bus_profiling_info} -- Bus profiling information
@table @asis
@item @emph{Description}:
TODO
@item @emph{Fields}:
@table @asis
@item @code{start_time}:
TODO
@item @code{total_time}:
TODO
@item @code{transferred_bytes}:
TODO
@item @code{transfer_count}:
TODO
@end table
@end table
@node starpu_bus_get_count
@subsection @code{starpu_bus_get_count}
@deftypefun int starpu_bus_get_count (void)
TODO
@end deftypefun
@node starpu_bus_get_id
@subsection @code{starpu_bus_get_id}
@deftypefun int starpu_bus_get_id (int @var{src}, int @var{dst})
TODO
@end deftypefun
@node starpu_bus_get_src
@subsection @code{starpu_bus_get_src}
@deftypefun int starpu_bus_get_src (int @var{busid})
TODO
@end deftypefun
@node starpu_bus_get_dst
@subsection @code{starpu_bus_get_dst}
@deftypefun int starpu_bus_get_dst (int @var{busid})
TODO
@end deftypefun
@node starpu_timing_timespec_delay_us
@subsection @code{starpu_timing_timespec_delay_us}
@deftypefun double starpu_timing_timespec_delay_us ({struct timespec} *@var{start}, {struct timespec} *@var{end})
TODO
@end deftypefun
@node starpu_timing_timespec_to_us
@subsection @code{starpu_timing_timespec_to_us}
@deftypefun double starpu_timing_timespec_to_us ({struct timespec} *@var{ts})
TODO
@end deftypefun
@node starpu_bus_profiling_helper_display_summary
@subsection @code{starpu_bus_profiling_helper_display_summary}
@deftypefun void starpu_bus_profiling_helper_display_summary (void)
TODO
@end deftypefun
@node starpu_worker_profiling_helper_display_summary
@subsection @code{starpu_worker_profiling_helper_display_summary}
@deftypefun void starpu_worker_profiling_helper_display_summary (void)
TODO
@end deftypefun
@node CUDA extensions
@section CUDA extensions
@c void starpu_malloc(float **A, size_t dim);
@menu
* starpu_cuda_get_local_stream:: Get current worker's CUDA stream
* starpu_helper_cublas_init:: Initialize CUBLAS on every CUDA device
* starpu_helper_cublas_shutdown:: Deinitialize CUBLAS on every CUDA device
@end menu
@node starpu_cuda_get_local_stream
@subsection @code{starpu_cuda_get_local_stream} -- Get current worker's CUDA stream
@deftypefun {cudaStream_t *} starpu_cuda_get_local_stream (void)
StarPU provides a stream for every CUDA device controlled by StarPU. This
function is only provided for convenience so that programmers can easily use
asynchronous operations within codelets without having to create a stream by
hand. Note that the application is not forced to use the stream provided by
@code{starpu_cuda_get_local_stream} and may also create its own streams.
Synchronizing with @code{cudaThreadSynchronize()} is allowed, but will reduce
the likelihood of having all transfers overlapped.
@end deftypefun
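
Here is a hedged sketch of a CUDA codelet implementation launching a
hypothetical @code{scal_kernel} on the worker's local stream; the vector
accessor macros are the ones used by the 'Scaling a Vector' example in the
appendix:

@cartouche
@smallexample
static __global__ void scal_kernel(float *val, unsigned n, float factor)
@{
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        val[i] *= factor;
@}

extern "C" void scal_cuda_func(void *buffers[], void *cl_arg)
@{
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    float factor = *(float *)cl_arg;

    unsigned threads_per_block = 64;
    unsigned nblocks = (n + threads_per_block - 1) / threads_per_block;

    /* Launch on the worker's stream rather than the default stream,
     * so that transfers for other tasks may overlap with it. */
    scal_kernel<<<nblocks, threads_per_block, 0,
                  *starpu_cuda_get_local_stream()>>>(val, n, factor);
    cudaStreamSynchronize(*starpu_cuda_get_local_stream());
@}
@end smallexample
@end cartouche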
@node starpu_helper_cublas_init
@subsection @code{starpu_helper_cublas_init} -- Initialize CUBLAS on every CUDA device
@deftypefun void starpu_helper_cublas_init (void)
The CUBLAS library must be initialized prior to any CUBLAS call. Calling
@code{starpu_helper_cublas_init} will initialize CUBLAS on every CUDA device
controlled by StarPU. This call blocks until CUBLAS has been properly
initialized on every device.
@end deftypefun
@node starpu_helper_cublas_shutdown
@subsection @code{starpu_helper_cublas_shutdown} -- Deinitialize CUBLAS on every CUDA device
@deftypefun void starpu_helper_cublas_shutdown (void)
This function synchronously deinitializes the CUBLAS library on every CUDA device.
@end deftypefun
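
A hedged sketch of the typical pairing of these two calls around StarPU
initialization and shutdown:

@cartouche
@smallexample
#include <starpu.h>

int main(void)
@{
    starpu_init(NULL);
    starpu_helper_cublas_init();

    /* ... submit tasks whose CUDA implementations call CUBLAS ... */

    starpu_helper_cublas_shutdown();
    starpu_shutdown();
    return 0;
@}
@end smallexample
@end cartouche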
@node OpenCL extensions
@section OpenCL extensions
@menu
* Compiling OpenCL kernels:: Compiling OpenCL kernels
* Loading OpenCL kernels:: Loading OpenCL kernels
* OpenCL statistics:: Collecting statistics from OpenCL
@end menu
@node Compiling OpenCL kernels
@subsection Compiling OpenCL kernels
Source codes for OpenCL kernels can be stored in a file or in a
string. StarPU provides functions to build the program executable for
each available OpenCL device as a @code{cl_program} object. This
program executable can then be loaded within a specific queue as
explained in the next section. These are only helpers; applications
can also fill a @code{starpu_opencl_program} array by hand for more advanced
uses (e.g. different programs on the different OpenCL devices, or for
relocation purposes).
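
As a hedged sketch (the file name @code{vector_scal_kernel.cl} is
hypothetical, and no particular build options are assumed), a program is
typically compiled once at startup and released at the end of the
application:

@cartouche
@smallexample
struct starpu_opencl_program programs;

/* Build the source file for every available OpenCL device. */
starpu_opencl_load_opencl_from_file("vector_scal_kernel.cl",
                                    &programs, NULL);

/* ... submit tasks using kernels from 'programs' ... */

starpu_opencl_unload_opencl(&programs);
@end smallexample
@end cartouche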
@menu
* starpu_opencl_load_opencl_from_file:: Compiling OpenCL source code
* starpu_opencl_load_opencl_from_string:: Compiling OpenCL source code
* starpu_opencl_unload_opencl:: Releasing OpenCL code
@end menu
@node starpu_opencl_load_opencl_from_file
@subsubsection @code{starpu_opencl_load_opencl_from_file} -- Compiling OpenCL source code
@deftypefun int starpu_opencl_load_opencl_from_file (char *@var{source_file_name}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
TODO
@end deftypefun
@node starpu_opencl_load_opencl_from_string
@subsubsection @code{starpu_opencl_load_opencl_from_string} -- Compiling OpenCL source code
@deftypefun int starpu_opencl_load_opencl_from_string (char *@var{opencl_program_source}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
TODO
@end deftypefun
@node starpu_opencl_unload_opencl
@subsubsection @code{starpu_opencl_unload_opencl} -- Releasing OpenCL code
@deftypefun int starpu_opencl_unload_opencl ({struct starpu_opencl_program} *@var{opencl_programs})
TODO
@end deftypefun
@node Loading OpenCL kernels
@subsection Loading OpenCL kernels
@menu
* starpu_opencl_load_kernel:: Loading a kernel
* starpu_opencl_release_kernel:: Releasing a kernel
@end menu
@node starpu_opencl_load_kernel
@subsubsection @code{starpu_opencl_load_kernel} -- Loading a kernel
@deftypefun int starpu_opencl_load_kernel (cl_kernel *@var{kernel}, cl_command_queue *@var{queue}, {struct starpu_opencl_program} *@var{opencl_programs}, char *@var{kernel_name}, int @var{devid})
TODO
@end deftypefun
@node starpu_opencl_release_kernel
@subsubsection @code{starpu_opencl_release_kernel} -- Releasing a kernel
@deftypefun int starpu_opencl_release_kernel (cl_kernel @var{kernel})
TODO
@end deftypefun
@node OpenCL statistics
@subsection OpenCL statistics
@menu
* starpu_opencl_collect_stats:: Collect statistics on a kernel execution
@end menu
@node starpu_opencl_collect_stats
@subsubsection @code{starpu_opencl_collect_stats} -- Collect statistics on a kernel execution
@deftypefun int starpu_opencl_collect_stats (cl_event @var{event})
After termination of the kernel, the OpenCL codelet should call this function
with the event returned by @code{clEnqueueNDRangeKernel}, to let StarPU
collect statistics about the kernel execution (used cycles, consumed power).
@end deftypefun
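
Putting the pieces together, here is a hedged sketch of an OpenCL codelet
implementation; the kernel name @code{vector_scal} is hypothetical, the
@code{programs} variable is the one loaded in the compilation sketch above,
and the vector accessor macros are the ones used by the 'Scaling a Vector'
example in the appendix:

@cartouche
@smallexample
void scal_opencl_func(void *buffers[], void *cl_arg)
@{
    float factor = *(float *)cl_arg;
    cl_mem val = (cl_mem)STARPU_VECTOR_GET_PTR(buffers[0]);
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);

    cl_kernel kernel;
    cl_command_queue queue;
    cl_event event;
    int devid = starpu_worker_get_devid(starpu_worker_get_id());

    starpu_opencl_load_kernel(&kernel, &queue, &programs,
                              "vector_scal", devid);

    clSetKernelArg(kernel, 0, sizeof(val), &val);
    clSetKernelArg(kernel, 1, sizeof(n), &n);
    clSetKernelArg(kernel, 2, sizeof(factor), &factor);

    size_t global = n;
    clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, NULL,
                           0, NULL, &event);
    clFinish(queue);

    /* Let StarPU record statistics about this execution. */
    starpu_opencl_collect_stats(event);
    clReleaseEvent(event);
    starpu_opencl_release_kernel(kernel);
@}
@end smallexample
@end cartouche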
@node Cell extensions
@section Cell extensions
Nothing yet.
@node Miscellaneous helpers
@section Miscellaneous helpers
@menu
* starpu_data_cpy:: Copy a data handle into another data handle
* starpu_execute_on_each_worker:: Execute a function on a subset of workers
@end menu
@node starpu_data_cpy
@subsection @code{starpu_data_cpy} -- Copy a data handle into another data handle
@deftypefun int starpu_data_cpy (starpu_data_handle @var{dst_handle}, starpu_data_handle @var{src_handle}, int @var{asynchronous}, void (*@var{callback_func})(void*), void *@var{callback_arg})
Copy the content of the @var{src_handle} into the @var{dst_handle} handle.
The @var{asynchronous} parameter indicates whether the function should
block or not. In the case of an asynchronous call, it is possible to
synchronize with the termination of this operation either by the means of
implicit dependencies (if enabled) or by calling
@code{starpu_task_wait_for_all()}. If @var{callback_func} is not @code{NULL},
this callback function is executed after the handle has been copied, and it is
given the @var{callback_arg} pointer as argument.
@end deftypefun
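
A hedged sketch of an asynchronous copy with a hypothetical completion
callback (both handles are assumed to be registered by the application):

@cartouche
@smallexample
static void copy_done(void *arg)
@{
    fprintf(stderr, "copy '%s' completed\n", (char *)arg);
@}

/* Asynchronous copy: returns immediately, the callback fires
 * once dst_handle holds a copy of src_handle. */
starpu_data_cpy(dst_handle, src_handle, 1, copy_done, "src -> dst");

/* Or simply wait for every pending operation. */
starpu_task_wait_for_all();
@end smallexample
@end cartouche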
@node starpu_execute_on_each_worker
@subsection @code{starpu_execute_on_each_worker} -- Execute a function on a subset of workers
@deftypefun void starpu_execute_on_each_worker (void (*@var{func})(void *), void *@var{arg}, uint32_t @var{where})
When calling this method, the offloaded function specified by the first argument is
executed by every StarPU worker that may execute the function.
The second argument is passed to the offloaded function.
The last argument specifies on which types of processing units the function
should be executed. Similarly to the @var{where} field of the
@code{starpu_codelet} structure, it is possible to specify that the function
should be executed on every CUDA device and every CPU by passing
@code{STARPU_CPU|STARPU_CUDA}.
This function blocks until the function has been executed on every appropriate
processing unit, so it cannot be called from a callback function, for
instance.
@end deftypefun
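
A hedged sketch using a hypothetical per-worker initialization function:

@cartouche
@smallexample
static void init_worker_state(void *arg)
@{
    unsigned seed = *(unsigned *)arg;
    fprintf(stderr, "worker %d initialized with seed %u\n",
            starpu_worker_get_id(), seed);
@}

unsigned seed = 42;
/* Run once on every CPU worker and every CUDA worker; blocks
 * until all of them are done. */
starpu_execute_on_each_worker(init_worker_state, &seed,
                              STARPU_CPU|STARPU_CUDA);
@end smallexample
@end cartouche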
@c ---------------------------------------------------------------------
@c Advanced Topics
@c ---------------------------------------------------------------------
@node Advanced Topics
@chapter Advanced Topics
@menu
* Defining a new data interface::
* Defining a new scheduling policy::
@end menu
@node Defining a new data interface
@section Defining a new data interface
@menu
* struct starpu_data_interface_ops_t:: Per-interface methods
* struct starpu_data_copy_methods:: Per-interface data transfer methods
* An example of data interface:: An example of data interface
@end menu
@c void *starpu_data_get_interface_on_node(starpu_data_handle handle, unsigned memory_node); TODO
@node struct starpu_data_interface_ops_t
@subsection @code{struct starpu_data_interface_ops_t} -- Per-interface methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table
@node struct starpu_data_copy_methods
@subsection @code{struct starpu_data_copy_methods} -- Per-interface data transfer methods
@table @asis
@item @emph{Description}:
TODO describe all the different fields
@end table
@node An example of data interface
@subsection An example of data interface
@table @asis
TODO
See @code{src/datawizard/interfaces/vector_interface.c} for now.
@end table
@node Defining a new scheduling policy
@section Defining a new scheduling policy
TODO
A full example showing how to define a new scheduling policy is available in
the StarPU sources in the directory @code{examples/scheduler/}.
@menu
* struct starpu_sched_policy_s::
* starpu_worker_set_sched_condition::
* starpu_sched_set_min_priority:: Set the minimum priority level
* starpu_sched_set_max_priority:: Set the maximum priority level
* starpu_push_local_task:: Assign a task to a worker
* Source code::
@end menu
@node struct starpu_sched_policy_s
@subsection @code{struct starpu_sched_policy_s} -- Scheduler methods
@table @asis
@item @emph{Description}:
This structure contains all the methods that implement a scheduling policy. An
application may specify which scheduling strategy to use in the @code{sched_policy}
field of the @code{starpu_conf} structure passed to the @code{starpu_init}
function.
@item @emph{Fields}:
@table @asis
@item @code{init_sched}:
Initialize the scheduling policy.
@item @code{deinit_sched}:
Cleanup the scheduling policy.
@item @code{push_task}:
Insert a task into the scheduler.
@item @code{push_prio_task}:
Insert a priority task into the scheduler.
@item @code{push_prio_notify}:
Notify the scheduler that a task was pushed on the worker. This method is
called when a task that was explicitly assigned to a worker is scheduled. It
allows the scheduler to keep its state coherent even when StarPU bypasses the
scheduling strategy.
@item @code{pop_task}:
Get a task from the scheduler. The mutex associated to the worker is already
taken when this method is called. If this method is defined as @code{NULL}, the
worker will only execute tasks from its local queue. In this case, the
@code{push_task} method should use the @code{starpu_push_local_task} method to
assign tasks to the different workers.
@item @code{pop_every_task}:
Remove all available tasks from the scheduler (tasks are chained by means of
the @code{prev} and @code{next} fields of the @code{starpu_task} structure).
The mutex associated to the worker is already taken when this method is called.
@item @code{post_exec_hook} (optional):
This method is called every time a task has been executed.
@item @code{policy_name}:
Name of the policy (optional).
@item @code{policy_description}:
Description of the policy (optional).
@end table
@end table
@node starpu_worker_set_sched_condition
@subsection @code{starpu_worker_set_sched_condition} -- Specify the condition variable associated to a worker
@deftypefun void starpu_worker_set_sched_condition (int @var{workerid}, pthread_cond_t *@var{sched_cond}, pthread_mutex_t *@var{sched_mutex})
When there is no available task for a worker, StarPU blocks this worker on a
condition variable. This function specifies which condition variable (and the
associated mutex) should be used to block (and to wake up) a worker. Note that
multiple workers may use the same condition variable. For instance, in the case
of a scheduling strategy with a single task queue, the same condition variable
would be used to block and wake up all workers.
The initialization method of a scheduling strategy (@code{init_sched}) must
call this function once per worker.
@end deftypefun
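
A hedged sketch of an initialization method that blocks all workers on a
single shared condition variable, in the spirit of the dummy scheduler from
@code{examples/scheduler/} (the method signature is an assumption):

@cartouche
@smallexample
static pthread_cond_t sched_cond;
static pthread_mutex_t sched_mutex;

static void init_dummy_sched(struct starpu_machine_topology_s *topology,
                             struct starpu_sched_policy_s *policy)
@{
    pthread_cond_init(&sched_cond, NULL);
    pthread_mutex_init(&sched_mutex, NULL);

    /* A single task queue: every worker blocks on the same
     * condition variable. */
    unsigned workerid;
    for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
        starpu_worker_set_sched_condition(workerid,
                                          &sched_cond, &sched_mutex);
@}
@end smallexample
@end cartouche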
@node starpu_sched_set_min_priority
@subsection @code{starpu_sched_set_min_priority}
@deftypefun void starpu_sched_set_min_priority (int @var{min_prio})
Defines the minimum priority level supported by the scheduling policy. The
default minimum priority level is the same as the default priority level, which
is 0 by convention. The application may access that value by calling the
@code{starpu_sched_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly from the application.
@end deftypefun
@node starpu_sched_set_max_priority
@subsection @code{starpu_sched_set_max_priority}
@deftypefun void starpu_sched_set_max_priority (int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly from the application.
@end deftypefun
@node starpu_push_local_task
@subsection @code{starpu_push_local_task}
@deftypefun int starpu_push_local_task (int @var{workerid}, {struct starpu_task} *@var{task}, int @var{back})
The scheduling policy may put tasks directly into a worker's local queue, so
that it is not always necessary for the policy to maintain its own queues when
the local queues are sufficient. If @var{back} is not null, the task is put at
the back of the queue, where the worker will pop tasks first. Setting
@var{back} to 0 therefore ensures a FIFO ordering.
@end deftypefun
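
A hedged sketch of a @code{push_task} method that bypasses any central queue
and assigns every incoming task to worker 0 in FIFO order (the method
signature is an assumption):

@cartouche
@smallexample
static int push_task_dummy(struct starpu_task *task)
@{
    /* back = 0 keeps the FIFO ordering described above. */
    return starpu_push_local_task(0, task, 0);
@}
@end smallexample
@end cartouche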
@node Source code
@subsection Source code
@cartouche
@smallexample
static struct starpu_sched_policy_s dummy_sched_policy = @{
  .init_sched = init_dummy_sched,
  .deinit_sched = deinit_dummy_sched,
  .push_task = push_task_dummy,
  .push_prio_task = NULL,
  .pop_task = pop_task_dummy,
  .post_exec_hook = NULL,
  .pop_every_task = NULL,
  .policy_name = "dummy",
  .policy_description = "dummy scheduling strategy"
@};
@end smallexample
@end cartouche
@c ---------------------------------------------------------------------
@c C Extensions
@c ---------------------------------------------------------------------
@include c-extensions.texi
@c ---------------------------------------------------------------------
@c Appendices
@c ---------------------------------------------------------------------
@c ---------------------------------------------------------------------
@c Full source code for the 'Scaling a Vector' example
@c ---------------------------------------------------------------------
@node Full source code for the 'Scaling a Vector' example
@appendix Full source code for the 'Scaling a Vector' example
@menu
* Main application::
* CPU Kernel::
* CUDA Kernel::
* OpenCL Kernel::
@end menu
@node Main application
@section Main application
@include vector_scal_c.texi
@node CPU Kernel
@section CPU Kernel
@include vector_scal_cpu.texi
@node CUDA Kernel
@section CUDA Kernel
@include vector_scal_cuda.texi
@node OpenCL Kernel
@section OpenCL Kernel
@menu
* Invoking the kernel::
* Source of the kernel::
@end menu
@node Invoking the kernel
@subsection Invoking the kernel
@include vector_scal_opencl.texi
@node Source of the kernel
@subsection Source of the kernel
@include vector_scal_opencl_codelet.texi
@node GNU Free Documentation License
@appendix GNU Free Documentation License
@include fdl-1.3.texi
@c
@c Indices.
@c
@node Function Index
@unnumbered Function Index
@printindex fn
@bye