
@c -*-texinfo-*-
@c This file is part of the StarPU Handbook.
@c Copyright (C) 2009--2011 Universit@'e de Bordeaux 1
@c Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
@c Copyright (C) 2011, 2012 Institut National de Recherche en Informatique et Automatique
@c See the file starpu.texi for copying conditions.
@menu
* Versioning::
* Initialization and Termination::
* Standard memory library::
* Workers' Properties::
* Data Management::
* Data Interfaces::
* Data Partition::
* Multiformat Data Interface::
* Codelets and Tasks::
* Insert Task::
* Explicit Dependencies::
* Implicit Data Dependencies::
* Performance Model API::
* Profiling API::
* Theoretical lower bound on execution time API::
* CUDA extensions::
* OpenCL extensions::
* Miscellaneous helpers::
* FXT Support::
* MPI::
* Task Bundles::
* Task Lists::
* Using Parallel Tasks::
* Scheduling Contexts::
* Scheduling Policy::
* Running drivers::
* Expert mode::
@end menu
@node Versioning
@section Versioning
@defmac STARPU_MAJOR_VERSION
Define the major version of StarPU
@end defmac
@defmac STARPU_MINOR_VERSION
Define the minor version of StarPU
@end defmac
@node Initialization and Termination
@section Initialization and Termination
@deftp {Data Type} {struct starpu_driver}
@table @asis
@item @code{enum starpu_archtype type}
The type of the driver. Only STARPU_CPU_DRIVER, STARPU_CUDA_DRIVER and
STARPU_OPENCL_DRIVER are currently supported.
@item @code{union id} Anonymous union
@table @asis
@item @code{unsigned cpu_id}
Should only be used if type is STARPU_CPU_WORKER.
@item @code{unsigned cuda_id}
Should only be used if type is STARPU_CUDA_WORKER.
@item @code{cl_device_id opencl_id}
Should only be used if type is STARPU_OPENCL_WORKER.
@end table
@end table
@end deftp
@deftp {Data Type} {struct starpu_conf}
This structure is passed to the @code{starpu_init} function in order
to configure StarPU. It has to be initialized with @code{starpu_conf_init}.
When the default value is used, StarPU automatically selects the number of
processing units and takes the default scheduling policy. The environment
variables overwrite the equivalent parameters.
@table @asis
@item @code{const char *sched_policy_name} (default = NULL)
This is the name of the scheduling policy. This can also be specified
with the @code{STARPU_SCHED} environment variable.
@item @code{struct starpu_sched_policy *sched_policy} (default = NULL)
This is the definition of the scheduling policy. This field is ignored
if @code{sched_policy_name} is set.
@item @code{int ncpus} (default = -1)
This is the number of CPU cores that StarPU can use. This can also be
specified with the @code{STARPU_NCPU} environment variable.
@item @code{int ncuda} (default = -1)
This is the number of CUDA devices that StarPU can use. This can also
be specified with the @code{STARPU_NCUDA} environment variable.
@item @code{int nopencl} (default = -1)
This is the number of OpenCL devices that StarPU can use. This can
also be specified with the @code{STARPU_NOPENCL} environment variable.
@item @code{unsigned use_explicit_workers_bindid} (default = 0)
If this flag is set, the @code{workers_bindid} array indicates where the
different workers are bound, otherwise StarPU automatically selects where to
bind the different workers. This can also be specified with the
@code{STARPU_WORKERS_CPUID} environment variable.
@item @code{unsigned workers_bindid[STARPU_NMAXWORKERS]}
If the @code{use_explicit_workers_bindid} flag is set, this array
indicates where to bind the different workers. The i-th entry of the
@code{workers_bindid} indicates the logical identifier of the
processor which should execute the i-th worker. Note that the logical
ordering of the CPUs is either determined by the OS, or provided by
the @code{hwloc} library in case it is available.
@item @code{unsigned use_explicit_workers_cuda_gpuid} (default = 0)
If this flag is set, the CUDA workers will be attached to the CUDA devices
specified in the @code{workers_cuda_gpuid} array. Otherwise, StarPU assigns
CUDA devices in a round-robin fashion. This can also be specified with the
@code{STARPU_WORKERS_CUDAID} environment variable.
@item @code{unsigned workers_cuda_gpuid[STARPU_NMAXWORKERS]}
If the @code{use_explicit_workers_cuda_gpuid} flag is set, this array
contains the logical identifiers of the CUDA devices (as used by
@code{cudaGetDevice}).
@item @code{unsigned use_explicit_workers_opencl_gpuid} (default = 0)
If this flag is set, the OpenCL workers will be attached to the OpenCL devices
specified in the @code{workers_opencl_gpuid} array. Otherwise, StarPU assigns
OpenCL devices in a round-robin fashion. This can also be specified with
the @code{STARPU_WORKERS_OPENCLID} environment variable.
@item @code{unsigned workers_opencl_gpuid[STARPU_NMAXWORKERS]}
If the @code{use_explicit_workers_opencl_gpuid} flag is set, this array
contains the logical identifiers of the OpenCL devices to be used.
@item @code{int calibrate} (default = 0)
If this flag is set, StarPU will calibrate the performance models when
executing tasks. If this value is equal to @code{-1}, the default value is
used. If the value is equal to @code{1}, calibration is forced to continue.
If the value is equal to @code{2}, the existing performance
models will be overwritten. This can also be specified with the
@code{STARPU_CALIBRATE} environment variable.
@item @code{int bus_calibrate} (default = 0)
If this flag is set, StarPU will recalibrate the bus. If this value is equal
to @code{-1}, the default value is used. This can also be specified with the
@code{STARPU_BUS_CALIBRATE} environment variable.
@item @code{int single_combined_worker} (default = 0)
By default, StarPU executes parallel tasks concurrently.
Some parallel libraries (e.g. most OpenMP implementations) however do
not support concurrent calls to parallel code. In such a case, setting this flag
makes StarPU only start one parallel task at a time (but other
CPU and GPU tasks are not affected and can be run concurrently). The parallel
task scheduler will however still try varying combined worker
sizes to look for the most efficient ones.
This can also be specified with the @code{STARPU_SINGLE_COMBINED_WORKER} environment variable.
@item @code{int disable_asynchronous_copy} (default = 0)
This flag should be set to 1 to disable asynchronous copies between
CPUs and all accelerators. This can also be specified with the
@code{STARPU_DISABLE_ASYNCHRONOUS_COPY} environment variable.
The AMD implementation of OpenCL is known to
fail when copying data asynchronously. When using this implementation,
it is therefore necessary to disable asynchronous data transfers.
This can also be specified at compilation time by passing the
@code{--disable-asynchronous-copy} option to the configure script.
@item @code{int disable_asynchronous_cuda_copy} (default = 0)
This flag should be set to 1 to disable asynchronous copies between
CPUs and CUDA accelerators. This can also be specified with the
@code{STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY} environment variable.
This can also be specified at compilation time by passing the
@code{--disable-asynchronous-cuda-copy} option to the configure script.
@item @code{int disable_asynchronous_opencl_copy} (default = 0)
This flag should be set to 1 to disable asynchronous copies between
CPUs and OpenCL accelerators. This can also be specified with the
@code{STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY} environment variable.
The AMD implementation of OpenCL is known to
fail when copying data asynchronously. When using this implementation,
it is therefore necessary to disable asynchronous data transfers.
This can also be specified at compilation time by passing the
@code{--disable-asynchronous-opencl-copy} option to the configure script.
@item @code{int *cuda_opengl_interoperability} (default = NULL)
This can be set to an array of CUDA device identifiers for which
@code{cudaGLSetGLDevice} should be called instead of @code{cudaSetDevice}. Its
size is specified by the @code{n_cuda_opengl_interoperability} field below.
@item @code{unsigned n_cuda_opengl_interoperability} (default = 0)
This has to be set to the size of the array pointed to by the
@code{cuda_opengl_interoperability} field.
@item @code{struct starpu_driver *not_launched_drivers}
The drivers that should not be launched by StarPU.
@item @code{unsigned n_not_launched_drivers}
The number of drivers that should not be launched by StarPU.
@item @code{trace_buffer_size}
Specifies the buffer size used for FxT tracing. Starting from FxT version
0.2.12, the buffer will automatically be flushed when it fills up, but it may
still be interesting to specify a bigger value to avoid any flushing (which
would disturb the trace).
@end table
@end deftp
@deftypefun int starpu_init ({struct starpu_conf *}@var{conf})
This is the StarPU initialization method, which must be called prior to any other
StarPU call. It is possible to specify StarPU's configuration (e.g. scheduling
policy, number of cores, ...) by passing a non-null argument. The default
configuration is used if the passed argument is @code{NULL}.
Upon successful completion, this function returns 0. Otherwise, @code{-ENODEV}
indicates that no worker was available (so that StarPU was not initialized).
@end deftypefun
@deftypefun int starpu_conf_init ({struct starpu_conf *}@var{conf})
This function initializes the @var{conf} structure passed as argument
with the default values. In case some configuration parameters are already
specified through environment variables, @code{starpu_conf_init} initializes
the fields of the structure according to the environment variables. For
instance, if @code{STARPU_CALIBRATE} is set, its value is put in the
@code{.calibrate} field of the structure passed as argument.
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the argument was NULL.
@end deftypefun
@deftypefun void starpu_shutdown (void)
This is the StarPU termination method. It must be called at the end of the
application: statistics and other post-mortem debugging information are not
guaranteed to be available until this method has been called.
@end deftypefun
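The following sketch shows a typical initialization and termination sequence; the choice of the @code{eager} policy and of the @code{calibrate} value is purely illustrative.
@cartouche
@smallexample
struct starpu_conf conf;
int ret;

starpu_conf_init(&conf);
conf.sched_policy_name = "eager"; /* illustrative choice of policy */
conf.calibrate = 1;

ret = starpu_init(&conf);
if (ret == -ENODEV)
    return 77; /* no worker is available */

/* ... submit tasks ... */

starpu_shutdown();
@end smallexample
@end cartouche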
@deftypefun int starpu_asynchronous_copy_disabled (void)
Return 1 if asynchronous data transfers between CPU and accelerators
are disabled.
@end deftypefun
@deftypefun int starpu_asynchronous_cuda_copy_disabled (void)
Return 1 if asynchronous data transfers between CPU and CUDA accelerators
are disabled.
@end deftypefun
@deftypefun int starpu_asynchronous_opencl_copy_disabled (void)
Return 1 if asynchronous data transfers between CPU and OpenCL accelerators
are disabled.
@end deftypefun
@node Standard memory library
@section Standard memory library
@defmac STARPU_MALLOC_PINNED
Value passed to the function @code{starpu_malloc_flags} to
indicate the memory allocation should be pinned.
@end defmac
@defmac STARPU_MALLOC_COUNT
Value passed to the function @code{starpu_malloc_flags} to
indicate the memory allocation should stay within the limit defined by
the environment variables @code{STARPU_LIMIT_CUDA_devid_MEM},
@code{STARPU_LIMIT_CUDA_MEM}, @code{STARPU_LIMIT_OPENCL_devid_MEM},
@code{STARPU_LIMIT_OPENCL_MEM} and @code{STARPU_LIMIT_CPU_MEM}
(@pxref{Limit memory}). If no memory is available, it tries to reclaim
memory from StarPU. Memory allocated this way needs to be freed by
calling the @code{starpu_free_flags} function with the same flag.
@end defmac
@deftypefun int starpu_malloc_flags (void **@var{A}, size_t @var{dim}, int @var{flags})
Performs a memory allocation based on the constraints defined by the
given @var{flags}.
@end deftypefun
@deftypefun void starpu_malloc_set_align (size_t @var{align})
This function sets an alignment constraint for @code{starpu_malloc}
allocations. @var{align} must be a power of two. This is for instance called
automatically by the OpenCL driver to specify its own alignment constraints.
@end deftypefun
@deftypefun int starpu_malloc (void **@var{A}, size_t @var{dim})
This function allocates data of the given size in main memory. It will also try to pin it in
CUDA or OpenCL, so that data transfers from this buffer can be asynchronous, and
thus permit data transfer and computation overlapping. The allocated buffer must
be freed with the @code{starpu_free} function.
@end deftypefun
@deftypefun int starpu_free (void *@var{A})
This function frees memory which has previously been allocated with
@code{starpu_malloc}.
@end deftypefun
@deftypefun int starpu_free_flags (void *@var{A}, size_t @var{dim}, int @var{flags})
This function frees memory by specifying its size. The given
@var{flags} should be consistent with the ones given to
@code{starpu_malloc_flags} when allocating the memory.
@end deftypefun
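As a sketch, a pinned allocation accounted against the configured memory limits could be performed as follows; the buffer size is purely illustrative.
@cartouche
@smallexample
float *buffer;
size_t size = 1024 * sizeof(float);

/* Pinned allocation, counted against the memory limits. */
starpu_malloc_flags((void **)&buffer, size,
                    STARPU_MALLOC_PINNED | STARPU_MALLOC_COUNT);

/* ... register and use the buffer ... */

/* Free with the same size and flags as the allocation. */
starpu_free_flags(buffer, size, STARPU_MALLOC_PINNED | STARPU_MALLOC_COUNT);
@end smallexample
@end cartouche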
@deftypefun ssize_t starpu_memory_get_available (unsigned @var{node})
If a memory limit is defined on the given node (@pxref{Limit memory}),
return the amount of available memory on the node. Otherwise return
@code{-1}.
@end deftypefun
@node Workers' Properties
@section Workers' Properties
@deftp {Data Type} {enum starpu_archtype}
The different values are:
@table @asis
@item @code{STARPU_CPU_WORKER}
@item @code{STARPU_CUDA_WORKER}
@item @code{STARPU_OPENCL_WORKER}
@end table
@end deftp
@deftypefun unsigned starpu_worker_get_count (void)
This function returns the number of workers (i.e. processing units executing
StarPU tasks). The returned value should be at most @code{STARPU_NMAXWORKERS}.
@end deftypefun
@deftypefun int starpu_worker_get_count_by_type ({enum starpu_archtype} @var{type})
Returns the number of workers of the given @var{type}. A positive
(or zero) value is returned in case of success; otherwise @code{-EINVAL}
indicates that the type is not valid.
@end deftypefun
@deftypefun unsigned starpu_cpu_worker_get_count (void)
This function returns the number of CPUs controlled by StarPU. The returned
value should be at most @code{STARPU_MAXCPUS}.
@end deftypefun
@deftypefun unsigned starpu_cuda_worker_get_count (void)
This function returns the number of CUDA devices controlled by StarPU. The returned
value should be at most @code{STARPU_MAXCUDADEVS}.
@end deftypefun
@deftypefun unsigned starpu_opencl_worker_get_count (void)
This function returns the number of OpenCL devices controlled by StarPU. The returned
value should be at most @code{STARPU_MAXOPENCLDEVS}.
@end deftypefun
@deftypefun int starpu_worker_get_id (void)
This function returns the identifier of the current worker, i.e. the one associated with the calling
thread. The returned value is either -1 if the current context is not a StarPU
worker (i.e. when called from the application outside a task or a callback), or
an integer between 0 and @code{starpu_worker_get_count() - 1}.
@end deftypefun
@deftypefun int starpu_worker_get_ids_by_type ({enum starpu_archtype} @var{type}, int *@var{workerids}, int @var{maxsize})
This function gets the list of identifiers of workers with the given
type. It fills the @var{workerids} array with the identifiers of the workers that have the type
indicated in the first argument. The @var{maxsize} argument indicates the size of the
@var{workerids} array. The returned value gives the number of identifiers that were put
in the array. @code{-ERANGE} is returned if @var{maxsize} is lower than the number of
workers with the appropriate type: in that case, the array is filled with the
first @var{maxsize} elements. To avoid such overflows, the value of @var{maxsize} can be
chosen by means of the @code{starpu_worker_get_count_by_type} function, or
by passing a value greater than or equal to @code{STARPU_NMAXWORKERS}.
@end deftypefun
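For instance, a sketch that enumerates the CUDA workers and prints the device each one drives might look as follows.
@cartouche
@smallexample
int workerids[STARPU_NMAXWORKERS];
int nworkers, i;

/* Collect the identifiers of all CUDA workers. */
nworkers = starpu_worker_get_ids_by_type(STARPU_CUDA_WORKER,
                                         workerids, STARPU_NMAXWORKERS);
for (i = 0; i < nworkers; i++)
    printf("CUDA worker %d drives device %d\n",
           workerids[i], starpu_worker_get_devid(workerids[i]));
@end smallexample
@end cartouche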
@deftypefun int starpu_worker_get_by_type ({enum starpu_archtype} @var{type}, int @var{num})
This returns the identifier of the @var{num}-th worker that has the specified type
@var{type}. If there is no such worker, -1 is returned.
@end deftypefun
@deftypefun int starpu_worker_get_by_devid ({enum starpu_archtype} @var{type}, int @var{devid})
This returns the identifier of the worker that has the specified type
@var{type} and devid @var{devid} (which may not be the n-th, if some devices are
skipped for instance). If there is no such worker, -1 is returned.
@end deftypefun
@deftypefun int starpu_worker_get_devid (int @var{id})
This function returns the device id of the given worker. The worker
should be identified with the value returned by the @code{starpu_worker_get_id} function. In the case of a
CUDA worker, this device identifier is the logical device identifier exposed by
CUDA (used by the @code{cudaGetDevice} function for instance). The device
identifier of a CPU worker is the logical identifier of the core on which the
worker was bound; this identifier is either provided by the OS or by the
@code{hwloc} library in case it is available.
@end deftypefun
@deftypefun {enum starpu_archtype} starpu_worker_get_type (int @var{id})
This function returns the type of processing unit associated with a
worker. The worker identifier is a value returned by the
@code{starpu_worker_get_id} function. The returned value
indicates the architecture of the worker: @code{STARPU_CPU_WORKER} for a CPU
core, @code{STARPU_CUDA_WORKER} for a CUDA device, and
@code{STARPU_OPENCL_WORKER} for an OpenCL device. The value returned for an invalid
identifier is unspecified.
@end deftypefun
@deftypefun void starpu_worker_get_name (int @var{id}, char *@var{dst}, size_t @var{maxlen})
This function gets the name of a given worker.
StarPU associates a unique human readable string to each processing unit. This
function copies at most the first @var{maxlen} bytes of the unique string
associated with a worker identified by its identifier @var{id} into the
@var{dst} buffer. The caller is responsible for ensuring that @var{dst}
is a valid pointer to a buffer of @var{maxlen} bytes at least. Calling this
function on an invalid identifier results in unspecified behaviour.
@end deftypefun
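The sketch below prints the name and memory node of every worker; it only relies on the query functions documented in this section.
@cartouche
@smallexample
char name[64];
unsigned nworkers = starpu_worker_get_count();
unsigned worker;

for (worker = 0; worker < nworkers; worker++)
@{
    starpu_worker_get_name(worker, name, sizeof(name));
    printf("worker %u: %s (memory node %u)\n",
           worker, name, starpu_worker_get_memory_node(worker));
@}
@end smallexample
@end cartouche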
@deftypefun unsigned starpu_worker_get_memory_node (unsigned @var{workerid})
This function returns the identifier of the memory node associated with the
worker identified by @var{workerid}.
@end deftypefun
@deftp {Data Type} {enum starpu_node_kind}
The different values are:
@table @asis
@item @code{STARPU_UNUSED}
@item @code{STARPU_CPU_RAM}
@item @code{STARPU_CUDA_RAM}
@item @code{STARPU_OPENCL_RAM}
@end table
@end deftp
@deftypefun {enum starpu_node_kind} starpu_node_get_kind (unsigned @var{node})
Returns the type of the given node as defined by @code{enum
starpu_node_kind}. For example, when defining a new data interface,
this function should be used in the allocation function to determine
on which device the memory needs to be allocated.
@end deftypefun
@node Data Management
@section Data Management
@menu
* Introduction to Data Management::
* Basic Data Management API::
* Access registered data from the application::
@end menu
This section describes the data management facilities provided by StarPU.
We show how to use existing data interfaces in @ref{Data Interfaces}, but developers can
design their own data interfaces if required.
@node Introduction to Data Management
@subsection Introduction
Data management is done at a high level in StarPU: rather than accessing a mere
list of contiguous buffers, the tasks may manipulate data that are described by
a high-level construct which we call data interface.
An example of a data interface is the "vector" interface which describes a
contiguous data array on a specific memory node. This interface is a simple
structure containing the number of elements in the array, the size of the
elements, and the address of the array in the appropriate address space (this
address may be invalid if there is no valid copy of the array in the memory
node). More information on the data interfaces provided by StarPU is
given in @ref{Data Interfaces}.
When a piece of data managed by StarPU is used by a task, the task
implementation is given a pointer to an interface describing a valid copy of
the data that is accessible from the current processing unit.
Every worker is associated with a memory node which is a logical abstraction of
the address space from which the processing unit gets its data. For instance,
the memory node associated with the different CPU workers represents main memory
(RAM), while the memory node associated with a GPU is the DRAM embedded on the device.
Every memory node is identified by a logical index which is accessible from the
@code{starpu_worker_get_memory_node} function. When registering a piece of data
to StarPU, the specified memory node indicates where the piece of data
initially resides (we also call this memory node the home node of a piece of
data).
@node Basic Data Management API
@subsection Basic Data Management API
@deftp {Data Type} {enum starpu_access_mode}
This datatype describes a data access mode. The different available modes are:
@table @asis
@item @code{STARPU_R}: read-only mode.
@item @code{STARPU_W}: write-only mode.
@item @code{STARPU_RW}: read-write mode.
This is equivalent to @code{STARPU_R|STARPU_W}.
@item @code{STARPU_SCRATCH}: scratch memory.
A temporary buffer is allocated for the task, but StarPU does not
enforce data consistency---i.e. each device has its own buffer,
independently of the others (even for CPUs), and no data transfer is
ever performed. This is useful for temporary variables to avoid
allocating/freeing buffers inside each task.
Currently, no behavior is defined concerning the relation with the
@code{STARPU_R} and @code{STARPU_W} modes and the value provided at
registration---i.e., the value of the scratch buffer is undefined at
entry of the codelet function. It is being considered for future
extensions at least to define the initial value. For now, data to be
used in @code{SCRATCH} mode should be registered with node @code{-1} and
a @code{NULL} pointer, since the value of the provided buffer is simply
ignored; a registration sketch is given after this table.
@item @code{STARPU_REDUX}: reduction mode. Tasks access per-worker
buffers which are initialized and combined with the codelets set by
@code{starpu_data_set_reduction_methods}.
@end table
@end deftp
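As mentioned above, a scratch buffer is typically registered with home node @code{-1} and a @code{NULL} pointer; here is a minimal sketch (the vector length is illustrative).
@cartouche
@smallexample
starpu_data_handle_t scratch_handle;

/* No home node (-1) and no initial pointer: StarPU allocates the
   per-device buffers on demand. */
starpu_vector_data_register(&scratch_handle, -1, (uintptr_t)NULL,
                            1024, sizeof(float));

/* The handle can then be passed to tasks with the STARPU_SCRATCH mode. */
@end smallexample
@end cartouche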
@deftp {Data Type} {starpu_data_handle_t}
StarPU uses @code{starpu_data_handle_t} as an opaque handle to manage a piece of
data. Once a piece of data has been registered with StarPU, it is associated with a
@code{starpu_data_handle_t} which keeps track of the state of the piece of data
over the entire machine, so that we can maintain data consistency and locate
data replicates for instance.
@end deftp
@deftypefun void starpu_data_register (starpu_data_handle_t *@var{handleptr}, unsigned @var{home_node}, void *@var{data_interface}, {struct starpu_data_interface_ops} *@var{ops})
Register a piece of data into the handle located at the @var{handleptr}
address. The @var{data_interface} buffer contains the initial description of the
data in the home node. The @var{ops} argument is a pointer to a structure
describing the different methods used to manipulate this type of interface. See
@ref{struct starpu_data_interface_ops} for more details on this structure.
If @code{home_node} is -1, StarPU will automatically
allocate the memory when it is used for the
first time in write-only mode. Once such a data handle has been automatically
allocated, it is possible to access it using any access mode.
Note that StarPU supplies a set of predefined types of interface (e.g. vector or
matrix) which can be registered by means of helper functions (e.g.
@code{starpu_vector_data_register} or @code{starpu_matrix_data_register}).
@end deftypefun
@deftypefun void starpu_data_register_same ({starpu_data_handle_t *}@var{handledst}, starpu_data_handle_t @var{handlesrc})
Register a new piece of data into the handle @var{handledst} with the
same interface as the handle @var{handlesrc}.
@end deftypefun
@deftypefun void starpu_data_unregister (starpu_data_handle_t @var{handle})
This function unregisters a data handle from StarPU. If the data was
automatically allocated by StarPU because the home node was -1, all
automatically allocated buffers are freed. Otherwise, a valid copy of the data
is put back into the home node in the buffer that was initially registered.
Using a data handle that has been unregistered from StarPU results in
undefined behaviour.
@end deftypefun
@deftypefun void starpu_data_unregister_no_coherency (starpu_data_handle_t @var{handle})
This is the same as @code{starpu_data_unregister}, except that StarPU does not put back
a valid copy into the home node, in the buffer that was initially registered.
@end deftypefun
@deftypefun void starpu_data_unregister_submit (starpu_data_handle_t @var{handle})
Destroy the data handle once it is not needed anymore by any submitted
task. No coherency is assumed.
@end deftypefun
@deftypefun void starpu_data_invalidate (starpu_data_handle_t @var{handle})
Destroy all replicates of the data handle immediately. After data invalidation,
the first access to the handle must be performed in write-only mode.
Accessing invalidated data in read mode results in undefined
behaviour.
@end deftypefun
@deftypefun void starpu_data_invalidate_submit (starpu_data_handle_t @var{handle})
Submits invalidation of the data handle after completion of previously submitted tasks.
@end deftypefun
@c TODO create a specific section about user interaction with the DSM ?
@deftypefun void starpu_data_set_wt_mask (starpu_data_handle_t @var{handle}, uint32_t @var{wt_mask})
This function sets the write-through mask of a given data, i.e. a bitmask of
nodes where the data should always be replicated after modification. It also
prevents the data from being evicted from these nodes when memory gets scarce.
@end deftypefun
@deftypefun int starpu_data_prefetch_on_node (starpu_data_handle_t @var{handle}, unsigned @var{node}, unsigned @var{async})
Issue a prefetch request for a given data to a given node, i.e.
requests that the data be replicated to the given node, so that it is available
there for tasks. If the @var{async} parameter is 0, the call will block until
the transfer is completed, else the call will return as soon as the request is
scheduled (which may however have to wait for a task completion).
@end deftypefun
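For example, assuming @code{handle} is a registered data handle and @code{workerid} the identifier of some worker, a non-blocking prefetch towards that worker's memory node can be sketched as follows.
@cartouche
@smallexample
/* Replicate the data on the worker's memory node ahead of time,
   without blocking the calling thread. */
unsigned node = starpu_worker_get_memory_node(workerid);
starpu_data_prefetch_on_node(handle, node, 1);
@end smallexample
@end cartouche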
@deftypefun starpu_data_handle_t starpu_data_lookup ({const void *}@var{ptr})
Return the handle corresponding to the data pointed to by the @var{ptr}
host pointer.
@end deftypefun
@deftypefun int starpu_data_request_allocation (starpu_data_handle_t @var{handle}, unsigned @var{node})
Explicitly ask StarPU to allocate room for a piece of data on the specified
memory node.
@end deftypefun
@deftypefun void starpu_data_query_status (starpu_data_handle_t @var{handle}, int @var{memory_node}, {int *}@var{is_allocated}, {int *}@var{is_valid}, {int *}@var{is_requested})
Query the status of the handle on the specified memory node.
@end deftypefun
@deftypefun void starpu_data_advise_as_important (starpu_data_handle_t @var{handle}, unsigned @var{is_important})
This function allows the application to specify that a piece of data can be discarded
without impacting the application.
@end deftypefun
@deftypefun void starpu_data_set_reduction_methods (starpu_data_handle_t @var{handle}, {struct starpu_codelet *}@var{redux_cl}, {struct starpu_codelet *}@var{init_cl})
This sets the codelets to be used for the @var{handle} when it is accessed in
REDUX mode. Per-worker buffers will be initialized with the @var{init_cl}
codelet, and reduction between per-worker buffers will be done with the
@var{redux_cl} codelet.
@end deftypefun
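A sketch of setting up reduction codelets follows; it assumes @code{handle} is the data handle to be reduced and that the application provides two hypothetical CPU kernels, @code{accumulate_cpu} (which adds its second buffer into its first one) and @code{init_zero_cpu} (which resets its buffer).
@cartouche
@smallexample
/* Hypothetical CPU kernels provided by the application. */
void accumulate_cpu(void *buffers[], void *cl_arg);
void init_zero_cpu(void *buffers[], void *cl_arg);

static struct starpu_codelet accumulate_cl =
@{
    .cpu_funcs = @{ accumulate_cpu, NULL @},
    .nbuffers = 2,
    .modes = @{ STARPU_RW, STARPU_R @}
@};
static struct starpu_codelet init_zero_cl =
@{
    .cpu_funcs = @{ init_zero_cpu, NULL @},
    .nbuffers = 1,
    .modes = @{ STARPU_W @}
@};

/* Per-worker buffers are initialized with init_zero_cl and combined
   pairwise with accumulate_cl when handle is accessed in REDUX mode. */
starpu_data_set_reduction_methods(handle, &accumulate_cl, &init_zero_cl);
@end smallexample
@end cartouche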
@deftypefun struct starpu_data_interface_ops* starpu_data_get_interface_ops (starpu_data_handle_t @var{handle})
Get a pointer to the structure describing the different methods used
to manipulate the given data. See @ref{struct starpu_data_interface_ops} for more details on this structure.
@end deftypefun
@node Access registered data from the application
@subsection Access registered data from the application
@deftypefun int starpu_data_acquire (starpu_data_handle_t @var{handle}, {enum starpu_access_mode} @var{mode})
The application must call this function prior to accessing registered data from
main memory outside tasks. StarPU ensures that the application will get an
up-to-date copy of the data in main memory located where the data was
originally registered, and that all concurrent accesses (e.g. from tasks) will
be consistent with the access mode specified in the @var{mode} argument.
@code{starpu_data_release} must be called once the application does not need to
access the piece of data anymore. Note that implicit data
dependencies are also enforced by @code{starpu_data_acquire}, i.e.
@code{starpu_data_acquire} will wait for all tasks scheduled to work on
the data, unless they have been disabled explicitly by calling
@code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
@code{starpu_data_acquire} is a blocking call, so it cannot be called from
tasks or from their callbacks (in that case, @code{starpu_data_acquire} returns
@code{-EDEADLK}). Upon successful completion, this function returns 0.
@end deftypefun
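A typical use is to read results back after the corresponding tasks have been submitted; the sketch below assumes the handle was registered on a buffer in main memory, and @code{NX} is an illustrative size.
@cartouche
@smallexample
float results[NX];
starpu_data_handle_t results_handle;
starpu_vector_data_register(&results_handle, 0, (uintptr_t)results, NX,
                            sizeof(results[0]));

/* ... submit tasks working on results_handle ... */

/* Wait for these tasks and get a coherent copy back in main memory. */
starpu_data_acquire(results_handle, STARPU_R);
printf("first result: %f\n", results[0]);
starpu_data_release(results_handle);
@end smallexample
@end cartouche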
@deftypefun int starpu_data_acquire_cb (starpu_data_handle_t @var{handle}, {enum starpu_access_mode} @var{mode}, void (*@var{callback})(void *), void *@var{arg})
@code{starpu_data_acquire_cb} is the asynchronous equivalent of
@code{starpu_data_acquire}. When the data specified in the first argument is
available in the appropriate access mode, the callback function is executed.
The application may access the requested data during the execution of this
callback. The callback function must call @code{starpu_data_release} once the
application does not need to access the piece of data anymore.
Note that implicit data dependencies are also enforced by
@code{starpu_data_acquire_cb} in case they are not disabled.
Contrary to @code{starpu_data_acquire}, this function is non-blocking and may
be called from task callbacks. Upon successful completion, this function
returns 0.
@end deftypefun
@deftypefun int starpu_data_acquire_on_node (starpu_data_handle_t @var{handle}, unsigned @var{node}, {enum starpu_access_mode} @var{mode})
This is the same as @code{starpu_data_acquire}, except that the data will be
available on the given memory node instead of main memory.
@end deftypefun
@deftypefun int starpu_data_acquire_on_node_cb (starpu_data_handle_t @var{handle}, unsigned @var{node}, {enum starpu_access_mode} @var{mode}, void (*@var{callback})(void *), void *@var{arg})
This is the same as @code{starpu_data_acquire_cb}, except that the data will be
available on the given memory node instead of main memory.
@end deftypefun
@defmac STARPU_DATA_ACQUIRE_CB (starpu_data_handle_t @var{handle}, {enum starpu_access_mode} @var{mode}, code)
@code{STARPU_DATA_ACQUIRE_CB} is the same as @code{starpu_data_acquire_cb},
except that the code to be executed in a callback is directly provided as a
macro parameter, and the data handle is automatically released after it. This
makes it easy to execute code which depends on the value of some registered
data. This is non-blocking too and may be called from task callbacks.
@end defmac
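For instance, assuming @code{var} and @code{var_handle} have been registered with @code{starpu_variable_data_register} as in the example of @ref{Registering Data}, the following sketch prints the variable as soon as it is available.
@cartouche
@smallexample
STARPU_DATA_ACQUIRE_CB(var_handle, STARPU_R,
                       printf("the variable is now %f\n", var));
@end smallexample
@end cartouche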
@deftypefun void starpu_data_release (starpu_data_handle_t @var{handle})
This function releases the piece of data acquired by the application either by
@code{starpu_data_acquire} or by @code{starpu_data_acquire_cb}.
@end deftypefun
@deftypefun void starpu_data_release_on_node (starpu_data_handle_t @var{handle}, unsigned @var{node})
This is the same as @code{starpu_data_release}, except that the data will be
available on the given memory node instead of main memory.
@end deftypefun
@node Data Interfaces
@section Data Interfaces
@menu
* Registering Data::
* Accessing Data Interfaces::
* Defining Interface::
@end menu
@node Registering Data
@subsection Registering Data
There are several ways to register a memory region so that it can be managed by
StarPU. The functions below allow the registration of vectors, 2D matrices, 3D
matrices as well as BCSR and CSR sparse matrices.
@deftypefun void starpu_void_data_register ({starpu_data_handle_t *}@var{handle})
Register a void interface. There is no data really associated with that
interface, but it may be used as a synchronization mechanism. It also
permits expressing an abstract piece of data that is managed by the
application internally: this makes it possible to forbid the
concurrent execution of different tasks accessing the same "void" data
in read-write mode.
@end deftypefun
@deftypefun void starpu_variable_data_register ({starpu_data_handle_t *}@var{handle}, unsigned @var{home_node}, uintptr_t @var{ptr}, size_t @var{size})
Register the @var{size}-byte element pointed to by @var{ptr}, which is
typically a scalar, and initialize @var{handle} to represent this data
item.
@cartouche
@smallexample
float var;
starpu_data_handle_t var_handle;
starpu_variable_data_register(&var_handle, 0, (uintptr_t)&var, sizeof(var));
@end smallexample
@end cartouche
@end deftypefun
@deftypefun void starpu_vector_data_register ({starpu_data_handle_t *}@var{handle}, unsigned @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{nx}, size_t @var{elemsize})
Register the @var{nx} @var{elemsize}-byte elements pointed to by
@var{ptr} and initialize @var{handle} to represent it.
@cartouche
@smallexample
float vector[NX];
starpu_data_handle_t vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end smallexample
@end cartouche
@end deftypefun
@deftypefun void starpu_matrix_data_register ({starpu_data_handle_t *}@var{handle}, unsigned @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{ld}, uint32_t @var{nx}, uint32_t @var{ny}, size_t @var{elemsize})
Register the @var{nx}x@var{ny} 2D matrix of @var{elemsize}-byte elements
pointed to by @var{ptr} and initialize @var{handle} to represent it.
@var{ld} specifies the number of elements between rows.
A value greater than @var{nx} adds padding, which can be useful for
alignment purposes.
@cartouche
@smallexample
float *matrix;
starpu_data_handle_t matrix_handle;
matrix = (float*)malloc(width * height * sizeof(float));
starpu_matrix_data_register(&matrix_handle, 0, (uintptr_t)matrix,
                            width, width, height, sizeof(float));
@end smallexample
@end cartouche
@end deftypefun
@deftypefun void starpu_block_data_register ({starpu_data_handle_t *}@var{handle}, unsigned @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{ldy}, uint32_t @var{ldz}, uint32_t @var{nx}, uint32_t @var{ny}, uint32_t @var{nz}, size_t @var{elemsize})
Register the @var{nx}x@var{ny}x@var{nz} 3D matrix of @var{elemsize}-byte
elements pointed to by @var{ptr} and initialize @var{handle} to represent
it. Again, @var{ldy} and @var{ldz} specify the number of elements
between rows and between z planes.
@cartouche
@smallexample
float *block;
starpu_data_handle_t block_handle;
block = (float*)malloc(nx*ny*nz*sizeof(float));
starpu_block_data_register(&block_handle, 0, (uintptr_t)block,
                           nx, nx*ny, nx, ny, nz, sizeof(float));
@end smallexample
@end cartouche
@end deftypefun
@deftypefun void starpu_bcsr_data_register (starpu_data_handle_t *@var{handle}, unsigned @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, uint32_t @var{r}, uint32_t @var{c}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the BCSR (Blocked
Compressed Sparse Row Representation) sparse matrix interface.
Register the sparse matrix made of @var{nnz} non-zero blocks of elements of size
@var{elemsize} stored in @var{nzval} and initialize @var{handle} to represent
it. Blocks have size @var{r} * @var{c}. @var{nrow} is the number of rows (in
terms of blocks), @code{colind[i]} is the block-column index for block @code{i}
in @code{nzval}, @code{rowptr[i]} is the block-index (in nzval) of the first block of row @code{i}.
@var{firstentry} is the index of the first entry of the given arrays (usually 0
or 1).
@end deftypefun
@deftypefun void starpu_csr_data_register (starpu_data_handle_t *@var{handle}, unsigned @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the CSR (Compressed
Sparse Row Representation) sparse matrix interface.
Register the sparse matrix made of @var{nnz} non-zero elements of size
@var{elemsize} stored in @var{nzval} and initialize @var{handle} to represent
it. @var{nrow} is the number of rows, @code{colind[i]} is the column index of
element @code{i} in @code{nzval}, @code{rowptr[i]} is the index (in nzval) of
the first element of row @code{i}. @var{firstentry} is the index of the first
entry of the given arrays (usually 0 or 1).
@end deftypefun
@deftypefun void starpu_coo_data_register (starpu_data_handle_t *@var{handleptr}, unsigned @var{home_node}, uint32_t @var{nx}, uint32_t @var{ny}, uint32_t @var{n_values}, uint32_t *@var{columns}, uint32_t *@var{rows}, uintptr_t @var{values}, size_t @var{elemsize})
Register the @var{nx}x@var{ny} 2D matrix given in the COO format, using the
@var{columns}, @var{rows}, @var{values} arrays, which must have @var{n_values}
elements of size @var{elemsize}. Initialize @var{handleptr}.
@end deftypefun
@deftypefun {void *} starpu_data_get_interface_on_node (starpu_data_handle_t @var{handle}, unsigned @var{memory_node})
Return the interface associated with @var{handle} on @var{memory_node}.
@end deftypefun
@node Accessing Data Interfaces
@subsection Accessing Data Interfaces
Each data interface is provided with a set of field access functions.
The ones using a @code{void *} parameter are meant to be used in codelet
implementations (see for example the code in @ref{Vector Scaling Using StarPU's API}).
@deftp {Data Type} {enum starpu_data_interface_id}
The different values are:
@table @asis
@item @code{STARPU_MATRIX_INTERFACE_ID}
@item @code{STARPU_BLOCK_INTERFACE_ID}
@item @code{STARPU_VECTOR_INTERFACE_ID}
@item @code{STARPU_CSR_INTERFACE_ID}
@item @code{STARPU_BCSR_INTERFACE_ID}
@item @code{STARPU_VARIABLE_INTERFACE_ID}
@item @code{STARPU_VOID_INTERFACE_ID}
@item @code{STARPU_MULTIFORMAT_INTERFACE_ID}
@item @code{STARPU_COO_INTERCACE_ID}
@item @code{STARPU_NINTERFACES_ID}: number of data interfaces
@end table
@end deftp
  680. @menu
  681. * Accessing Handle::
  682. * Accessing Variable Data Interfaces::
  683. * Accessing Vector Data Interfaces::
  684. * Accessing Matrix Data Interfaces::
  685. * Accessing Block Data Interfaces::
  686. * Accessing BCSR Data Interfaces::
  687. * Accessing CSR Data Interfaces::
  688. * Accessing COO Data Interfaces::
  689. @end menu
  690. @node Accessing Handle
  691. @subsubsection Handle
  692. @deftypefun {void *} starpu_handle_to_pointer (starpu_data_handle_t @var{handle}, unsigned @var{node})
  693. Return the pointer associated with @var{handle} on node @var{node} or
  694. @code{NULL} if @var{handle}'s interface does not support this
  695. operation or data for this handle is not allocated on that node.
  696. @end deftypefun
  697. @deftypefun {void *} starpu_handle_get_local_ptr (starpu_data_handle_t @var{handle})
  698. Return the local pointer associated with @var{handle} or @code{NULL}
699. if @var{handle}'s interface does not have data allocated locally.
  700. @end deftypefun
  701. @deftypefun {enum starpu_data_interface_id} starpu_handle_get_interface_id (starpu_data_handle_t @var{handle})
  702. Return the unique identifier of the interface associated with the given @var{handle}.
  703. @end deftypefun
  704. @deftypefun size_t starpu_handle_get_size (starpu_data_handle_t @var{handle})
705. Return the size of the data associated with @var{handle}.
  706. @end deftypefun
  707. @deftypefun int starpu_handle_pack_data (starpu_data_handle_t @var{handle}, {void **}@var{ptr}, {starpu_ssize_t *}@var{count})
  708. Execute the packing operation of the interface of the data registered
  709. at @var{handle} (@pxref{struct starpu_data_interface_ops}). This
710. packing operation must allocate a buffer large enough to hold the data,
711. store its address at @var{ptr}, and copy the data associated with
712. @var{handle} into that buffer. @var{count} will be set to the size of the
713. allocated buffer.
  714. If @var{ptr} is @code{NULL}, the function should not copy the data in the
  715. buffer but just set @var{count} to the size of the buffer which
  716. would have been allocated. The special value @code{-1} indicates the
  717. size is yet unknown.
  718. @end deftypefun
  719. @deftypefun int starpu_handle_unpack_data (starpu_data_handle_t @var{handle}, {void *}@var{ptr}, size_t @var{count})
  720. Unpack in @var{handle} the data located at @var{ptr} of size
  721. @var{count} as described by the interface of the data. The interface
722. registered at @var{handle} must define an unpacking operation
  723. (@pxref{struct starpu_data_interface_ops}). The memory at the address @code{ptr}
  724. is freed after calling the data unpacking operation.
  725. @end deftypefun
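As a minimal sketch, the two operations can be combined to move the content of a handle through a flat buffer, for instance over a network; @code{other_handle} is a hypothetical handle registered with the same interface:
@cartouche
@smallexample
void *buffer;
starpu_ssize_t size;
starpu_handle_pack_data(handle, &buffer, &size);
/* ... transmit the 'size' bytes stored at 'buffer' ... */
starpu_handle_unpack_data(other_handle, buffer, size);
/* 'buffer' is freed by the unpacking operation. */
@end smallexample
@end cartouche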
  726. @node Accessing Variable Data Interfaces
  727. @subsubsection Variable Data Interfaces
  728. @deftypefun size_t starpu_variable_get_elemsize (starpu_data_handle_t @var{handle})
  729. Return the size of the variable designated by @var{handle}.
  730. @end deftypefun
  731. @deftypefun uintptr_t starpu_variable_get_local_ptr (starpu_data_handle_t @var{handle})
  732. Return a pointer to the variable designated by @var{handle}.
  733. @end deftypefun
  734. @defmac STARPU_VARIABLE_GET_PTR ({void *}@var{interface})
  735. Return a pointer to the variable designated by @var{interface}.
  736. @end defmac
  737. @defmac STARPU_VARIABLE_GET_ELEMSIZE ({void *}@var{interface})
  738. Return the size of the variable designated by @var{interface}.
  739. @end defmac
  740. @defmac STARPU_VARIABLE_GET_DEV_HANDLE ({void *}@var{interface})
  741. Return a device handle for the variable designated by @var{interface}, to be
  742. used on OpenCL. The offset documented below has to be used in addition to this.
  743. @end defmac
  744. @defmac STARPU_VARIABLE_GET_OFFSET ({void *}@var{interface})
  745. Return the offset in the variable designated by @var{interface}, to be used
  746. with the device handle.
  747. @end defmac
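For instance, a CPU codelet implementation may access a registered variable through these macros, as in the following sketch (the codelet name and element type are assumptions):
@cartouche
@smallexample
void increment_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned *value = (unsigned *)STARPU_VARIABLE_GET_PTR(buffers[0]);
    (*value)++;
@}
@end smallexample
@end cartouche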
  748. @node Accessing Vector Data Interfaces
  749. @subsubsection Vector Data Interfaces
  750. @deftypefun uint32_t starpu_vector_get_nx (starpu_data_handle_t @var{handle})
  751. Return the number of elements registered into the array designated by @var{handle}.
  752. @end deftypefun
  753. @deftypefun size_t starpu_vector_get_elemsize (starpu_data_handle_t @var{handle})
  754. Return the size of each element of the array designated by @var{handle}.
  755. @end deftypefun
  756. @deftypefun uintptr_t starpu_vector_get_local_ptr (starpu_data_handle_t @var{handle})
  757. Return the local pointer associated with @var{handle}.
  758. @end deftypefun
  759. @defmac STARPU_VECTOR_GET_PTR ({void *}@var{interface})
  760. Return a pointer to the array designated by @var{interface}, valid on CPUs and
  761. CUDA only. For OpenCL, the device handle and offset need to be used instead.
  762. @end defmac
  763. @defmac STARPU_VECTOR_GET_DEV_HANDLE ({void *}@var{interface})
764. Return a device handle for the array designated by @var{interface}, to be used on OpenCL. The offset
  765. documented below has to be used in addition to this.
  766. @end defmac
  767. @defmac STARPU_VECTOR_GET_OFFSET ({void *}@var{interface})
  768. Return the offset in the array designated by @var{interface}, to be used with the device handle.
  769. @end defmac
  770. @defmac STARPU_VECTOR_GET_NX ({void *}@var{interface})
  771. Return the number of elements registered into the array designated by @var{interface}.
  772. @end defmac
  773. @defmac STARPU_VECTOR_GET_ELEMSIZE ({void *}@var{interface})
  774. Return the size of each element of the array designated by @var{interface}.
  775. @end defmac
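As an illustration, a CPU implementation of a vector scaling kernel similar to the one referenced above could use these macros as follows; this is only a sketch, with the scaling factor assumed to be passed through @code{cl_arg}:
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    float *v = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    float factor = *(float *)cl_arg;
    unsigned i;
    for (i = 0; i < n; i++)
        v[i] *= factor;
@}
@end smallexample
@end cartouche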
  776. @node Accessing Matrix Data Interfaces
  777. @subsubsection Matrix Data Interfaces
  778. @deftypefun uint32_t starpu_matrix_get_nx (starpu_data_handle_t @var{handle})
  779. Return the number of elements on the x-axis of the matrix designated by @var{handle}.
  780. @end deftypefun
  781. @deftypefun uint32_t starpu_matrix_get_ny (starpu_data_handle_t @var{handle})
  782. Return the number of elements on the y-axis of the matrix designated by
  783. @var{handle}.
  784. @end deftypefun
  785. @deftypefun uint32_t starpu_matrix_get_local_ld (starpu_data_handle_t @var{handle})
  786. Return the number of elements between each row of the matrix designated by
787. @var{handle}. May be equal to nx when there is no padding.
  788. @end deftypefun
  789. @deftypefun uintptr_t starpu_matrix_get_local_ptr (starpu_data_handle_t @var{handle})
  790. Return the local pointer associated with @var{handle}.
  791. @end deftypefun
  792. @deftypefun size_t starpu_matrix_get_elemsize (starpu_data_handle_t @var{handle})
  793. Return the size of the elements registered into the matrix designated by
  794. @var{handle}.
  795. @end deftypefun
  796. @defmac STARPU_MATRIX_GET_PTR ({void *}@var{interface})
  797. Return a pointer to the matrix designated by @var{interface}, valid on CPUs and
  798. CUDA devices only. For OpenCL devices, the device handle and offset need to be
  799. used instead.
  800. @end defmac
  801. @defmac STARPU_MATRIX_GET_DEV_HANDLE ({void *}@var{interface})
  802. Return a device handle for the matrix designated by @var{interface}, to be used
  803. on OpenCL. The offset documented below has to be used in addition to this.
  804. @end defmac
  805. @defmac STARPU_MATRIX_GET_OFFSET ({void *}@var{interface})
  806. Return the offset in the matrix designated by @var{interface}, to be used with
  807. the device handle.
  808. @end defmac
  809. @defmac STARPU_MATRIX_GET_NX ({void *}@var{interface})
  810. Return the number of elements on the x-axis of the matrix designated by
  811. @var{interface}.
  812. @end defmac
  813. @defmac STARPU_MATRIX_GET_NY ({void *}@var{interface})
  814. Return the number of elements on the y-axis of the matrix designated by
  815. @var{interface}.
  816. @end defmac
  817. @defmac STARPU_MATRIX_GET_LD ({void *}@var{interface})
  818. Return the number of elements between each row of the matrix designated by
  819. @var{interface}. May be equal to nx when there is no padding.
  820. @end defmac
  821. @defmac STARPU_MATRIX_GET_ELEMSIZE ({void *}@var{interface})
  822. Return the size of the elements registered into the matrix designated by
  823. @var{interface}.
  824. @end defmac
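A sketch of a CPU codelet traversing a registered matrix with these macros; note that consecutive rows are separated by the leading dimension @code{ld}, which may differ from @code{nx} when the matrix is padded:
@cartouche
@smallexample
void matrix_zero_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
    unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
    unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
    float *mat = (float *)STARPU_MATRIX_GET_PTR(buffers[0]);
    unsigned i, j;
    for (j = 0; j < ny; j++)
        for (i = 0; i < nx; i++)
            mat[j*ld + i] = 0.0f;
@}
@end smallexample
@end cartouche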
  825. @node Accessing Block Data Interfaces
  826. @subsubsection Block Data Interfaces
  827. @deftypefun uint32_t starpu_block_get_nx (starpu_data_handle_t @var{handle})
  828. Return the number of elements on the x-axis of the block designated by @var{handle}.
  829. @end deftypefun
  830. @deftypefun uint32_t starpu_block_get_ny (starpu_data_handle_t @var{handle})
  831. Return the number of elements on the y-axis of the block designated by @var{handle}.
  832. @end deftypefun
  833. @deftypefun uint32_t starpu_block_get_nz (starpu_data_handle_t @var{handle})
  834. Return the number of elements on the z-axis of the block designated by @var{handle}.
  835. @end deftypefun
  836. @deftypefun uint32_t starpu_block_get_local_ldy (starpu_data_handle_t @var{handle})
  837. Return the number of elements between each row of the block designated by
  838. @var{handle}, in the format of the current memory node.
  839. @end deftypefun
  840. @deftypefun uint32_t starpu_block_get_local_ldz (starpu_data_handle_t @var{handle})
  841. Return the number of elements between each z plane of the block designated by
  842. @var{handle}, in the format of the current memory node.
  843. @end deftypefun
  844. @deftypefun uintptr_t starpu_block_get_local_ptr (starpu_data_handle_t @var{handle})
  845. Return the local pointer associated with @var{handle}.
  846. @end deftypefun
  847. @deftypefun size_t starpu_block_get_elemsize (starpu_data_handle_t @var{handle})
  848. Return the size of the elements of the block designated by @var{handle}.
  849. @end deftypefun
  850. @defmac STARPU_BLOCK_GET_PTR ({void *}@var{interface})
  851. Return a pointer to the block designated by @var{interface}.
  852. @end defmac
  853. @defmac STARPU_BLOCK_GET_DEV_HANDLE ({void *}@var{interface})
  854. Return a device handle for the block designated by @var{interface}, to be used
855. on OpenCL. The offset documented below has to be used in addition to this.
  856. @end defmac
  857. @defmac STARPU_BLOCK_GET_OFFSET ({void *}@var{interface})
  858. Return the offset in the block designated by @var{interface}, to be used with
  859. the device handle.
  860. @end defmac
  861. @defmac STARPU_BLOCK_GET_NX ({void *}@var{interface})
862. Return the number of elements on the x-axis of the block designated by @var{interface}.
  863. @end defmac
  864. @defmac STARPU_BLOCK_GET_NY ({void *}@var{interface})
865. Return the number of elements on the y-axis of the block designated by @var{interface}.
  866. @end defmac
  867. @defmac STARPU_BLOCK_GET_NZ ({void *}@var{interface})
868. Return the number of elements on the z-axis of the block designated by @var{interface}.
  869. @end defmac
  870. @defmac STARPU_BLOCK_GET_LDY ({void *}@var{interface})
  871. Return the number of elements between each row of the block designated by
  872. @var{interface}. May be equal to nx when there is no padding.
  873. @end defmac
  874. @defmac STARPU_BLOCK_GET_LDZ ({void *}@var{interface})
  875. Return the number of elements between each z plane of the block designated by
  876. @var{interface}. May be equal to nx*ny when there is no padding.
  877. @end defmac
  878. @defmac STARPU_BLOCK_GET_ELEMSIZE ({void *}@var{interface})
879. Return the size of the elements of the block designated by @var{interface}.
  880. @end defmac
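Similarly, a sketch of a CPU codelet traversing a registered 3D block; @code{ldy} and @code{ldz} give the number of elements between consecutive rows and between consecutive z planes:
@cartouche
@smallexample
void block_zero_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned nx = STARPU_BLOCK_GET_NX(buffers[0]);
    unsigned ny = STARPU_BLOCK_GET_NY(buffers[0]);
    unsigned nz = STARPU_BLOCK_GET_NZ(buffers[0]);
    unsigned ldy = STARPU_BLOCK_GET_LDY(buffers[0]);
    unsigned ldz = STARPU_BLOCK_GET_LDZ(buffers[0]);
    float *b = (float *)STARPU_BLOCK_GET_PTR(buffers[0]);
    unsigned i, j, k;
    for (k = 0; k < nz; k++)
        for (j = 0; j < ny; j++)
            for (i = 0; i < nx; i++)
                b[k*ldz + j*ldy + i] = 0.0f;
@}
@end smallexample
@end cartouche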
  881. @node Accessing BCSR Data Interfaces
  882. @subsubsection BCSR Data Interfaces
  883. @deftypefun uint32_t starpu_bcsr_get_nnz (starpu_data_handle_t @var{handle})
  884. Return the number of non-zero elements in the matrix designated by @var{handle}.
  885. @end deftypefun
  886. @deftypefun uint32_t starpu_bcsr_get_nrow (starpu_data_handle_t @var{handle})
  887. Return the number of rows (in terms of blocks of size r*c) in the matrix
  888. designated by @var{handle}.
  889. @end deftypefun
  890. @deftypefun uint32_t starpu_bcsr_get_firstentry (starpu_data_handle_t @var{handle})
  891. Return the index at which all arrays (the column indexes, the row pointers...)
892. of the matrix designated by @var{handle} start.
  893. @end deftypefun
  894. @deftypefun uintptr_t starpu_bcsr_get_local_nzval (starpu_data_handle_t @var{handle})
  895. Return a pointer to the non-zero values of the matrix designated by @var{handle}.
  896. @end deftypefun
  897. @deftypefun {uint32_t *} starpu_bcsr_get_local_colind (starpu_data_handle_t @var{handle})
  898. Return a pointer to the column index, which holds the positions of the non-zero
  899. entries in the matrix designated by @var{handle}.
  900. @end deftypefun
  901. @deftypefun {uint32_t *} starpu_bcsr_get_local_rowptr (starpu_data_handle_t @var{handle})
  902. Return the row pointer array of the matrix designated by @var{handle}.
  903. @end deftypefun
  904. @deftypefun uint32_t starpu_bcsr_get_r (starpu_data_handle_t @var{handle})
  905. Return the number of rows in a block.
  906. @end deftypefun
  907. @deftypefun uint32_t starpu_bcsr_get_c (starpu_data_handle_t @var{handle})
908. Return the number of columns in a block.
  909. @end deftypefun
  910. @deftypefun size_t starpu_bcsr_get_elemsize (starpu_data_handle_t @var{handle})
  911. Return the size of the elements in the matrix designated by @var{handle}.
  912. @end deftypefun
  913. @defmac STARPU_BCSR_GET_NNZ ({void *}@var{interface})
  914. Return the number of non-zero values in the matrix designated by @var{interface}.
  915. @end defmac
  916. @defmac STARPU_BCSR_GET_NZVAL ({void *}@var{interface})
  917. Return a pointer to the non-zero values of the matrix designated by @var{interface}.
  918. @end defmac
  919. @defmac STARPU_BCSR_GET_NZVAL_DEV_HANDLE ({void *}@var{interface})
  920. Return a device handle for the array of non-zero values in the matrix designated
  921. by @var{interface}. The offset documented below has to be used in addition to
  922. this.
  923. @end defmac
  924. @defmac STARPU_BCSR_GET_COLIND ({void *}@var{interface})
  925. Return a pointer to the column index of the matrix designated by @var{interface}.
  926. @end defmac
  927. @defmac STARPU_BCSR_GET_COLIND_DEV_HANDLE ({void *}@var{interface})
  928. Return a device handle for the column index of the matrix designated by
  929. @var{interface}. The offset documented below has to be used in addition to
  930. this.
  931. @end defmac
  932. @defmac STARPU_BCSR_GET_ROWPTR ({void *}@var{interface})
  933. Return a pointer to the row pointer array of the matrix designated by @var{interface}.
  934. @end defmac
935. @defmac STARPU_BCSR_GET_ROWPTR_DEV_HANDLE ({void *}@var{interface})
  936. Return a device handle for the row pointer array of the matrix designated by
  937. @var{interface}. The offset documented below has to be used in addition to
  938. this.
  939. @end defmac
  940. @defmac STARPU_BCSR_GET_OFFSET ({void *}@var{interface})
941. Return the offset in the arrays (colind, rowptr, nzval) of the matrix
  942. designated by @var{interface}, to be used with the device handles.
  943. @end defmac
  944. @node Accessing CSR Data Interfaces
  945. @subsubsection CSR Data Interfaces
  946. @deftypefun uint32_t starpu_csr_get_nnz (starpu_data_handle_t @var{handle})
  947. Return the number of non-zero values in the matrix designated by @var{handle}.
  948. @end deftypefun
  949. @deftypefun uint32_t starpu_csr_get_nrow (starpu_data_handle_t @var{handle})
  950. Return the size of the row pointer array of the matrix designated by @var{handle}.
  951. @end deftypefun
  952. @deftypefun uint32_t starpu_csr_get_firstentry (starpu_data_handle_t @var{handle})
  953. Return the index at which all arrays (the column indexes, the row pointers...)
  954. of the matrix designated by @var{handle} start.
  955. @end deftypefun
  956. @deftypefun uintptr_t starpu_csr_get_local_nzval (starpu_data_handle_t @var{handle})
  957. Return a local pointer to the non-zero values of the matrix designated by @var{handle}.
  958. @end deftypefun
  959. @deftypefun {uint32_t *} starpu_csr_get_local_colind (starpu_data_handle_t @var{handle})
  960. Return a local pointer to the column index of the matrix designated by @var{handle}.
  961. @end deftypefun
  962. @deftypefun {uint32_t *} starpu_csr_get_local_rowptr (starpu_data_handle_t @var{handle})
  963. Return a local pointer to the row pointer array of the matrix designated by @var{handle}.
  964. @end deftypefun
  965. @deftypefun size_t starpu_csr_get_elemsize (starpu_data_handle_t @var{handle})
  966. Return the size of the elements registered into the matrix designated by @var{handle}.
  967. @end deftypefun
  968. @defmac STARPU_CSR_GET_NNZ ({void *}@var{interface})
  969. Return the number of non-zero values in the matrix designated by @var{interface}.
  970. @end defmac
  971. @defmac STARPU_CSR_GET_NROW ({void *}@var{interface})
  972. Return the size of the row pointer array of the matrix designated by @var{interface}.
  973. @end defmac
  974. @defmac STARPU_CSR_GET_NZVAL ({void *}@var{interface})
  975. Return a pointer to the non-zero values of the matrix designated by @var{interface}.
  976. @end defmac
  977. @defmac STARPU_CSR_GET_NZVAL_DEV_HANDLE ({void *}@var{interface})
  978. Return a device handle for the array of non-zero values in the matrix designated
  979. by @var{interface}. The offset documented below has to be used in addition to
  980. this.
  981. @end defmac
  982. @defmac STARPU_CSR_GET_COLIND ({void *}@var{interface})
  983. Return a pointer to the column index of the matrix designated by @var{interface}.
  984. @end defmac
  985. @defmac STARPU_CSR_GET_COLIND_DEV_HANDLE ({void *}@var{interface})
  986. Return a device handle for the column index of the matrix designated by
  987. @var{interface}. The offset documented below has to be used in addition to
  988. this.
  989. @end defmac
  990. @defmac STARPU_CSR_GET_ROWPTR ({void *}@var{interface})
  991. Return a pointer to the row pointer array of the matrix designated by @var{interface}.
  992. @end defmac
  993. @defmac STARPU_CSR_GET_ROWPTR_DEV_HANDLE ({void *}@var{interface})
  994. Return a device handle for the row pointer array of the matrix designated by
  995. @var{interface}. The offset documented below has to be used in addition to
  996. this.
  997. @end defmac
  998. @defmac STARPU_CSR_GET_OFFSET ({void *}@var{interface})
  999. Return the offset in the arrays (colind, rowptr, nzval) of the matrix
  1000. designated by @var{interface}, to be used with the device handles.
  1001. @end defmac
  1002. @defmac STARPU_CSR_GET_FIRSTENTRY ({void *}@var{interface})
  1003. Return the index at which all arrays (the column indexes, the row pointers...)
1004. of the matrix designated by @var{interface} start.
  1005. @end defmac
  1006. @defmac STARPU_CSR_GET_ELEMSIZE ({void *}@var{interface})
  1007. Return the size of the elements registered into the matrix designated by @var{interface}.
  1008. @end defmac
  1009. @node Accessing COO Data Interfaces
  1010. @subsubsection COO Data Interfaces
  1011. @defmac STARPU_COO_GET_COLUMNS ({void *}@var{interface})
  1012. Return a pointer to the column array of the matrix designated by
  1013. @var{interface}.
  1014. @end defmac
  1015. @defmac STARPU_COO_GET_COLUMNS_DEV_HANDLE ({void *}@var{interface})
  1016. Return a device handle for the column array of the matrix designated by
  1017. @var{interface}, to be used on OpenCL. The offset documented below has to be
  1018. used in addition to this.
  1019. @end defmac
1020. @defmac STARPU_COO_GET_ROWS ({void *}@var{interface})
  1021. Return a pointer to the rows array of the matrix designated by @var{interface}.
  1022. @end defmac
  1023. @defmac STARPU_COO_GET_ROWS_DEV_HANDLE ({void *}@var{interface})
  1024. Return a device handle for the row array of the matrix designated by
  1025. @var{interface}, to be used on OpenCL. The offset documented below has to be
  1026. used in addition to this.
  1027. @end defmac
1028. @defmac STARPU_COO_GET_VALUES ({void *}@var{interface})
  1029. Return a pointer to the values array of the matrix designated by
  1030. @var{interface}.
  1031. @end defmac
  1032. @defmac STARPU_COO_GET_VALUES_DEV_HANDLE ({void *}@var{interface})
  1033. Return a device handle for the value array of the matrix designated by
  1034. @var{interface}, to be used on OpenCL. The offset documented below has to be
  1035. used in addition to this.
  1036. @end defmac
1037. @defmac STARPU_COO_GET_OFFSET ({void *}@var{interface})
  1038. Return the offset in the arrays of the COO matrix designated by @var{interface}.
  1039. @end defmac
1040. @defmac STARPU_COO_GET_NX ({void *}@var{interface})
  1041. Return the number of elements on the x-axis of the matrix designated by
  1042. @var{interface}.
  1043. @end defmac
1044. @defmac STARPU_COO_GET_NY ({void *}@var{interface})
  1045. Return the number of elements on the y-axis of the matrix designated by
  1046. @var{interface}.
  1047. @end defmac
1048. @defmac STARPU_COO_GET_NVALUES ({void *}@var{interface})
  1049. Return the number of values registered in the matrix designated by
  1050. @var{interface}.
  1051. @end defmac
1052. @defmac STARPU_COO_GET_ELEMSIZE ({void *}@var{interface})
  1053. Return the size of the elements registered into the matrix designated by
  1054. @var{interface}.
  1055. @end defmac
  1056. @node Defining Interface
  1057. @subsection Defining Interface
1058. Applications can provide their own interface, as shown in
1059. @ref{Defining a New Data Interface}.
  1060. @deftypefun uintptr_t starpu_malloc_on_node (unsigned @var{dst_node}, size_t @var{size})
1061. Allocate @var{size} bytes on node @var{dst_node}. This returns 0 if the
1062. allocation failed; the allocation method should then return @code{-ENOMEM} as the allocated size.
  1063. @end deftypefun
  1064. @deftypefun void starpu_free_on_node (unsigned @var{dst_node}, uintptr_t @var{addr}, size_t @var{size})
  1065. Free @var{addr} of @var{size} bytes on node @var{dst_node}.
  1066. @end deftypefun
  1067. @deftp {Data Type} {struct starpu_data_interface_ops}
  1068. @anchor{struct starpu_data_interface_ops}
  1069. Per-interface data transfer methods.
  1070. @table @asis
  1071. @item @code{void (*register_data_handle)(starpu_data_handle_t handle, unsigned home_node, void *data_interface)}
  1072. Register an existing interface into a data handle.
  1073. @item @code{starpu_ssize_t (*allocate_data_on_node)(void *data_interface, unsigned node)}
  1074. Allocate data for the interface on a given node.
  1075. @item @code{ void (*free_data_on_node)(void *data_interface, unsigned node)}
  1076. Free data of the interface on a given node.
  1077. @item @code{ const struct starpu_data_copy_methods *copy_methods}
1078. RAM/CUDA/OpenCL synchronous and asynchronous transfer methods.
  1079. @item @code{ void * (*handle_to_pointer)(starpu_data_handle_t handle, unsigned node)}
  1080. Return the current pointer (if any) for the handle on the given node.
  1081. @item @code{ size_t (*get_size)(starpu_data_handle_t handle)}
  1082. Return an estimation of the size of data, for performance models.
  1083. @item @code{ uint32_t (*footprint)(starpu_data_handle_t handle)}
1084. Return a 32-bit footprint which characterizes the data size.
  1085. @item @code{ int (*compare)(void *data_interface_a, void *data_interface_b)}
  1086. Compare the data size of two interfaces.
  1087. @item @code{ void (*display)(starpu_data_handle_t handle, FILE *f)}
  1088. Dump the sizes of a handle to a file.
  1089. @item @code{enum starpu_data_interface_id interfaceid}
  1090. An identifier that is unique to each interface.
  1091. @item @code{size_t interface_size}
  1092. The size of the interface data descriptor.
  1093. @item @code{int is_multiformat}
  1094. todo
  1095. @item @code{struct starpu_multiformat_data_interface_ops* (*get_mf_ops)(void *data_interface)}
  1096. todo
  1097. @item @code{int (*pack_data)(starpu_data_handle_t handle, unsigned node, void **ptr, ssize_t *count)}
  1098. Pack the data handle into a contiguous buffer at the address
  1099. @code{ptr} and set the size of the newly created buffer in
  1100. @code{count}. If @var{ptr} is @code{NULL}, the function should not copy the data in the
  1101. buffer but just set @var{count} to the size of the buffer which
  1102. would have been allocated. The special value @code{-1} indicates the
  1103. size is yet unknown.
  1104. @item @code{int (*unpack_data)(starpu_data_handle_t handle, unsigned node, void *ptr, size_t count)}
1105. Unpack the data handle from the contiguous buffer at the address @code{ptr} of size @var{count}.
  1106. @end table
  1107. @end deftp
  1108. @deftp {Data Type} {struct starpu_data_copy_methods}
  1109. Defines the per-interface methods. If the @code{any_to_any} method is provided,
1110. it will be used by default if no more specific method is provided. It can still
1111. be useful to provide more specific methods when e.g. particular CUDA or
1112. OpenCL support is available.
  1113. @table @asis
  1114. @item @code{int (*@{ram,cuda,opencl@}_to_@{ram,cuda,opencl@})(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node)}
  1115. These 12 functions define how to copy data from the @var{src_interface}
  1116. interface on the @var{src_node} node to the @var{dst_interface} interface
  1117. on the @var{dst_node} node. They return 0 on success.
  1118. @item @code{int (*@{ram,cuda@}_to_@{ram,cuda@}_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cudaStream_t stream)}
  1119. These 3 functions (@code{ram_to_ram} is not among these) define how to copy
  1120. data from the @var{src_interface} interface on the @var{src_node} node to the
  1121. @var{dst_interface} interface on the @var{dst_node} node, using the given
1122. @var{stream}. Must return 0 if the transfer was actually completed
1123. synchronously, or @code{-EAGAIN} if at least some transfers are still ongoing and
1124. should be waited for by the core.
  1125. @item @code{int (*@{ram,opencl@}_to_@{ram,opencl@}_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, /* cl_event * */ void *event)}
  1126. These 3 functions (@code{ram_to_ram} is not among them) define how to copy
  1127. data from the @var{src_interface} interface on the @var{src_node} node to the
  1128. @var{dst_interface} interface on the @var{dst_node} node, by recording in
  1129. @var{event}, a pointer to a cl_event, the event of the last submitted transfer.
1130. Must return 0 if the transfer was actually completed synchronously,
1131. or @code{-EAGAIN} if at least some transfers are still ongoing and should be
1132. waited for by the core.
  1133. @item @code{int (*any_to_any)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, void *async_data)}
  1134. Define how to copy data from the @var{src_interface} interface on the
  1135. @var{src_node} node to the @var{dst_interface} interface on the @var{dst_node}
1136. node. This is meant to be implemented through the @code{starpu_interface_copy}
1137. helper, to which @var{async_data} should be passed as such, and which will be used to
1138. manage asynchronicity. This must return @code{-EAGAIN} if any of the
1139. @code{starpu_interface_copy} calls has returned @code{-EAGAIN} (i.e. at least some
1140. transfer is still ongoing), and return 0 otherwise.
  1141. @end table
  1142. @end deftp
  1143. @deftypefun int starpu_interface_copy (uintptr_t @var{src}, size_t @var{src_offset}, unsigned @var{src_node}, uintptr_t @var{dst}, size_t @var{dst_offset}, unsigned @var{dst_node}, size_t @var{size}, {void *}@var{async_data})
  1144. Copy @var{size} bytes from byte offset @var{src_offset} of @var{src} on
  1145. @var{src_node} to byte offset @var{dst_offset} of @var{dst} on @var{dst_node}.
1146. This is to be used in the @code{any_to_any} copy method, which is provided with
1147. the @var{async_data} to be passed to @code{starpu_interface_copy}. This returns
1148. @code{-EAGAIN} if the transfer is still ongoing, or 0 if the transfer is already
1149. completed.
  1150. @end deftypefun
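As a hedged sketch, an @code{any_to_any} method for a hypothetical contiguous interface (a @code{struct my_interface} with fields @code{ptr}, @code{nx} and @code{elemsize}) can simply forward to @code{starpu_interface_copy}:
@cartouche
@smallexample
struct my_interface @{ uintptr_t ptr; uint32_t nx; size_t elemsize; @};

static int my_any_to_any(void *src_interface, unsigned src_node,
                         void *dst_interface, unsigned dst_node,
                         void *async_data)
@{
    struct my_interface *src = src_interface;
    struct my_interface *dst = dst_interface;
    /* The return value is 0 or -EAGAIN, as required by the copy methods. */
    return starpu_interface_copy(src->ptr, 0, src_node,
                                 dst->ptr, 0, dst_node,
                                 src->nx * src->elemsize, async_data);
@}
@end smallexample
@end cartouche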
  1151. @deftypefun uint32_t starpu_crc32_be_n ({void *}@var{input}, size_t @var{n}, uint32_t @var{inputcrc})
  1152. Compute the CRC of a byte buffer seeded by the inputcrc "current
  1153. state". The return value should be considered as the new "current
  1154. state" for future CRC computation. This is used for computing data size
  1155. footprint.
  1156. @end deftypefun
  1157. @deftypefun uint32_t starpu_crc32_be (uint32_t @var{input}, uint32_t @var{inputcrc})
  1158. Compute the CRC of a 32bit number seeded by the inputcrc "current
  1159. state". The return value should be considered as the new "current
  1160. state" for future CRC computation. This is used for computing data size
  1161. footprint.
  1162. @end deftypefun
  1163. @deftypefun uint32_t starpu_crc32_string ({char *}@var{str}, uint32_t @var{inputcrc})
  1164. Compute the CRC of a string seeded by the inputcrc "current state".
  1165. The return value should be considered as the new "current state" for
  1166. future CRC computation. This is used for computing data size footprint.
  1167. @end deftypefun
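For instance, a @code{footprint} method for a vector-like interface can be written along the following lines, hashing the dimension which determines the data size (a sketch, not necessarily the actual implementation):
@cartouche
@smallexample
static uint32_t vector_footprint(starpu_data_handle_t handle)
@{
    /* Hash the only parameter which determines the data size. */
    return starpu_crc32_be(starpu_vector_get_nx(handle), 0);
@}
@end smallexample
@end cartouche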
  1168. @deftypefun int starpu_data_interface_get_next_id (void)
  1169. Returns the next available id for a newly created data interface
  1170. (@pxref{Defining a New Data Interface}).
  1171. @end deftypefun
  1172. @node Data Partition
  1173. @section Data Partition
  1174. @menu
  1175. * Basic API::
  1176. * Predefined filter functions::
  1177. @end menu
  1178. @node Basic API
  1179. @subsection Basic API
  1180. @deftp {Data Type} {struct starpu_data_filter}
  1181. The filter structure describes a data partitioning operation, to be given to the
  1182. @code{starpu_data_partition} function, see @ref{starpu_data_partition}
  1183. for an example. The different fields are:
  1184. @table @asis
  1185. @item @code{void (*filter_func)(void *father_interface, void* child_interface, struct starpu_data_filter *, unsigned id, unsigned nparts)}
  1186. This function fills the @code{child_interface} structure with interface
  1187. information for the @code{id}-th child of the parent @code{father_interface} (among @code{nparts}).
  1188. @item @code{unsigned nchildren}
  1189. This is the number of parts to partition the data into.
  1190. @item @code{unsigned (*get_nchildren)(struct starpu_data_filter *, starpu_data_handle_t initial_handle)}
  1191. This returns the number of children. This can be used instead of @code{nchildren} when the number of
  1192. children depends on the actual data (e.g. the number of blocks in a sparse
  1193. matrix).
  1194. @item @code{struct starpu_data_interface_ops *(*get_child_ops)(struct starpu_data_filter *, unsigned id)}
  1195. In case the resulting children use a different data interface, this function
  1196. returns which interface is used by child number @code{id}.
  1197. @item @code{unsigned filter_arg}
1198. Allows defining an additional parameter for the filter function.
  1199. @item @code{void *filter_arg_ptr}
1200. Allows defining an additional pointer parameter for the filter
  1201. function, such as the sizes of the different parts.
  1202. @end table
  1203. @end deftp
  1204. @deftypefun void starpu_data_partition (starpu_data_handle_t @var{initial_handle}, {struct starpu_data_filter *}@var{f})
  1205. @anchor{starpu_data_partition}
  1206. This requests partitioning one StarPU data @var{initial_handle} into several
  1207. subdata according to the filter @var{f}, as shown in the following example:
  1208. @cartouche
  1209. @smallexample
  1210. struct starpu_data_filter f = @{
  1211. .filter_func = starpu_matrix_filter_block,
  1212. .nchildren = nslicesx,
  1213. .get_nchildren = NULL,
  1214. .get_child_ops = NULL
  1215. @};
  1216. starpu_data_partition(A_handle, &f);
  1217. @end smallexample
  1218. @end cartouche
  1219. @end deftypefun
  1220. @deftypefun void starpu_data_unpartition (starpu_data_handle_t @var{root_data}, unsigned @var{gathering_node})
  1221. This unapplies one filter, thus unpartitioning the data. The pieces of data are
  1222. collected back into one big piece in the @var{gathering_node} (usually 0). Tasks
1223. working on the partitioned data must have already finished when calling @code{starpu_data_unpartition}.
  1224. @cartouche
  1225. @smallexample
  1226. starpu_data_unpartition(A_handle, 0);
  1227. @end smallexample
  1228. @end cartouche
  1229. @end deftypefun
  1230. @deftypefun int starpu_data_get_nb_children (starpu_data_handle_t @var{handle})
  1231. This function returns the number of children.
  1232. @end deftypefun
  1233. @deftypefun starpu_data_handle_t starpu_data_get_child (starpu_data_handle_t @var{handle}, unsigned @var{i})
1234. Return the @var{i}th child of the given @var{handle}, which must have been partitioned beforehand.
  1235. @end deftypefun
  1236. @deftypefun starpu_data_handle_t starpu_data_get_sub_data (starpu_data_handle_t @var{root_data}, unsigned @var{depth}, ... )
  1237. After partitioning a StarPU data by applying a filter,
  1238. @code{starpu_data_get_sub_data} can be used to get handles for each of
  1239. the data portions. @var{root_data} is the parent data that was
  1240. partitioned. @var{depth} is the number of filters to traverse (in
  1241. case several filters have been applied, to e.g. partition in row
  1242. blocks, and then in column blocks), and the subsequent
  1243. parameters are the indexes. The function returns a handle to the
  1244. subdata.
  1245. @cartouche
  1246. @smallexample
  1247. h = starpu_data_get_sub_data(A_handle, 1, taskx);
  1248. @end smallexample
  1249. @end cartouche
  1250. @end deftypefun
  1251. @deftypefun starpu_data_handle_t starpu_data_vget_sub_data (starpu_data_handle_t @var{root_data}, unsigned @var{depth}, va_list @var{pa})
  1252. This function is similar to @code{starpu_data_get_sub_data} but uses a
  1253. va_list for the parameter list.
  1254. @end deftypefun
  1255. @deftypefun void starpu_data_map_filters (starpu_data_handle_t @var{root_data}, unsigned @var{nfilters}, ...)
1256. Applies @var{nfilters} filters to the handle designated by @var{root_data}
1257. recursively. @var{nfilters} pointers to variables of the type
1258. @code{struct starpu_data_filter} should be given.
  1259. @end deftypefun
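For instance, a matrix can be partitioned in both dimensions by applying two filters at once; this is a sketch in which @code{nslicesx}, @code{nslicesy}, @code{i} and @code{j} are assumptions, and each tile is then retrieved with @code{starpu_data_get_sub_data}:
@cartouche
@smallexample
struct starpu_data_filter f_x = @{
    .filter_func = starpu_matrix_filter_block,
    .nchildren = nslicesx
@};
struct starpu_data_filter f_y = @{
    .filter_func = starpu_matrix_filter_vertical_block,
    .nchildren = nslicesy
@};
starpu_data_map_filters(A_handle, 2, &f_x, &f_y);
/* Tile (i, j) of the partitioned matrix: */
starpu_data_handle_t tile = starpu_data_get_sub_data(A_handle, 2, i, j);
@end smallexample
@end cartouche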
  1260. @deftypefun void starpu_data_vmap_filters (starpu_data_handle_t @var{root_data}, unsigned @var{nfilters}, va_list @var{pa})
1261. Applies @var{nfilters} filters to the handle designated by @var{root_data}
1262. recursively. It uses a va_list of pointers to variables of the type
1263. @code{struct starpu_data_filter}.
  1264. @end deftypefun
  1265. @node Predefined filter functions
  1266. @subsection Predefined filter functions
  1267. @menu
  1268. * Partitioning Vector Data::
  1269. * Partitioning Matrix Data::
  1270. * Partitioning 3D Matrix Data::
  1271. * Partitioning BCSR Data::
  1272. @end menu
  1273. This section gives a partial list of the predefined partitioning functions.
  1274. Examples on how to use them are shown in @ref{Partitioning Data}. The complete
1275. list can be found in @code{starpu_data_filters.h}.
  1276. @node Partitioning Vector Data
  1277. @subsubsection Partitioning Vector Data
  1278. @deftypefun void starpu_vector_filter_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1279. Return in @code{*@var{child_interface}} the @var{id}th element of the
  1280. vector represented by @var{father_interface} once partitioned in
  1281. @var{nparts} chunks of equal size.
  1282. @end deftypefun
  1283. @deftypefun void starpu_vector_filter_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1284. Return in @code{*@var{child_interface}} the @var{id}th element of the
  1285. vector represented by @var{father_interface} once partitioned in
1286. @var{nparts} chunks of equal size with a shadow border @code{filter_arg_ptr}, thus getting a vector of size (n-2*shadow)/nparts+2*shadow.
1287. The @code{filter_arg_ptr} field must be the shadow size cast into @code{void*}.
1288. IMPORTANT: This can only be used for read-only access, as no coherency is
1289. enforced for the shadowed parts.
1290. A usage example is available in examples/filters/shadow.c.
  1291. @end deftypefun
  1292. @deftypefun void starpu_vector_filter_list (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1293. Return in @code{*@var{child_interface}} the @var{id}th element of the
  1294. vector represented by @var{father_interface} once partitioned into
  1295. @var{nparts} chunks according to the @code{filter_arg_ptr} field of
  1296. @code{*@var{f}}.
  1297. The @code{filter_arg_ptr} field must point to an array of @var{nparts}
  1298. @code{uint32_t} elements, each of which specifies the number of elements
  1299. in each chunk of the partition.
  1300. @end deftypefun
  1301. @deftypefun void starpu_vector_filter_divide_in_2 (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1302. Return in @code{*@var{child_interface}} the @var{id}th element of the
  1303. vector represented by @var{father_interface} once partitioned in two
  1304. chunks of equal size, ignoring @var{nparts}. Thus, @var{id} must be
  1305. @code{0} or @code{1}.
  1306. @end deftypefun
  1307. @node Partitioning Matrix Data
  1308. @subsubsection Partitioning Matrix Data
  1309. @deftypefun void starpu_matrix_filter_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
1310. This partitions a dense matrix along the x dimension, thus getting (x/nparts,y)
  1311. matrices. If nparts does not divide x, the last submatrix contains the
  1312. remainder.
  1313. @end deftypefun
  1314. @deftypefun void starpu_matrix_filter_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
1315. This partitions a dense matrix along the x dimension, with a shadow border
  1316. @code{filter_arg_ptr}, thus getting ((x-2*shadow)/nparts+2*shadow,y)
  1317. matrices. If nparts does not divide x-2*shadow, the last submatrix contains the
  1318. remainder.
  1319. IMPORTANT: This can only be used for read-only access, as no coherency is
  1320. enforced for the shadowed parts.
1321. A usage example is available in examples/filters/shadow2d.c.
  1322. @end deftypefun
  1323. @deftypefun void starpu_matrix_filter_vertical_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
1324. This partitions a dense matrix along the y dimension, thus getting (x,y/nparts)
  1325. matrices. If nparts does not divide y, the last submatrix contains the
  1326. remainder.
  1327. @end deftypefun
  1328. @deftypefun void starpu_matrix_filter_vertical_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
1329. This partitions a dense matrix along the y dimension, with a shadow border
  1330. @code{filter_arg_ptr}, thus getting (x,(y-2*shadow)/nparts+2*shadow)
  1331. matrices. If nparts does not divide y-2*shadow, the last submatrix contains the
  1332. remainder.
  1333. IMPORTANT: This can only be used for read-only access, as no coherency is
  1334. enforced for the shadowed parts.
1335. A usage example is available in examples/filters/shadow2d.c.
  1336. @end deftypefun
  1337. @node Partitioning 3D Matrix Data
  1338. @subsubsection Partitioning 3D Matrix Data
1339. A usage example is available in examples/filters/shadow3d.c.
  1340. @deftypefun void starpu_block_filter_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1341. This partitions a 3D matrix along the X dimension, thus getting (x/nparts,y,z)
  1342. 3D matrices. If nparts does not divide x, the last submatrix contains the
  1343. remainder.
  1344. @end deftypefun
  1345. @deftypefun void starpu_block_filter_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1346. This partitions a 3D matrix along the X dimension, with a shadow border
  1347. @code{filter_arg_ptr}, thus getting ((x-2*shadow)/nparts+2*shadow,y,z) 3D
  1348. matrices. If nparts does not divide x, the last submatrix contains the
  1349. remainder.
  1350. IMPORTANT: This can only be used for read-only access, as no coherency is
  1351. enforced for the shadowed parts.
  1352. @end deftypefun
  1353. @deftypefun void starpu_block_filter_vertical_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1354. This partitions a 3D matrix along the Y dimension, thus getting (x,y/nparts,z)
  1355. 3D matrices. If nparts does not divide y, the last submatrix contains the
  1356. remainder.
  1357. @end deftypefun
  1358. @deftypefun void starpu_block_filter_vertical_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1359. This partitions a 3D matrix along the Y dimension, with a shadow border
  1360. @code{filter_arg_ptr}, thus getting (x,(y-2*shadow)/nparts+2*shadow,z) 3D
  1361. matrices. If nparts does not divide y, the last submatrix contains the
  1362. remainder.
  1363. IMPORTANT: This can only be used for read-only access, as no coherency is
  1364. enforced for the shadowed parts.
  1365. @end deftypefun
  1366. @deftypefun void starpu_block_filter_depth_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1367. This partitions a 3D matrix along the Z dimension, thus getting (x,y,z/nparts)
  1368. 3D matrices. If nparts does not divide z, the last submatrix contains the
  1369. remainder.
  1370. @end deftypefun
  1371. @deftypefun void starpu_block_filter_depth_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1372. This partitions a 3D matrix along the Z dimension, with a shadow border
  1373. @code{filter_arg_ptr}, thus getting (x,y,(z-2*shadow)/nparts+2*shadow)
  1374. 3D matrices. If nparts does not divide z, the last submatrix contains the
  1375. remainder.
  1376. IMPORTANT: This can only be used for read-only access, as no coherency is
  1377. enforced for the shadowed parts.
  1378. @end deftypefun
  1379. @node Partitioning BCSR Data
  1380. @subsubsection Partitioning BCSR Data
  1381. @deftypefun void starpu_bcsr_filter_canonical_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1382. This partitions a block-sparse matrix into dense matrices.
  1383. @end deftypefun
  1384. @deftypefun void starpu_csr_filter_vertical_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
1385. This partitions a CSR matrix into vertical CSR sub-matrices.
  1386. @end deftypefun
  1387. @node Multiformat Data Interface
  1388. @section Multiformat Data Interface
  1389. @deftp {Data Type} {struct starpu_multiformat_data_interface_ops}
  1390. The different fields are:
  1391. @table @asis
  1392. @item @code{size_t cpu_elemsize}
  1393. the size of each element on CPUs,
  1394. @item @code{size_t opencl_elemsize}
  1395. the size of each element on OpenCL devices,
  1396. @item @code{struct starpu_codelet *cpu_to_opencl_cl}
  1397. pointer to a codelet which converts from CPU to OpenCL
  1398. @item @code{struct starpu_codelet *opencl_to_cpu_cl}
  1399. pointer to a codelet which converts from OpenCL to CPU
  1400. @item @code{size_t cuda_elemsize}
  1401. the size of each element on CUDA devices,
  1402. @item @code{struct starpu_codelet *cpu_to_cuda_cl}
  1403. pointer to a codelet which converts from CPU to CUDA
  1404. @item @code{struct starpu_codelet *cuda_to_cpu_cl}
  1405. pointer to a codelet which converts from CUDA to CPU
  1406. @end table
  1407. @end deftp
  1408. @deftypefun void starpu_multiformat_data_register (starpu_data_handle_t *@var{handle}, unsigned @var{home_node}, void *@var{ptr}, uint32_t @var{nobjects}, struct starpu_multiformat_data_interface_ops *@var{format_ops})
  1409. Register a piece of data that can be represented in different ways, depending upon
  1410. the processing unit that manipulates it. It allows the programmer, for instance, to
  1411. use an array of structures when working on a CPU, and a structure of arrays when
  1412. working on a GPU.
  1413. @var{nobjects} is the number of elements in the data. @var{format_ops} describes
  1414. the format.
  1415. @end deftypefun
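As a hedged sketch, the following registers an array of @code{NOBJECTS} elements seen as an array of structures on CPUs and converted for CUDA devices; the @code{points} array, the element layout and the conversion codelets @code{cpu_to_cuda_cl} and @code{cuda_to_cpu_cl} are assumed to be defined elsewhere:
@cartouche
@smallexample
struct starpu_multiformat_data_interface_ops format_ops = @{
    .cpu_elemsize  = 2*sizeof(float), /* e.g. a point (x, y) on CPUs */
    .cuda_elemsize = 2*sizeof(float),
    .cpu_to_cuda_cl = &cpu_to_cuda_cl,
    .cuda_to_cpu_cl = &cuda_to_cpu_cl
@};
starpu_data_handle_t handle;
starpu_multiformat_data_register(&handle, 0, points, NOBJECTS, &format_ops);
@end smallexample
@end cartouche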
  1416. @defmac STARPU_MULTIFORMAT_GET_CPU_PTR ({void *}@var{interface})
1417. Return the local pointer to the data with CPU format.
  1418. @end defmac
  1419. @defmac STARPU_MULTIFORMAT_GET_CUDA_PTR ({void *}@var{interface})
1420. Return the local pointer to the data with CUDA format.
  1421. @end defmac
  1422. @defmac STARPU_MULTIFORMAT_GET_OPENCL_PTR ({void *}@var{interface})
1423. Return the local pointer to the data with OpenCL format.
  1424. @end defmac
  1425. @defmac STARPU_MULTIFORMAT_GET_NX ({void *}@var{interface})
1426. Return the number of elements in the data.
  1427. @end defmac
  1428. @node Codelets and Tasks
  1429. @section Codelets and Tasks
  1430. This section describes the interface to manipulate codelets and tasks.
  1431. @deftp {Data Type} {enum starpu_codelet_type}
  1432. Describes the type of parallel task. The different values are:
  1433. @table @asis
  1434. @item @code{STARPU_SEQ} (default) for classical sequential tasks.
1435. @item @code{STARPU_SPMD} for a parallel task whose threads are handled by
1436. StarPU; the code has to use @code{starpu_combined_worker_get_size} and
1437. @code{starpu_combined_worker_get_rank} to distribute the work.
  1438. @item @code{STARPU_FORKJOIN} for a parallel task whose threads are started by
  1439. the codelet function, which has to use @code{starpu_combined_worker_get_size} to
  1440. determine how many threads should be started.
  1441. @end table
  1442. See @ref{Parallel Tasks} for details.
  1443. @end deftp
  1444. @defmac STARPU_CPU
  1445. This macro is used when setting the field @code{where} of a @code{struct
  1446. starpu_codelet} to specify the codelet may be executed on a CPU
  1447. processing unit.
  1448. @end defmac
  1449. @defmac STARPU_CUDA
  1450. This macro is used when setting the field @code{where} of a @code{struct
  1451. starpu_codelet} to specify the codelet may be executed on a CUDA
  1452. processing unit.
  1453. @end defmac
  1454. @defmac STARPU_OPENCL
  1455. This macro is used when setting the field @code{where} of a @code{struct
1456. starpu_codelet} to specify the codelet may be executed on an OpenCL
  1457. processing unit.
  1458. @end defmac
  1459. @defmac STARPU_MULTIPLE_CPU_IMPLEMENTATIONS
  1460. Setting the field @code{cpu_func} of a @code{struct starpu_codelet}
  1461. with this macro indicates the codelet will have several
  1462. implementations. The use of this macro is deprecated. One should
  1463. always only define the field @code{cpu_funcs}.
  1464. @end defmac
  1465. @defmac STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS
  1466. Setting the field @code{cuda_func} of a @code{struct starpu_codelet}
  1467. with this macro indicates the codelet will have several
  1468. implementations. The use of this macro is deprecated. One should
  1469. always only define the field @code{cuda_funcs}.
  1470. @end defmac
  1471. @defmac STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS
  1472. Setting the field @code{opencl_func} of a @code{struct starpu_codelet}
  1473. with this macro indicates the codelet will have several
  1474. implementations. The use of this macro is deprecated. One should
  1475. always only define the field @code{opencl_funcs}.
  1476. @end defmac
  1477. @deftp {Data Type} {struct starpu_codelet}
  1478. The codelet structure describes a kernel that is possibly implemented on various
  1479. targets. For compatibility, make sure to initialize the whole structure to zero,
  1480. either by using explicit memset, or by letting the compiler implicitly do it in
1481. e.g. the static storage case.
  1482. @table @asis
  1483. @item @code{uint32_t where} (optional)
  1484. Indicates which types of processing units are able to execute the
  1485. codelet. The different values
  1486. @code{STARPU_CPU}, @code{STARPU_CUDA},
  1487. @code{STARPU_OPENCL} can be combined to specify
  1488. on which types of processing units the codelet can be executed.
  1489. @code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
  1490. implemented for both CPU cores and CUDA devices while @code{STARPU_OPENCL}
  1491. indicates that it is only available on OpenCL devices. If the field is
  1492. unset, its value will be automatically set based on the availability
  1493. of the @code{XXX_funcs} fields defined below.
  1494. @item @code{int (*can_execute)(unsigned workerid, struct starpu_task *task, unsigned nimpl)} (optional)
  1495. Defines a function which should return 1 if the worker designated by
  1496. @var{workerid} can execute the @var{nimpl}th implementation of the
  1497. given @var{task}, 0 otherwise.
  1498. @item @code{enum starpu_codelet_type type} (optional)
  1499. The default is @code{STARPU_SEQ}, i.e. usual sequential implementation. Other
1500. values (@code{STARPU_SPMD} or @code{STARPU_FORKJOIN}) declare that a parallel
  1501. implementation is also available. See @ref{Parallel Tasks} for details.
  1502. @item @code{int max_parallelism} (optional)
  1503. If a parallel implementation is available, this denotes the maximum combined
  1504. worker size that StarPU will use to execute parallel tasks for this codelet.
  1505. @item @code{starpu_cpu_func_t cpu_func} (optional)
  1506. This field has been made deprecated. One should use instead the
  1507. @code{cpu_funcs} field.
  1508. @item @code{starpu_cpu_func_t cpu_funcs[STARPU_MAXIMPLEMENTATIONS]} (optional)
  1509. Is an array of function pointers to the CPU implementations of the codelet.
  1510. It must be terminated by a NULL value.
1511. Their prototype must be: @code{void cpu_func(void *buffers[], void *cl_arg)}. The first
  1512. argument being the array of data managed by the data management library, and
  1513. the second argument is a pointer to the argument passed from the @code{cl_arg}
  1514. field of the @code{starpu_task} structure.
  1515. If the @code{where} field is set, then the @code{cpu_funcs} field is
  1516. ignored if @code{STARPU_CPU} does not appear in the @code{where}
1517. field; it must be non-null otherwise.
  1518. @item @code{starpu_cuda_func_t cuda_func} (optional)
  1519. This field has been made deprecated. One should use instead the
  1520. @code{cuda_funcs} field.
  1521. @item @code{starpu_cuda_func_t cuda_funcs[STARPU_MAXIMPLEMENTATIONS]} (optional)
  1522. Is an array of function pointers to the CUDA implementations of the codelet.
  1523. It must be terminated by a NULL value.
  1524. @emph{The functions must be host-functions written in the CUDA runtime
  1525. API}. Their prototype must
  1526. be: @code{void cuda_func(void *buffers[], void *cl_arg);}.
  1527. If the @code{where} field is set, then the @code{cuda_funcs}
  1528. field is ignored if @code{STARPU_CUDA} does not appear in the @code{where}
1529. field; it must be non-null otherwise.
  1530. @item @code{starpu_opencl_func_t opencl_func} (optional)
  1531. This field has been made deprecated. One should use instead the
  1532. @code{opencl_funcs} field.
  1533. @item @code{starpu_opencl_func_t opencl_funcs[STARPU_MAXIMPLEMENTATIONS]} (optional)
  1534. Is an array of function pointers to the OpenCL implementations of the codelet.
  1535. It must be terminated by a NULL value.
1536. Their prototype must be:
  1537. @code{void opencl_func(void *buffers[], void *cl_arg);}.
  1538. If the @code{where} field is set, then the @code{opencl_funcs} field
  1539. is ignored if @code{STARPU_OPENCL} does not appear in the @code{where}
1540. field; it must be non-null otherwise.
  1541. @item @code{unsigned nbuffers}
  1542. Specifies the number of arguments taken by the codelet. These arguments are
  1543. managed by the DSM and are accessed from the @code{void *buffers[]}
  1544. array. The constant argument passed with the @code{cl_arg} field of the
  1545. @code{starpu_task} structure is not counted in this number. This value should
  1546. not be above @code{STARPU_NMAXBUFS}.
  1547. @item @code{enum starpu_access_mode modes[STARPU_NMAXBUFS]}
  1548. Is an array of @code{enum starpu_access_mode}. It describes the
1549. required access modes to the data needed by the codelet (e.g.
  1550. @code{STARPU_RW}). The number of entries in this array must be
  1551. specified in the @code{nbuffers} field (defined above), and should not
  1552. exceed @code{STARPU_NMAXBUFS}.
1553. If insufficient, this value can be set with the @code{--enable-maxbuffers}
  1554. option when configuring StarPU.
  1555. @item @code{struct starpu_perfmodel *model} (optional)
  1556. This is a pointer to the task duration performance model associated to this
  1557. codelet. This optional field is ignored when set to @code{NULL} or
  1558. when its @code{symbol} field is not set.
  1559. @item @code{struct starpu_perfmodel *power_model} (optional)
  1560. This is a pointer to the task power consumption performance model associated
  1561. to this codelet. This optional field is ignored when set to
  1562. @code{NULL} or when its @code{symbol} field is not set.
  1563. In the case of parallel codelets, this has to account for all processing units
  1564. involved in the parallel execution.
  1565. @item @code{unsigned long per_worker_stats[STARPU_NMAXWORKERS]} (optional)
  1566. Statistics collected at runtime: this is filled by StarPU and should not be
  1567. accessed directly, but for example by calling the
  1568. @code{starpu_display_codelet_stats} function (See
  1569. @ref{starpu_display_codelet_stats} for details).
  1570. @item @code{const char *name} (optional)
  1571. Define the name of the codelet. This can be useful for debugging purposes.
  1572. @end table
  1573. @end deftp
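For instance, a codelet with one CPU implementation (such as the @code{scal_cpu_func} sketch shown earlier) and a hypothetical CUDA implementation can be statically initialized as follows:
@cartouche
@smallexample
struct starpu_codelet scal_cl = @{
    .cpu_funcs = @{ scal_cpu_func, NULL @},
    .cuda_funcs = @{ scal_cuda_func, NULL @},
    .nbuffers = 1,
    .modes = @{ STARPU_RW @},
    .name = "vector_scal"
@};
@end smallexample
@end cartouche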
  1574. @deftypefun void starpu_codelet_init ({struct starpu_codelet} *@var{cl})
  1575. Initialize @var{cl} with default values. Codelets should preferably be
  1576. initialized statically as shown in @ref{Defining a Codelet}. However
1577. such an initialisation is not always possible, e.g. when using C++.
  1578. @end deftypefun
  1579. @deftp {Data Type} {enum starpu_task_status}
  1580. State of a task, can be either of
  1581. @table @asis
  1582. @item @code{STARPU_TASK_INVALID} The task has just been initialized.
1583. @item @code{STARPU_TASK_BLOCKED} The task has just been submitted, and its dependencies have not been checked yet.
  1584. @item @code{STARPU_TASK_READY} The task is ready for execution.
  1585. @item @code{STARPU_TASK_RUNNING} The task is running on some worker.
  1586. @item @code{STARPU_TASK_FINISHED} The task is finished executing.
  1587. @item @code{STARPU_TASK_BLOCKED_ON_TAG} The task is waiting for a tag.
  1588. @item @code{STARPU_TASK_BLOCKED_ON_TASK} The task is waiting for a task.
  1589. @item @code{STARPU_TASK_BLOCKED_ON_DATA} The task is waiting for some data.
  1590. @end table
  1591. @end deftp
  1592. @deftp {Data Type} {struct starpu_buffer_descr}
  1593. This type is used to describe a data handle along with an
  1594. access mode.
  1595. @table @asis
  1596. @item @code{starpu_data_handle_t handle} describes a data,
  1597. @item @code{enum starpu_access_mode mode} describes its access mode
  1598. @end table
  1599. @end deftp
  1600. @deftp {Data Type} {struct starpu_task}
  1601. The @code{starpu_task} structure describes a task that can be offloaded on the various
  1602. processing units managed by StarPU. It instantiates a codelet. It can either be
  1603. allocated dynamically with the @code{starpu_task_create} method, or declared
  1604. statically. In the latter case, the programmer has to zero the
  1605. @code{starpu_task} structure and to fill the different fields properly. The
  1606. indicated default values correspond to the configuration of a task allocated
  1607. with @code{starpu_task_create}.
  1608. @table @asis
  1609. @item @code{struct starpu_codelet *cl}
  1610. Is a pointer to the corresponding @code{struct starpu_codelet} data structure. This
  1611. describes where the kernel should be executed, and supplies the appropriate
  1612. implementations. When set to @code{NULL}, no code is executed during the task;
  1613. such empty tasks can be useful for synchronization purposes.
  1614. @item @code{struct starpu_buffer_descr buffers[STARPU_NMAXBUFS]}
  1615. This field is deprecated. One should instead use the
  1616. @code{handles} field to specify the handles to the data accessed by
  1617. the task. The access modes are now defined in the @code{modes} field of
  1618. the @code{struct starpu_codelet cl} field defined above.
  1619. @item @code{starpu_data_handle_t handles[STARPU_NMAXBUFS]}
  1620. Is an array of @code{starpu_data_handle_t}. It specifies the handles
  1621. to the different pieces of data accessed by the task. The number
  1622. of entries in this array must be specified in the @code{nbuffers} field of the
  1623. @code{struct starpu_codelet} structure, and should not exceed
  1624. @code{STARPU_NMAXBUFS}.
  1625. If insufficient, this value can be set with the @code{--enable-maxbuffers}
  1626. option when configuring StarPU.
  1627. @item @code{void *interfaces[STARPU_NMAXBUFS]}
  1628. The actual data pointers to the memory node where execution will happen, managed
  1629. by the DSM.
  1630. @item @code{void *cl_arg} (optional; default: @code{NULL})
  1631. This pointer is passed to the codelet through the second argument
  1632. of the codelet implementation (e.g. @code{cpu_func} or @code{cuda_func}).
  1633. @item @code{size_t cl_arg_size} (optional)
  1634. For some specific drivers, the @code{cl_arg} pointer cannot be directly
  1635. given to the driver function. A buffer of size @code{cl_arg_size}
  1636. needs to be allocated on the driver. This buffer is then filled with
  1637. the @code{cl_arg_size} bytes starting at address @code{cl_arg}. In
  1638. this case, the argument given to the codelet is therefore not the
  1639. @code{cl_arg} pointer, but the address of the buffer in local store
  1640. (LS) instead.
  1641. This field is ignored for CPU, CUDA and OpenCL codelets, where the
  1642. @code{cl_arg} pointer is given as such.
  1643. @item @code{void (*callback_func)(void *)} (optional) (default: @code{NULL})
  1644. This is a function pointer of prototype @code{void (*f)(void *)} which
  1645. specifies a possible callback. If this pointer is non-null, the callback
  1646. function is executed @emph{on the host} after the execution of the task. Tasks
  1647. which depend on it might already be executing. The callback is passed the
  1648. value contained in the @code{callback_arg} field. No callback is executed if the
  1649. field is set to @code{NULL}.
  1650. @item @code{void *callback_arg} (optional) (default: @code{NULL})
  1651. This is the pointer passed to the callback function. This field is ignored if
  1652. the @code{callback_func} is set to @code{NULL}.
  1653. @item @code{unsigned use_tag} (optional) (default: @code{0})
  1654. If set, this flag indicates that the task should be associated with the tag
  1655. contained in the @code{tag_id} field. Tags allow the application to synchronize
  1656. with the task and to express task dependencies easily.
  1657. @item @code{starpu_tag_t tag_id}
  1658. This field contains the tag associated to the task if the @code{use_tag} field
  1659. was set; it is ignored otherwise.
  1660. @item @code{unsigned sequential_consistency}
  1661. If this flag is set (which is the default), sequential consistency is enforced
  1662. for the data parameters of this task for which sequential consistency is
  1663. enabled. Clearing this flag makes it possible to disable sequential consistency for this
  1664. task, even for data which have it enabled.
  1665. @item @code{unsigned synchronous}
  1666. If this flag is set, the @code{starpu_task_submit} function is blocking and
  1667. returns only when the task has been executed (or if no worker is able to
  1668. process the task). Otherwise, @code{starpu_task_submit} returns immediately.
  1669. @item @code{int priority} (optional) (default: @code{STARPU_DEFAULT_PRIO})
  1670. This field indicates a level of priority for the task. This is an integer value
  1671. that must be set between the return values of the
  1672. @code{starpu_sched_get_min_priority} function for the least important tasks,
  1673. and that of the @code{starpu_sched_get_max_priority} for the most important
  1674. tasks (included). The @code{STARPU_MIN_PRIO} and @code{STARPU_MAX_PRIO} macros
  1675. are provided for convenience and respectively return the values of
  1676. @code{starpu_sched_get_min_priority} and @code{starpu_sched_get_max_priority}.
  1677. Default priority is @code{STARPU_DEFAULT_PRIO}, which is always defined as 0 in
  1678. order to allow static task initialization. Scheduling strategies that take
  1679. priorities into account can use this parameter to take better scheduling
  1680. decisions, but the scheduling policy may also ignore it.
  1681. @item @code{unsigned execute_on_a_specific_worker} (default: @code{0})
  1682. If this flag is set, StarPU will bypass the scheduler and directly assign this
  1683. task to the worker specified by the @code{workerid} field.
  1684. @item @code{unsigned workerid} (optional)
  1685. If the @code{execute_on_a_specific_worker} field is set, this field indicates
  1686. the identifier of the worker that should process this task (as
  1687. returned by @code{starpu_worker_get_id}). This field is ignored if the
  1688. @code{execute_on_a_specific_worker} field is set to 0.
  1689. @item @code{starpu_task_bundle_t bundle} (optional)
  1690. The bundle that includes this task. If no bundle is used, this should be NULL.
  1691. @item @code{int detach} (optional) (default: @code{1})
  1692. If this flag is set, it is not possible to synchronize with the task
  1693. by the means of @code{starpu_task_wait} later on. Internal data structures
  1694. are only guaranteed to be freed once @code{starpu_task_wait} is called if the
  1695. flag is not set.
  1696. @item @code{int destroy} (optional) (default: @code{0} for starpu_task_init, @code{1} for starpu_task_create)
  1697. If this flag is set, the task structure will automatically be freed, either
  1698. after the execution of the callback if the task is detached, or during
  1699. @code{starpu_task_wait} otherwise. If this flag is not set, dynamically
  1700. allocated data structures will not be freed until @code{starpu_task_destroy} is
  1701. called explicitly. Setting this flag for a statically allocated task structure
  1702. will result in undefined behaviour. The flag is set to 1 when the task is
  1703. created by calling @code{starpu_task_create()}. Note that
  1704. @code{starpu_task_wait_for_all} will not free any task.
  1705. @item @code{int regenerate} (optional)
  1706. If this flag is set, the task will be re-submitted to StarPU once it has been
  1707. executed. This flag must not be set if the destroy flag is set too.
  1708. @item @code{enum starpu_task_status status} (optional)
  1709. Current state of the task.
  1710. @item @code{struct starpu_task_profiling_info *profiling_info} (optional)
  1711. Profiling information for the task.
  1712. @item @code{double predicted} (output field)
  1713. Predicted duration of the task. This field is only set if the scheduling
  1714. strategy uses performance models.
  1715. @item @code{double predicted_transfer} (optional)
  1716. Predicted data transfer duration for the task in microseconds. This field is
  1717. only valid if the scheduling strategy uses performance models.
  1718. @item @code{struct starpu_task *prev}
  1719. A pointer to the previous task. This should only be used by StarPU.
  1720. @item @code{struct starpu_task *next}
  1721. A pointer to the next task. This should only be used by StarPU.
  1722. @item @code{unsigned int mf_skip}
  1723. This is only used for tasks that use multiformat handle. This should only be
  1724. used by StarPU.
  1725. @item @code{double flops}
  1726. This can be set to the number of floating point operations that the task
  1727. will have to achieve. This is useful for easily getting GFlops curves from
  1728. @code{starpu_perfmodel_plot}, and for the hypervisor load balancing.
  1729. @item @code{void *starpu_private}
  1730. This is private to StarPU; do not modify it. If the task is allocated by hand
  1731. (without starpu_task_create), this field should be set to NULL.
  1732. @item @code{int magic}
  1733. This field is set when initializing a task. It prevents a task from being
  1734. submitted if it has not been properly initialized.
  1735. @end table
  1736. @end deftp
  1737. @deftypefun void starpu_task_init ({struct starpu_task} *@var{task})
  1738. Initialize @var{task} with default values. This function is implicitly
  1739. called by @code{starpu_task_create}. By default, tasks initialized with
  1740. @code{starpu_task_init} must be deinitialized explicitly with
  1741. @code{starpu_task_clean}. Tasks can also be initialized statically,
  1742. using @code{STARPU_TASK_INITIALIZER} defined below.
  1743. @end deftypefun
  1744. @defmac STARPU_TASK_INITIALIZER
  1745. It is possible to initialize statically allocated tasks with this
  1746. value. This is equivalent to initializing a starpu_task structure with
  1747. the @code{starpu_task_init} function defined above.
  1748. @end defmac
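For instance, a statically allocated task can be initialized, submitted and then
cleaned as sketched below; the codelet @code{scal_cl} and the handle
@code{vector_handle} are assumed to be defined as in the previous examples.
@cartouche
@smallexample
struct starpu_task task = STARPU_TASK_INITIALIZER;
float factor = 3.14f;

task.cl = &scal_cl;
task.handles[0] = vector_handle;
task.cl_arg = &factor;
task.cl_arg_size = sizeof(factor);
task.detach = 0;            /* so that starpu_task_wait() can be used */

starpu_task_submit(&task);
starpu_task_wait(&task);    /* blocks until the task has been executed */

starpu_task_clean(&task);   /* release internal structures, the task can be reused */
@end smallexample
@end cartouche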
  1749. @deftypefun {struct starpu_task *} starpu_task_create (void)
  1750. Allocate a task structure and initialize it with default values. Tasks
  1751. allocated dynamically with @code{starpu_task_create} are automatically freed when the
  1752. task is terminated. This means that the task pointer can not be used any more
  1753. once the task is submitted, since it can be executed at any time (unless
  1754. dependencies make it wait) and thus freed at any time.
  1755. If the destroy flag is explicitly unset, the resources used
  1756. by the task have to be freed by calling
  1757. @code{starpu_task_destroy}.
  1758. @end deftypefun
  1759. @deftypefun void starpu_task_clean ({struct starpu_task} *@var{task})
  1760. Release all the structures automatically allocated to execute @var{task};
  1761. the task structure itself is not freed, and values set by the user remain unchanged.
  1762. It is thus useful for statically allocated tasks, for instance.
  1763. It is also useful when the user wants to execute the same operation several
  1764. times with as little overhead as possible.
  1765. It is called automatically by @code{starpu_task_destroy}.
  1766. It has to be called only after explicitly waiting for the task or after
  1767. @code{starpu_shutdown} (waiting for the callback is not enough, since StarPU
  1768. still manipulates the task after calling the callback).
  1769. @end deftypefun
  1770. @deftypefun void starpu_task_destroy ({struct starpu_task} *@var{task})
  1771. Free the resource allocated during @code{starpu_task_create} and
  1772. associated with @var{task}. This function is already called automatically
  1773. after the execution of a task when the @code{destroy} flag of the
  1774. @code{starpu_task} structure is set, which is the default for tasks created by
  1775. @code{starpu_task_create}. Calling this function on a statically allocated task
  1776. results in an undefined behaviour.
  1777. @end deftypefun
  1778. @deftypefun int starpu_task_wait ({struct starpu_task} *@var{task})
  1779. This function blocks until @var{task} has been executed. It is not possible to
  1780. synchronize with a task more than once. It is not possible to wait for
  1781. synchronous or detached tasks.
  1782. Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
  1783. indicates that the specified task was either synchronous or detached.
  1784. @end deftypefun
  1785. @deftypefun int starpu_task_submit ({struct starpu_task} *@var{task})
  1786. This function submits @var{task} to StarPU. Calling this function does
  1787. not mean that the task will be executed immediately as there can be data or task
  1788. (tag) dependencies that are not fulfilled yet: StarPU will take care of
  1789. scheduling this task with respect to such dependencies.
  1790. This function returns immediately if the @code{synchronous} field of the
  1791. @code{starpu_task} structure was set to 0, and blocks until the termination of
  1792. the task otherwise. It is also possible to synchronize the application with
  1793. asynchronous tasks by the means of tags, using the @code{starpu_tag_wait}
  1794. function for instance.
  1795. In case of success, this function returns 0, a return value of @code{-ENODEV}
  1796. means that there is no worker able to process this task (e.g. there is no GPU
  1797. available and this task is only implemented for CUDA devices).
  1798. starpu_task_submit() can be called from anywhere, including codelet
  1799. functions and callbacks, provided that the @code{synchronous} field of the
  1800. @code{starpu_task} structure is left to 0.
  1801. @end deftypefun
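A dynamically allocated task follows the same pattern, except that the structure
returned by @code{starpu_task_create} is freed automatically once the task has been
executed. The callback, codelet and handle names below are only assumptions of this
sketch.
@cartouche
@smallexample
void my_callback(void *arg)
@{
  (void) arg;  /* executed on the host once the task has completed */
@}

struct starpu_task *task = starpu_task_create();
task->cl = &scal_cl;
task->handles[0] = vector_handle;
task->cl_arg = &factor;
task->cl_arg_size = sizeof(factor);
task->callback_func = my_callback;

if (starpu_task_submit(task) == -ENODEV)
  fprintf(stderr, "no worker can execute this task\n");

/* Do not dereference task past this point: the destroy flag set by
   starpu_task_create makes StarPU free the structure after execution. */
starpu_task_wait_for_all();
@end smallexample
@end cartouche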
  1802. @deftypefun int starpu_task_wait_for_all (void)
  1803. This function blocks until all the tasks that were submitted are terminated. It
  1804. does not destroy these tasks.
  1805. @end deftypefun
  1808. @deftypefun int starpu_task_nsubmitted (void)
  1809. Return the number of submitted tasks which have not completed yet.
  1810. @end deftypefun
  1811. @deftypefun int starpu_task_nready (void)
  1812. Return the number of submitted tasks which are ready for execution or are already
  1813. executing. It thus does not include tasks waiting for dependencies.
  1814. @end deftypefun
  1815. @deftypefun {struct starpu_task *} starpu_task_get_current (void)
  1816. This function returns the task currently executed by the worker, or
  1817. NULL if it is called either from a thread that is not a task or simply
  1818. because there is no task being executed at the moment.
  1819. @end deftypefun
  1820. @deftypefun void starpu_display_codelet_stats ({struct starpu_codelet} *@var{cl})
  1821. @anchor{starpu_display_codelet_stats}
  1822. Output on @code{stderr} some statistics on the codelet @var{cl}.
  1823. @end deftypefun
  1824. @deftypefun int starpu_task_wait_for_no_ready (void)
  1825. This function waits until there is no more ready task.
  1826. @end deftypefun
  1827. @c Callbacks: what can we put in callbacks ?
  1828. @node Insert Task
  1829. @section Insert Task
  1830. @deftypefun int starpu_insert_task (struct starpu_codelet *@var{cl}, ...)
  1831. Create and submit a task corresponding to @var{cl} with the following
  1832. arguments. The argument list must be zero-terminated.
  1833. The arguments following the codelet can be of the following types:
  1834. @itemize
  1835. @item
  1836. @code{STARPU_R}, @code{STARPU_W}, @code{STARPU_RW}, @code{STARPU_SCRATCH}, @code{STARPU_REDUX} an access mode followed by a data handle;
  1837. @item
  1838. @code{STARPU_DATA_ARRAY} followed by an array of data handles and its number of elements;
  1839. @item
  1840. the specific values @code{STARPU_VALUE}, @code{STARPU_CALLBACK},
  1841. @code{STARPU_CALLBACK_ARG}, @code{STARPU_CALLBACK_WITH_ARG},
  1842. @code{STARPU_PRIORITY}, @code{STARPU_TAG}, @code{STARPU_FLOPS}, followed by the appropriated objects
  1843. as defined below.
  1844. @end itemize
  1845. When using @code{STARPU_DATA_ARRAY}, the access mode of the data
  1846. handles is not defined.
  1847. Parameters to be passed to the codelet implementation are defined
  1848. through the type @code{STARPU_VALUE}. The function
  1849. @code{starpu_codelet_unpack_args} must be called within the codelet
  1850. implementation to retrieve them.
  1851. @end deftypefun
  1852. @defmac STARPU_VALUE
  1853. this macro is used when calling @code{starpu_insert_task}, and must be
  1854. followed by a pointer to a constant value and the size of the constant
  1855. @end defmac
  1856. @defmac STARPU_CALLBACK
  1857. this macro is used when calling @code{starpu_insert_task}, and must be
  1858. followed by a pointer to a callback function
  1859. @end defmac
  1860. @defmac STARPU_CALLBACK_ARG
  1861. this macro is used when calling @code{starpu_insert_task}, and must be
  1862. followed by a pointer to be given as an argument to the callback
  1863. function
  1864. @end defmac
  1865. @defmac STARPU_CALLBACK_WITH_ARG
  1866. this macro is used when calling @code{starpu_insert_task}, and must be
  1867. followed by two pointers: one to a callback function, and the other to
  1868. be given as an argument to the callback function; this is equivalent
  1869. to using both @code{STARPU_CALLBACK} and
  1870. @code{STARPU_CALLBACK_ARG}
  1871. @end defmac
  1872. @defmac STARPU_PRIORITY
  1873. this macro is used when calling @code{starpu_insert_task}, and must be
  1874. followed by an integer defining a priority level
  1875. @end defmac
  1876. @defmac STARPU_TAG
  1877. this macro is used when calling @code{starpu_insert_task}, and must be
  1878. followed by a tag.
  1879. @end defmac
  1880. @defmac STARPU_FLOPS
  1881. this macro is used when calling @code{starpu_insert_task}, and must be followed
  1882. by an amount of floating point operations, as a double. The user may have to
  1883. explicitly cast into double, otherwise parameter passing will not work.
  1884. @end defmac
  1885. @deftypefun void starpu_codelet_pack_args ({char **}@var{arg_buffer}, {size_t *}@var{arg_buffer_size}, ...)
  1886. Pack arguments of type @code{STARPU_VALUE} into a buffer which can be
  1887. given to a codelet and later unpacked with the function
  1888. @code{starpu_codelet_unpack_args} defined below.
  1889. @end deftypefun
  1890. @deftypefun void starpu_codelet_unpack_args ({void *}@var{cl_arg}, ...)
  1891. Retrieve the arguments of type @code{STARPU_VALUE} associated to a
  1892. task automatically created using the function
  1893. @code{starpu_insert_task} defined above.
  1894. @end deftypefun
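For instance, the vector scaling task used in the previous examples could be
submitted in a single call, with the scaling factor passed as a @code{STARPU_VALUE};
the codelet implementation then retrieves it with @code{starpu_codelet_unpack_args}
instead of reading @code{cl_arg} directly. The names are the same assumed ones as
before.
@cartouche
@smallexample
/* Submission side. */
float factor = 3.14f;
starpu_insert_task(&scal_cl,
                   STARPU_RW, vector_handle,
                   STARPU_VALUE, &factor, sizeof(factor),
                   0);

/* Inside the codelet implementation. */
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
  float factor;
  starpu_codelet_unpack_args(cl_arg, &factor);
  /* ... scale the vector in buffers[0] by factor ... */
@}
@end smallexample
@end cartouche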
  1895. @node Explicit Dependencies
  1896. @section Explicit Dependencies
  1897. @deftypefun void starpu_task_declare_deps_array ({struct starpu_task} *@var{task}, unsigned @var{ndeps}, {struct starpu_task} *@var{task_array}[])
  1898. Declare task dependencies between a @var{task} and an array of tasks of length
  1899. @var{ndeps}. This function must be called prior to the submission of the task,
  1900. but it may be called after the submission or the execution of the tasks in the
  1901. array, provided the tasks are still valid (i.e. they were not automatically
  1902. destroyed). Calling this function on a task that was already submitted or with
  1903. an entry of @var{task_array} that is not a valid task anymore results in an
  1904. undefined behaviour. If @var{ndeps} is null, no dependency is added. It is
  1905. possible to call @code{starpu_task_declare_deps_array} multiple times on the
  1906. same task; in this case, the dependencies are added. It is possible to have
  1907. redundancy in the task dependencies.
  1908. @end deftypefun
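A possible use, assuming three previously created tasks where @code{task_c} must
wait for @code{task_a} and @code{task_b}:
@cartouche
@smallexample
struct starpu_task *deps[] = @{ task_a, task_b @};

/* task_c will not start before task_a and task_b have completed. */
starpu_task_declare_deps_array(task_c, 2, deps);
starpu_task_submit(task_c);
@end smallexample
@end cartouche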
  1909. @deftp {Data Type} {starpu_tag_t}
  1910. This type defines a task logical identifier. It is possible to associate a task with a unique ``tag'' chosen by the application, and to express
  1911. dependencies between tasks by the means of those tags. To do so, fill the
  1912. @code{tag_id} field of the @code{starpu_task} structure with a tag number (can
  1913. be arbitrary) and set the @code{use_tag} field to 1.
  1914. If @code{starpu_tag_declare_deps} is called with this tag number, the task will
  1915. not be started until the tasks which hold the declared dependency tags are
  1916. completed.
  1917. @end deftp
  1918. @deftypefun void starpu_tag_declare_deps (starpu_tag_t @var{id}, unsigned @var{ndeps}, ...)
  1919. Specify the dependencies of the task identified by tag @var{id}. The first
  1920. argument specifies the tag which is configured, the second argument gives the
  1921. number of tag(s) on which @var{id} depends. The following arguments are the
  1922. tags which have to be terminated to unlock the task.
  1923. This function must be called before the associated task is submitted to StarPU
  1924. with @code{starpu_task_submit}.
  1925. Because of the variable arity of @code{starpu_tag_declare_deps}, note that the
  1926. last arguments @emph{must} be of type @code{starpu_tag_t}: constant values
  1927. typically need to be explicitly cast. Using the
  1928. @code{starpu_tag_declare_deps_array} function avoids this hazard.
  1929. @cartouche
  1930. @smallexample
  1931. /* Tag 0x1 depends on tags 0x32 and 0x52 */
  1932. starpu_tag_declare_deps((starpu_tag_t)0x1,
  1933. 2, (starpu_tag_t)0x32, (starpu_tag_t)0x52);
  1934. @end smallexample
  1935. @end cartouche
  1936. @end deftypefun
  1937. @deftypefun void starpu_tag_declare_deps_array (starpu_tag_t @var{id}, unsigned @var{ndeps}, {starpu_tag_t *}@var{array})
  1938. This function is similar to @code{starpu_tag_declare_deps}, except
  1939. that it does not take a variable number of arguments but an array of
  1940. tags of size @var{ndeps}.
  1941. @cartouche
  1942. @smallexample
  1943. /* Tag 0x1 depends on tags 0x32 and 0x52 */
  1944. starpu_tag_t tag_array[2] = @{0x32, 0x52@};
  1945. starpu_tag_declare_deps_array((starpu_tag_t)0x1, 2, tag_array);
  1946. @end smallexample
  1947. @end cartouche
  1948. @end deftypefun
  1949. @deftypefun int starpu_tag_wait (starpu_tag_t @var{id})
  1950. This function blocks until the task associated to tag @var{id} has been
  1951. executed. This is a blocking call which must therefore not be called within
  1952. tasks or callbacks, but only from the application directly. It is possible to
  1953. synchronize with the same tag multiple times, as long as the
  1954. @code{starpu_tag_remove} function is not called. Note that it is still
  1955. possible to synchronize with a tag associated to a task whose @code{starpu_task}
  1956. data structure was freed (e.g. if the @code{destroy} flag of the
  1957. @code{starpu_task} was enabled).
  1958. @end deftypefun
  1959. @deftypefun int starpu_tag_wait_array (unsigned @var{ntags}, starpu_tag_t *@var{id})
  1960. This function is similar to @code{starpu_tag_wait} except that it blocks until
  1961. @emph{all} the @var{ntags} tags contained in the @var{id} array are
  1962. terminated.
  1963. @end deftypefun
  1964. @deftypefun void starpu_tag_restart (starpu_tag_t @var{id})
  1965. This function can be used to clear the "already notified" status
  1966. of a tag which is not associated with a task. Before that, calling
  1967. @code{starpu_tag_notify_from_apps} again will not notify the successors. After
  1968. that, the next call to @code{starpu_tag_notify_from_apps} will notify the
  1969. successors.
  1970. @end deftypefun
  1971. @deftypefun void starpu_tag_remove (starpu_tag_t @var{id})
  1972. This function releases the resources associated to tag @var{id}. It can be
  1973. called once the corresponding task has been executed and when there is
  1974. no other tag that depends on this tag anymore.
  1975. @end deftypefun
  1976. @deftypefun void starpu_tag_notify_from_apps (starpu_tag_t @var{id})
  1977. This function explicitly unlocks tag @var{id}. It may be useful in the
  1978. case of applications which execute part of their computation outside StarPU
  1979. tasks (e.g. third-party libraries). It is also provided as a
  1980. convenient tool for the programmer, for instance to entirely construct the task
  1981. DAG before actually giving StarPU the opportunity to execute the tasks. When
  1982. called several times on the same tag, notification will be done only on first
  1983. call, thus implementing "OR" dependencies, until the tag is restarted using
  1984. @code{starpu_tag_restart}.
  1985. @end deftypefun
  1986. @node Implicit Data Dependencies
  1987. @section Implicit Data Dependencies
  1988. In this section, we describe how StarPU makes it possible to insert implicit
  1989. task dependencies in order to enforce sequential data consistency. When this
  1990. data consistency is enabled on a specific data handle, any data access will
  1991. appear as sequentially consistent from the application. For instance, if the
  1992. application submits two tasks that access the same piece of data in read-only
  1993. mode, and then a third task that accesses it in write mode, dependencies will be
  1994. added between the first two tasks and the third one. Implicit data dependencies
  1995. are also inserted in the case of data accesses from the application.
  1996. @deftypefun void starpu_data_set_default_sequential_consistency_flag (unsigned @var{flag})
  1997. Set the default sequential consistency flag. If a non-zero value is passed, a
  1998. sequential data consistency will be enforced for all handles registered after
  1999. this function call, otherwise it is disabled. By default, StarPU enables
  2000. sequential data consistency. It is also possible to select the data consistency
  2001. mode of a specific data handle with the
  2002. @code{starpu_data_set_sequential_consistency_flag} function.
  2003. @end deftypefun
  2004. @deftypefun unsigned starpu_data_get_default_sequential_consistency_flag (void)
  2005. Return the default sequential consistency flag
  2006. @end deftypefun
  2007. @deftypefun void starpu_data_set_sequential_consistency_flag (starpu_data_handle_t @var{handle}, unsigned @var{flag})
  2008. Sets the data consistency mode associated to a data handle. The consistency
  2009. mode set using this function takes priority over the default mode which can
  2010. be set with @code{starpu_data_set_default_sequential_consistency_flag}.
  2011. @end deftypefun
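As a sketch, sequential consistency can be relaxed either for a whole data handle or
for a single task; @code{scratch_handle}, @code{scal_cl} and @code{vector_handle}
are assumptions of this example.
@cartouche
@smallexample
/* No implicit dependency will ever be added for this handle. */
starpu_data_set_sequential_consistency_flag(scratch_handle, 0);

/* Alternatively, opt a single task out of implicit dependencies. */
struct starpu_task *task = starpu_task_create();
task->cl = &scal_cl;
task->handles[0] = vector_handle;
task->sequential_consistency = 0;
starpu_task_submit(task);
@end smallexample
@end cartouche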
  2012. @node Performance Model API
  2013. @section Performance Model API
  2014. @deftp {Data Type} {enum starpu_perf_archtype}
  2015. Enumerates the various types of architectures.
  2016. CPU types range within STARPU_CPU_DEFAULT (1 CPU), STARPU_CPU_DEFAULT+1 (2 CPUs), ... STARPU_CPU_DEFAULT + STARPU_MAXCPUS - 1 (STARPU_MAXCPUS CPUs).
  2017. CUDA types range within STARPU_CUDA_DEFAULT (GPU number 0), STARPU_CUDA_DEFAULT + 1 (GPU number 1), ..., STARPU_CUDA_DEFAULT + STARPU_MAXCUDADEVS - 1 (GPU number STARPU_MAXCUDADEVS - 1).
  2018. OpenCL types range within STARPU_OPENCL_DEFAULT (GPU number 0), STARPU_OPENCL_DEFAULT + 1 (GPU number 1), ..., STARPU_OPENCL_DEFAULT + STARPU_MAXOPENCLDEVS - 1 (GPU number STARPU_MAXOPENCLDEVS - 1).
  2019. @table @asis
  2020. @item @code{STARPU_CPU_DEFAULT}
  2021. @item @code{STARPU_CUDA_DEFAULT}
  2022. @item @code{STARPU_OPENCL_DEFAULT}
  2023. @end table
  2024. @end deftp
  2025. @deftp {Data Type} {enum starpu_perfmodel_type}
  2026. The possible values are:
  2027. @table @asis
  2028. @item @code{STARPU_PER_ARCH} for application-provided per-arch cost model functions.
  2029. @item @code{STARPU_COMMON} for application-provided common cost model function, with per-arch factor.
  2030. @item @code{STARPU_HISTORY_BASED} for automatic history-based cost model.
  2031. @item @code{STARPU_REGRESSION_BASED} for automatic linear regression-based cost model (alpha * size ^ beta).
  2032. @item @code{STARPU_NL_REGRESSION_BASED} for automatic non-linear regression-based cost model (a * size ^ b + c).
  2033. @end table
  2034. @end deftp
  2035. @deftp {Data Type} {struct starpu_perfmodel}
  2036. @anchor{struct starpu_perfmodel}
  2037. contains all information about a performance model. At least the
  2038. @code{type} and @code{symbol} fields have to be filled when defining a
  2039. performance model for a codelet. For compatibility, make sure to initialize the
  2040. whole structure to zero, either by using explicit memset, or by letting the
  2041. compiler implicitly do it, e.g. in the case of static storage.
  2042. If not provided, other fields have to be zero.
  2043. @table @asis
  2044. @item @code{type}
  2045. is the type of performance model @code{enum starpu_perfmodel_type}:
  2046. @code{STARPU_HISTORY_BASED},
  2047. @code{STARPU_REGRESSION_BASED}, @code{STARPU_NL_REGRESSION_BASED}: No
  2048. other fields need to be provided; this is purely history-based. @code{STARPU_PER_ARCH}:
  2049. @code{per_arch} has to be filled with functions which return the cost in
  2050. micro-seconds. @code{STARPU_COMMON}: @code{cost_function} has to be filled with
  2051. a function that returns the cost in micro-seconds on a CPU, timing on other
  2052. archs will be determined by multiplying by an arch-specific factor.
  2053. @item @code{const char *symbol}
  2054. is the symbol name for the performance model, which will be used as
  2055. the file name to store the model. It must be set, otherwise the model will
  2056. be ignored.
  2057. @item @code{double (*cost_model)(struct starpu_buffer_descr *)}
  2058. This field is deprecated. Use instead the @code{cost_function} field.
  2059. @item @code{double (*cost_function)(struct starpu_task *, unsigned nimpl)}
  2060. Used by @code{STARPU_COMMON}: takes a task and
  2061. implementation number, and must return a task duration estimation in micro-seconds.
  2062. @item @code{size_t (*size_base)(struct starpu_task *, unsigned nimpl)}
  2063. Used by @code{STARPU_HISTORY_BASED} and
  2064. @code{STARPU_*REGRESSION_BASED}. If not NULL, takes a task and
  2065. implementation number, and returns the size to be used as index for
  2066. history and regression.
  2067. @item @code{struct starpu_perfmodel_per_arch per_arch[STARPU_NARCH_VARIATIONS][STARPU_MAXIMPLEMENTATIONS]}
  2068. Used by @code{STARPU_PER_ARCH}: array of @code{struct
  2069. starpu_per_arch_perfmodel} structures.
  2070. @item @code{unsigned is_loaded}
  2071. Whether the performance model is already loaded from the disk.
  2072. @item @code{unsigned benchmarking}
  2073. Whether the performance model is still being calibrated.
  2074. @item @code{pthread_rwlock_t model_rwlock}
  2075. Lock to protect concurrency between loading from disk (W), updating the values
  2076. (W), and making a performance estimation (R).
  2077. @end table
  2078. @end deftp
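A typical history-based model only needs the @code{type} and @code{symbol} fields,
and is then attached to the codelet through its @code{model} field, e.g. for the
vector scaling codelet assumed in the previous examples:
@cartouche
@smallexample
static struct starpu_perfmodel scal_model =
@{
  .type = STARPU_HISTORY_BASED,
  .symbol = "scal"
@};

struct starpu_codelet scal_cl =
@{
  .where = STARPU_CPU,
  .cpu_funcs = @{ scal_cpu_func, NULL @},
  .nbuffers = 1,
  .modes = @{ STARPU_RW @},
  .model = &scal_model
@};
@end smallexample
@end cartouche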
  2079. @deftp {Data Type} {struct starpu_perfmodel_regression_model}
  2080. @table @asis
  2081. @item @code{double sumlny} sum of ln(measured)
  2082. @item @code{double sumlnx} sum of ln(size)
  2083. @item @code{double sumlnx2} sum of ln(size)^2
  2084. @item @code{unsigned long minx} minimum size
  2085. @item @code{unsigned long maxx} maximum size
  2086. @item @code{double sumlnxlny} sum of ln(size)*ln(measured)
  2087. @item @code{double alpha} estimated = alpha * size ^ beta
  2088. @item @code{double beta}
  2089. @item @code{unsigned valid} whether the linear regression model is valid (i.e. enough measures)
  2090. @item @code{double a, b, c} estimated = a * size ^ b + c
  2091. @item @code{unsigned nl_valid} whether the non-linear regression model is valid (i.e. enough measures)
  2092. @item @code{unsigned nsample} number of sample values for non-linear regression
  2093. @end table
  2094. @end deftp
  2095. @deftp {Data Type} {struct starpu_perfmodel_per_arch}
  2096. contains information about the performance model of a given arch.
  2097. @table @asis
  2098. @item @code{double (*cost_model)(struct starpu_buffer_descr *t)}
  2099. This field is deprecated. Use instead the @code{cost_function} field.
  2100. @item @code{double (*cost_function)(struct starpu_task *task, enum starpu_perf_archtype arch, unsigned nimpl)}
  2101. Used by @code{STARPU_PER_ARCH}, must point to functions which take a task, the
  2102. target arch and implementation number (as a mere convenience, since the array
  2103. is already indexed by these), and must return a task duration estimation in
  2104. micro-seconds.
  2105. @item @code{size_t (*size_base)(struct starpu_task *, enum
  2106. starpu_perf_archtype arch, unsigned nimpl)}
  2107. Same as in @ref{struct starpu_perfmodel}, but per-arch, in
  2108. case it depends on the architecture-specific implementation.
  2109. @item @code{struct starpu_htbl32_node *history}
  2110. The history of performance measurements.
  2111. @item @code{struct starpu_perfmodel_history_list *list}
  2112. Used by @code{STARPU_HISTORY_BASED} and @code{STARPU_NL_REGRESSION_BASED},
  2113. records all execution history measures.
  2114. @item @code{struct starpu_perfmodel_regression_model regression}
  2115. Used by @code{STARPU_REGRESSION_BASED} and
  2116. @code{STARPU_NL_REGRESSION_BASED}, contains the estimated factors of the
  2117. regression.
  2118. @end table
  2119. @end deftp
  2120. @deftypefun int starpu_perfmodel_load_symbol ({const char} *@var{symbol}, {struct starpu_perfmodel} *@var{model})
  2121. loads a given performance model. The @var{model} structure has to be completely zero, and will be filled with the information saved in @code{$STARPU_HOME/.starpu}.
  2122. @end deftypefun
  2123. @deftypefun int starpu_perfmodel_unload_model ({struct starpu_perfmodel} *@var{model})
  2124. unloads the given model which has been previously loaded through the function @code{starpu_perfmodel_load_symbol}
  2125. @end deftypefun
  2126. @deftypefun void starpu_perfmodel_debugfilepath ({struct starpu_perfmodel} *@var{model}, {enum starpu_perf_archtype} @var{arch}, char *@var{path}, size_t @var{maxlen}, unsigned @var{nimpl})
  2127. returns the path to the debugging information for the performance model.
  2128. @end deftypefun
  2129. @deftypefun void starpu_perfmodel_get_arch_name ({enum starpu_perf_archtype} @var{arch}, char *@var{archname}, size_t @var{maxlen}, unsigned @var{nimpl})
  2130. returns the architecture name for @var{arch}.
  2131. @end deftypefun
  2132. @deftypefun {enum starpu_perf_archtype} starpu_worker_get_perf_archtype (int @var{workerid})
  2133. returns the architecture type of a given worker.
  2134. @end deftypefun
  2135. @deftypefun int starpu_perfmodel_list ({FILE *}@var{output})
  2136. prints a list of all performance models on @var{output}.
  2137. @end deftypefun
  2138. @deftypefun void starpu_perfmodel_print ({struct starpu_perfmodel *}@var{model}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl}, {char *}@var{parameter}, {uint32_t *}@var{footprint}, {FILE *}@var{output})
  2139. todo
  2140. @end deftypefun
  2141. @deftypefun int starpu_perfmodel_print_all ({struct starpu_perfmodel *}@var{model}, {char *}@var{arch}, {char *}@var{parameter}, {uint32_t *}@var{footprint}, {FILE *}@var{output})
  2142. todo
  2143. @end deftypefun
  2144. @deftypefun void starpu_bus_print_bandwidth ({FILE *}@var{f})
  2145. prints a matrix of bus bandwidths on @var{f}.
  2146. @end deftypefun
  2147. @deftypefun void starpu_bus_print_affinity ({FILE *}@var{f})
  2148. prints the affinity devices on @var{f}.
  2149. @end deftypefun
  2150. @deftypefun void starpu_topology_print ({FILE *}@var{f})
  2151. prints a description of the topology on @var{f}.
  2152. @end deftypefun
  2153. @deftypefun void starpu_perfmodel_update_history ({struct starpu_perfmodel *}@var{model}, {struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{cpuid}, unsigned @var{nimpl}, double @var{measured})
  2154. This feeds the performance model @var{model} with an explicit measurement
  2155. @var{measured}, in addition to measurements done by StarPU itself. This can be
  2156. useful when the application already has an existing set of measurements done
  2157. in good conditions, that StarPU could benefit from instead of doing on-line
  2158. measurements. An example of use can be seen in @ref{Performance model example}.
  2159. @end deftypefun
  2160. @node Profiling API
  2161. @section Profiling API
  2162. @deftypefun int starpu_profiling_status_set (int @var{status})
  2163. This function sets the profiling status. Profiling is activated by passing
  2164. @code{STARPU_PROFILING_ENABLE} in @var{status}. Passing
  2165. @code{STARPU_PROFILING_DISABLE} disables profiling. Calling this function
  2166. resets all profiling measurements. When profiling is enabled, the
  2167. @code{profiling_info} field of the @code{struct starpu_task} structure points
  2168. to a valid @code{struct starpu_task_profiling_info} structure containing
  2169. information about the execution of the task.
  2170. Negative return values indicate an error, otherwise the previous status is
  2171. returned.
  2172. @end deftypefun
  2173. @deftypefun int starpu_profiling_status_get (void)
  2174. Return the current profiling status or a negative value in case there was an error.
  2175. @end deftypefun
  2176. @deftypefun void starpu_set_profiling_id (int @var{new_id})
  2177. This function sets the ID used for profiling trace filename. It needs to be
  2178. called before @code{starpu_init}.
  2179. @end deftypefun
  2180. @deftp {Data Type} {struct starpu_task_profiling_info}
  2181. This structure contains information about the execution of a task. It is
  2182. accessible from the @code{.profiling_info} field of the @code{starpu_task}
  2183. structure if profiling was enabled. The different fields are:
  2184. @table @asis
  2185. @item @code{struct timespec submit_time}
  2186. Date of task submission (relative to the initialization of StarPU).
  2187. @item @code{struct timespec push_start_time}
  2188. Time when the task was submitted to the scheduler.
  2189. @item @code{struct timespec push_end_time}
  2190. Time when the scheduler finished with the task submission.
  2191. @item @code{struct timespec pop_start_time}
  2192. Time when the scheduler started to be requested for a task, and eventually gave
  2193. that task.
  2194. @item @code{struct timespec pop_end_time}
  2195. Time when the scheduler finished providing the task for execution.
  2196. @item @code{struct timespec acquire_data_start_time}
  2197. Time when the worker started fetching input data.
  2198. @item @code{struct timespec acquire_data_end_time}
  2199. Time when the worker finished fetching input data.
  2200. @item @code{struct timespec start_time}
  2201. Date of task execution beginning (relative to the initialization of StarPU).
  2202. @item @code{struct timespec end_time}
  2203. Date of task execution termination (relative to the initialization of StarPU).
  2204. @item @code{struct timespec release_data_start_time}
  2205. Time when the worker started releasing data.
  2206. @item @code{struct timespec release_data_end_time}
  2207. Time when the worker finished releasing data.
  2208. @item @code{struct timespec callback_start_time}
  2209. Time when the worker started the application callback for the task.
  2210. @item @code{struct timespec callback_end_time}
  2211. Time when the worker finished the application callback for the task.
  2212. @item @code{int workerid}
  2213. Identifier of the worker which has executed the task.
  2214. @item @code{uint64_t used_cycles}
  2215. Number of cycles used by the task, only available in the MoviSim
  2216. @item @code{uint64_t stall_cycles}
  2217. Number of cycles stalled within the task, only available in the MoviSim
  2218. @item @code{double power_consumed}
  2219. Power consumed by the task, only available in the MoviSim
  2220. @end table
  2221. @end deftp
  2222. @deftp {Data Type} {struct starpu_worker_profiling_info}
  2223. This structure contains the profiling information associated to a
  2224. worker. The different fields are:
  2225. @table @asis
  2226. @item @code{struct timespec start_time}
  2227. Starting date for the reported profiling measurements.
  2228. @item @code{struct timespec total_time}
  2229. Duration of the profiling measurement interval.
  2230. @item @code{struct timespec executing_time}
  2231. Time spent by the worker to execute tasks during the profiling measurement interval.
  2232. @item @code{struct timespec sleeping_time}
  2233. Time spent idling by the worker during the profiling measurement interval.
  2234. @item @code{int executed_tasks}
  2235. Number of tasks executed by the worker during the profiling measurement interval.
  2236. @item @code{uint64_t used_cycles}
  2237. Number of cycles used by the worker, only available in the MoviSim
  2238. @item @code{uint64_t stall_cycles}
  2239. Number of cycles stalled within the worker, only available in the MoviSim
  2240. @item @code{double power_consumed}
  2241. Power consumed by the worker, only available in the MoviSim
  2242. @end table
  2243. @end deftp
  2244. @deftypefun int starpu_worker_get_profiling_info (int @var{workerid}, {struct starpu_worker_profiling_info *}@var{worker_info})
  2245. Get the profiling info associated to the worker identified by @var{workerid},
  2246. and reset the profiling measurements. If the @var{worker_info} argument is
  2247. NULL, only reset the counters associated to worker @var{workerid}.
  2248. Upon successful completion, this function returns 0. Otherwise, a negative
  2249. value is returned.
  2250. @end deftypefun
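The sketch below shows how these calls typically fit together: profiling is enabled
before tasks are submitted, and the per-worker counters are then read and converted
with @code{starpu_timing_timespec_to_us} (documented below). The worker count is
obtained here with @code{starpu_worker_get_count}.
@cartouche
@smallexample
starpu_profiling_status_set(STARPU_PROFILING_ENABLE);

/* ... submit tasks ... */
starpu_task_wait_for_all();

unsigned nworkers = starpu_worker_get_count();
unsigned worker;
for (worker = 0; worker < nworkers; worker++)
@{
  struct starpu_worker_profiling_info info;
  starpu_worker_get_profiling_info(worker, &info);
  fprintf(stderr, "worker %u: %.2f us executing, %.2f us sleeping\n",
          worker,
          starpu_timing_timespec_to_us(&info.executing_time),
          starpu_timing_timespec_to_us(&info.sleeping_time));
@}
@end smallexample
@end cartouche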
  2251. @deftp {Data Type} {struct starpu_bus_profiling_info}
  2252. The different fields are:
  2253. @table @asis
  2254. @item @code{struct timespec start_time}
  2255. Time of bus profiling startup.
  2256. @item @code{struct timespec total_time}
  2257. Total time of bus profiling.
  2258. @item @code{long long transferred_bytes}
  2259. Number of bytes transferred during profiling.
  2260. @item @code{int transfer_count}
  2261. Number of transfers during profiling.
  2262. @end table
  2263. @end deftp
  2264. @deftypefun int starpu_bus_get_profiling_info (int @var{busid}, {struct starpu_bus_profiling_info *}@var{bus_info})
  2265. Get the profiling info associated to the bus designated by @var{busid},
  2266. and reset the profiling measurements. If @var{bus_info} is NULL, only reset the
  2267. counters.
  2268. @end deftypefun
  2269. @deftypefun int starpu_bus_get_count (void)
  2270. Return the number of buses in the machine.
  2271. @end deftypefun
  2272. @deftypefun int starpu_bus_get_id (int @var{src}, int @var{dst})
  2273. Return the identifier of the bus between @var{src} and @var{dst}
  2274. @end deftypefun
  2275. @deftypefun int starpu_bus_get_src (int @var{busid})
  2276. Return the source point of bus @var{busid}
  2277. @end deftypefun
  2278. @deftypefun int starpu_bus_get_dst (int @var{busid})
  2279. Return the destination point of bus @var{busid}
  2280. @end deftypefun
  2281. @deftypefun double starpu_timing_timespec_delay_us ({struct timespec} *@var{start}, {struct timespec} *@var{end})
  2282. Returns the time elapsed between @var{start} and @var{end} in microseconds.
  2283. @end deftypefun
  2284. @deftypefun double starpu_timing_timespec_to_us ({struct timespec} *@var{ts})
  2285. Converts the given timespec @var{ts} into microseconds.
  2286. @end deftypefun
  2287. @deftypefun void starpu_bus_profiling_helper_display_summary (void)
  2288. Displays statistics about the bus on stderr if the environment
  2289. variable @code{STARPU_BUS_STATS} is defined. The function is called
  2290. automatically by @code{starpu_shutdown()}.
  2291. @end deftypefun
  2292. @deftypefun void starpu_worker_profiling_helper_display_summary (void)
  2293. Displays statistics about the workers on stderr if the environment
  2294. variable @code{STARPU_WORKER_STATS} is defined. The function is called
  2295. automatically by @code{starpu_shutdown()}.
  2296. @end deftypefun
  2297. @deftypefun void starpu_memory_display_stats ()
  2298. Display statistics about the current data handles registered within
  2299. StarPU. StarPU must have been configured with the option
  2300. @code{--enable-memory-stats} (@pxref{Memory feedback}).
  2301. @end deftypefun
  2302. @node Theoretical lower bound on execution time API
  2303. @section Theoretical lower bound on execution time
  2304. @deftypefun void starpu_bound_start (int @var{deps}, int @var{prio})
  2305. Start recording tasks (resets stats). @var{deps} tells whether
  2306. dependencies should be recorded too (this is quite expensive)
  2307. @end deftypefun
  2308. @deftypefun void starpu_bound_stop (void)
  2309. Stop recording tasks
  2310. @end deftypefun
  2311. @deftypefun void starpu_bound_print_dot ({FILE *}@var{output})
  2312. Print the DAG that was recorded
  2313. @end deftypefun
  2314. @deftypefun void starpu_bound_compute ({double *}@var{res}, {double *}@var{integer_res}, int @var{integer})
  2315. Get theoretical upper bound (in ms) (needs glpk support detected by @code{configure} script). It returns 0 if some performance models are not calibrated.
  2316. @end deftypefun
  2317. @deftypefun void starpu_bound_print_lp ({FILE *}@var{output})
  2318. Emit the Linear Programming system on @var{output} for the recorded tasks, in
  2319. the lp format
  2320. @end deftypefun
  2321. @deftypefun void starpu_bound_print_mps ({FILE *}@var{output})
  2322. Emit the Linear Programming system on @var{output} for the recorded tasks, in
  2323. the mps format
  2324. @end deftypefun
  2325. @deftypefun void starpu_bound_print ({FILE *}@var{output}, int @var{integer})
  2326. Emit statistics of actual execution vs theoretical upper bound. @var{integer}
  2327. permits to choose between integer solving (which takes a long time but is
  2328. correct), and relaxed solving (which provides an approximate solution).
  2329. @end deftypefun
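These functions are typically used to bracket the submission of the tasks of
interest, as in the following sketch (without dependency nor priority recording):
@cartouche
@smallexample
double relaxed_bound, integer_bound;

starpu_bound_start(0, 0);     /* do not record dependencies nor priorities */
/* ... submit tasks ... */
starpu_task_wait_for_all();
starpu_bound_stop();

/* Compute the theoretical bound, in ms (requires glpk support). */
starpu_bound_compute(&relaxed_bound, &integer_bound, 1);
starpu_bound_print_lp(stderr);
@end smallexample
@end cartouche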
  2330. @node CUDA extensions
  2331. @section CUDA extensions
  2332. @defmac STARPU_USE_CUDA
  2333. This macro is defined when StarPU has been installed with CUDA
  2334. support. It should be used in your code to detect the availability of
  2335. CUDA as shown in @ref{Full source code for the 'Scaling a Vector' example}.
  2336. @end defmac
  2337. @deftypefun cudaStream_t starpu_cuda_get_local_stream (void)
  2338. This function gets the current worker's CUDA stream.
  2339. StarPU provides a stream for every CUDA device controlled by StarPU. This
  2340. function is only provided for convenience so that programmers can easily use
  2341. asynchronous operations within codelets without having to create a stream by
  2342. hand. Note that the application is not forced to use the stream provided by
  2343. @code{starpu_cuda_get_local_stream} and may also create its own streams.
  2344. Synchronizing with @code{cudaThreadSynchronize()} is allowed, but will reduce
  2345. the likelihood of having all transfers overlapped.
  2346. @end deftypefun
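As an illustration, a CUDA codelet function can launch its kernel asynchronously on
this stream and then synchronize on that stream only. The wrapper
@code{scal_kernel_launcher}, assumed to be defined in a @code{.cu} file, and the
data accessors are assumptions of this sketch.
@cartouche
@smallexample
/* Assumed to launch the actual __global__ kernel on the given stream. */
extern void scal_kernel_launcher(unsigned n, float *val, float factor,
                                 cudaStream_t stream);

void scal_cuda_func(void *buffers[], void *cl_arg)
@{
  unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
  float factor = *(float *)cl_arg;
  cudaError_t status;

  /* Launch asynchronously on the per-worker stream, then wait for that
     stream only, so that other transfers may still overlap. */
  scal_kernel_launcher(n, val, factor, starpu_cuda_get_local_stream());

  status = cudaStreamSynchronize(starpu_cuda_get_local_stream());
  if (status != cudaSuccess)
    STARPU_CUDA_REPORT_ERROR(status);
@}
@end smallexample
@end cartouche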
  2347. @deftypefun {const struct cudaDeviceProp *} starpu_cuda_get_device_properties (unsigned @var{workerid})
  2348. This function returns a pointer to device properties for worker @var{workerid}
  2349. (assumed to be a CUDA worker).
  2350. @end deftypefun
  2351. @deftypefun void starpu_cuda_report_error ({const char *}@var{func}, {const char *}@var{file}, int @var{line}, cudaError_t @var{status})
  2352. Report a CUDA error.
  2353. @end deftypefun
  2354. @defmac STARPU_CUDA_REPORT_ERROR (cudaError_t @var{status})
  2355. Calls starpu_cuda_report_error, passing the current function, file and line
  2356. position.
  2357. @end defmac
  2358. @deftypefun int starpu_cuda_copy_async_sync ({void *}@var{src_ptr}, unsigned @var{src_node}, {void *}@var{dst_ptr}, unsigned @var{dst_node}, size_t @var{ssize}, cudaStream_t @var{stream}, {enum cudaMemcpyKind} @var{kind})
  2359. Copy @var{ssize} bytes from the pointer @var{src_ptr} on
  2360. @var{src_node} to the pointer @var{dst_ptr} on @var{dst_node}.
  2361. The function first tries to copy the data asynchronously (unless
  2362. @var{stream} is @code{NULL}). If the asynchronous copy fails or if
  2363. @var{stream} is @code{NULL}, it copies the data synchronously.
  2364. The function returns @code{-EAGAIN} if the asynchronous launch was
  2365. successful. It returns 0 if the synchronous copy was successful, or
  2366. fails otherwise.
  2367. @end deftypefun
  2368. @deftypefun void starpu_cuda_set_device (unsigned @var{devid})
  2369. Calls @code{cudaSetDevice(devid)} or @code{cudaGLSetGLDevice(devid)}, according to
  2370. whether @code{devid} is among the @code{cuda_opengl_interoperability} field of
  2371. the @code{starpu_conf} structure.
  2372. @end deftypefun
  2373. @deftypefun void starpu_cublas_init (void)
  2374. This function initializes CUBLAS on every CUDA device.
  2375. The CUBLAS library must be initialized prior to any CUBLAS call. Calling
  2376. @code{starpu_cublas_init} will initialize CUBLAS on every CUDA device
  2377. controlled by StarPU. This call blocks until CUBLAS has been properly
  2378. initialized on every device.
  2379. @end deftypefun
  2380. @deftypefun void starpu_cublas_shutdown (void)
  2381. This function synchronously deinitializes the CUBLAS library on every CUDA device.
  2382. @end deftypefun
  2383. @deftypefun void starpu_cublas_report_error ({const char *}@var{func}, {const char *}@var{file}, int @var{line}, cublasStatus @var{status})
  2384. Report a cublas error.
  2385. @end deftypefun
  2386. @defmac STARPU_CUBLAS_REPORT_ERROR (cublasStatus @var{status})
  2387. Calls starpu_cublas_report_error, passing the current function, file and line
  2388. position.
  2389. @end defmac
  2390. @node OpenCL extensions
  2391. @section OpenCL extensions
  2392. @menu
  2393. * Writing OpenCL kernels:: Writing OpenCL kernels
  2394. * Compiling OpenCL kernels:: Compiling OpenCL kernels
  2395. * Loading OpenCL kernels:: Loading OpenCL kernels
  2396. * OpenCL statistics:: Collecting statistics from OpenCL
  2397. * OpenCL utilities:: Utilities for OpenCL
  2398. @end menu
  2399. @defmac STARPU_USE_OPENCL
  2400. This macro is defined when StarPU has been installed with OpenCL
  2401. support. It should be used in your code to detect the availability of
  2402. OpenCL as shown in @ref{Full source code for the 'Scaling a Vector' example}.
  2403. @end defmac
  2404. @node Writing OpenCL kernels
  2405. @subsection Writing OpenCL kernels
  2406. @deftypefun void starpu_opencl_get_context (int @var{devid}, {cl_context *}@var{context})
  2407. Places the OpenCL context of the device designated by @var{devid} into @var{context}.
  2408. @end deftypefun
  2409. @deftypefun void starpu_opencl_get_device (int @var{devid}, {cl_device_id *}@var{device})
  2410. Places the cl_device_id corresponding to @var{devid} in @var{device}.
  2411. @end deftypefun
  2412. @deftypefun void starpu_opencl_get_queue (int @var{devid}, {cl_command_queue *}@var{queue})
  2413. Places the command queue of the device designated by @var{devid} into @var{queue}.
  2414. @end deftypefun
  2415. @deftypefun void starpu_opencl_get_current_context ({cl_context *}@var{context})
  2416. Return the context of the current worker.
  2417. @end deftypefun
  2418. @deftypefun void starpu_opencl_get_current_queue ({cl_command_queue *}@var{queue})
  2419. Return the computation kernel command queue of the current worker.
  2420. @end deftypefun
  2421. @deftypefun int starpu_opencl_set_kernel_args ({cl_int *}@var{err}, {cl_kernel *}@var{kernel}, ...)
  2422. Sets the arguments of a given kernel. The list of arguments must be given as
  2423. (size_t @var{size_of_the_argument}, cl_mem * @var{pointer_to_the_argument}).
  2424. The last argument must be 0. In case of failure, this function returns the id
  2425. of the argument that could not be set, and @var{err} is set to the error
  2426. returned by OpenCL. Otherwise, it returns the number of arguments that were
  2427. successfully set.
  2428. @cartouche
  2429. @smallexample
  2430. int n;
  2431. cl_int err;
  2432. cl_kernel kernel;
  2433. n = starpu_opencl_set_kernel_args(&err, &kernel,
  2434. sizeof(foo), &foo,
  2435. sizeof(bar), &bar,
  2436. 0);
  2437. if (n != 2)
  2438. fprintf(stderr, "Error : %d\n", err);
  2439. @end smallexample
  2440. @end cartouche
  2441. @end deftypefun
  2442. @node Compiling OpenCL kernels
  2443. @subsection Compiling OpenCL kernels
  2444. Source codes for OpenCL kernels can be stored in a file or in a
  2445. string. StarPU provides functions to build the program executable for
  2446. each available OpenCL device as a @code{cl_program} object. This
  2447. program executable can then be loaded within a specific queue as
  2448. explained in the next section. These are only helpers; applications
  2449. can also fill a @code{starpu_opencl_program} array by hand for more advanced
  2450. use (e.g. different programs on the different OpenCL devices, for
  2451. relocation purposes, for instance).
  2452. @deftp {Data Type} {struct starpu_opencl_program}
  2453. Stores the OpenCL programs as compiled for the different OpenCL
  2454. devices. The different fields are:
  2455. @table @asis
  2456. @item @code{cl_program programs[STARPU_MAXOPENCLDEVS]}
  2457. Stores each program for each OpenCL device.
  2458. @end table
  2459. @end deftp
  2460. @deftypefun int starpu_opencl_load_opencl_from_file ({const char} *@var{source_file_name}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
  2461. @anchor{starpu_opencl_load_opencl_from_file}
  2462. This function compiles an OpenCL source code stored in a file.
  2463. @end deftypefun
  2464. @deftypefun int starpu_opencl_load_opencl_from_string ({const char} *@var{opencl_program_source}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
  2465. This function compiles an OpenCL source code stored in a string.
  2466. @end deftypefun
  2467. @deftypefun int starpu_opencl_unload_opencl ({struct starpu_opencl_program} *@var{opencl_programs})
  2468. This function unloads an OpenCL compiled code.
  2469. @end deftypefun
  2470. @deftypefun void starpu_opencl_load_program_source ({const char *}@var{source_file_name}, char *@var{located_file_name}, char *@var{located_dir_name}, char *@var{opencl_program_source})
  2471. @anchor{starpu_opencl_load_program_source}
  2472. Store the contents of the file @var{source_file_name} in the buffer
  2473. @var{opencl_program_source}. The file @var{source_file_name} can be
  2474. located in the current directory, or in the directory specified by the
  2475. environment variable @code{STARPU_OPENCL_PROGRAM_DIR} (@pxref{STARPU_OPENCL_PROGRAM_DIR}), or in the
  2476. directory @code{share/starpu/opencl} of the installation directory of
  2477. StarPU, or in the source directory of StarPU.
  2478. When the file is found, @code{located_file_name} is the full name of
  2479. the file as it has been located on the system, @code{located_dir_name}
  2480. the directory where it has been located. Otherwise, they are both set
  2481. to the empty string.
  2482. @end deftypefun
@deftypefun int starpu_opencl_compile_opencl_from_file ({const char *}@var{source_file_name}, {const char *} @var{build_options})
Compile the OpenCL kernel stored in the file @code{source_file_name}
with the given options @code{build_options} and store the result in
the directory @code{$STARPU_HOME/.starpu/opencl} with the same
filename as @code{source_file_name}. The compilation is done for every
OpenCL device, and the filename is suffixed with the vendor id and the
device id of the OpenCL device.
@end deftypefun
@deftypefun int starpu_opencl_compile_opencl_from_string ({const char *}@var{opencl_program_source}, {const char *}@var{file_name}, {const char* }@var{build_options})
Compile the OpenCL kernel in the string @code{opencl_program_source}
with the given options @code{build_options} and store the result in
the directory @code{$STARPU_HOME/.starpu/opencl} with the filename
@code{file_name}. The compilation is done for every
OpenCL device, and the filename is suffixed with the vendor id and the
device id of the OpenCL device.
@end deftypefun
@deftypefun int starpu_opencl_load_binary_opencl ({const char *}@var{kernel_id}, {struct starpu_opencl_program *}@var{opencl_programs})
Load the binary OpenCL kernel identified by @var{kernel_id}. For every
OpenCL device, the binary OpenCL kernel will be loaded from the file
@code{$STARPU_HOME/.starpu/opencl/<kernel_id>.<device_type>.vendor_id_<vendor_id>_device_id_<device_id>}.
@end deftypefun
@node Loading OpenCL kernels
@subsection Loading OpenCL kernels
@deftypefun int starpu_opencl_load_kernel (cl_kernel *@var{kernel}, cl_command_queue *@var{queue}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char} *@var{kernel_name}, int @var{devid})
Create a kernel @var{kernel} for device @var{devid}, on its computation command
queue returned in @var{queue}, using program @var{opencl_programs} and name
@var{kernel_name}.
@end deftypefun
@deftypefun int starpu_opencl_release_kernel (cl_kernel @var{kernel})
Release the given @var{kernel}, to be called after kernel execution.
@end deftypefun
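As a hedged illustration, an OpenCL codelet implementation typically
retrieves its kernel, enqueues it and releases it as follows (the global
@code{programs} variable, the kernel name @code{vector_scal} and the
argument setup are assumptions, not part of the API above):
@smallexample
void opencl_codelet(void *descr[], void *cl_arg)
@{
        cl_kernel kernel;
        cl_command_queue queue;
        cl_event event;
        int devid = starpu_worker_get_devid(starpu_worker_get_id());
        int err;

        /* Create the kernel from the previously compiled program. */
        err = starpu_opencl_load_kernel(&kernel, &queue, &programs,
                                        "vector_scal", devid);
        if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

        /* ... set the kernel arguments with clSetKernelArg() ... */

        size_t global = 1024; /* hypothetical work size */
        err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global,
                                     NULL, 0, NULL, &event);
        if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

        clWaitForEvents(1, &event);
        clReleaseEvent(event);

        starpu_opencl_release_kernel(kernel);
@}
@end smallexample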
@node OpenCL statistics
@subsection OpenCL statistics
@deftypefun int starpu_opencl_collect_stats (cl_event @var{event})
This function allows collecting statistics on a kernel execution.
After termination of the kernels, the OpenCL codelet should call this function
to pass it the event returned by @code{clEnqueueNDRangeKernel}, to let StarPU
collect statistics about the kernel execution (used cycles, consumed power).
@end deftypefun
@node OpenCL utilities
@subsection OpenCL utilities
@deftypefun {const char *} starpu_opencl_error_string (cl_int @var{status})
Return the error message in English corresponding to @var{status}, an
OpenCL error code.
@end deftypefun
@deftypefun void starpu_opencl_display_error ({const char *}@var{func}, {const char *}@var{file}, int @var{line}, {const char *}@var{msg}, cl_int @var{status})
Given a valid error @var{status}, prints the corresponding error message on
stdout, along with the given function name @var{func}, the given filename
@var{file}, the given line number @var{line} and the given message @var{msg}.
@end deftypefun
@defmac STARPU_OPENCL_DISPLAY_ERROR (cl_int @var{status})
Call the function @code{starpu_opencl_display_error} with the given
error @var{status}, the current function name, current file and line
number, and an empty message.
@end defmac
@deftypefun void starpu_opencl_report_error ({const char *}@var{func}, {const char *}@var{file}, int @var{line}, {const char *}@var{msg}, cl_int @var{status})
Call the function @code{starpu_opencl_display_error} and abort.
@end deftypefun
@defmac STARPU_OPENCL_REPORT_ERROR (cl_int @var{status})
Call the function @code{starpu_opencl_report_error} with the given
error @var{status}, with the current function name, current file and
line number, and an empty message.
@end defmac
@defmac STARPU_OPENCL_REPORT_ERROR_WITH_MSG ({const char *}@var{msg}, cl_int @var{status})
Call the function @code{starpu_opencl_report_error} with the given
message and the given error @var{status}, with the current function
name, current file and line number.
@end defmac
@deftypefun cl_int starpu_opencl_allocate_memory ({cl_mem *}@var{addr}, size_t @var{size}, cl_mem_flags @var{flags})
Allocate @var{size} bytes of memory, stored in @var{addr}. @var{flags} must be a
valid combination of cl_mem_flags values.
@end deftypefun
@deftypefun cl_int starpu_opencl_copy_ram_to_opencl ({void *}@var{ptr}, unsigned @var{src_node}, cl_mem @var{buffer}, unsigned @var{dst_node}, size_t @var{size}, size_t @var{offset}, {cl_event *}@var{event}, {int *}@var{ret})
Copy @var{size} bytes from the given @var{ptr} on
RAM @var{src_node} to the given @var{buffer} on OpenCL @var{dst_node}.
@var{offset} is the offset, in bytes, in @var{buffer}.
If @var{event} is NULL, the copy is synchronous, i.e. the queue is
synchronised before returning. If non NULL, @var{event} can be used
after the call to wait for this particular copy to complete.
This function returns CL_SUCCESS if the copy was successful, or a valid OpenCL error code
otherwise. The integer pointed to by @var{ret} is set to -EAGAIN if the asynchronous launch
was successful, or to 0 if @var{event} was NULL.
@end deftypefun
@deftypefun cl_int starpu_opencl_copy_opencl_to_ram (cl_mem @var{buffer}, unsigned @var{src_node}, void *@var{ptr}, unsigned @var{dst_node}, size_t @var{size}, size_t @var{offset}, {cl_event *}@var{event}, {int *}@var{ret})
Copy @var{size} bytes asynchronously from the given @var{buffer} on
OpenCL @var{src_node} to the given @var{ptr} on RAM @var{dst_node}.
@var{offset} is the offset, in bytes, in @var{buffer}.
If @var{event} is NULL, the copy is synchronous, i.e. the queue is
synchronised before returning. If non NULL, @var{event} can be used
after the call to wait for this particular copy to complete.
This function returns CL_SUCCESS if the copy was successful, or a valid OpenCL error code
otherwise. The integer pointed to by @var{ret} is set to -EAGAIN if the asynchronous launch
was successful, or to 0 if @var{event} was NULL.
@end deftypefun
@deftypefun cl_int starpu_opencl_copy_opencl_to_opencl (cl_mem @var{src}, unsigned @var{src_node}, size_t @var{src_offset}, cl_mem @var{dst}, unsigned @var{dst_node}, size_t @var{dst_offset}, size_t @var{size}, {cl_event *}@var{event}, {int *}@var{ret})
Copy @var{size} bytes asynchronously from byte offset @var{src_offset} of
@var{src} on OpenCL @var{src_node} to byte offset @var{dst_offset} of @var{dst} on
OpenCL @var{dst_node}.
If @var{event} is NULL, the copy is synchronous, i.e. the queue is
synchronised before returning. If non NULL, @var{event} can be used
after the call to wait for this particular copy to complete.
This function returns CL_SUCCESS if the copy was successful, or a valid OpenCL error code
otherwise. The integer pointed to by @var{ret} is set to -EAGAIN if the asynchronous launch
was successful, or to 0 if @var{event} was NULL.
@end deftypefun
@deftypefun cl_int starpu_opencl_copy_async_sync (uintptr_t @var{src}, size_t @var{src_offset}, unsigned @var{src_node}, uintptr_t @var{dst}, size_t @var{dst_offset}, unsigned @var{dst_node}, size_t @var{size}, {cl_event *}@var{event})
Copy @var{size} bytes from byte offset @var{src_offset} of
@var{src} on @var{src_node} to byte offset @var{dst_offset} of @var{dst} on
@var{dst_node}. If @var{event} is NULL, the copy is synchronous, i.e. the queue is
synchronised before returning. If non NULL, @var{event} can be used
after the call to wait for this particular copy to complete.
The function returns @code{-EAGAIN} if the asynchronous launch was
successful. It returns 0 if the synchronous copy was successful, or
fails otherwise.
@end deftypefun
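As a hedged sketch of the event-based variants, an asynchronous
RAM-to-OpenCL transfer may look as follows; @code{host_ptr},
@code{device_buffer}, the node numbers and @code{size} are assumed to have
been obtained elsewhere.
@smallexample
cl_event event;
cl_int err;
int ret;

err = starpu_opencl_copy_ram_to_opencl(host_ptr, src_node,
                                       device_buffer, dst_node,
                                       size, 0 /* offset */,
                                       &event, &ret);
if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

if (ret == -EAGAIN)
@{
        /* The copy was launched asynchronously: wait for it here. */
        clWaitForEvents(1, &event);
        clReleaseEvent(event);
@}
@end smallexample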
@node Miscellaneous helpers
@section Miscellaneous helpers
@deftypefun int starpu_data_cpy (starpu_data_handle_t @var{dst_handle}, starpu_data_handle_t @var{src_handle}, int @var{asynchronous}, void (*@var{callback_func})(void*), void *@var{callback_arg})
Copy the content of the @var{src_handle} into the @var{dst_handle} handle.
The @var{asynchronous} parameter indicates whether the function should
block or not. In the case of an asynchronous call, it is possible to
synchronize with the termination of this operation either by the means of
implicit dependencies (if enabled) or by calling
@code{starpu_task_wait_for_all()}. If @var{callback_func} is not @code{NULL},
this callback function is executed after the handle has been copied, and it is
given the @var{callback_arg} pointer as argument.
@end deftypefun
@deftypefun void starpu_execute_on_each_worker (void (*@var{func})(void *), void *@var{arg}, uint32_t @var{where})
This function executes the given function on a subset of workers.
When calling this method, the offloaded function specified by the first argument is
executed by every StarPU worker that may execute the function.
The second argument is passed to the offloaded function.
The last argument specifies on which types of processing units the function
should be executed. Similarly to the @var{where} field of the
@code{struct starpu_codelet} structure, it is possible to specify that the function
should be executed on every CUDA device and every CPU by passing
@code{STARPU_CPU|STARPU_CUDA}.
This function blocks until the function has been executed on every appropriate
processing unit, and can therefore not be called from a callback function, for
instance.
@end deftypefun
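A minimal sketch: running a per-worker initialization function on every CPU
and CUDA worker (the @code{init_library} function and its argument are
hypothetical).
@smallexample
void init_library(void *arg)
@{
        int *seed = (int *) arg;
        /* Per-worker initialization goes here, e.g. seeding a
         * thread-local state with *seed + starpu_worker_get_id(). */
        (void) seed;
@}

/* e.g. from main(), after starpu_init(): */
int seed = 42;
/* Blocks until init_library() has run on every CPU and CUDA worker. */
starpu_execute_on_each_worker(init_library, &seed, STARPU_CPU|STARPU_CUDA);
@end smallexample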
@node FXT Support
@section FXT Support
@deftypefun void starpu_fxt_start_profiling (void)
Start recording the trace. The trace is by default started from the
@code{starpu_init()} call, but can be paused by using
@code{starpu_fxt_stop_profiling}, in which case
@code{starpu_fxt_start_profiling} should be called to specify when to resume
recording events.
@end deftypefun
@deftypefun void starpu_fxt_stop_profiling (void)
Stop recording the trace. The trace is by default stopped at the
@code{starpu_shutdown()} call. @code{starpu_fxt_stop_profiling} can however be
used to stop it earlier. @code{starpu_fxt_start_profiling} can then be called to
start recording it again, etc.
@end deftypefun
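For instance, to leave the set-up phase out of the trace and only record a
phase of interest (tracing support must have been enabled when StarPU was
built):
@smallexample
starpu_init(NULL);

/* Do not trace the set-up phase. */
starpu_fxt_stop_profiling();
/* ... data registration, warm-up ... */

/* Record only the phase of interest. */
starpu_fxt_start_profiling();
/* ... submit and wait for the interesting tasks ... */
starpu_fxt_stop_profiling();
@end smallexample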
@node MPI
@section MPI
@menu
* Initialisation::
* Communication::
* Communication Cache::
* MPI Insert Task::
* Collective Operations::
@end menu
@node Initialisation
@subsection Initialisation
@deftypefun int starpu_mpi_init (int *@var{argc}, char ***@var{argv}, int initialize_mpi)
Initializes the starpumpi library. @code{initialize_mpi} indicates if
MPI should be initialized or not by StarPU. If the value is not @code{0},
MPI will be initialized by calling @code{MPI_Init_thread(argc, argv,
MPI_THREAD_SERIALIZED, ...)}.
@end deftypefun
@deftypefun int starpu_mpi_initialize (void)
This function is deprecated. One should instead use the
function @code{starpu_mpi_init()} defined above.
This function does not call @code{MPI_Init}, which should be called beforehand.
@end deftypefun
@deftypefun int starpu_mpi_initialize_extended (int *@var{rank}, int *@var{world_size})
This function is deprecated. One should instead use the
function @code{starpu_mpi_init()} defined above.
MPI will be initialized by starpumpi by calling @code{MPI_Init_thread(argc, argv,
MPI_THREAD_SERIALIZED, ...)}.
@end deftypefun
@deftypefun int starpu_mpi_shutdown (void)
Cleans up the starpumpi library. This must be called after the last use of the
@code{starpu_mpi} functions and before @code{starpu_shutdown()}.
@code{MPI_Finalize()} will be called if StarPU-MPI has been initialized
by @code{starpu_mpi_init()}.
@end deftypefun
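A minimal initialization/termination skeleton (error handling trimmed); the
last argument of @code{starpu_mpi_init} being 1, StarPU-MPI calls
@code{MPI_Init_thread} itself:
@smallexample
int main(int argc, char **argv)
@{
        int ret;

        ret = starpu_init(NULL);
        if (ret) return 1;

        ret = starpu_mpi_init(&argc, &argv, 1);
        if (ret) return 1;

        /* ... register data, submit tasks, communicate ... */

        starpu_mpi_shutdown();   /* calls MPI_Finalize() in this case */
        starpu_shutdown();
        return 0;
@}
@end smallexample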
@deftypefun void starpu_mpi_comm_amounts_retrieve (size_t *@var{comm_amounts})
Retrieve the current amount of communications from the current node in
the array @code{comm_amounts}, which must have a size greater than or equal
to the world size. Communication statistics must be enabled
(@pxref{STARPU_COMM_STATS}).
@end deftypefun
@deftypefun void starpu_mpi_set_communication_tag (int @var{tag})
@anchor{starpu_mpi_set_communication_tag}
Tell StarPU-MPI which MPI tag to use for all its communications.
@end deftypefun
@deftypefun int starpu_mpi_get_communication_tag (void)
@anchor{starpu_mpi_get_communication_tag}
Returns the MPI tag which will be used for all StarPU-MPI communications.
@end deftypefun
@node Communication
@subsection Communication
@deftypefun int starpu_mpi_send (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Performs a standard-mode, blocking send of @var{data_handle} to the
node @var{dest} using the message tag @code{mpi_tag} within the
communicator @var{comm}.
@end deftypefun
@deftypefun int starpu_mpi_recv (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, MPI_Status *@var{status})
Performs a standard-mode, blocking receive in @var{data_handle} from the
node @var{source} using the message tag @code{mpi_tag} within the
communicator @var{comm}.
@end deftypefun
@deftypefun int starpu_mpi_isend (starpu_data_handle_t @var{data_handle}, starpu_mpi_req *@var{req}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @code{mpi_tag} within the
communicator @var{comm}. After the call, the pointer to the request
@var{req} can be used to test or to wait for the completion of the communication.
@end deftypefun
@deftypefun int starpu_mpi_irecv (starpu_data_handle_t @var{data_handle}, starpu_mpi_req *@var{req}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @code{mpi_tag} within the
communicator @var{comm}. After the call, the pointer to the request
@var{req} can be used to test or to wait for the completion of the communication.
@end deftypefun
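As a hedged sketch (the handle is assumed to be registered identically on
both ranks), rank 0 sends a piece of data to rank 1 with the non-blocking
calls, and both later wait for completion:
@smallexample
starpu_mpi_req req;
MPI_Status status;
int rank;

MPI_Comm_rank(MPI_COMM_WORLD, &rank);

if (rank == 0)
        starpu_mpi_isend(handle, &req, 1, 42 /* tag */, MPI_COMM_WORLD);
else if (rank == 1)
        starpu_mpi_irecv(handle, &req, 0, 42 /* tag */, MPI_COMM_WORLD);

/* ... overlap the communication with other work ... */

if (rank == 0 || rank == 1)
        starpu_mpi_wait(&req, &status);
@end smallexample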
@deftypefun int starpu_mpi_isend_detached (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @code{mpi_tag} within the
communicator @var{comm}. On completion, the @var{callback} function is
called with the argument @var{arg}. Similarly to the pthread detached
functionality, when a detached communication completes, its resources
are automatically released back to the system; there is no need to
test or to wait for the completion of the request.
@end deftypefun
@deftypefun int starpu_mpi_irecv_detached (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @code{mpi_tag} within the
communicator @var{comm}. On completion, the @var{callback} function is
called with the argument @var{arg}. Similarly to the pthread detached
functionality, when a detached communication completes, its resources
are automatically released back to the system; there is no need to
test or to wait for the completion of the request.
@end deftypefun
@deftypefun int starpu_mpi_wait (starpu_mpi_req *@var{req}, MPI_Status *@var{status})
Returns when the operation identified by request @var{req} is complete.
@end deftypefun
@deftypefun int starpu_mpi_test (starpu_mpi_req *@var{req}, int *@var{flag}, MPI_Status *@var{status})
If the operation identified by @var{req} is complete, set @var{flag}
to 1. The @var{status} object is set to contain information on the
completed operation.
@end deftypefun
@deftypefun int starpu_mpi_barrier (MPI_Comm @var{comm})
Blocks the caller until all group members of the communicator
@var{comm} have called it.
@end deftypefun
@deftypefun int starpu_mpi_isend_detached_unlock_tag (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @code{mpi_tag} within the
communicator @var{comm}. On completion, @var{tag} is unlocked.
@end deftypefun
@deftypefun int starpu_mpi_irecv_detached_unlock_tag (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @code{mpi_tag} within the
communicator @var{comm}. On completion, @var{tag} is unlocked.
@end deftypefun
@deftypefun int starpu_mpi_isend_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle_t *@var{data_handle}, int *@var{dest}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Posts @var{array_size} standard-mode, non-blocking sends. Each post
sends the n-th data of the array @var{data_handle} to the n-th node of
the array @var{dest} using the n-th message tag of the array @code{mpi_tag}
within the n-th communicator of the array @var{comm}.
On completion of all the requests, @var{tag} is unlocked.
@end deftypefun
@deftypefun int starpu_mpi_irecv_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle_t *@var{data_handle}, int *@var{source}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Posts @var{array_size} non-blocking receives. Each post receives the
n-th data of the array @var{data_handle} from the n-th
node of the array @var{source} using the n-th message tag of the array
@code{mpi_tag} within the n-th communicator of the array @var{comm}.
On completion of all the requests, @var{tag} is unlocked.
@end deftypefun
@node Communication Cache
@subsection Communication Cache
@deftypefun void starpu_mpi_cache_flush (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle})
Clear the send and receive communication cache for the data
@var{data_handle}. The function has to be called synchronously by all
the MPI nodes.
The function does nothing if the cache mechanism is disabled (@pxref{STARPU_MPI_CACHE}).
@end deftypefun
@deftypefun void starpu_mpi_cache_flush_all_data (MPI_Comm @var{comm})
Clear the send and receive communication cache for all data. The
function has to be called synchronously by all the MPI nodes.
The function does nothing if the cache mechanism is disabled (@pxref{STARPU_MPI_CACHE}).
@end deftypefun
@node MPI Insert Task
@subsection MPI Insert Task
@deftypefun int starpu_data_set_tag (starpu_data_handle_t @var{handle}, int @var{tag})
Tell StarPU-MPI which MPI tag to use when exchanging the data.
@end deftypefun
@deftypefun int starpu_data_get_tag (starpu_data_handle_t @var{handle})
Returns the MPI tag to be used when exchanging the data.
@end deftypefun
@deftypefun int starpu_data_set_rank (starpu_data_handle_t @var{handle}, int @var{rank})
Tell StarPU-MPI which MPI node "owns" a given data, that is, the node which will
always keep an up-to-date value, and will by default execute tasks which write
to it.
@end deftypefun
@deftypefun int starpu_data_get_rank (starpu_data_handle_t @var{handle})
Returns the last value set by @code{starpu_data_set_rank}.
@end deftypefun
@deftypefun starpu_data_handle_t starpu_data_get_data_handle_from_tag (int @var{tag})
Returns the data handle associated to the MPI tag, or NULL if there is none.
@end deftypefun
@defmac STARPU_EXECUTE_ON_NODE
This macro is used when calling @code{starpu_mpi_insert_task}, and
must be followed by an integer value which specifies the node on which
to execute the codelet.
@end defmac
@defmac STARPU_EXECUTE_ON_DATA
This macro is used when calling @code{starpu_mpi_insert_task}, and
must be followed by a data handle to specify that the node owning the
given data will execute the codelet.
@end defmac
@deftypefun int starpu_mpi_insert_task (MPI_Comm @var{comm}, struct starpu_codelet *@var{codelet}, ...)
Create and submit a task corresponding to @var{codelet} with the following
arguments. The argument list must be zero-terminated.
The arguments following the codelet are of the same types as for the
function @code{starpu_insert_task} defined in @ref{Insert Task
Utility}. The extra argument @code{STARPU_EXECUTE_ON_NODE} followed by an
integer allows specifying the MPI node to execute the codelet. It is also
possible to specify that the node owning a specific data will execute
the codelet, by using @code{STARPU_EXECUTE_ON_DATA} followed by a data
handle.
The internal algorithm is as follows:
@enumerate
@item Find out which MPI node is going to execute the codelet.
@enumerate
@item If there is only one node owning data in W mode, it will
be selected;
@item If several nodes own data in W mode, the one selected
will be the one having the least data in R mode, so as
to minimize the amount of data to be transferred;
@item The argument @code{STARPU_EXECUTE_ON_NODE} followed by an
integer can be used to specify the node;
@item The argument @code{STARPU_EXECUTE_ON_DATA} followed by a
data handle can be used to specify that the node owning the given
data will execute the codelet.
@end enumerate
@item Send and receive data as requested. Nodes owning data which need to be
read by the task send them to the MPI node which will execute it. The
latter receives them.
@item Execute the codelet. This is done by the MPI node selected in the
1st step of the algorithm.
@item If several MPI nodes own data to be written to, send the written
data back to their owners.
@end enumerate
The algorithm also includes a communication cache mechanism that
avoids sending data twice to the same MPI node, unless the data
has been modified. The cache can be disabled
(@pxref{STARPU_MPI_CACHE}).
@c todo: say more about the cache
@end deftypefun
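A hedged sketch of the usual calling pattern (the codelet @code{cl}, the
handles @code{xh} and @code{yh} and the chosen owners and tags are
assumptions):
@smallexample
/* Each handle needs an owner and an MPI tag before task insertion. */
starpu_data_set_rank(xh, 0);
starpu_data_set_tag(xh, 10);
starpu_data_set_rank(yh, 1);
starpu_data_set_tag(yh, 11);

/* Every MPI node calls this; StarPU-MPI decides where the task runs
 * and posts the required transfers automatically. */
starpu_mpi_insert_task(MPI_COMM_WORLD, &cl,
                       STARPU_RW, yh,
                       STARPU_R,  xh,
                       0);
@end smallexample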
@deftypefun void starpu_mpi_get_data_on_node (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle}, int @var{node})
Transfer data @var{data_handle} to MPI node @var{node}, sending it from its
owner if needed. At least the target node and the owner have to call the
function.
@end deftypefun
@deftypefun void starpu_mpi_get_data_on_node_detached (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle}, int @var{node}, {void (*}@var{callback})(void*), {void *}@var{arg})
Transfer data @var{data_handle} to MPI node @var{node}, sending it from its
owner if needed. At least the target node and the owner have to call the
function. On reception, the @var{callback} function is called with the
argument @var{arg}.
@end deftypefun
@node Collective Operations
@subsection Collective Operations
@deftypefun void starpu_mpi_redux_data (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle})
Perform a reduction on the given data. All nodes send the data to its
owner node, which performs the reduction.
@end deftypefun
@deftypefun int starpu_mpi_scatter_detached (starpu_data_handle_t *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm}, {void (*}@var{scallback})(void *), {void *}@var{sarg}, {void (*}@var{rcallback})(void *), {void *}@var{rarg})
Scatter data among processes of the communicator based on the ownership of
the data. For each data of the array @var{data_handles}, the
process @var{root} sends the data to the process owning this data.
Processes receiving data must have valid data handles to receive them.
On completion of the collective communication, the @var{scallback} function is
called with the argument @var{sarg} on the process @var{root}, and the @var{rcallback} function is
called with the argument @var{rarg} on any other process.
@end deftypefun
@deftypefun int starpu_mpi_gather_detached (starpu_data_handle_t *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm}, {void (*}@var{scallback})(void *), {void *}@var{sarg}, {void (*}@var{rcallback})(void *), {void *}@var{rarg})
Gather data from the different processes of the communicator onto the
process @var{root}. Each process owning data handles in the array
@var{data_handles} will send them to the process @var{root}. The
process @var{root} must have valid data handles to receive the data.
On completion of the collective communication, the @var{rcallback} function is
called with the argument @var{rarg} on the process @var{root}, and the @var{scallback} function is
called with the argument @var{sarg} on any other process.
@end deftypefun
@node Task Bundles
@section Task Bundles
@deftp {Data Type} {starpu_task_bundle_t}
Opaque structure describing a list of tasks that should be scheduled
on the same worker whenever possible. It must be considered as a
hint given to the scheduler as there is no guarantee that they will be
executed on the same worker.
@end deftp
@deftypefun void starpu_task_bundle_create ({starpu_task_bundle_t *}@var{bundle})
Factory function creating and initializing @var{bundle}. When the call returns,
the needed memory is allocated and @var{bundle} is ready to use.
@end deftypefun
@deftypefun int starpu_task_bundle_insert (starpu_task_bundle_t @var{bundle}, {struct starpu_task *}@var{task})
Insert @var{task} in @var{bundle}. Until @var{task} is removed from @var{bundle}, its expected length and data transfer time will be considered along with those of the other tasks of @var{bundle}.
This function must not be called if @var{bundle} is already closed and/or @var{task} is already submitted.
@end deftypefun
@deftypefun int starpu_task_bundle_remove (starpu_task_bundle_t @var{bundle}, {struct starpu_task *}@var{task})
Remove @var{task} from @var{bundle}.
Of course @var{task} must have been previously inserted into @var{bundle}.
This function must not be called if @var{bundle} is already closed and/or @var{task} is already submitted. Doing so would result in undefined behaviour.
@end deftypefun
@deftypefun void starpu_task_bundle_close (starpu_task_bundle_t @var{bundle})
Inform the runtime that the user will not modify @var{bundle} anymore, i.e. no more tasks will be inserted or removed. The runtime can then destroy it when possible.
@end deftypefun
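A hedged sketch of the intended life cycle of a bundle; the tasks @code{t1}
and @code{t2} are assumed to be created but not yet submitted:
@smallexample
starpu_task_bundle_t bundle;

starpu_task_bundle_create(&bundle);

/* Hint the scheduler that t1 and t2 should run on the same worker. */
starpu_task_bundle_insert(bundle, t1);
starpu_task_bundle_insert(bundle, t2);

starpu_task_submit(t1);
starpu_task_submit(t2);

/* No more insertions or removals: the runtime may reclaim the bundle. */
starpu_task_bundle_close(bundle);
@end smallexample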
@deftypefun double starpu_task_bundle_expected_length (starpu_task_bundle_t @var{bundle}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Return the expected duration of the entire task bundle in µs.
@end deftypefun
@deftypefun double starpu_task_bundle_expected_power (starpu_task_bundle_t @var{bundle}, enum starpu_perf_archtype @var{arch}, unsigned @var{nimpl})
Return the expected power consumption of the entire task bundle in J.
@end deftypefun
@deftypefun double starpu_task_bundle_expected_data_transfer_time (starpu_task_bundle_t @var{bundle}, unsigned @var{memory_node})
Return the time (in µs) expected to transfer all data used within the bundle.
@end deftypefun
@node Task Lists
@section Task Lists
@deftp {Data Type} {struct starpu_task_list}
Stores a doubly-linked list of tasks.
@end deftp
@deftypefun void starpu_task_list_init ({struct starpu_task_list *}@var{list})
Initialize a list structure.
@end deftypefun
@deftypefun void starpu_task_list_push_front ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Push a task at the front of a list.
@end deftypefun
@deftypefun void starpu_task_list_push_back ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Push a task at the back of a list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_front ({struct starpu_task_list *}@var{list})
Get the front of the list (without removing it).
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_back ({struct starpu_task_list *}@var{list})
Get the back of the list (without removing it).
@end deftypefun
@deftypefun int starpu_task_list_empty ({struct starpu_task_list *}@var{list})
Test if a list is empty.
@end deftypefun
@deftypefun void starpu_task_list_erase ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Remove an element from the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_pop_front ({struct starpu_task_list *}@var{list})
Remove and return the element at the front of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_pop_back ({struct starpu_task_list *}@var{list})
Remove and return the element at the back of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_begin ({struct starpu_task_list *}@var{list})
Get the first task of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_end ({struct starpu_task_list *}@var{list})
Get the end of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_next ({struct starpu_task *}@var{task})
Get the next task of the list. This is not erase-safe.
@end deftypefun
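A hedged sketch of a typical scheduler-side usage, maintaining a FIFO of
tasks (any locking required around the list is omitted; @code{some_task} is
hypothetical):
@smallexample
struct starpu_task_list queue;
struct starpu_task *task;

starpu_task_list_init(&queue);

/* Enqueue at the back, dequeue at the front: FIFO ordering. */
starpu_task_list_push_back(&queue, some_task);

/* Scan the list without removing anything (not erase-safe). */
for (task = starpu_task_list_begin(&queue);
     task != starpu_task_list_end(&queue);
     task = starpu_task_list_next(task))
@{
        /* inspect 'task' */
@}

/* Pop the oldest task, e.g. when a worker requests work. */
if (!starpu_task_list_empty(&queue))
        task = starpu_task_list_pop_front(&queue);
@end smallexample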
@node Using Parallel Tasks
@section Using Parallel Tasks
These are used by parallel tasks:
@deftypefun int starpu_combined_worker_get_size (void)
Return the size of the current combined worker, i.e. the total number of CPUs
running the same task in the case of SPMD parallel tasks, or the total number
of threads that the task is allowed to start in the case of FORKJOIN parallel
tasks.
@end deftypefun
@deftypefun int starpu_combined_worker_get_rank (void)
Return the rank of the current thread within the combined worker. Can only be
used in FORKJOIN parallel tasks, to know which part of the task to work on.
@end deftypefun
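For instance, a FORKJOIN parallel CPU implementation may start as many
threads as the combined worker allows, here with OpenMP. This is a hedged
sketch assuming the task uses the vector data interface and that the codelet
was declared as a FORKJOIN parallel one.
@smallexample
#include <omp.h>

void forkjoin_cpu_func(void *descr[], void *cl_arg)
@{
        float *v = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
        int n = (int) STARPU_VECTOR_GET_NX(descr[0]);
        int i;

        /* Number of threads this parallel task is allowed to start. */
        int nthreads = starpu_combined_worker_get_size();

        #pragma omp parallel for num_threads(nthreads)
        for (i = 0; i < n; i++)
                v[i] *= 2.0f;
@}
@end smallexample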
Most of these are used for schedulers which support parallel tasks.
@deftypefun unsigned starpu_combined_worker_get_count (void)
Return the number of different combined workers.
@end deftypefun
@deftypefun int starpu_combined_worker_get_id (void)
Return the identifier of the current combined worker.
@end deftypefun
@deftypefun int starpu_combined_worker_assign_workerid (int @var{nworkers}, int @var{workerid_array}[])
Register a new combined worker and get its identifier.
@end deftypefun
@deftypefun int starpu_combined_worker_get_description (int @var{workerid}, {int *}@var{worker_size}, {int **}@var{combined_workerid})
Get the description of a combined worker.
@end deftypefun
@deftypefun int starpu_combined_worker_can_execute_task (unsigned @var{workerid}, {struct starpu_task *}@var{task}, unsigned @var{nimpl})
Variant of @code{starpu_worker_can_execute_task} compatible with combined workers.
@end deftypefun
@deftp {Data Type} {struct starpu_machine_topology}
@table @asis
@item @code{unsigned nworkers}
Total number of workers.
@item @code{unsigned ncombinedworkers}
Total number of combined workers.
@item @code{hwloc_topology_t hwtopology}
Topology as detected by hwloc.
To maintain ABI compatibility when hwloc is not available, the field
is replaced with @code{void *dummy}.
@item @code{unsigned nhwcpus}
Total number of CPUs, as detected by the topology code. May be different from
the actual number of CPU workers.
@item @code{unsigned nhwcudagpus}
Total number of CUDA devices, as detected. May be different from the actual
number of CUDA workers.
@item @code{unsigned nhwopenclgpus}
Total number of OpenCL devices, as detected. May be different from the actual
number of OpenCL workers.
@item @code{unsigned ncpus}
Actual number of CPU workers used by StarPU.
@item @code{unsigned ncudagpus}
Actual number of CUDA workers used by StarPU.
@item @code{unsigned nopenclgpus}
Actual number of OpenCL workers used by StarPU.
@item @code{unsigned workers_bindid[STARPU_NMAXWORKERS]}
Indicates the successive CPU identifiers that should be used to bind the
workers. It is either filled according to the user's explicit
parameters (from @code{starpu_conf}) or according to the @code{STARPU_WORKERS_CPUID}
environment variable. Otherwise, a round-robin policy is used to distribute the
workers over the CPUs.
@item @code{unsigned workers_cuda_gpuid[STARPU_NMAXWORKERS]}
Indicates the successive GPU identifiers that should be used by the CUDA
driver. It is either filled according to the user's explicit parameters (from
@code{starpu_conf}) or according to the @code{STARPU_WORKERS_CUDAID} environment variable. Otherwise,
they are taken in ID order.
@item @code{unsigned workers_opencl_gpuid[STARPU_NMAXWORKERS]}
Indicates the successive device identifiers that should be used by the OpenCL
driver. It is either filled according to the user's explicit parameters (from
@code{starpu_conf}) or according to the @code{STARPU_WORKERS_OPENCLID} environment variable. Otherwise,
they are taken in ID order.
@end table
@end deftp
@node Scheduling Contexts
@section Scheduling Contexts
StarPU allows, on the one hand, grouping workers into combined workers in order to execute a parallel task and, on the other hand, grouping tasks into bundles that will be executed by a single specified worker.
In contrast, grouping workers into a scheduling context means that StarPU tasks are submitted to the context and scheduled with the policy assigned to it.
Scheduling contexts can be created, deleted and modified dynamically.
@deftypefun unsigned starpu_sched_ctx_create (const char *@var{policy_name}, int *@var{workerids_ctx}, int @var{nworkers_ctx}, const char *@var{sched_ctx_name})
This function creates a scheduling context which uses the scheduling policy indicated in the first argument and assigns the workers indicated in the second argument to execute the tasks submitted to it.
The return value is the identifier of the context that has just been created. It will be further used to indicate the context the tasks will be submitted to. The return value should be at most @code{STARPU_NMAX_SCHED_CTXS}.
@end deftypefun
@deftypefun void starpu_sched_ctx_delete (unsigned @var{sched_ctx_id})
Delete scheduling context @var{sched_ctx_id} and transfer remaining workers to the inheritor scheduling context.
@end deftypefun
@deftypefun void starpu_sched_ctx_add_workers ({int *}@var{workerids_ctx}, int @var{nworkers_ctx}, unsigned @var{sched_ctx_id})
This function dynamically adds the workers indicated in the first argument to the context indicated in the last argument. The last argument cannot be greater than @code{STARPU_NMAX_SCHED_CTXS}.
@end deftypefun
@deftypefun void starpu_sched_ctx_remove_workers ({int *}@var{workerids_ctx}, int @var{nworkers_ctx}, unsigned @var{sched_ctx_id})
This function removes the workers indicated in the first argument from the context indicated in the last argument. The last argument cannot be greater than @code{STARPU_NMAX_SCHED_CTXS}.
@end deftypefun
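A hedged sketch of the typical life cycle of a context; the worker
identifiers, the @code{eager} policy name and the context name are
assumptions:
@smallexample
int workers[2] = @{0, 1@};
unsigned ctx;

/* Create a context scheduled with the "eager" policy on two workers. */
ctx = starpu_sched_ctx_create("eager", workers, 2, "my_ctx");

/* Subsequent tasks will be submitted to this context. */
starpu_sched_ctx_set_context(&ctx);
/* ... starpu_task_submit() calls ... */

starpu_task_wait_for_all();

/* Hand the workers back to the inheritor context and delete ctx. */
starpu_sched_ctx_delete(ctx);
@end smallexample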
A scheduling context manages a collection of workers that can be stored in different types of data structures. A generic structure is thus available in order to simplify the choice of its type.
Only the list data structure is available so far, but further data structure implementations (such as trees) are foreseen.
@deftp {Data Type} {struct starpu_worker_collection}
@table @asis
@item @code{void *workerids}
The workerids managed by the collection.
@item @code{unsigned nworkers}
The number of workerids.
@item @code{pthread_key_t cursor_key} (optional)
The cursor needed to iterate the collection (depending on the data structure).
@item @code{int type}
The type of structure (currently @code{STARPU_WORKER_LIST} is the only one available).
@item @code{unsigned (*has_next)(struct starpu_worker_collection *workers)}
Checks if there is a next worker.
@item @code{int (*get_next)(struct starpu_worker_collection *workers)}
Gets the next worker.
@item @code{int (*add)(struct starpu_worker_collection *workers, int worker)}
Adds a worker to the collection.
@item @code{int (*remove)(struct starpu_worker_collection *workers, int worker)}
Removes a worker from the collection.
@item @code{void* (*init)(struct starpu_worker_collection *workers)}
Initialize the collection.
@item @code{void (*deinit)(struct starpu_worker_collection *workers)}
Deinitialize the collection.
@item @code{void (*init_cursor)(struct starpu_worker_collection *workers)} (optional)
Initialize the cursor if there is one.
@item @code{void (*deinit_cursor)(struct starpu_worker_collection *workers)} (optional)
Deinitialize the cursor if there is one.
@end table
@end deftp
@deftypefun struct starpu_worker_collection* starpu_sched_ctx_create_worker_collection (unsigned @var{sched_ctx_id}, int @var{type})
Create a worker collection of the type indicated by the last parameter for the context specified through the first parameter.
@end deftypefun
@deftypefun void starpu_sched_ctx_delete_worker_collection (unsigned @var{sched_ctx_id})
Delete the worker collection of the specified scheduling context.
@end deftypefun
@deftypefun struct starpu_worker_collection* starpu_sched_ctx_get_worker_collection (unsigned @var{sched_ctx_id})
Return the worker collection managed by the indicated context.
@end deftypefun
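Inside a scheduling policy, the collection is typically traversed as follows
(a hedged sketch; @code{sched_ctx_id} is assumed to be known and any
required locking is omitted):
@smallexample
struct starpu_worker_collection *workers =
        starpu_sched_ctx_get_worker_collection(sched_ctx_id);

if (workers->init_cursor)
        workers->init_cursor(workers);

while (workers->has_next(workers))
@{
        int workerid = workers->get_next(workers);
        /* e.g. check starpu_worker_can_execute_task(workerid, task, 0) */
@}

if (workers->deinit_cursor)
        workers->deinit_cursor(workers);
@end smallexample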
@deftypefun pthread_mutex_t* starpu_sched_ctx_get_changing_ctx_mutex (unsigned @var{sched_ctx_id})
TODO
@end deftypefun
@deftypefun void starpu_sched_ctx_set_context (unsigned *@var{sched_ctx_id})
Set the scheduling context the subsequent tasks will be submitted to.
@end deftypefun
@deftypefun unsigned starpu_sched_ctx_get_context (void)
Return the scheduling context the tasks are currently submitted to.
@end deftypefun
@deftypefun unsigned starpu_sched_ctx_get_nworkers (unsigned @var{sched_ctx_id})
Return the number of workers managed by the specified context
(usually needed to verify if it manages any workers or if it should be blocked).
@end deftypefun
@deftypefun unsigned starpu_sched_ctx_get_nshared_workers (unsigned @var{sched_ctx_id}, unsigned @var{sched_ctx_id2})
Return the number of workers shared by two contexts.
@end deftypefun
@deftypefun int starpu_sched_ctx_set_min_priority (unsigned @var{sched_ctx_id}, int @var{min_prio})
Defines the minimum task priority level supported by the scheduling
policy of the given scheduler context. The
default minimum priority level is the same as the default priority level, which
is 0 by convention. The application may access that value by calling the
@code{starpu_sched_ctx_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly from the application.
@end deftypefun
@deftypefun int starpu_sched_ctx_set_max_priority (unsigned @var{sched_ctx_id}, int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy of the given scheduler context. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_ctx_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly from the application.
@end deftypefun
@deftypefun int starpu_sched_ctx_get_min_priority (unsigned @var{sched_ctx_id})
Returns the current minimum priority level supported by the
scheduling policy of the given scheduler context.
@end deftypefun
@deftypefun int starpu_sched_ctx_get_max_priority (unsigned @var{sched_ctx_id})
Returns the current maximum priority level supported by the
scheduling policy of the given scheduler context.
@end deftypefun
@node Scheduling Policy
@section Scheduling Policy
TODO
While StarPU comes with a variety of scheduling policies (@pxref{Task
scheduling policy}), it may sometimes be desirable to implement custom
policies to address specific problems. The API described below allows
users to write their own scheduling policy.
@deftp {Data Type} {struct starpu_sched_policy}
This structure contains all the methods that implement a scheduling policy. An
application may specify which scheduling strategy to use in the @code{sched_policy}
field of the @code{starpu_conf} structure passed to the @code{starpu_init}
function. The different fields are:
@table @asis
@item @code{void (*init_sched)(unsigned sched_ctx_id)}
Initialize the scheduling policy.
@item @code{void (*deinit_sched)(unsigned sched_ctx_id)}
Cleanup the scheduling policy.
@item @code{int (*push_task)(struct starpu_task *)}
Insert a task into the scheduler.
@item @code{void (*push_task_notify)(struct starpu_task *, int workerid)}
Notify the scheduler that a task was pushed on a given worker. This method is
called when a task that was explicitly assigned to a worker becomes ready and
is about to be executed by the worker. This method therefore permits keeping
the state of the scheduler coherent even when StarPU bypasses the scheduling
strategy.
@item @code{struct starpu_task *(*pop_task)(unsigned sched_ctx_id)} (optional)
Get a task from the scheduler. The mutex associated to the worker is already
taken when this method is called. If this method is defined as @code{NULL}, the
worker will only execute tasks from its local queue. In this case, the
@code{push_task} method should use the @code{starpu_push_local_task} method to
assign tasks to the different workers.
@item @code{struct starpu_task *(*pop_every_task)(unsigned sched_ctx_id)}
Remove all available tasks from the scheduler (tasks are chained by the means
of the prev and next fields of the starpu_task structure). The mutex associated
to the worker is already taken when this method is called. This is currently
not used.
@item @code{void (*pre_exec_hook)(struct starpu_task *)} (optional)
This method is called every time a task is starting.
@item @code{void (*post_exec_hook)(struct starpu_task *)} (optional)
This method is called every time a task has been executed.
@item @code{void (*add_workers)(unsigned sched_ctx_id, int *workerids, unsigned nworkers)}
Initialize scheduling structures corresponding to each worker used by the policy.
@item @code{void (*remove_workers)(unsigned sched_ctx_id, int *workerids, unsigned nworkers)}
Deinitialize scheduling structures corresponding to each worker used by the policy.
@item @code{const char *policy_name} (optional)
Name of the policy.
@item @code{const char *policy_description} (optional)
Description of the policy.
@end table
@end deftp
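A hedged sketch of a deliberately trivial policy that forwards every task to
worker 0's local queue; the names are arbitrary, and a real policy would also
provide the worker-management and context methods:
@smallexample
static int dummy_push_task(struct starpu_task *task)
@{
        /* back = 0 keeps a FIFO ordering in worker 0's local queue. */
        return starpu_push_local_task(0, task, 0);
@}

static struct starpu_sched_policy dummy_policy =
@{
        .push_task = dummy_push_task,
        .pop_task = NULL,   /* workers only run their local queues */
        .policy_name = "dummy",
        .policy_description = "push every task to worker 0",
@};

/* Registration sketch, before starpu_init():
 *   struct starpu_conf conf;
 *   starpu_conf_init(&conf);
 *   conf.sched_policy = &dummy_policy;
 *   starpu_init(&conf);
 */
@end smallexample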
@deftypefun {struct starpu_sched_policy **} starpu_sched_get_predefined_policies ()
Return a NULL-terminated array of all the predefined scheduling policies.
@end deftypefun
@deftypefun void starpu_sched_ctx_set_policy_data (unsigned @var{sched_ctx_id}, {void *} @var{policy_data})
Each scheduling policy uses some specific data (queues, variables, additional condition variables).
It is stored in a policy-specific structure. This function assigns that structure to a scheduling context.
@end deftypefun
@deftypefun void* starpu_sched_ctx_get_policy_data (unsigned @var{sched_ctx_id})
Returns the policy data previously assigned to a context.
@end deftypefun
@deftypefun int starpu_sched_set_min_priority (int @var{min_prio})
Defines the minimum task priority level supported by the scheduling policy. The
default minimum priority level is the same as the default priority level, which
is 0 by convention. The application may access that value by calling the
@code{starpu_sched_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly from the application.
@end deftypefun
@deftypefun int starpu_sched_set_max_priority (int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly from the application.
@end deftypefun
@deftypefun int starpu_sched_get_min_priority (void)
Returns the current minimum priority level supported by the
scheduling policy.
@end deftypefun
@deftypefun int starpu_sched_get_max_priority (void)
Returns the current maximum priority level supported by the
scheduling policy.
@end deftypefun
@deftypefun int starpu_push_local_task (int @var{workerid}, {struct starpu_task} *@var{task}, int @var{back})
The scheduling policy may put tasks directly into a worker's local queue so
that it is not always necessary to create its own queue when the local queue
is sufficient. If @var{back} is not zero, @var{task} is put at the back of the queue
where the worker will pop tasks first. Setting @var{back} to 0 therefore ensures
a FIFO ordering.
@end deftypefun
@deftypefun int starpu_push_task_end ({struct starpu_task} *@var{task})
This function must be called by a scheduler to notify that the given
task has just been pushed.
@end deftypefun
@deftypefun int starpu_worker_can_execute_task (unsigned @var{workerid}, {struct starpu_task *}@var{task}, unsigned {nimpl})
Check if the worker specified by @var{workerid} can execute the codelet. Schedulers need to call it before assigning a task to a worker, otherwise the task may fail to execute.
@end deftypefun
@deftypefun double starpu_timing_now (void)
Return the current date in µs.
@end deftypefun
@deftypefun uint32_t starpu_task_footprint ({struct starpu_perfmodel *}@var{model}, {struct starpu_task *} @var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the footprint for a given task.
@end deftypefun
@deftypefun double starpu_task_expected_length ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the expected task duration in µs.
@end deftypefun
@deftypefun double starpu_worker_get_relative_speedup ({enum starpu_perf_archtype} @var{perf_archtype})
Returns an estimated speedup factor relative to CPU speed.
@end deftypefun
@deftypefun double starpu_task_expected_data_transfer_time (unsigned @var{memory_node}, {struct starpu_task *}@var{task})
Returns the expected data transfer time in µs.
@end deftypefun
@deftypefun double starpu_data_expected_transfer_time (starpu_data_handle_t @var{handle}, unsigned @var{memory_node}, {enum starpu_access_mode} @var{mode})
Predict the transfer time (in µs) to move a handle to a memory node.
@end deftypefun
@deftypefun double starpu_task_expected_power ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the expected power consumption in J.
@end deftypefun
@deftypefun double starpu_task_expected_conversion_time ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned {nimpl})
Returns the expected conversion time in ms (multiformat interface only).
@end deftypefun
@node Running drivers
@section Running drivers
@deftypefun int starpu_driver_run ({struct starpu_driver *}@var{d})
Initialize the given driver, run it until it receives a request to terminate,
deinitialize it and return 0 on success. It returns -EINVAL if @code{d->type}
is not a valid StarPU device type (STARPU_CPU_WORKER, STARPU_CUDA_WORKER or
STARPU_OPENCL_WORKER). This is the same as using the following
functions: calling @code{starpu_driver_init()}, then calling
@code{starpu_driver_run_once()} in a loop, and finally
@code{starpu_driver_deinit()}.
@end deftypefun
@deftypefun int starpu_driver_init (struct starpu_driver *@var{d})
Initialize the given driver. Returns 0 on success, -EINVAL if
@code{d->type} is not a valid StarPU device type (STARPU_CPU_WORKER,
STARPU_CUDA_WORKER or STARPU_OPENCL_WORKER).
@end deftypefun
@deftypefun int starpu_driver_run_once (struct starpu_driver *@var{d})
Run the driver once, then return 0 on success, -EINVAL if
@code{d->type} is not a valid StarPU device type (STARPU_CPU_WORKER,
STARPU_CUDA_WORKER or STARPU_OPENCL_WORKER).
@end deftypefun
@deftypefun int starpu_driver_deinit (struct starpu_driver *@var{d})
Deinitialize the given driver. Returns 0 on success, -EINVAL if
@code{d->type} is not a valid StarPU device type (STARPU_CPU_WORKER,
STARPU_CUDA_WORKER or STARPU_OPENCL_WORKER).
@end deftypefun
@deftypefun void starpu_drivers_request_termination (void)
Notify all running drivers that they should terminate.
@end deftypefun
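A hedged sketch of the explicit loop that @code{starpu_driver_run} is
equivalent to; how the driver structure designates its device and how the
application decides to stop (the @code{done} flag below) are assumptions of
this sketch:
@smallexample
void run_my_cpu_driver(volatile int *done)
@{
        struct starpu_driver d;
        d.type = STARPU_CPU_WORKER;
        /* d.id must also be filled to designate the device to drive. */

        if (starpu_driver_init(&d) != 0)
                return;

        /* Another thread sets *done, e.g. after calling
         * starpu_drivers_request_termination(). */
        while (!*done)
                starpu_driver_run_once(&d);

        starpu_driver_deinit(&d);
@}
@end smallexample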
@node Expert mode
@section Expert mode
@deftypefun void starpu_wake_all_blocked_workers (void)
Wake all the workers, so they can inspect data requests and task submissions
again.
@end deftypefun
@deftypefun int starpu_progression_hook_register (unsigned (*@var{func})(void *arg), void *@var{arg})
Register a progression hook, to be called when workers are idle.
@end deftypefun
@deftypefun void starpu_progression_hook_deregister (int @var{hook_id})
Unregister a given progression hook.
@end deftypefun
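A hedged sketch of a progression hook polling an external communication
library while workers are idle; @code{poll_network} and the meaning given to
the hook's return value are assumptions, and the value returned by
@code{starpu_progression_hook_register} is assumed to be the identifier
expected by @code{starpu_progression_hook_deregister}:
@smallexample
static unsigned my_hook(void *arg)
@{
        /* Called when workers are idle: make the external library
         * progress and report whether something happened. */
        return poll_network(arg);
@}

int hook_id = starpu_progression_hook_register(my_hook, NULL);
/* ... */
starpu_progression_hook_deregister(hook_id);
@end smallexample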