@c -*-texinfo-*-
@c This file is part of the StarPU Handbook.
@c Copyright (C) 2009--2011 Universit@'e de Bordeaux 1
@c Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
@c Copyright (C) 2011, 2012 Institut National de Recherche en Informatique et Automatique
@c See the file starpu.texi for copying conditions.
@menu
* Versioning::
* Initialization and Termination::
* Standard memory library::
* Workers' Properties::
* Data Management::
* Data Interfaces::
* Data Partition::
* Multiformat Data Interface::
* Codelets and Tasks::
* Insert Task::
* Explicit Dependencies::
* Implicit Data Dependencies::
* Performance Model API::
* Profiling API::
* Theoretical lower bound on execution time API::
* CUDA extensions::
* OpenCL extensions::
* Miscellaneous helpers::
* FXT Support::
* MPI::
* Task Bundles::
* Task Lists::
* Using Parallel Tasks::
* Scheduling Contexts::
* Scheduling Policy::
* Running drivers::
* Expert mode::
@end menu
@node Versioning
@section Versioning
@defmac STARPU_MAJOR_VERSION
Define the major version of StarPU.
@end defmac
@defmac STARPU_MINOR_VERSION
Define the minor version of StarPU.
@end defmac
@node Initialization and Termination
@section Initialization and Termination
@deftp {Data Type} {struct starpu_driver}
@table @asis
@item @code{enum starpu_archtype type}
The type of the driver. Only @code{STARPU_CPU_DRIVER}, @code{STARPU_CUDA_DRIVER}
and @code{STARPU_OPENCL_DRIVER} are currently supported.
@item @code{union id} Anonymous union
@table @asis
@item @code{unsigned cpu_id}
Should only be used if type is @code{STARPU_CPU_WORKER}.
@item @code{unsigned cuda_id}
Should only be used if type is @code{STARPU_CUDA_WORKER}.
@item @code{cl_device_id opencl_id}
Should only be used if type is @code{STARPU_OPENCL_WORKER}.
@end table
@end table
@end deftp
@deftp {Data Type} {struct starpu_conf}
This structure is passed to the @code{starpu_init} function in order
to configure StarPU. It has to be initialized with @code{starpu_conf_init}.
When the default value is used, StarPU automatically selects the number of
processing units and takes the default scheduling policy. Environment
variables override the equivalent parameters.
@table @asis
@item @code{const char *sched_policy_name} (default = NULL)
This is the name of the scheduling policy. This can also be specified
with the @code{STARPU_SCHED} environment variable.
@item @code{struct starpu_sched_policy *sched_policy} (default = NULL)
This is the definition of the scheduling policy. This field is ignored
if @code{sched_policy_name} is set.
@item @code{int ncpus} (default = -1)
This is the number of CPU cores that StarPU can use. This can also be
specified with the @code{STARPU_NCPU} environment variable.
@item @code{int ncuda} (default = -1)
This is the number of CUDA devices that StarPU can use. This can also
be specified with the @code{STARPU_NCUDA} environment variable.
@item @code{int nopencl} (default = -1)
This is the number of OpenCL devices that StarPU can use. This can
also be specified with the @code{STARPU_NOPENCL} environment variable.
@item @code{unsigned use_explicit_workers_bindid} (default = 0)
If this flag is set, the @code{workers_bindid} array indicates where the
different workers are bound, otherwise StarPU automatically selects where to
bind the different workers. This can also be specified with the
@code{STARPU_WORKERS_CPUID} environment variable.
@item @code{unsigned workers_bindid[STARPU_NMAXWORKERS]}
If the @code{use_explicit_workers_bindid} flag is set, this array
indicates where to bind the different workers. The i-th entry of the
@code{workers_bindid} array indicates the logical identifier of the
processor which should execute the i-th worker. Note that the logical
ordering of the CPUs is either determined by the OS, or provided by
the @code{hwloc} library in case it is available.
@item @code{unsigned use_explicit_workers_cuda_gpuid} (default = 0)
If this flag is set, the CUDA workers will be attached to the CUDA devices
specified in the @code{workers_cuda_gpuid} array. Otherwise, StarPU assigns
the CUDA devices in a round-robin fashion. This can also be specified with the
@code{STARPU_WORKERS_CUDAID} environment variable.
@item @code{unsigned workers_cuda_gpuid[STARPU_NMAXWORKERS]}
If the @code{use_explicit_workers_cuda_gpuid} flag is set, this array
contains the logical identifiers of the CUDA devices (as used by
@code{cudaGetDevice}).
@item @code{unsigned use_explicit_workers_opencl_gpuid} (default = 0)
If this flag is set, the OpenCL workers will be attached to the OpenCL devices
specified in the @code{workers_opencl_gpuid} array. Otherwise, StarPU assigns
the OpenCL devices in a round-robin fashion. This can also be specified with
the @code{STARPU_WORKERS_OPENCLID} environment variable.
@item @code{unsigned workers_opencl_gpuid[STARPU_NMAXWORKERS]}
If the @code{use_explicit_workers_opencl_gpuid} flag is set, this array
contains the logical identifiers of the OpenCL devices to be used.
@item @code{int calibrate} (default = 0)
If this flag is set, StarPU will calibrate the performance models when
executing tasks. If this value is equal to @code{-1}, the default value is
used. If the value is equal to @code{1}, it will force continuing
calibration. If the value is equal to @code{2}, the existing performance
models will be overwritten. This can also be specified with the
@code{STARPU_CALIBRATE} environment variable.
@item @code{int bus_calibrate} (default = 0)
If this flag is set, StarPU will recalibrate the bus. If this value is equal
to @code{-1}, the default value is used. This can also be specified with the
@code{STARPU_BUS_CALIBRATE} environment variable.
@item @code{int single_combined_worker} (default = 0)
By default, StarPU executes parallel tasks concurrently.
Some parallel libraries (e.g. most OpenMP implementations) however do
not support concurrent calls to parallel code. In such a case, setting this flag
makes StarPU only start one parallel task at a time (but other
CPU and GPU tasks are not affected and can be run concurrently). The parallel
task scheduler will however still try varying combined worker
sizes to look for the most efficient ones.
This can also be specified with the @code{STARPU_SINGLE_COMBINED_WORKER} environment variable.
@item @code{int disable_asynchronous_copy} (default = 0)
This flag should be set to 1 to disable asynchronous copies between
CPUs and all accelerators. This can also be specified with the
@code{STARPU_DISABLE_ASYNCHRONOUS_COPY} environment variable.
The AMD implementation of OpenCL is known to
fail when copying data asynchronously. When using this implementation,
it is therefore necessary to disable asynchronous data transfers.
This can also be specified at compilation time by giving to the
configure script the option @code{--disable-asynchronous-copy}.
@item @code{int disable_asynchronous_cuda_copy} (default = 0)
This flag should be set to 1 to disable asynchronous copies between
CPUs and CUDA accelerators. This can also be specified with the
@code{STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY} environment variable.
This can also be specified at compilation time by giving to the
configure script the option @code{--disable-asynchronous-cuda-copy}.
@item @code{int disable_asynchronous_opencl_copy} (default = 0)
This flag should be set to 1 to disable asynchronous copies between
CPUs and OpenCL accelerators. This can also be specified with the
@code{STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY} environment variable.
The AMD implementation of OpenCL is known to
fail when copying data asynchronously. When using this implementation,
it is therefore necessary to disable asynchronous data transfers.
This can also be specified at compilation time by giving to the
configure script the option @code{--disable-asynchronous-opencl-copy}.
@item @code{int *cuda_opengl_interoperability} (default = NULL)
This can be set to an array of CUDA device identifiers for which
@code{cudaGLSetGLDevice} should be called instead of @code{cudaSetDevice}. Its
size is specified by the @code{n_cuda_opengl_interoperability} field below.
@item @code{unsigned n_cuda_opengl_interoperability} (default = 0)
This has to be set to the size of the array pointed to by the
@code{cuda_opengl_interoperability} field.
@item @code{struct starpu_driver *not_launched_drivers}
The drivers that should not be launched by StarPU.
@item @code{unsigned n_not_launched_drivers}
The number of drivers that should not be launched by StarPU.
@item @code{trace_buffer_size}
Specifies the buffer size used for FxT tracing. Starting from FxT version
0.2.12, the buffer will automatically be flushed when it fills up, but it may
still be interesting to specify a bigger value to avoid any flushing (which
would disturb the trace).
@end table
@end deftp
@deftypefun int starpu_init ({struct starpu_conf *}@var{conf})
This is the StarPU initialization method, which must be called prior to any other
StarPU call. It is possible to specify StarPU's configuration (e.g. scheduling
policy, number of cores, ...) by passing a non-null argument. The default
configuration is used if the passed argument is @code{NULL}.
Upon successful completion, this function returns 0. Otherwise, @code{-ENODEV}
indicates that no worker was available (so that StarPU was not initialized).
@end deftypefun
@deftypefun int starpu_conf_init ({struct starpu_conf *}@var{conf})
This function initializes the @var{conf} structure passed as argument
with the default values. In case some configuration parameters are already
specified through environment variables, @code{starpu_conf_init} initializes
the fields of the structure according to the environment variables. For
instance if @code{STARPU_CALIBRATE} is set, its value is put in the
@code{.calibrate} field of the structure passed as argument.
Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
indicates that the argument was @code{NULL}.
@end deftypefun
@deftypefun void starpu_shutdown (void)
This is the StarPU termination method. It must be called at the end of the
application: statistics and other post-mortem debugging information are not
guaranteed to be available until this method has been called.
@end deftypefun
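The sketch below illustrates a typical initialization sequence combining
@code{starpu_conf_init}, @code{starpu_init} and @code{starpu_shutdown}; the
field values are only examples and error handling is kept minimal.
@cartouche
@smallexample
#include <starpu.h>
#include <errno.h>

int main(void)
@{
    struct starpu_conf conf;
    int ret;

    starpu_conf_init(&conf);   /* fill the structure with default values */
    conf.ncpus = 2;            /* use at most two CPU workers */
    conf.calibrate = 1;        /* force performance model calibration */

    ret = starpu_init(&conf);
    if (ret == -ENODEV)
        return 1;              /* no worker was available */

    /* ... register data and submit tasks ... */

    starpu_shutdown();
    return 0;
@}
@end smallexample
@end cartouche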
@deftypefun int starpu_asynchronous_copy_disabled (void)
Return 1 if asynchronous data transfers between CPU and accelerators
are disabled.
@end deftypefun
@deftypefun int starpu_asynchronous_cuda_copy_disabled (void)
Return 1 if asynchronous data transfers between CPU and CUDA accelerators
are disabled.
@end deftypefun
@deftypefun int starpu_asynchronous_opencl_copy_disabled (void)
Return 1 if asynchronous data transfers between CPU and OpenCL accelerators
are disabled.
@end deftypefun
@node Standard memory library
@section Standard memory library
@defmac STARPU_MALLOC_PINNED
Value passed to the function @code{starpu_malloc_flags} to
indicate that the memory allocation should be pinned.
@end defmac
@defmac STARPU_MALLOC_COUNT
Value passed to the function @code{starpu_malloc_flags} to
indicate that the memory allocation should be kept within the limit defined by
the environment variables @code{STARPU_LIMIT_CUDA_devid_MEM},
@code{STARPU_LIMIT_CUDA_MEM}, @code{STARPU_LIMIT_OPENCL_devid_MEM},
@code{STARPU_LIMIT_OPENCL_MEM} and @code{STARPU_LIMIT_CPU_MEM}
(@pxref{Limit memory}). If no memory is available, it tries to reclaim
memory from StarPU. Memory allocated this way needs to be freed by
calling the @code{starpu_free_flags} function with the same flag.
@end defmac
@deftypefun int starpu_malloc_flags (void **@var{A}, size_t @var{dim}, int @var{flags})
Performs a memory allocation based on the constraints defined by the
given @var{flags}.
@end deftypefun
@deftypefun void starpu_malloc_set_align (size_t @var{align})
This function sets an alignment constraint for @code{starpu_malloc}
allocations. @var{align} must be a power of two. This is for instance called
automatically by the OpenCL driver to specify its own alignment constraints.
@end deftypefun
@deftypefun int starpu_malloc (void **@var{A}, size_t @var{dim})
This function allocates data of the given size in main memory. It will also try to pin it in
CUDA or OpenCL, so that data transfers from this buffer can be asynchronous, and
thus permit data transfer and computation overlapping. The allocated buffer must
be freed thanks to the @code{starpu_free} function.
@end deftypefun
@deftypefun int starpu_free (void *@var{A})
This function frees memory which has previously been allocated with
@code{starpu_malloc}.
@end deftypefun
@deftypefun int starpu_free_flags (void *@var{A}, size_t @var{dim}, int @var{flags})
This function frees memory by specifying its size. The given
@var{flags} should be consistent with the ones given to
@code{starpu_malloc_flags} when allocating the memory.
@end deftypefun
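As an illustration, the following sketch allocates a pinned buffer accounted
against the memory limits and releases it with the same flags; the size
@code{N} is an arbitrary constant chosen for the example.
@cartouche
@smallexample
float *buffer;
size_t size = N * sizeof(float);

starpu_malloc_flags((void **)&buffer, size,
                    STARPU_MALLOC_PINNED | STARPU_MALLOC_COUNT);
/* ... register the buffer and submit tasks ... */
starpu_free_flags(buffer, size,
                  STARPU_MALLOC_PINNED | STARPU_MALLOC_COUNT);
@end smallexample
@end cartouche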
@deftypefun ssize_t starpu_memory_get_available (unsigned @var{node})
If a memory limit is defined on the given node (@pxref{Limit memory}),
return the amount of available memory on the node. Otherwise return
@code{-1}.
@end deftypefun
@node Workers' Properties
@section Workers' Properties
@deftp {Data Type} {enum starpu_archtype}
The different values are:
@table @asis
@item @code{STARPU_CPU_WORKER}
@item @code{STARPU_CUDA_WORKER}
@item @code{STARPU_OPENCL_WORKER}
@end table
@end deftp
@deftypefun unsigned starpu_worker_get_count (void)
This function returns the number of workers (i.e. processing units executing
StarPU tasks). The returned value should be at most @code{STARPU_NMAXWORKERS}.
@end deftypefun
@deftypefun int starpu_worker_get_count_by_type ({enum starpu_archtype} @var{type})
Returns the number of workers of the given @var{type}. A positive
(or zero) value is returned in case of success, and @code{-EINVAL} indicates
that the type is not valid.
@end deftypefun
@deftypefun unsigned starpu_cpu_worker_get_count (void)
This function returns the number of CPUs controlled by StarPU. The returned
value should be at most @code{STARPU_MAXCPUS}.
@end deftypefun
@deftypefun unsigned starpu_cuda_worker_get_count (void)
This function returns the number of CUDA devices controlled by StarPU. The returned
value should be at most @code{STARPU_MAXCUDADEVS}.
@end deftypefun
@deftypefun unsigned starpu_opencl_worker_get_count (void)
This function returns the number of OpenCL devices controlled by StarPU. The returned
value should be at most @code{STARPU_MAXOPENCLDEVS}.
@end deftypefun
@deftypefun int starpu_worker_get_id (void)
This function returns the identifier of the current worker, i.e. the one associated to the calling
thread. The returned value is either -1 if the current context is not a StarPU
worker (i.e. when called from the application outside a task or a callback), or
an integer between 0 and @code{starpu_worker_get_count() - 1}.
@end deftypefun
@deftypefun int starpu_worker_get_ids_by_type ({enum starpu_archtype} @var{type}, int *@var{workerids}, int @var{maxsize})
This function gets the list of identifiers of workers with the given
type. It fills the @var{workerids} array with the identifiers of the workers that have the type
indicated in the first argument. The @var{maxsize} argument indicates the size of the
@var{workerids} array. The returned value gives the number of identifiers that were put
in the array. @code{-ERANGE} is returned if @var{maxsize} is lower than the number of
workers with the appropriate type: in that case, the array is filled with the
first @var{maxsize} elements. To avoid such overflows, the value of @var{maxsize} can be
chosen by means of the @code{starpu_worker_get_count_by_type} function, or
by passing a value greater than or equal to @code{STARPU_NMAXWORKERS}.
@end deftypefun
@deftypefun int starpu_worker_get_by_type ({enum starpu_archtype} @var{type}, int @var{num})
This returns the identifier of the @var{num}-th worker that has the specified type
@var{type}. If there is no such worker, -1 is returned.
@end deftypefun
@deftypefun int starpu_worker_get_by_devid ({enum starpu_archtype} @var{type}, int @var{devid})
This returns the identifier of the worker that has the specified type
@var{type} and devid @var{devid} (which may not be the n-th, if some devices are
skipped for instance). If there is no such worker, -1 is returned.
@end deftypefun
@deftypefun int starpu_worker_get_devid (int @var{id})
This function returns the device id of the given worker. The worker
should be identified with the value returned by the @code{starpu_worker_get_id} function. In the case of a
CUDA worker, this device identifier is the logical device identifier exposed by
CUDA (used by the @code{cudaGetDevice} function for instance). The device
identifier of a CPU worker is the logical identifier of the core on which the
worker was bound; this identifier is either provided by the OS or by the
@code{hwloc} library in case it is available.
@end deftypefun
@deftypefun {enum starpu_archtype} starpu_worker_get_type (int @var{id})
This function returns the type of processing unit associated to a
worker. The worker identifier is a value returned by the
@code{starpu_worker_get_id} function. The returned value
indicates the architecture of the worker: @code{STARPU_CPU_WORKER} for a CPU
core, @code{STARPU_CUDA_WORKER} for a CUDA device, and
@code{STARPU_OPENCL_WORKER} for an OpenCL device. The value returned for an invalid
identifier is unspecified.
@end deftypefun
@deftypefun void starpu_worker_get_name (int @var{id}, char *@var{dst}, size_t @var{maxlen})
This function retrieves the name of a given worker.
StarPU associates a unique human readable string to each processing unit. This
function copies at most the @var{maxlen} first bytes of the unique string
associated to a worker identified by its identifier @var{id} into the
@var{dst} buffer. The caller is responsible for ensuring that @var{dst}
is a valid pointer to a buffer of @var{maxlen} bytes at least. Calling this
function on an invalid identifier results in an unspecified behaviour.
@end deftypefun
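The fragment below combines @code{starpu_worker_get_ids_by_type} and
@code{starpu_worker_get_name} to print the name of every CPU worker; it is a
plain illustration rather than a complete program.
@cartouche
@smallexample
int workerids[STARPU_NMAXWORKERS];
char name[64];
int i, n;

n = starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, workerids,
                                  STARPU_NMAXWORKERS);
for (i = 0; i < n; i++)
@{
    starpu_worker_get_name(workerids[i], name, sizeof(name));
    printf("worker %d: %s\n", workerids[i], name);
@}
@end smallexample
@end cartouche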
@deftypefun unsigned starpu_worker_get_memory_node (unsigned @var{workerid})
This function returns the identifier of the memory node associated to the
worker identified by @var{workerid}.
@end deftypefun
@deftp {Data Type} {enum starpu_node_kind}
The different values are:
@table @asis
@item @code{STARPU_UNUSED}
@item @code{STARPU_CPU_RAM}
@item @code{STARPU_CUDA_RAM}
@item @code{STARPU_OPENCL_RAM}
@end table
@end deftp
@deftypefun {enum starpu_node_kind} starpu_node_get_kind (unsigned @var{node})
Returns the type of the given node as defined by @code{enum
starpu_node_kind}. For example, when defining a new data interface,
this function should be used in the allocation function to determine
on which device the memory needs to be allocated.
@end deftypefun
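For instance, the allocation method of a custom data interface may dispatch on
the node kind as sketched below; the helper name, return convention and
per-driver allocation calls mentioned in the comments are only illustrative.
@cartouche
@smallexample
static int allocate_on(unsigned node)
@{
    switch (starpu_node_get_kind(node))
    @{
    case STARPU_CPU_RAM:
        /* allocate in main memory, e.g. with starpu_malloc */
        return 0;
    case STARPU_CUDA_RAM:
        /* allocate on the CUDA device, e.g. with cudaMalloc */
        return 0;
    case STARPU_OPENCL_RAM:
        /* allocate on the OpenCL device, e.g. with clCreateBuffer */
        return 0;
    default:
        return -1;  /* unsupported node kind */
    @}
@}
@end smallexample
@end cartouche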
@node Data Management
@section Data Management
@menu
* Introduction to Data Management::
* Basic Data Management API::
* Access registered data from the application::
@end menu
This section describes the data management facilities provided by StarPU.
We show how to use existing data interfaces in @ref{Data Interfaces}, but developers can
design their own data interfaces if required.
@node Introduction to Data Management
@subsection Introduction
Data management is done at a high level in StarPU: rather than accessing a mere
list of contiguous buffers, the tasks may manipulate data that are described by
a high-level construct which we call data interface.
An example of data interface is the "vector" interface which describes a
contiguous data array on a specific memory node. This interface is a simple
structure containing the number of elements in the array, the size of the
elements, and the address of the array in the appropriate address space (this
address may be invalid if there is no valid copy of the array in the memory
node). More information on the data interfaces provided by StarPU is
given in @ref{Data Interfaces}.
When a piece of data managed by StarPU is used by a task, the task
implementation is given a pointer to an interface describing a valid copy of
the data that is accessible from the current processing unit.
Every worker is associated to a memory node which is a logical abstraction of
the address space from which the processing unit gets its data. For instance,
the memory node associated to the different CPU workers represents main memory
(RAM), while the memory node associated to a GPU is the DRAM embedded on the device.
Every memory node is identified by a logical index which is returned by the
@code{starpu_worker_get_memory_node} function. When registering a piece of data
to StarPU, the specified memory node indicates where the piece of data
initially resides (we also call this memory node the home node of a piece of
data).
@node Basic Data Management API
@subsection Basic Data Management API
@deftp {Data Type} {enum starpu_access_mode}
This datatype describes a data access mode. The different available modes are:
@table @asis
@item @code{STARPU_R}: read-only mode.
@item @code{STARPU_W}: write-only mode.
@item @code{STARPU_RW}: read-write mode.
This is equivalent to @code{STARPU_R|STARPU_W}.
@item @code{STARPU_SCRATCH}: scratch memory.
A temporary buffer is allocated for the task, but StarPU does not
enforce data consistency---i.e. each device has its own buffer,
independently of the others (even for CPUs), and no data transfer is
ever performed. This is useful for temporary variables to avoid
allocating/freeing buffers inside each task.
Currently, no behavior is defined concerning the relation with the
@code{STARPU_R} and @code{STARPU_W} modes and the value provided at
registration---i.e., the value of the scratch buffer is undefined at
entry of the codelet function. It is being considered for future
extensions at least to define the initial value. For now, data to be
used in @code{SCRATCH} mode should be registered with node @code{-1} and
a @code{NULL} pointer, since the value of the provided buffer is simply
ignored for now.
@item @code{STARPU_REDUX}: reduction mode. TODO!
@end table
@end deftp
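For example, a temporary workspace to be accessed in @code{STARPU_SCRATCH}
mode can be registered as sketched below, using the vector registration helper
described in @ref{Data Interfaces}; @code{WORKSPACE_NX} is an arbitrary size
chosen for the example.
@cartouche
@smallexample
starpu_data_handle_t scratch_handle;
/* home node -1 and a NULL pointer: no initial contents */
starpu_vector_data_register(&scratch_handle, -1, (uintptr_t)NULL,
                            WORKSPACE_NX, sizeof(float));
/* pass scratch_handle to tasks with the STARPU_SCRATCH access mode */
@end smallexample
@end cartouche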
@deftp {Data Type} {starpu_data_handle_t}
StarPU uses @code{starpu_data_handle_t} as an opaque handle to manage a piece of
data. Once a piece of data has been registered to StarPU, it is associated to a
@code{starpu_data_handle_t} which keeps track of the state of the piece of data
over the entire machine, so that we can maintain data consistency and locate
data replicates for instance.
@end deftp
@deftypefun void starpu_data_register (starpu_data_handle_t *@var{handleptr}, unsigned @var{home_node}, void *@var{data_interface}, {struct starpu_data_interface_ops} *@var{ops})
Register a piece of data into the handle located at the @var{handleptr}
address. The @var{data_interface} buffer contains the initial description of the
data in the home node. The @var{ops} argument is a pointer to a structure
describing the different methods used to manipulate this type of interface. See
@ref{struct starpu_data_interface_ops} for more details on this structure.
If @code{home_node} is -1, StarPU will automatically
allocate the memory when it is used for the
first time in write-only mode. Once such a data handle has been automatically
allocated, it is possible to access it using any access mode.
Note that StarPU supplies a set of predefined types of interface (e.g. vector or
matrix) which can be registered by means of helper functions (e.g.
@code{starpu_vector_data_register} or @code{starpu_matrix_data_register}).
@end deftypefun
@deftypefun void starpu_data_register_same ({starpu_data_handle_t *}@var{handledst}, starpu_data_handle_t @var{handlesrc})
Register a new piece of data into the handle @var{handledst} with the
same interface as the handle @var{handlesrc}.
@end deftypefun
@deftypefun void starpu_data_unregister (starpu_data_handle_t @var{handle})
This function unregisters a data handle from StarPU. If the data was
automatically allocated by StarPU because the home node was -1, all
automatically allocated buffers are freed. Otherwise, a valid copy of the data
is put back into the home node in the buffer that was initially registered.
Using a data handle that has been unregistered from StarPU results in
undefined behaviour.
@end deftypefun
@deftypefun void starpu_data_unregister_no_coherency (starpu_data_handle_t @var{handle})
This is the same as @code{starpu_data_unregister}, except that StarPU does not put back
a valid copy into the home node, in the buffer that was initially registered.
@end deftypefun
@deftypefun void starpu_data_unregister_submit (starpu_data_handle_t @var{handle})
Destroy the data handle once it is not needed anymore by any submitted
task. No coherency is assumed.
@end deftypefun
@deftypefun void starpu_data_invalidate (starpu_data_handle_t @var{handle})
Destroy all replicates of the data handle immediately. After data invalidation,
the first access to the handle must be performed in write-only mode.
Accessing invalidated data in read mode results in undefined
behaviour.
@end deftypefun
@deftypefun void starpu_data_invalidate_submit (starpu_data_handle_t @var{handle})
Submits invalidation of the data handle after completion of previously submitted tasks.
@end deftypefun
@c TODO create a specific section about user interaction with the DSM?
@deftypefun void starpu_data_set_wt_mask (starpu_data_handle_t @var{handle}, uint32_t @var{wt_mask})
This function sets the write-through mask of a given data, i.e. a bitmask of
nodes where the data should always be replicated after modification. It also
prevents the data from being evicted from these nodes when memory gets scarce.
@end deftypefun
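For instance, assuming that memory node @code{0} is main memory, the following
call requests that an up-to-date copy be kept there after every modification:
@cartouche
@smallexample
starpu_data_set_wt_mask(handle, 1<<0);  /* always write back to node 0 */
@end smallexample
@end cartouche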
@deftypefun int starpu_data_prefetch_on_node (starpu_data_handle_t @var{handle}, unsigned @var{node}, unsigned @var{async})
Issue a prefetch request for a given data to a given node, i.e.
requests that the data be replicated to the given node, so that it is available
there for tasks. If the @var{async} parameter is 0, the call will block until
the transfer is completed, else the call will return as soon as the request is
scheduled (which may however have to wait for a task completion).
@end deftypefun
@deftypefun starpu_data_handle_t starpu_data_lookup ({const void *}@var{ptr})
Return the handle corresponding to the data pointed to by the @var{ptr}
host pointer.
@end deftypefun
@deftypefun int starpu_data_request_allocation (starpu_data_handle_t @var{handle}, unsigned @var{node})
Explicitly ask StarPU to allocate room for a piece of data on the specified
memory node.
@end deftypefun
@deftypefun void starpu_data_query_status (starpu_data_handle_t @var{handle}, int @var{memory_node}, {int *}@var{is_allocated}, {int *}@var{is_valid}, {int *}@var{is_requested})
Query the status of the handle on the specified memory node.
@end deftypefun
@deftypefun void starpu_data_advise_as_important (starpu_data_handle_t @var{handle}, unsigned @var{is_important})
This function allows the application to specify that a piece of data can be discarded
without impacting the application.
@end deftypefun
@deftypefun void starpu_data_set_reduction_methods (starpu_data_handle_t @var{handle}, {struct starpu_codelet *}@var{redux_cl}, {struct starpu_codelet *}@var{init_cl})
This sets the codelets to be used for the @var{handle} when it is accessed in
REDUX mode. Per-worker buffers will be initialized with the @var{init_cl}
codelet, and reduction between per-worker buffers will be done with the
@var{redux_cl} codelet.
@end deftypefun
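The sketch below shows how reduction codelets could look for a variable handle
accumulating a sum; the codelet structures are simplified, only CPU
implementations are given, and the handle name @code{sum_handle} is assumed to
be registered elsewhere.
@cartouche
@smallexample
void init_cpu_func(void *descr[], void *cl_arg)
@{
    double *a = (double *)STARPU_VARIABLE_GET_PTR(descr[0]);
    *a = 0.0;                    /* neutral element of the reduction */
@}

void redux_cpu_func(void *descr[], void *cl_arg)
@{
    double *a = (double *)STARPU_VARIABLE_GET_PTR(descr[0]);
    double *b = (double *)STARPU_VARIABLE_GET_PTR(descr[1]);
    *a = *a + *b;                /* combine two per-worker contributions */
@}

struct starpu_codelet init_cl = @{ .cpu_funcs = @{init_cpu_func, NULL@}, .nbuffers = 1 @};
struct starpu_codelet redux_cl = @{ .cpu_funcs = @{redux_cpu_func, NULL@}, .nbuffers = 2 @};

starpu_data_set_reduction_methods(sum_handle, &redux_cl, &init_cl);
@end smallexample
@end cartouche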
@deftypefun struct starpu_data_interface_ops* starpu_data_get_interface_ops (starpu_data_handle_t @var{handle})
Get a pointer to the structure describing the different methods used
to manipulate the given data. See @ref{struct starpu_data_interface_ops} for more details on this structure.
@end deftypefun
@deftypefun unsigned starpu_data_get_sequential_consistency_flag (starpu_data_handle_t @var{handle})
Return the sequential consistency flag of the given data.
@end deftypefun
@node Access registered data from the application
@subsection Access registered data from the application
@deftypefun int starpu_data_acquire (starpu_data_handle_t @var{handle}, {enum starpu_access_mode} @var{mode})
The application must call this function prior to accessing registered data from
main memory outside tasks. StarPU ensures that the application will get an
up-to-date copy of the data in main memory located where the data was
originally registered, and that all concurrent accesses (e.g. from tasks) will
be consistent with the access mode specified in the @var{mode} argument.
@code{starpu_data_release} must be called once the application does not need to
access the piece of data anymore. Note that implicit data
dependencies are also enforced by @code{starpu_data_acquire}, i.e.
@code{starpu_data_acquire} will wait for all tasks scheduled to work on
the data, unless they have been disabled explicitly by calling
@code{starpu_data_set_default_sequential_consistency_flag} or
@code{starpu_data_set_sequential_consistency_flag}.
@code{starpu_data_acquire} is a blocking call, so that it cannot be called from
tasks or from their callbacks (in that case, @code{starpu_data_acquire} returns
@code{-EDEADLK}). Upon successful completion, this function returns 0.
@end deftypefun
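For example, to read back a registered variable from the application once all
submitted tasks working on it have completed (the variable and handle names
are assumptions, and @code{starpu_data_release} is described below):
@cartouche
@smallexample
starpu_data_acquire(var_handle, STARPU_R);
printf("result = %f\n", var);   /* safe: tasks using var have finished */
starpu_data_release(var_handle);
@end smallexample
@end cartouche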
@deftypefun int starpu_data_acquire_cb (starpu_data_handle_t @var{handle}, {enum starpu_access_mode} @var{mode}, void (*@var{callback})(void *), void *@var{arg})
@code{starpu_data_acquire_cb} is the asynchronous equivalent of
@code{starpu_data_acquire}. When the data specified in the first argument is
available in the appropriate access mode, the callback function is executed.
The application may access the requested data during the execution of this
callback. The callback function must call @code{starpu_data_release} once the
application does not need to access the piece of data anymore.
Note that implicit data dependencies are also enforced by
@code{starpu_data_acquire_cb} in case they are not disabled.
Contrary to @code{starpu_data_acquire}, this function is non-blocking and may
be called from task callbacks. Upon successful completion, this function
returns 0.
@end deftypefun
@deftypefun int starpu_data_acquire_on_node (starpu_data_handle_t @var{handle}, unsigned @var{node}, {enum starpu_access_mode} @var{mode})
This is the same as @code{starpu_data_acquire}, except that the data will be
available on the given memory node instead of main memory.
@end deftypefun
@deftypefun int starpu_data_acquire_on_node_cb (starpu_data_handle_t @var{handle}, unsigned @var{node}, {enum starpu_access_mode} @var{mode}, void (*@var{callback})(void *), void *@var{arg})
This is the same as @code{starpu_data_acquire_cb}, except that the data will be
available on the given memory node instead of main memory.
@end deftypefun
@defmac STARPU_DATA_ACQUIRE_CB (starpu_data_handle_t @var{handle}, {enum starpu_access_mode} @var{mode}, code)
@code{STARPU_DATA_ACQUIRE_CB} is the same as @code{starpu_data_acquire_cb},
except that the code to be executed in a callback is directly provided as a
macro parameter, and the data handle is automatically released after it. This
makes it easy to execute code which depends on the value of some registered
data. This is non-blocking too and may be called from task callbacks.
@end defmac
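For instance, assuming a registered variable @code{v} with handle
@code{v_handle}, the macro can be used as follows:
@cartouche
@smallexample
STARPU_DATA_ACQUIRE_CB(v_handle, STARPU_R,
                       printf("v is now %f\n", v));
@end smallexample
@end cartouche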
@deftypefun void starpu_data_release (starpu_data_handle_t @var{handle})
This function releases the piece of data acquired by the application either by
@code{starpu_data_acquire} or by @code{starpu_data_acquire_cb}.
@end deftypefun
@deftypefun void starpu_data_release_on_node (starpu_data_handle_t @var{handle}, unsigned @var{node})
This is the same as @code{starpu_data_release}, except that the data will be
available on the given memory node instead of main memory.
@end deftypefun
@node Data Interfaces
@section Data Interfaces
@menu
* Registering Data::
* Accessing Data Interfaces::
* Defining Interface::
@end menu
@node Registering Data
@subsection Registering Data
There are several ways to register a memory region so that it can be managed by
StarPU. The functions below allow the registration of vectors, 2D matrices, 3D
matrices as well as BCSR and CSR sparse matrices.
@deftypefun void starpu_void_data_register ({starpu_data_handle_t *}@var{handle})
Register a void interface. There is no data really associated to that
interface, but it may be used as a synchronization mechanism. It also
permits to express an abstract piece of data that is managed by the
application internally: this makes it possible to forbid the
concurrent execution of different tasks accessing the same "void" data
in read-write mode.
@end deftypefun
@deftypefun void starpu_variable_data_register ({starpu_data_handle_t *}@var{handle}, unsigned @var{home_node}, uintptr_t @var{ptr}, size_t @var{size})
Register the @var{size}-byte element pointed to by @var{ptr}, which is
typically a scalar, and initialize @var{handle} to represent this data
item.
@cartouche
@smallexample
float var;
starpu_data_handle_t var_handle;
starpu_variable_data_register(&var_handle, 0, (uintptr_t)&var, sizeof(var));
@end smallexample
@end cartouche
@end deftypefun
@deftypefun void starpu_vector_data_register ({starpu_data_handle_t *}@var{handle}, unsigned @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{nx}, size_t @var{elemsize})
Register the @var{nx} @var{elemsize}-byte elements pointed to by
@var{ptr} and initialize @var{handle} to represent it.
@cartouche
@smallexample
float vector[NX];
starpu_data_handle_t vector_handle;
starpu_vector_data_register(&vector_handle, 0, (uintptr_t)vector, NX,
                            sizeof(vector[0]));
@end smallexample
@end cartouche
@end deftypefun
@deftypefun void starpu_matrix_data_register ({starpu_data_handle_t *}@var{handle}, unsigned @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{ld}, uint32_t @var{nx}, uint32_t @var{ny}, size_t @var{elemsize})
Register the @var{nx}x@var{ny} 2D matrix of @var{elemsize}-byte elements
pointed to by @var{ptr} and initialize @var{handle} to represent it.
@var{ld} specifies the number of elements between rows;
a value greater than @var{nx} adds padding, which can be useful for
alignment purposes.
@cartouche
@smallexample
float *matrix;
starpu_data_handle_t matrix_handle;
matrix = (float*)malloc(width * height * sizeof(float));
starpu_matrix_data_register(&matrix_handle, 0, (uintptr_t)matrix,
                            width, width, height, sizeof(float));
@end smallexample
@end cartouche
@end deftypefun
@deftypefun void starpu_block_data_register ({starpu_data_handle_t *}@var{handle}, unsigned @var{home_node}, uintptr_t @var{ptr}, uint32_t @var{ldy}, uint32_t @var{ldz}, uint32_t @var{nx}, uint32_t @var{ny}, uint32_t @var{nz}, size_t @var{elemsize})
Register the @var{nx}x@var{ny}x@var{nz} 3D matrix of @var{elemsize}-byte
elements pointed to by @var{ptr} and initialize @var{handle} to represent
it. Again, @var{ldy} and @var{ldz} specify the number of elements
between rows and between z planes.
@cartouche
@smallexample
float *block;
starpu_data_handle_t block_handle;
block = (float*)malloc(nx*ny*nz*sizeof(float));
starpu_block_data_register(&block_handle, 0, (uintptr_t)block,
                           nx, nx*ny, nx, ny, nz, sizeof(float));
@end smallexample
@end cartouche
@end deftypefun
@deftypefun void starpu_bcsr_data_register (starpu_data_handle_t *@var{handle}, unsigned @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, uint32_t @var{r}, uint32_t @var{c}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the BCSR (Blocked
Compressed Sparse Row Representation) sparse matrix interface.
Register the sparse matrix made of @var{nnz} non-zero blocks of elements of size
@var{elemsize} stored in @var{nzval} and initialize @var{handle} to represent
it. Blocks have size @var{r} * @var{c}. @var{nrow} is the number of rows (in
terms of blocks), @code{colind[i]} is the block-column index for block @code{i}
in @code{nzval}, and @code{rowptr[i]} is the block-index (in @code{nzval}) of the first block of row @code{i}.
@var{firstentry} is the index of the first entry of the given arrays (usually 0
or 1).
@end deftypefun
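As an illustration, the sketch below registers a 4x4 matrix made of two
non-zero 2x2 blocks on the diagonal; the in-block storage order shown in the
initializer is only indicative, and @code{rowptr} is assumed to follow the
usual CSR convention of @var{nrow}+1 entries.
@cartouche
@smallexample
/* | 1 2 0 0 |
   | 3 4 0 0 |
   | 0 0 5 6 |
   | 0 0 7 8 |   two 2x2 non-zero blocks */
double nzval[2*2*2] = @{ 1, 2, 3, 4,    /* first block  */
                        5, 6, 7, 8 @};  /* second block */
uint32_t colind[2] = @{ 0, 1 @};    /* block-column index of each block */
uint32_t rowptr[3] = @{ 0, 1, 2 @}; /* first block of each block-row */
starpu_data_handle_t bcsr_handle;

starpu_bcsr_data_register(&bcsr_handle, 0, 2 /* nnz */, 2 /* nrow */,
                          (uintptr_t)nzval, colind, rowptr,
                          0 /* firstentry */, 2, 2, sizeof(nzval[0]));
@end smallexample
@end cartouche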
@deftypefun void starpu_csr_data_register (starpu_data_handle_t *@var{handle}, unsigned @var{home_node}, uint32_t @var{nnz}, uint32_t @var{nrow}, uintptr_t @var{nzval}, uint32_t *@var{colind}, uint32_t *@var{rowptr}, uint32_t @var{firstentry}, size_t @var{elemsize})
This variant of @code{starpu_data_register} uses the CSR (Compressed
Sparse Row Representation) sparse matrix interface.
It is the unblocked counterpart of @code{starpu_bcsr_data_register}: it
registers the sparse matrix made of @var{nnz} non-zero @var{elemsize}-byte
values stored in @var{nzval}, where @var{nrow} is the number of rows,
@code{colind[i]} is the column index of value @code{i} in @code{nzval},
@code{rowptr[i]} is the index (in @code{nzval}) of the first value of row
@code{i}, and @var{firstentry} is the index of the first entry of the given
arrays (usually 0 or 1).
@end deftypefun
  655. @deftypefun void starpu_coo_data_register (starpu_data_handle_t *@var{handleptr}, unsigned @var{home_node}, uint32_t @var{nx}, uint32_t @var{ny}, uint32_t @var{n_values}, uint32_t *@var{columns}, uint32_t *@var{rows}, uintptr_t @var{values}, size_t @var{elemsize});
  656. Register the @var{nx}x@var{ny} 2D matrix given in the COO format, using the
  657. @var{columns}, @var{rows}, @var{values} arrays, which must have @var{n_values}
  658. elements of size @var{elemsize}. Initialize @var{handleptr}.
  659. @end deftypefun
  660. @deftypefun {void *} starpu_data_get_interface_on_node (starpu_data_handle_t @var{handle}, unsigned @var{memory_node})
  661. Return the interface associated with @var{handle} on @var{memory_node}.
  662. @end deftypefun
  663. @node Accessing Data Interfaces
  664. @subsection Accessing Data Interfaces
  665. Each data interface is provided with a set of field access functions.
  666. The ones using a @code{void *} parameter aimed to be used in codelet
  667. implementations (see for example the code in @ref{Vector Scaling Using StarPU's API}).
  668. @deftp {Data Type} {enum starpu_data_interface_id}
  669. The different values are:
  670. @table @asis
  671. @item @code{STARPU_MATRIX_INTERFACE_ID}
  672. @item @code{STARPU_BLOCK_INTERFACE_ID}
  673. @item @code{STARPU_VECTOR_INTERFACE_ID}
  674. @item @code{STARPU_CSR_INTERFACE_ID}
  675. @item @code{STARPU_BCSR_INTERFACE_ID}
  676. @item @code{STARPU_VARIABLE_INTERFACE_ID}
  677. @item @code{STARPU_VOID_INTERFACE_ID}
  678. @item @code{STARPU_MULTIFORMAT_INTERFACE_ID}
@item @code{STARPU_COO_INTERFACE_ID}
  680. @item @code{STARPU_NINTERFACES_ID}: number of data interfaces
  681. @end table
  682. @end deftp
  683. @menu
  684. * Accessing Handle::
  685. * Accessing Variable Data Interfaces::
  686. * Accessing Vector Data Interfaces::
  687. * Accessing Matrix Data Interfaces::
  688. * Accessing Block Data Interfaces::
  689. * Accessing BCSR Data Interfaces::
  690. * Accessing CSR Data Interfaces::
  691. * Accessing COO Data Interfaces::
  692. @end menu
  693. @node Accessing Handle
  694. @subsubsection Handle
  695. @deftypefun {void *} starpu_handle_to_pointer (starpu_data_handle_t @var{handle}, unsigned @var{node})
  696. Return the pointer associated with @var{handle} on node @var{node} or
  697. @code{NULL} if @var{handle}'s interface does not support this
  698. operation or data for this handle is not allocated on that node.
  699. @end deftypefun
  700. @deftypefun {void *} starpu_handle_get_local_ptr (starpu_data_handle_t @var{handle})
  701. Return the local pointer associated with @var{handle} or @code{NULL}
if @var{handle}'s interface does not have data allocated locally.
  703. @end deftypefun
  704. @deftypefun {enum starpu_data_interface_id} starpu_handle_get_interface_id (starpu_data_handle_t @var{handle})
  705. Return the unique identifier of the interface associated with the given @var{handle}.
  706. @end deftypefun
  707. @deftypefun size_t starpu_handle_get_size (starpu_data_handle_t @var{handle})
Return the size of the data associated with @var{handle}.
  709. @end deftypefun
  710. @deftypefun int starpu_handle_pack_data (starpu_data_handle_t @var{handle}, {void **}@var{ptr}, {starpu_ssize_t *}@var{count})
  711. Execute the packing operation of the interface of the data registered
  712. at @var{handle} (@pxref{struct starpu_data_interface_ops}). This
packing operation must allocate a buffer large enough, return its address
in @var{ptr}, and copy into the newly allocated buffer the data associated with
@var{handle}. @var{count} will be set to the size of the allocated
  716. buffer.
  717. If @var{ptr} is @code{NULL}, the function should not copy the data in the
  718. buffer but just set @var{count} to the size of the buffer which
  719. would have been allocated. The special value @code{-1} indicates the
  720. size is yet unknown.
  721. @end deftypefun
  722. @deftypefun int starpu_handle_unpack_data (starpu_data_handle_t @var{handle}, {void *}@var{ptr}, size_t @var{count})
  723. Unpack in @var{handle} the data located at @var{ptr} of size
  724. @var{count} as described by the interface of the data. The interface
registered at @var{handle} must define an unpacking operation
  726. (@pxref{struct starpu_data_interface_ops}). The memory at the address @code{ptr}
  727. is freed after calling the data unpacking operation.
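As an illustration, packing and unpacking can be combined to ship a piece of
data through an opaque transport layer (a sketch; the actual transmission of
the buffer is left out):
@cartouche
@smallexample
/* Sender side */
void *buffer;
starpu_ssize_t size;
starpu_handle_pack_data(handle, &buffer, &size);
/* ... transmit size and the buffer contents to the peer ... */

/* Receiver side, on a handle registered with the same interface */
starpu_handle_unpack_data(handle, buffer, size); /* also frees buffer */
@end smallexample
@end cartouche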
  728. @end deftypefun
  729. @node Accessing Variable Data Interfaces
  730. @subsubsection Variable Data Interfaces
  731. @deftypefun size_t starpu_variable_get_elemsize (starpu_data_handle_t @var{handle})
  732. Return the size of the variable designated by @var{handle}.
  733. @end deftypefun
  734. @deftypefun uintptr_t starpu_variable_get_local_ptr (starpu_data_handle_t @var{handle})
  735. Return a pointer to the variable designated by @var{handle}.
  736. @end deftypefun
  737. @defmac STARPU_VARIABLE_GET_PTR ({void *}@var{interface})
  738. Return a pointer to the variable designated by @var{interface}.
  739. @end defmac
  740. @defmac STARPU_VARIABLE_GET_ELEMSIZE ({void *}@var{interface})
  741. Return the size of the variable designated by @var{interface}.
  742. @end defmac
  743. @defmac STARPU_VARIABLE_GET_DEV_HANDLE ({void *}@var{interface})
  744. Return a device handle for the variable designated by @var{interface}, to be
  745. used on OpenCL. The offset documented below has to be used in addition to this.
  746. @end defmac
  747. @defmac STARPU_VARIABLE_GET_OFFSET ({void *}@var{interface})
  748. Return the offset in the variable designated by @var{interface}, to be used
  749. with the device handle.
  750. @end defmac
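For instance, a CPU codelet implementation may access a registered variable
through these macros as follows (a sketch; the codelet function name is
arbitrary):
@cartouche
@smallexample
void increment_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned *counter = (unsigned *)STARPU_VARIABLE_GET_PTR(buffers[0]);
    (*counter)++;
@}
@end smallexample
@end cartouche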
  751. @node Accessing Vector Data Interfaces
  752. @subsubsection Vector Data Interfaces
  753. @deftypefun uint32_t starpu_vector_get_nx (starpu_data_handle_t @var{handle})
  754. Return the number of elements registered into the array designated by @var{handle}.
  755. @end deftypefun
  756. @deftypefun size_t starpu_vector_get_elemsize (starpu_data_handle_t @var{handle})
  757. Return the size of each element of the array designated by @var{handle}.
  758. @end deftypefun
  759. @deftypefun uintptr_t starpu_vector_get_local_ptr (starpu_data_handle_t @var{handle})
  760. Return the local pointer associated with @var{handle}.
  761. @end deftypefun
  762. @defmac STARPU_VECTOR_GET_PTR ({void *}@var{interface})
  763. Return a pointer to the array designated by @var{interface}, valid on CPUs and
  764. CUDA only. For OpenCL, the device handle and offset need to be used instead.
  765. @end defmac
  766. @defmac STARPU_VECTOR_GET_DEV_HANDLE ({void *}@var{interface})
Return a device handle for the array designated by @var{interface}, to be used on OpenCL. The offset
  768. documented below has to be used in addition to this.
  769. @end defmac
  770. @defmac STARPU_VECTOR_GET_OFFSET ({void *}@var{interface})
  771. Return the offset in the array designated by @var{interface}, to be used with the device handle.
  772. @end defmac
  773. @defmac STARPU_VECTOR_GET_NX ({void *}@var{interface})
  774. Return the number of elements registered into the array designated by @var{interface}.
  775. @end defmac
  776. @defmac STARPU_VECTOR_GET_ELEMSIZE ({void *}@var{interface})
  777. Return the size of each element of the array designated by @var{interface}.
  778. @end defmac
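For instance, in the spirit of the vector scaling example mentioned above, a
CPU implementation may use these macros as follows (a sketch):
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
    unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
    float *val = (float *)STARPU_VECTOR_GET_PTR(buffers[0]);
    float factor = *(float *)cl_arg;
    unsigned i;
    for (i = 0; i < n; i++)
        val[i] *= factor;
@}
@end smallexample
@end cartouche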
  779. @node Accessing Matrix Data Interfaces
  780. @subsubsection Matrix Data Interfaces
  781. @deftypefun uint32_t starpu_matrix_get_nx (starpu_data_handle_t @var{handle})
  782. Return the number of elements on the x-axis of the matrix designated by @var{handle}.
  783. @end deftypefun
  784. @deftypefun uint32_t starpu_matrix_get_ny (starpu_data_handle_t @var{handle})
  785. Return the number of elements on the y-axis of the matrix designated by
  786. @var{handle}.
  787. @end deftypefun
  788. @deftypefun uint32_t starpu_matrix_get_local_ld (starpu_data_handle_t @var{handle})
  789. Return the number of elements between each row of the matrix designated by
@var{handle}. May be equal to nx when there is no padding.
  791. @end deftypefun
  792. @deftypefun uintptr_t starpu_matrix_get_local_ptr (starpu_data_handle_t @var{handle})
  793. Return the local pointer associated with @var{handle}.
  794. @end deftypefun
  795. @deftypefun size_t starpu_matrix_get_elemsize (starpu_data_handle_t @var{handle})
  796. Return the size of the elements registered into the matrix designated by
  797. @var{handle}.
  798. @end deftypefun
  799. @defmac STARPU_MATRIX_GET_PTR ({void *}@var{interface})
  800. Return a pointer to the matrix designated by @var{interface}, valid on CPUs and
  801. CUDA devices only. For OpenCL devices, the device handle and offset need to be
  802. used instead.
  803. @end defmac
  804. @defmac STARPU_MATRIX_GET_DEV_HANDLE ({void *}@var{interface})
  805. Return a device handle for the matrix designated by @var{interface}, to be used
  806. on OpenCL. The offset documented below has to be used in addition to this.
  807. @end defmac
  808. @defmac STARPU_MATRIX_GET_OFFSET ({void *}@var{interface})
  809. Return the offset in the matrix designated by @var{interface}, to be used with
  810. the device handle.
  811. @end defmac
  812. @defmac STARPU_MATRIX_GET_NX ({void *}@var{interface})
  813. Return the number of elements on the x-axis of the matrix designated by
  814. @var{interface}.
  815. @end defmac
  816. @defmac STARPU_MATRIX_GET_NY ({void *}@var{interface})
  817. Return the number of elements on the y-axis of the matrix designated by
  818. @var{interface}.
  819. @end defmac
  820. @defmac STARPU_MATRIX_GET_LD ({void *}@var{interface})
  821. Return the number of elements between each row of the matrix designated by
  822. @var{interface}. May be equal to nx when there is no padding.
  823. @end defmac
  824. @defmac STARPU_MATRIX_GET_ELEMSIZE ({void *}@var{interface})
  825. Return the size of the elements registered into the matrix designated by
  826. @var{interface}.
  827. @end defmac
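For instance, a CPU implementation iterating over a registered matrix should
use the leading dimension returned by @code{STARPU_MATRIX_GET_LD} rather than
@code{nx} (a sketch; the function name is arbitrary):
@cartouche
@smallexample
void zero_matrix_cpu_func(void *buffers[], void *cl_arg)
@{
    float *mat = (float *)STARPU_MATRIX_GET_PTR(buffers[0]);
    unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
    unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
    unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
    unsigned i, j;
    for (j = 0; j < ny; j++)
        for (i = 0; i < nx; i++)
            mat[j*ld + i] = 0.0f;
@}
@end smallexample
@end cartouche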
  828. @node Accessing Block Data Interfaces
  829. @subsubsection Block Data Interfaces
  830. @deftypefun uint32_t starpu_block_get_nx (starpu_data_handle_t @var{handle})
  831. Return the number of elements on the x-axis of the block designated by @var{handle}.
  832. @end deftypefun
  833. @deftypefun uint32_t starpu_block_get_ny (starpu_data_handle_t @var{handle})
  834. Return the number of elements on the y-axis of the block designated by @var{handle}.
  835. @end deftypefun
  836. @deftypefun uint32_t starpu_block_get_nz (starpu_data_handle_t @var{handle})
  837. Return the number of elements on the z-axis of the block designated by @var{handle}.
  838. @end deftypefun
  839. @deftypefun uint32_t starpu_block_get_local_ldy (starpu_data_handle_t @var{handle})
  840. Return the number of elements between each row of the block designated by
  841. @var{handle}, in the format of the current memory node.
  842. @end deftypefun
  843. @deftypefun uint32_t starpu_block_get_local_ldz (starpu_data_handle_t @var{handle})
  844. Return the number of elements between each z plane of the block designated by
  845. @var{handle}, in the format of the current memory node.
  846. @end deftypefun
  847. @deftypefun uintptr_t starpu_block_get_local_ptr (starpu_data_handle_t @var{handle})
  848. Return the local pointer associated with @var{handle}.
  849. @end deftypefun
  850. @deftypefun size_t starpu_block_get_elemsize (starpu_data_handle_t @var{handle})
  851. Return the size of the elements of the block designated by @var{handle}.
  852. @end deftypefun
  853. @defmac STARPU_BLOCK_GET_PTR ({void *}@var{interface})
  854. Return a pointer to the block designated by @var{interface}.
  855. @end defmac
  856. @defmac STARPU_BLOCK_GET_DEV_HANDLE ({void *}@var{interface})
  857. Return a device handle for the block designated by @var{interface}, to be used
on OpenCL. The offset documented below has to be used in addition to this.
  859. @end defmac
  860. @defmac STARPU_BLOCK_GET_OFFSET ({void *}@var{interface})
  861. Return the offset in the block designated by @var{interface}, to be used with
  862. the device handle.
  863. @end defmac
  864. @defmac STARPU_BLOCK_GET_NX ({void *}@var{interface})
Return the number of elements on the x-axis of the block designated by @var{interface}.
  866. @end defmac
  867. @defmac STARPU_BLOCK_GET_NY ({void *}@var{interface})
Return the number of elements on the y-axis of the block designated by @var{interface}.
  869. @end defmac
  870. @defmac STARPU_BLOCK_GET_NZ ({void *}@var{interface})
Return the number of elements on the z-axis of the block designated by @var{interface}.
  872. @end defmac
  873. @defmac STARPU_BLOCK_GET_LDY ({void *}@var{interface})
  874. Return the number of elements between each row of the block designated by
  875. @var{interface}. May be equal to nx when there is no padding.
  876. @end defmac
  877. @defmac STARPU_BLOCK_GET_LDZ ({void *}@var{interface})
  878. Return the number of elements between each z plane of the block designated by
  879. @var{interface}. May be equal to nx*ny when there is no padding.
  880. @end defmac
  881. @defmac STARPU_BLOCK_GET_ELEMSIZE ({void *}@var{interface})
Return the size of the elements of the block designated by @var{interface}.
  883. @end defmac
  884. @node Accessing BCSR Data Interfaces
  885. @subsubsection BCSR Data Interfaces
  886. @deftypefun uint32_t starpu_bcsr_get_nnz (starpu_data_handle_t @var{handle})
Return the number of non-zero blocks in the matrix designated by @var{handle}.
  888. @end deftypefun
  889. @deftypefun uint32_t starpu_bcsr_get_nrow (starpu_data_handle_t @var{handle})
  890. Return the number of rows (in terms of blocks of size r*c) in the matrix
  891. designated by @var{handle}.
  892. @end deftypefun
  893. @deftypefun uint32_t starpu_bcsr_get_firstentry (starpu_data_handle_t @var{handle})
  894. Return the index at which all arrays (the column indexes, the row pointers...)
of the matrix designated by @var{handle} start.
  896. @end deftypefun
  897. @deftypefun uintptr_t starpu_bcsr_get_local_nzval (starpu_data_handle_t @var{handle})
  898. Return a pointer to the non-zero values of the matrix designated by @var{handle}.
  899. @end deftypefun
  900. @deftypefun {uint32_t *} starpu_bcsr_get_local_colind (starpu_data_handle_t @var{handle})
  901. Return a pointer to the column index, which holds the positions of the non-zero
  902. entries in the matrix designated by @var{handle}.
  903. @end deftypefun
  904. @deftypefun {uint32_t *} starpu_bcsr_get_local_rowptr (starpu_data_handle_t @var{handle})
  905. Return the row pointer array of the matrix designated by @var{handle}.
  906. @end deftypefun
  907. @deftypefun uint32_t starpu_bcsr_get_r (starpu_data_handle_t @var{handle})
  908. Return the number of rows in a block.
  909. @end deftypefun
  910. @deftypefun uint32_t starpu_bcsr_get_c (starpu_data_handle_t @var{handle})
Return the number of columns in a block.
  912. @end deftypefun
  913. @deftypefun size_t starpu_bcsr_get_elemsize (starpu_data_handle_t @var{handle})
  914. Return the size of the elements in the matrix designated by @var{handle}.
  915. @end deftypefun
  916. @defmac STARPU_BCSR_GET_NNZ ({void *}@var{interface})
Return the number of non-zero blocks in the matrix designated by @var{interface}.
  918. @end defmac
  919. @defmac STARPU_BCSR_GET_NZVAL ({void *}@var{interface})
  920. Return a pointer to the non-zero values of the matrix designated by @var{interface}.
  921. @end defmac
  922. @defmac STARPU_BCSR_GET_NZVAL_DEV_HANDLE ({void *}@var{interface})
  923. Return a device handle for the array of non-zero values in the matrix designated
  924. by @var{interface}. The offset documented below has to be used in addition to
  925. this.
  926. @end defmac
  927. @defmac STARPU_BCSR_GET_COLIND ({void *}@var{interface})
  928. Return a pointer to the column index of the matrix designated by @var{interface}.
  929. @end defmac
  930. @defmac STARPU_BCSR_GET_COLIND_DEV_HANDLE ({void *}@var{interface})
  931. Return a device handle for the column index of the matrix designated by
  932. @var{interface}. The offset documented below has to be used in addition to
  933. this.
  934. @end defmac
  935. @defmac STARPU_BCSR_GET_ROWPTR ({void *}@var{interface})
  936. Return a pointer to the row pointer array of the matrix designated by @var{interface}.
  937. @end defmac
@defmac STARPU_BCSR_GET_ROWPTR_DEV_HANDLE ({void *}@var{interface})
  939. Return a device handle for the row pointer array of the matrix designated by
  940. @var{interface}. The offset documented below has to be used in addition to
  941. this.
  942. @end defmac
  943. @defmac STARPU_BCSR_GET_OFFSET ({void *}@var{interface})
Return the offset in the arrays (colind, rowptr, nzval) of the matrix
  945. designated by @var{interface}, to be used with the device handles.
  946. @end defmac
  947. @node Accessing CSR Data Interfaces
  948. @subsubsection CSR Data Interfaces
  949. @deftypefun uint32_t starpu_csr_get_nnz (starpu_data_handle_t @var{handle})
  950. Return the number of non-zero values in the matrix designated by @var{handle}.
  951. @end deftypefun
  952. @deftypefun uint32_t starpu_csr_get_nrow (starpu_data_handle_t @var{handle})
  953. Return the size of the row pointer array of the matrix designated by @var{handle}.
  954. @end deftypefun
  955. @deftypefun uint32_t starpu_csr_get_firstentry (starpu_data_handle_t @var{handle})
  956. Return the index at which all arrays (the column indexes, the row pointers...)
  957. of the matrix designated by @var{handle} start.
  958. @end deftypefun
  959. @deftypefun uintptr_t starpu_csr_get_local_nzval (starpu_data_handle_t @var{handle})
  960. Return a local pointer to the non-zero values of the matrix designated by @var{handle}.
  961. @end deftypefun
  962. @deftypefun {uint32_t *} starpu_csr_get_local_colind (starpu_data_handle_t @var{handle})
  963. Return a local pointer to the column index of the matrix designated by @var{handle}.
  964. @end deftypefun
  965. @deftypefun {uint32_t *} starpu_csr_get_local_rowptr (starpu_data_handle_t @var{handle})
  966. Return a local pointer to the row pointer array of the matrix designated by @var{handle}.
  967. @end deftypefun
  968. @deftypefun size_t starpu_csr_get_elemsize (starpu_data_handle_t @var{handle})
  969. Return the size of the elements registered into the matrix designated by @var{handle}.
  970. @end deftypefun
  971. @defmac STARPU_CSR_GET_NNZ ({void *}@var{interface})
  972. Return the number of non-zero values in the matrix designated by @var{interface}.
  973. @end defmac
  974. @defmac STARPU_CSR_GET_NROW ({void *}@var{interface})
  975. Return the size of the row pointer array of the matrix designated by @var{interface}.
  976. @end defmac
  977. @defmac STARPU_CSR_GET_NZVAL ({void *}@var{interface})
  978. Return a pointer to the non-zero values of the matrix designated by @var{interface}.
  979. @end defmac
  980. @defmac STARPU_CSR_GET_NZVAL_DEV_HANDLE ({void *}@var{interface})
  981. Return a device handle for the array of non-zero values in the matrix designated
  982. by @var{interface}. The offset documented below has to be used in addition to
  983. this.
  984. @end defmac
  985. @defmac STARPU_CSR_GET_COLIND ({void *}@var{interface})
  986. Return a pointer to the column index of the matrix designated by @var{interface}.
  987. @end defmac
  988. @defmac STARPU_CSR_GET_COLIND_DEV_HANDLE ({void *}@var{interface})
  989. Return a device handle for the column index of the matrix designated by
  990. @var{interface}. The offset documented below has to be used in addition to
  991. this.
  992. @end defmac
  993. @defmac STARPU_CSR_GET_ROWPTR ({void *}@var{interface})
  994. Return a pointer to the row pointer array of the matrix designated by @var{interface}.
  995. @end defmac
  996. @defmac STARPU_CSR_GET_ROWPTR_DEV_HANDLE ({void *}@var{interface})
  997. Return a device handle for the row pointer array of the matrix designated by
  998. @var{interface}. The offset documented below has to be used in addition to
  999. this.
  1000. @end defmac
  1001. @defmac STARPU_CSR_GET_OFFSET ({void *}@var{interface})
  1002. Return the offset in the arrays (colind, rowptr, nzval) of the matrix
  1003. designated by @var{interface}, to be used with the device handles.
  1004. @end defmac
  1005. @defmac STARPU_CSR_GET_FIRSTENTRY ({void *}@var{interface})
  1006. Return the index at which all arrays (the column indexes, the row pointers...)
of the matrix designated by @var{interface} start.
  1008. @end defmac
  1009. @defmac STARPU_CSR_GET_ELEMSIZE ({void *}@var{interface})
  1010. Return the size of the elements registered into the matrix designated by @var{interface}.
  1011. @end defmac
  1012. @node Accessing COO Data Interfaces
  1013. @subsubsection COO Data Interfaces
  1014. @defmac STARPU_COO_GET_COLUMNS ({void *}@var{interface})
  1015. Return a pointer to the column array of the matrix designated by
  1016. @var{interface}.
  1017. @end defmac
  1018. @defmac STARPU_COO_GET_COLUMNS_DEV_HANDLE ({void *}@var{interface})
  1019. Return a device handle for the column array of the matrix designated by
  1020. @var{interface}, to be used on OpenCL. The offset documented below has to be
  1021. used in addition to this.
  1022. @end defmac
@defmac STARPU_COO_GET_ROWS ({void *}@var{interface})
  1024. Return a pointer to the rows array of the matrix designated by @var{interface}.
  1025. @end defmac
  1026. @defmac STARPU_COO_GET_ROWS_DEV_HANDLE ({void *}@var{interface})
  1027. Return a device handle for the row array of the matrix designated by
  1028. @var{interface}, to be used on OpenCL. The offset documented below has to be
  1029. used in addition to this.
  1030. @end defmac
@defmac STARPU_COO_GET_VALUES ({void *}@var{interface})
  1032. Return a pointer to the values array of the matrix designated by
  1033. @var{interface}.
  1034. @end defmac
  1035. @defmac STARPU_COO_GET_VALUES_DEV_HANDLE ({void *}@var{interface})
  1036. Return a device handle for the value array of the matrix designated by
  1037. @var{interface}, to be used on OpenCL. The offset documented below has to be
  1038. used in addition to this.
  1039. @end defmac
@defmac STARPU_COO_GET_OFFSET ({void *}@var{interface})
  1041. Return the offset in the arrays of the COO matrix designated by @var{interface}.
  1042. @end defmac
@defmac STARPU_COO_GET_NX ({void *}@var{interface})
  1044. Return the number of elements on the x-axis of the matrix designated by
  1045. @var{interface}.
  1046. @end defmac
@defmac STARPU_COO_GET_NY ({void *}@var{interface})
  1048. Return the number of elements on the y-axis of the matrix designated by
  1049. @var{interface}.
  1050. @end defmac
@defmac STARPU_COO_GET_NVALUES ({void *}@var{interface})
  1052. Return the number of values registered in the matrix designated by
  1053. @var{interface}.
  1054. @end defmac
@defmac STARPU_COO_GET_ELEMSIZE ({void *}@var{interface})
  1056. Return the size of the elements registered into the matrix designated by
  1057. @var{interface}.
  1058. @end defmac
  1059. @node Defining Interface
  1060. @subsection Defining Interface
Applications can provide their own interface, as shown in
@ref{Defining a New Data Interface}.
  1063. @deftypefun uintptr_t starpu_malloc_on_node (unsigned @var{dst_node}, size_t @var{size})
Allocate @var{size} bytes on node @var{dst_node}. This returns 0 if the
allocation failed, in which case the allocation method should return
@code{-ENOMEM} as the allocated size.
  1066. @end deftypefun
  1067. @deftypefun void starpu_free_on_node (unsigned @var{dst_node}, uintptr_t @var{addr}, size_t @var{size})
  1068. Free @var{addr} of @var{size} bytes on node @var{dst_node}.
  1069. @end deftypefun
  1070. @deftp {Data Type} {struct starpu_data_interface_ops}
  1071. @anchor{struct starpu_data_interface_ops}
  1072. Per-interface data transfer methods.
  1073. @table @asis
  1074. @item @code{void (*register_data_handle)(starpu_data_handle_t handle, unsigned home_node, void *data_interface)}
  1075. Register an existing interface into a data handle.
  1076. @item @code{starpu_ssize_t (*allocate_data_on_node)(void *data_interface, unsigned node)}
  1077. Allocate data for the interface on a given node.
  1078. @item @code{ void (*free_data_on_node)(void *data_interface, unsigned node)}
  1079. Free data of the interface on a given node.
  1080. @item @code{ const struct starpu_data_copy_methods *copy_methods}
  1081. ram/cuda/opencl synchronous and asynchronous transfer methods.
  1082. @item @code{ void * (*handle_to_pointer)(starpu_data_handle_t handle, unsigned node)}
  1083. Return the current pointer (if any) for the handle on the given node.
  1084. @item @code{ size_t (*get_size)(starpu_data_handle_t handle)}
  1085. Return an estimation of the size of data, for performance models.
  1086. @item @code{ uint32_t (*footprint)(starpu_data_handle_t handle)}
  1087. Return a 32bit footprint which characterizes the data size.
  1088. @item @code{ int (*compare)(void *data_interface_a, void *data_interface_b)}
  1089. Compare the data size of two interfaces.
  1090. @item @code{ void (*display)(starpu_data_handle_t handle, FILE *f)}
  1091. Dump the sizes of a handle to a file.
  1092. @item @code{enum starpu_data_interface_id interfaceid}
  1093. An identifier that is unique to each interface.
  1094. @item @code{size_t interface_size}
  1095. The size of the interface data descriptor.
  1096. @item @code{int is_multiformat}
  1097. todo
  1098. @item @code{struct starpu_multiformat_data_interface_ops* (*get_mf_ops)(void *data_interface)}
  1099. todo
  1100. @item @code{int (*pack_data)(starpu_data_handle_t handle, unsigned node, void **ptr, ssize_t *count)}
  1101. Pack the data handle into a contiguous buffer at the address
  1102. @code{ptr} and set the size of the newly created buffer in
  1103. @code{count}. If @var{ptr} is @code{NULL}, the function should not copy the data in the
  1104. buffer but just set @var{count} to the size of the buffer which
  1105. would have been allocated. The special value @code{-1} indicates the
  1106. size is yet unknown.
  1107. @item @code{int (*unpack_data)(starpu_data_handle_t handle, unsigned node, void *ptr, size_t count)}
  1108. Unpack the data handle from the contiguous buffer at the address @code{ptr} of size @var{count}
  1109. @end table
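As an illustration, a user-defined interface may fill this structure as
sketched below; all the @code{my_} names are placeholders for
application-provided methods and types, and only part of the fields are shown:
@cartouche
@smallexample
static struct starpu_data_interface_ops my_interface_ops =
@{
    .register_data_handle = my_register_data_handle,
    .allocate_data_on_node = my_allocate_data_on_node,
    .free_data_on_node = my_free_data_on_node,
    .copy_methods = &my_copy_methods,
    .get_size = my_get_size,
    .footprint = my_footprint,
    /* interfaceid can be set at run time, e.g. from
       starpu_data_interface_get_next_id() */
    .interface_size = sizeof(struct my_interface)
@};
@end smallexample
@end cartouche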
  1110. @end deftp
  1111. @deftp {Data Type} {struct starpu_data_copy_methods}
  1112. Defines the per-interface methods. If the @code{any_to_any} method is provided,
  1113. it will be used by default if no more specific method is provided. It can still
  1114. be useful to provide more specific method in case of e.g. available particular
  1115. CUDA or OpenCL support.
  1116. @table @asis
  1117. @item @code{int (*@{ram,cuda,opencl@}_to_@{ram,cuda,opencl@})(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node)}
  1118. These 12 functions define how to copy data from the @var{src_interface}
  1119. interface on the @var{src_node} node to the @var{dst_interface} interface
  1120. on the @var{dst_node} node. They return 0 on success.
  1121. @item @code{int (*@{ram,cuda@}_to_@{ram,cuda@}_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cudaStream_t stream)}
  1122. These 3 functions (@code{ram_to_ram} is not among these) define how to copy
  1123. data from the @var{src_interface} interface on the @var{src_node} node to the
  1124. @var{dst_interface} interface on the @var{dst_node} node, using the given
@var{stream}. Must return 0 if the transfer was actually completed
synchronously, or -EAGAIN if at least some transfers are still ongoing and
should be waited for by the core.
  1128. @item @code{int (*@{ram,opencl@}_to_@{ram,opencl@}_async)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, /* cl_event * */ void *event)}
  1129. These 3 functions (@code{ram_to_ram} is not among them) define how to copy
  1130. data from the @var{src_interface} interface on the @var{src_node} node to the
  1131. @var{dst_interface} interface on the @var{dst_node} node, by recording in
  1132. @var{event}, a pointer to a cl_event, the event of the last submitted transfer.
Must return 0 if the transfer was actually completed synchronously, or -EAGAIN
if at least some transfers are still ongoing and should be waited for by the
core.
  1136. @item @code{int (*any_to_any)(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, void *async_data)}
  1137. Define how to copy data from the @var{src_interface} interface on the
  1138. @var{src_node} node to the @var{dst_interface} interface on the @var{dst_node}
node. This is meant to be implemented through the @code{starpu_interface_copy}
helper, to which @var{async_data} should be passed as such, and will be used to
manage asynchronicity. This must return -EAGAIN if any of the
@code{starpu_interface_copy} calls has returned -EAGAIN (i.e. at least some
  1143. transfer is still ongoing), and return 0 otherwise.
  1144. @end table
  1145. @end deftp
  1146. @deftypefun int starpu_interface_copy (uintptr_t @var{src}, size_t @var{src_offset}, unsigned @var{src_node}, uintptr_t @var{dst}, size_t @var{dst_offset}, unsigned @var{dst_node}, size_t @var{size}, {void *}@var{async_data})
  1147. Copy @var{size} bytes from byte offset @var{src_offset} of @var{src} on
  1148. @var{src_node} to byte offset @var{dst_offset} of @var{dst} on @var{dst_node}.
This is to be used in the @code{any_to_any} copy method, which is provided with
the @var{async_data} to be passed to @code{starpu_interface_copy}. This returns
  1151. -EAGAIN if the transfer is still ongoing, or 0 if the transfer is already
  1152. completed.
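For instance, an @code{any_to_any} method for a hypothetical contiguous
interface storing a pointer @code{ptr} (as a @code{uintptr_t}), a number of
elements @code{nx} and an element size @code{elemsize} could boil down to (a
sketch):
@cartouche
@smallexample
static int copy_any_to_any(void *src_interface, unsigned src_node,
                           void *dst_interface, unsigned dst_node,
                           void *async_data)
@{
    struct my_interface *src = src_interface;
    struct my_interface *dst = dst_interface;
    return starpu_interface_copy(src->ptr, 0, src_node,
                                 dst->ptr, 0, dst_node,
                                 src->nx * src->elemsize, async_data);
@}
@end smallexample
@end cartouche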
  1153. @end deftypefun
  1154. @deftypefun uint32_t starpu_crc32_be_n ({void *}@var{input}, size_t @var{n}, uint32_t @var{inputcrc})
  1155. Compute the CRC of a byte buffer seeded by the inputcrc "current
  1156. state". The return value should be considered as the new "current
  1157. state" for future CRC computation. This is used for computing data size
  1158. footprint.
  1159. @end deftypefun
  1160. @deftypefun uint32_t starpu_crc32_be (uint32_t @var{input}, uint32_t @var{inputcrc})
  1161. Compute the CRC of a 32bit number seeded by the inputcrc "current
  1162. state". The return value should be considered as the new "current
  1163. state" for future CRC computation. This is used for computing data size
  1164. footprint.
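For instance, the @code{footprint} method of an interface (@pxref{struct
starpu_data_interface_ops}) may combine the dimensions of the data this way (a
sketch for the vector interface):
@cartouche
@smallexample
static uint32_t my_footprint(starpu_data_handle_t handle)
@{
    return starpu_crc32_be(starpu_vector_get_nx(handle), 0);
@}
@end smallexample
@end cartouche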
  1165. @end deftypefun
  1166. @deftypefun uint32_t starpu_crc32_string ({char *}@var{str}, uint32_t @var{inputcrc})
  1167. Compute the CRC of a string seeded by the inputcrc "current state".
  1168. The return value should be considered as the new "current state" for
  1169. future CRC computation. This is used for computing data size footprint.
  1170. @end deftypefun
  1171. @deftypefun int starpu_data_interface_get_next_id (void)
  1172. Returns the next available id for a newly created data interface
  1173. (@pxref{Defining a New Data Interface}).
  1174. @end deftypefun
  1175. @node Data Partition
  1176. @section Data Partition
  1177. @menu
  1178. * Basic API::
  1179. * Predefined filter functions::
  1180. @end menu
  1181. @node Basic API
  1182. @subsection Basic API
  1183. @deftp {Data Type} {struct starpu_data_filter}
  1184. The filter structure describes a data partitioning operation, to be given to the
  1185. @code{starpu_data_partition} function, see @ref{starpu_data_partition}
  1186. for an example. The different fields are:
  1187. @table @asis
  1188. @item @code{void (*filter_func)(void *father_interface, void* child_interface, struct starpu_data_filter *, unsigned id, unsigned nparts)}
  1189. This function fills the @code{child_interface} structure with interface
  1190. information for the @code{id}-th child of the parent @code{father_interface} (among @code{nparts}).
  1191. @item @code{unsigned nchildren}
  1192. This is the number of parts to partition the data into.
  1193. @item @code{unsigned (*get_nchildren)(struct starpu_data_filter *, starpu_data_handle_t initial_handle)}
  1194. This returns the number of children. This can be used instead of @code{nchildren} when the number of
  1195. children depends on the actual data (e.g. the number of blocks in a sparse
  1196. matrix).
  1197. @item @code{struct starpu_data_interface_ops *(*get_child_ops)(struct starpu_data_filter *, unsigned id)}
  1198. In case the resulting children use a different data interface, this function
  1199. returns which interface is used by child number @code{id}.
  1200. @item @code{unsigned filter_arg}
Allows defining an additional parameter for the filter function.
  1202. @item @code{void *filter_arg_ptr}
Allows defining an additional pointer parameter for the filter
function, such as the sizes of the different parts.
  1205. @end table
  1206. @end deftp
  1207. @deftypefun void starpu_data_partition (starpu_data_handle_t @var{initial_handle}, {struct starpu_data_filter *}@var{f})
  1208. @anchor{starpu_data_partition}
  1209. This requests partitioning one StarPU data @var{initial_handle} into several
  1210. subdata according to the filter @var{f}, as shown in the following example:
  1211. @cartouche
  1212. @smallexample
  1213. struct starpu_data_filter f = @{
  .filter_func = starpu_matrix_filter_block,
  .nchildren = nslicesx,
  .get_nchildren = NULL,
  .get_child_ops = NULL
  1218. @};
  1219. starpu_data_partition(A_handle, &f);
  1220. @end smallexample
  1221. @end cartouche
  1222. @end deftypefun
  1223. @deftypefun void starpu_data_unpartition (starpu_data_handle_t @var{root_data}, unsigned @var{gathering_node})
  1224. This unapplies one filter, thus unpartitioning the data. The pieces of data are
  1225. collected back into one big piece in the @var{gathering_node} (usually 0). Tasks
  1226. working on the partitioned data must be already finished when calling @code{starpu_data_unpartition}.
  1227. @cartouche
  1228. @smallexample
  1229. starpu_data_unpartition(A_handle, 0);
  1230. @end smallexample
  1231. @end cartouche
  1232. @end deftypefun
  1233. @deftypefun int starpu_data_get_nb_children (starpu_data_handle_t @var{handle})
This function returns the number of children @var{handle} has been partitioned into.
  1235. @end deftypefun
  1236. @deftypefun starpu_data_handle_t starpu_data_get_child (starpu_data_handle_t @var{handle}, unsigned @var{i})
Return the @var{i}th child of the given @var{handle}, which must have been partitioned beforehand.
  1238. @end deftypefun
  1239. @deftypefun starpu_data_handle_t starpu_data_get_sub_data (starpu_data_handle_t @var{root_data}, unsigned @var{depth}, ... )
  1240. After partitioning a StarPU data by applying a filter,
  1241. @code{starpu_data_get_sub_data} can be used to get handles for each of
  1242. the data portions. @var{root_data} is the parent data that was
  1243. partitioned. @var{depth} is the number of filters to traverse (in
  1244. case several filters have been applied, to e.g. partition in row
  1245. blocks, and then in column blocks), and the subsequent
  1246. parameters are the indexes. The function returns a handle to the
  1247. subdata.
  1248. @cartouche
  1249. @smallexample
  1250. h = starpu_data_get_sub_data(A_handle, 1, taskx);
  1251. @end smallexample
  1252. @end cartouche
  1253. @end deftypefun
  1254. @deftypefun starpu_data_handle_t starpu_data_vget_sub_data (starpu_data_handle_t @var{root_data}, unsigned @var{depth}, va_list @var{pa})
  1255. This function is similar to @code{starpu_data_get_sub_data} but uses a
  1256. va_list for the parameter list.
  1257. @end deftypefun
  1258. @deftypefun void starpu_data_map_filters (starpu_data_handle_t @var{root_data}, unsigned @var{nfilters}, ...)
Applies @var{nfilters} filters to the handle designated by @var{root_data}
recursively. @var{nfilters} pointers to variables of the type
@code{struct starpu_data_filter} should be given.
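For instance, a matrix can be partitioned along both dimensions by applying two
filters at once (a sketch; @code{nslicesx} and @code{nslicesy} are assumed to
be defined):
@cartouche
@smallexample
struct starpu_data_filter f_horiz = @{
  .filter_func = starpu_matrix_filter_block,
  .nchildren = nslicesx
@};
struct starpu_data_filter f_vert = @{
  .filter_func = starpu_matrix_filter_vertical_block,
  .nchildren = nslicesy
@};
starpu_data_map_filters(A_handle, 2, &f_horiz, &f_vert);
@end smallexample
@end cartouche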
  1262. @end deftypefun
  1263. @deftypefun void starpu_data_vmap_filters (starpu_data_handle_t @var{root_data}, unsigned @var{nfilters}, va_list @var{pa})
Applies @var{nfilters} filters to the handle designated by @var{root_data}
recursively. It uses a va_list of pointers to variables of the type
@code{struct starpu_data_filter}.
  1267. @end deftypefun
  1268. @node Predefined filter functions
  1269. @subsection Predefined filter functions
  1270. @menu
  1271. * Partitioning Vector Data::
  1272. * Partitioning Matrix Data::
  1273. * Partitioning 3D Matrix Data::
  1274. * Partitioning BCSR Data::
  1275. @end menu
  1276. This section gives a partial list of the predefined partitioning functions.
  1277. Examples on how to use them are shown in @ref{Partitioning Data}. The complete
list can be found in @code{starpu_data_filters.h}.
  1279. @node Partitioning Vector Data
  1280. @subsubsection Partitioning Vector Data
  1281. @deftypefun void starpu_vector_filter_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1282. Return in @code{*@var{child_interface}} the @var{id}th element of the
  1283. vector represented by @var{father_interface} once partitioned in
  1284. @var{nparts} chunks of equal size.
  1285. @end deftypefun
  1286. @deftypefun void starpu_vector_filter_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1287. Return in @code{*@var{child_interface}} the @var{id}th element of the
  1288. vector represented by @var{father_interface} once partitioned in
@var{nparts} chunks of equal size with a shadow border @code{filter_arg_ptr},
thus getting a vector of size (n-2*shadow)/nparts+2*shadow.
The @code{filter_arg_ptr} field must be the shadow size cast into @code{void*}.
IMPORTANT: This can only be used for read-only access, as no coherency is
enforced for the shadowed parts.
A usage example is available in examples/filters/shadow.c.
  1294. @end deftypefun
  1295. @deftypefun void starpu_vector_filter_list (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1296. Return in @code{*@var{child_interface}} the @var{id}th element of the
  1297. vector represented by @var{father_interface} once partitioned into
  1298. @var{nparts} chunks according to the @code{filter_arg_ptr} field of
  1299. @code{*@var{f}}.
  1300. The @code{filter_arg_ptr} field must point to an array of @var{nparts}
  1301. @code{uint32_t} elements, each of which specifies the number of elements
  1302. in each chunk of the partition.
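For instance, a vector may be split into three chunks of explicitly chosen
sizes as follows (a sketch; the sizes are expected to add up to the number of
elements of the vector):
@cartouche
@smallexample
uint32_t sizes[3] = @{ 1000, 2000, 500 @};
struct starpu_data_filter f = @{
  .filter_func = starpu_vector_filter_list,
  .nchildren = 3,
  .filter_arg_ptr = sizes
@};
starpu_data_partition(vector_handle, &f);
@end smallexample
@end cartouche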
  1303. @end deftypefun
  1304. @deftypefun void starpu_vector_filter_divide_in_2 (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1305. Return in @code{*@var{child_interface}} the @var{id}th element of the
  1306. vector represented by @var{father_interface} once partitioned in two
  1307. chunks of equal size, ignoring @var{nparts}. Thus, @var{id} must be
  1308. @code{0} or @code{1}.
  1309. @end deftypefun
  1310. @node Partitioning Matrix Data
  1311. @subsubsection Partitioning Matrix Data
  1312. @deftypefun void starpu_matrix_filter_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix along the x dimension, thus getting (x/nparts,y)
  1314. matrices. If nparts does not divide x, the last submatrix contains the
  1315. remainder.
  1316. @end deftypefun
  1317. @deftypefun void starpu_matrix_filter_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix along the x dimension, with a shadow border
  1319. @code{filter_arg_ptr}, thus getting ((x-2*shadow)/nparts+2*shadow,y)
  1320. matrices. If nparts does not divide x-2*shadow, the last submatrix contains the
  1321. remainder.
  1322. IMPORTANT: This can only be used for read-only access, as no coherency is
  1323. enforced for the shadowed parts.
A usage example is available in examples/filters/shadow2d.c.
  1325. @end deftypefun
  1326. @deftypefun void starpu_matrix_filter_vertical_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix along the y dimension, thus getting (x,y/nparts)
  1328. matrices. If nparts does not divide y, the last submatrix contains the
  1329. remainder.
  1330. @end deftypefun
  1331. @deftypefun void starpu_matrix_filter_vertical_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
This partitions a dense matrix along the y dimension, with a shadow border
  1333. @code{filter_arg_ptr}, thus getting (x,(y-2*shadow)/nparts+2*shadow)
  1334. matrices. If nparts does not divide y-2*shadow, the last submatrix contains the
  1335. remainder.
  1336. IMPORTANT: This can only be used for read-only access, as no coherency is
  1337. enforced for the shadowed parts.
A usage example is available in examples/filters/shadow2d.c.
  1339. @end deftypefun
  1340. @node Partitioning 3D Matrix Data
  1341. @subsubsection Partitioning 3D Matrix Data
A usage example is available in examples/filters/shadow3d.c.
  1343. @deftypefun void starpu_block_filter_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1344. This partitions a 3D matrix along the X dimension, thus getting (x/nparts,y,z)
  1345. 3D matrices. If nparts does not divide x, the last submatrix contains the
  1346. remainder.
  1347. @end deftypefun
  1348. @deftypefun void starpu_block_filter_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1349. This partitions a 3D matrix along the X dimension, with a shadow border
  1350. @code{filter_arg_ptr}, thus getting ((x-2*shadow)/nparts+2*shadow,y,z) 3D
matrices. If nparts does not divide x-2*shadow, the last submatrix contains the
  1352. remainder.
  1353. IMPORTANT: This can only be used for read-only access, as no coherency is
  1354. enforced for the shadowed parts.
  1355. @end deftypefun
  1356. @deftypefun void starpu_block_filter_vertical_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1357. This partitions a 3D matrix along the Y dimension, thus getting (x,y/nparts,z)
  1358. 3D matrices. If nparts does not divide y, the last submatrix contains the
  1359. remainder.
  1360. @end deftypefun
  1361. @deftypefun void starpu_block_filter_vertical_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1362. This partitions a 3D matrix along the Y dimension, with a shadow border
  1363. @code{filter_arg_ptr}, thus getting (x,(y-2*shadow)/nparts+2*shadow,z) 3D
matrices. If nparts does not divide y-2*shadow, the last submatrix contains the
  1365. remainder.
  1366. IMPORTANT: This can only be used for read-only access, as no coherency is
  1367. enforced for the shadowed parts.
  1368. @end deftypefun
  1369. @deftypefun void starpu_block_filter_depth_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1370. This partitions a 3D matrix along the Z dimension, thus getting (x,y,z/nparts)
  1371. 3D matrices. If nparts does not divide z, the last submatrix contains the
  1372. remainder.
  1373. @end deftypefun
  1374. @deftypefun void starpu_block_filter_depth_block_shadow (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1375. This partitions a 3D matrix along the Z dimension, with a shadow border
  1376. @code{filter_arg_ptr}, thus getting (x,y,(z-2*shadow)/nparts+2*shadow)
3D matrices. If nparts does not divide z-2*shadow, the last submatrix contains the
  1378. remainder.
  1379. IMPORTANT: This can only be used for read-only access, as no coherency is
  1380. enforced for the shadowed parts.
  1381. @end deftypefun
  1382. @node Partitioning BCSR Data
  1383. @subsubsection Partitioning BCSR Data
  1384. @deftypefun void starpu_bcsr_filter_canonical_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1385. This partitions a block-sparse matrix into dense matrices.
  1386. @end deftypefun
  1387. @deftypefun void starpu_csr_filter_vertical_block (void *@var{father_interface}, void *@var{child_interface}, {struct starpu_data_filter} *@var{f}, unsigned @var{id}, unsigned @var{nparts})
  1388. This partitions a block-sparse matrix into vertical block-sparse matrices.
  1389. @end deftypefun
  1390. @node Multiformat Data Interface
  1391. @section Multiformat Data Interface
  1392. @deftp {Data Type} {struct starpu_multiformat_data_interface_ops}
  1393. The different fields are:
  1394. @table @asis
  1395. @item @code{size_t cpu_elemsize}
  1396. the size of each element on CPUs,
  1397. @item @code{size_t opencl_elemsize}
  1398. the size of each element on OpenCL devices,
  1399. @item @code{struct starpu_codelet *cpu_to_opencl_cl}
  1400. pointer to a codelet which converts from CPU to OpenCL
  1401. @item @code{struct starpu_codelet *opencl_to_cpu_cl}
  1402. pointer to a codelet which converts from OpenCL to CPU
  1403. @item @code{size_t cuda_elemsize}
  1404. the size of each element on CUDA devices,
  1405. @item @code{struct starpu_codelet *cpu_to_cuda_cl}
  1406. pointer to a codelet which converts from CPU to CUDA
  1407. @item @code{struct starpu_codelet *cuda_to_cpu_cl}
  1408. pointer to a codelet which converts from CUDA to CPU
  1409. @end table
  1410. @end deftp
  1411. @deftypefun void starpu_multiformat_data_register (starpu_data_handle_t *@var{handle}, unsigned @var{home_node}, void *@var{ptr}, uint32_t @var{nobjects}, struct starpu_multiformat_data_interface_ops *@var{format_ops})
  1412. Register a piece of data that can be represented in different ways, depending upon
  1413. the processing unit that manipulates it. It allows the programmer, for instance, to
  1414. use an array of structures when working on a CPU, and a structure of arrays when
  1415. working on a GPU.
  1416. @var{nobjects} is the number of elements in the data. @var{format_ops} describes
  1417. the format.
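For instance, an array of structures used on CPUs may be converted into a
structure of arrays on CUDA devices (a sketch; the element types, the
conversion codelets @code{cpu_to_cuda_cl} and @code{cuda_to_cpu_cl}, the
@code{points} array and its size @code{NX} are all assumed to be defined by the
application):
@cartouche
@smallexample
struct starpu_multiformat_data_interface_ops format_ops =
@{
    .cpu_elemsize = sizeof(struct point),
    .cuda_elemsize = sizeof(struct struct_of_arrays),
    .cpu_to_cuda_cl = &cpu_to_cuda_cl,
    .cuda_to_cpu_cl = &cuda_to_cpu_cl
@};
starpu_data_handle_t handle;
starpu_multiformat_data_register(&handle, 0, points, NX, &format_ops);
@end smallexample
@end cartouche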
  1418. @end deftypefun
  1419. @defmac STARPU_MULTIFORMAT_GET_CPU_PTR ({void *}@var{interface})
  1420. returns the local pointer to the data with CPU format.
  1421. @end defmac
  1422. @defmac STARPU_MULTIFORMAT_GET_CUDA_PTR ({void *}@var{interface})
  1423. returns the local pointer to the data with CUDA format.
  1424. @end defmac
  1425. @defmac STARPU_MULTIFORMAT_GET_OPENCL_PTR ({void *}@var{interface})
  1426. returns the local pointer to the data with OpenCL format.
  1427. @end defmac
  1428. @defmac STARPU_MULTIFORMAT_GET_NX ({void *}@var{interface})
  1429. returns the number of elements in the data.
  1430. @end defmac
  1431. @node Codelets and Tasks
  1432. @section Codelets and Tasks
  1433. This section describes the interface to manipulate codelets and tasks.
  1434. @deftp {Data Type} {enum starpu_codelet_type}
  1435. Describes the type of parallel task. The different values are:
  1436. @table @asis
  1437. @item @code{STARPU_SEQ} (default) for classical sequential tasks.
  1438. @item @code{STARPU_SPMD} for a parallel task whose threads are handled by
  1439. StarPU, the code has to use @code{starpu_combined_worker_get_size} and
  1440. @code{starpu_combined_worker_get_rank} to distribute the work
  1441. @item @code{STARPU_FORKJOIN} for a parallel task whose threads are started by
  1442. the codelet function, which has to use @code{starpu_combined_worker_get_size} to
  1443. determine how many threads should be started.
  1444. @end table
  1445. See @ref{Parallel Tasks} for details.
  1446. @end deftp
  1447. @defmac STARPU_CPU
  1448. This macro is used when setting the field @code{where} of a @code{struct
  1449. starpu_codelet} to specify the codelet may be executed on a CPU
  1450. processing unit.
  1451. @end defmac
  1452. @defmac STARPU_CUDA
  1453. This macro is used when setting the field @code{where} of a @code{struct
  1454. starpu_codelet} to specify the codelet may be executed on a CUDA
  1455. processing unit.
  1456. @end defmac
  1457. @defmac STARPU_OPENCL
  1458. This macro is used when setting the field @code{where} of a @code{struct
  1459. starpu_codelet} to specify the codelet may be executed on a OpenCL
  1460. processing unit.
  1461. @end defmac
  1462. @defmac STARPU_MULTIPLE_CPU_IMPLEMENTATIONS
  1463. Setting the field @code{cpu_func} of a @code{struct starpu_codelet}
  1464. with this macro indicates the codelet will have several
implementations. The use of this macro is deprecated. One should
instead simply define the field @code{cpu_funcs}.
  1467. @end defmac
  1468. @defmac STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS
  1469. Setting the field @code{cuda_func} of a @code{struct starpu_codelet}
  1470. with this macro indicates the codelet will have several
implementations. The use of this macro is deprecated. One should
instead simply define the field @code{cuda_funcs}.
  1473. @end defmac
  1474. @defmac STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS
  1475. Setting the field @code{opencl_func} of a @code{struct starpu_codelet}
  1476. with this macro indicates the codelet will have several
implementations. The use of this macro is deprecated. One should
instead simply define the field @code{opencl_funcs}.
  1479. @end defmac
  1480. @deftp {Data Type} {struct starpu_codelet}
  1481. The codelet structure describes a kernel that is possibly implemented on various
  1482. targets. For compatibility, make sure to initialize the whole structure to zero,
either by using an explicit @code{memset}, or by letting the compiler
implicitly do it, as in the case of static storage.
  1485. @table @asis
  1486. @item @code{uint32_t where} (optional)
  1487. Indicates which types of processing units are able to execute the
  1488. codelet. The different values
  1489. @code{STARPU_CPU}, @code{STARPU_CUDA},
  1490. @code{STARPU_OPENCL} can be combined to specify
  1491. on which types of processing units the codelet can be executed.
  1492. @code{STARPU_CPU|STARPU_CUDA} for instance indicates that the codelet is
  1493. implemented for both CPU cores and CUDA devices while @code{STARPU_OPENCL}
  1494. indicates that it is only available on OpenCL devices. If the field is
  1495. unset, its value will be automatically set based on the availability
  1496. of the @code{XXX_funcs} fields defined below.
  1497. @item @code{int (*can_execute)(unsigned workerid, struct starpu_task *task, unsigned nimpl)} (optional)
  1498. Defines a function which should return 1 if the worker designated by
  1499. @var{workerid} can execute the @var{nimpl}th implementation of the
  1500. given @var{task}, 0 otherwise.
  1501. @item @code{enum starpu_codelet_type type} (optional)
  1502. The default is @code{STARPU_SEQ}, i.e. usual sequential implementation. Other
values (@code{STARPU_SPMD} or @code{STARPU_FORKJOIN}) declare that a parallel
  1504. implementation is also available. See @ref{Parallel Tasks} for details.
  1505. @item @code{int max_parallelism} (optional)
  1506. If a parallel implementation is available, this denotes the maximum combined
  1507. worker size that StarPU will use to execute parallel tasks for this codelet.
  1508. @item @code{starpu_cpu_func_t cpu_func} (optional)
  1509. This field has been made deprecated. One should use instead the
  1510. @code{cpu_funcs} field.
  1511. @item @code{starpu_cpu_func_t cpu_funcs[STARPU_MAXIMPLEMENTATIONS]} (optional)
  1512. Is an array of function pointers to the CPU implementations of the codelet.
  1513. It must be terminated by a NULL value.
The prototype of these functions must be: @code{void cpu_func(void *buffers[], void *cl_arg)}. The first
argument is the array of data managed by the data management library, and
the second argument is a pointer to the argument passed from the @code{cl_arg}
field of the @code{starpu_task} structure.
If the @code{where} field is set, then the @code{cpu_funcs} field is
ignored if @code{STARPU_CPU} does not appear in the @code{where}
field; otherwise it must be non-null.
  1521. @item @code{starpu_cuda_func_t cuda_func} (optional)
  1522. This field has been made deprecated. One should use instead the
  1523. @code{cuda_funcs} field.
  1524. @item @code{starpu_cuda_func_t cuda_funcs[STARPU_MAXIMPLEMENTATIONS]} (optional)
  1525. Is an array of function pointers to the CUDA implementations of the codelet.
  1526. It must be terminated by a NULL value.
  1527. @emph{The functions must be host-functions written in the CUDA runtime
  1528. API}. Their prototype must
  1529. be: @code{void cuda_func(void *buffers[], void *cl_arg);}.
  1530. If the @code{where} field is set, then the @code{cuda_funcs}
  1531. field is ignored if @code{STARPU_CUDA} does not appear in the @code{where}
field; otherwise it must be non-null.
  1533. @item @code{starpu_opencl_func_t opencl_func} (optional)
  1534. This field has been made deprecated. One should use instead the
  1535. @code{opencl_funcs} field.
  1536. @item @code{starpu_opencl_func_t opencl_funcs[STARPU_MAXIMPLEMENTATIONS]} (optional)
  1537. Is an array of function pointers to the OpenCL implementations of the codelet.
  1538. It must be terminated by a NULL value.
The prototype of these functions must be:
  1540. @code{void opencl_func(void *buffers[], void *cl_arg);}.
  1541. If the @code{where} field is set, then the @code{opencl_funcs} field
  1542. is ignored if @code{STARPU_OPENCL} does not appear in the @code{where}
field; otherwise it must be non-null.
  1544. @item @code{unsigned nbuffers}
  1545. Specifies the number of arguments taken by the codelet. These arguments are
  1546. managed by the DSM and are accessed from the @code{void *buffers[]}
  1547. array. The constant argument passed with the @code{cl_arg} field of the
  1548. @code{starpu_task} structure is not counted in this number. This value should
  1549. not be above @code{STARPU_NMAXBUFS}.
  1550. @item @code{enum starpu_access_mode modes[STARPU_NMAXBUFS]}
  1551. Is an array of @code{enum starpu_access_mode}. It describes the
required access modes to the data needed by the codelet (e.g.
  1553. @code{STARPU_RW}). The number of entries in this array must be
  1554. specified in the @code{nbuffers} field (defined above), and should not
  1555. exceed @code{STARPU_NMAXBUFS}.
If insufficient, this value can be set with the @code{--enable-maxbuffers}
  1557. option when configuring StarPU.
  1558. @item @code{struct starpu_perfmodel *model} (optional)
  1559. This is a pointer to the task duration performance model associated to this
  1560. codelet. This optional field is ignored when set to @code{NULL} or
  1561. when its @code{symbol} field is not set.
  1562. @item @code{struct starpu_perfmodel *power_model} (optional)
  1563. This is a pointer to the task power consumption performance model associated
  1564. to this codelet. This optional field is ignored when set to
  1565. @code{NULL} or when its @code{symbol} field is not set.
  1566. In the case of parallel codelets, this has to account for all processing units
  1567. involved in the parallel execution.
  1568. @item @code{unsigned long per_worker_stats[STARPU_NMAXWORKERS]} (optional)
  1569. Statistics collected at runtime: this is filled by StarPU and should not be
  1570. accessed directly, but for example by calling the
  1571. @code{starpu_display_codelet_stats} function (See
  1572. @ref{starpu_display_codelet_stats} for details).
  1573. @item @code{const char *name} (optional)
  1574. Define the name of the codelet. This can be useful for debugging purposes.
  1575. @end table
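As an illustration, a codelet with a single CPU implementation, accessing one
buffer in read-write mode, may be declared as follows (a sketch;
@code{scal_cpu_func} is assumed to be defined elsewhere):
@cartouche
@smallexample
static struct starpu_codelet scal_cl =
@{
    .cpu_funcs = @{ scal_cpu_func, NULL @},
    .nbuffers = 1,
    .modes = @{ STARPU_RW @},
    .name = "scal"
@};
@end smallexample
@end cartouche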
  1576. @end deftp
  1577. @deftypefun void starpu_codelet_init ({struct starpu_codelet} *@var{cl})
  1578. Initialize @var{cl} with default values. Codelets should preferably be
  1579. initialized statically as shown in @ref{Defining a Codelet}. However
such an initialisation is not always possible, e.g. when using C++.
  1581. @end deftypefun
  1582. @deftp {Data Type} {enum starpu_task_status}
  1583. State of a task, can be either of
  1584. @table @asis
  1585. @item @code{STARPU_TASK_INVALID} The task has just been initialized.
@item @code{STARPU_TASK_BLOCKED} The task has just been submitted, and its dependencies have not been checked yet.
  1587. @item @code{STARPU_TASK_READY} The task is ready for execution.
  1588. @item @code{STARPU_TASK_RUNNING} The task is running on some worker.
  1589. @item @code{STARPU_TASK_FINISHED} The task is finished executing.
  1590. @item @code{STARPU_TASK_BLOCKED_ON_TAG} The task is waiting for a tag.
  1591. @item @code{STARPU_TASK_BLOCKED_ON_TASK} The task is waiting for a task.
  1592. @item @code{STARPU_TASK_BLOCKED_ON_DATA} The task is waiting for some data.
  1593. @end table
  1594. @end deftp
  1595. @deftp {Data Type} {struct starpu_buffer_descr}
  1596. This type is used to describe a data handle along with an
  1597. access mode.
  1598. @table @asis
  1599. @item @code{starpu_data_handle_t handle} describes a data,
  1600. @item @code{enum starpu_access_mode mode} describes its access mode
  1601. @end table
  1602. @end deftp
  1603. @deftp {Data Type} {struct starpu_task}
  1604. The @code{starpu_task} structure describes a task that can be offloaded on the various
  1605. processing units managed by StarPU. It instantiates a codelet. It can either be
  1606. allocated dynamically with the @code{starpu_task_create} method, or declared
  1607. statically. In the latter case, the programmer has to zero the
  1608. @code{starpu_task} structure and to fill the different fields properly. The
  1609. indicated default values correspond to the configuration of a task allocated
  1610. with @code{starpu_task_create}.
  1611. @table @asis
  1612. @item @code{struct starpu_codelet *cl}
  1613. Is a pointer to the corresponding @code{struct starpu_codelet} data structure. This
  1614. describes where the kernel should be executed, and supplies the appropriate
implementations. When set to @code{NULL}, no code is executed during the task;
such empty tasks can be useful for synchronization purposes.
  1617. @item @code{struct starpu_buffer_descr buffers[STARPU_NMAXBUFS]}
This field is deprecated. Use the @code{handles} field instead to
specify the handles to the data accessed by the task. The access modes
are now defined in the @code{modes} field of the @code{struct
starpu_codelet} pointed to by the @code{cl} field defined above.
  1622. @item @code{starpu_data_handle_t handles[STARPU_NMAXBUFS]}
  1623. Is an array of @code{starpu_data_handle_t}. It specifies the handles
  1624. to the different pieces of data accessed by the task. The number
  1625. of entries in this array must be specified in the @code{nbuffers} field of the
  1626. @code{struct starpu_codelet} structure, and should not exceed
  1627. @code{STARPU_NMAXBUFS}.
If insufficient, this value can be set with the @code{--enable-maxbuffers}
  1629. option when configuring StarPU.
  1630. @item @code{void *interfaces[STARPU_NMAXBUFS]}
  1631. The actual data pointers to the memory node where execution will happen, managed
  1632. by the DSM.
  1633. @item @code{void *cl_arg} (optional; default: @code{NULL})
  1634. This pointer is passed to the codelet through the second argument
  1635. of the codelet implementation (e.g. @code{cpu_func} or @code{cuda_func}).
  1636. @item @code{size_t cl_arg_size} (optional)
For some specific drivers, the @code{cl_arg} pointer cannot be directly
  1638. given to the driver function. A buffer of size @code{cl_arg_size}
  1639. needs to be allocated on the driver. This buffer is then filled with
  1640. the @code{cl_arg_size} bytes starting at address @code{cl_arg}. In
  1641. this case, the argument given to the codelet is therefore not the
  1642. @code{cl_arg} pointer, but the address of the buffer in local store
  1643. (LS) instead.
  1644. This field is ignored for CPU, CUDA and OpenCL codelets, where the
  1645. @code{cl_arg} pointer is given as such.
  1646. @item @code{void (*callback_func)(void *)} (optional) (default: @code{NULL})
  1647. This is a function pointer of prototype @code{void (*f)(void *)} which
  1648. specifies a possible callback. If this pointer is non-null, the callback
  1649. function is executed @emph{on the host} after the execution of the task. Tasks
  1650. which depend on it might already be executing. The callback is passed the
  1651. value contained in the @code{callback_arg} field. No callback is executed if the
  1652. field is set to @code{NULL}.
  1653. @item @code{void *callback_arg} (optional) (default: @code{NULL})
  1654. This is the pointer passed to the callback function. This field is ignored if
  1655. the @code{callback_func} is set to @code{NULL}.
  1656. @item @code{unsigned use_tag} (optional) (default: @code{0})
  1657. If set, this flag indicates that the task should be associated with the tag
contained in the @code{tag_id} field. Tags allow the application to synchronize
  1659. with the task and to express task dependencies easily.
  1660. @item @code{starpu_tag_t tag_id}
This field contains the tag associated with the task if the @code{use_tag}
field is set; it is ignored otherwise.
  1663. @item @code{unsigned sequential_consistency}
  1664. If this flag is set (which is the default), sequential consistency is enforced
  1665. for the data parameters of this task for which sequential consistency is
enabled. Clearing this flag disables sequential consistency for this
task, even for data which have it enabled.
  1668. @item @code{unsigned synchronous}
  1669. If this flag is set, the @code{starpu_task_submit} function is blocking and
  1670. returns only when the task has been executed (or if no worker is able to
  1671. process the task). Otherwise, @code{starpu_task_submit} returns immediately.
  1672. @item @code{int priority} (optional) (default: @code{STARPU_DEFAULT_PRIO})
  1673. This field indicates a level of priority for the task. This is an integer value
  1674. that must be set between the return values of the
  1675. @code{starpu_sched_get_min_priority} function for the least important tasks,
  1676. and that of the @code{starpu_sched_get_max_priority} for the most important
  1677. tasks (included). The @code{STARPU_MIN_PRIO} and @code{STARPU_MAX_PRIO} macros
are provided for convenience and respectively return the value of
  1679. @code{starpu_sched_get_min_priority} and @code{starpu_sched_get_max_priority}.
  1680. Default priority is @code{STARPU_DEFAULT_PRIO}, which is always defined as 0 in
  1681. order to allow static task initialization. Scheduling strategies that take
  1682. priorities into account can use this parameter to take better scheduling
  1683. decisions, but the scheduling policy may also ignore it.
  1684. @item @code{unsigned execute_on_a_specific_worker} (default: @code{0})
If this flag is set, StarPU will bypass the scheduler and directly assign this
task to the worker specified by the @code{workerid} field.
  1687. @item @code{unsigned workerid} (optional)
  1688. If the @code{execute_on_a_specific_worker} field is set, this field indicates
the identifier of the worker that should process this task (as
returned by @code{starpu_worker_get_id}). This field is ignored if the
@code{execute_on_a_specific_worker} field is set to 0.
  1692. @item @code{starpu_task_bundle_t bundle} (optional)
  1693. The bundle that includes this task. If no bundle is used, this should be NULL.
  1694. @item @code{int detach} (optional) (default: @code{1})
  1695. If this flag is set, it is not possible to synchronize with the task
  1696. by the means of @code{starpu_task_wait} later on. Internal data structures
  1697. are only guaranteed to be freed once @code{starpu_task_wait} is called if the
  1698. flag is not set.
  1699. @item @code{int destroy} (optional) (default: @code{0} for starpu_task_init, @code{1} for starpu_task_create)
  1700. If this flag is set, the task structure will automatically be freed, either
  1701. after the execution of the callback if the task is detached, or during
  1702. @code{starpu_task_wait} otherwise. If this flag is not set, dynamically
  1703. allocated data structures will not be freed until @code{starpu_task_destroy} is
  1704. called explicitly. Setting this flag for a statically allocated task structure
  1705. will result in undefined behaviour. The flag is set to 1 when the task is
  1706. created by calling @code{starpu_task_create()}. Note that
  1707. @code{starpu_task_wait_for_all} will not free any task.
  1708. @item @code{int regenerate} (optional)
  1709. If this flag is set, the task will be re-submitted to StarPU once it has been
  1710. executed. This flag must not be set if the destroy flag is set too.
  1711. @item @code{enum starpu_task_status status} (optional)
  1712. Current state of the task.
  1713. @item @code{struct starpu_task_profiling_info *profiling_info} (optional)
  1714. Profiling information for the task.
  1715. @item @code{double predicted} (output field)
  1716. Predicted duration of the task. This field is only set if the scheduling
strategy uses performance models.
  1718. @item @code{double predicted_transfer} (optional)
  1719. Predicted data transfer duration for the task in microseconds. This field is
  1720. only valid if the scheduling strategy uses performance models.
  1721. @item @code{struct starpu_task *prev}
  1722. A pointer to the previous task. This should only be used by StarPU.
  1723. @item @code{struct starpu_task *next}
  1724. A pointer to the next task. This should only be used by StarPU.
  1725. @item @code{unsigned int mf_skip}
  1726. This is only used for tasks that use multiformat handle. This should only be
  1727. used by StarPU.
  1728. @item @code{double flops}
This can be set to the number of floating point operations that the task
will perform. This is useful for easily getting GFlops curves from
  1731. @code{starpu_perfmodel_plot}, and for the hypervisor load balancing.
  1732. @item @code{void *starpu_private}
This is private to StarPU, do not modify it. If the task is allocated by hand
(without @code{starpu_task_create}), this field should be set to @code{NULL}.
  1735. @item @code{int magic}
  1736. This field is set when initializing a task. It prevents a task from being
  1737. submitted if it has not been properly initialized.
  1738. @end table
  1739. @end deftp
  1740. @deftypefun void starpu_task_init ({struct starpu_task} *@var{task})
  1741. Initialize @var{task} with default values. This function is implicitly
  1742. called by @code{starpu_task_create}. By default, tasks initialized with
  1743. @code{starpu_task_init} must be deinitialized explicitly with
  1744. @code{starpu_task_clean}. Tasks can also be initialized statically,
  1745. using @code{STARPU_TASK_INITIALIZER} defined below.
  1746. @end deftypefun
  1747. @defmac STARPU_TASK_INITIALIZER
  1748. It is possible to initialize statically allocated tasks with this
  1749. value. This is equivalent to initializing a starpu_task structure with
  1750. the @code{starpu_task_init} function defined above.
  1751. @end defmac
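As a sketch, a statically allocated task could be set up as follows; the
codelet @code{cl} and the data handle @code{handle} are assumed to be defined
elsewhere by the application.
@cartouche
@smallexample
struct starpu_task task = STARPU_TASK_INITIALIZER;

task.cl = &cl;             /* hypothetical codelet */
task.handles[0] = handle;  /* hypothetical data handle */
task.synchronous = 1;      /* make starpu_task_submit() block */

starpu_task_submit(&task);
starpu_task_clean(&task);  /* the task was waited for, release internals */
@end smallexample
@end cartouche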
  1752. @deftypefun {struct starpu_task *} starpu_task_create (void)
  1753. Allocate a task structure and initialize it with default values. Tasks
  1754. allocated dynamically with @code{starpu_task_create} are automatically freed when the
  1755. task is terminated. This means that the task pointer can not be used any more
  1756. once the task is submitted, since it can be executed at any time (unless
  1757. dependencies make it wait) and thus freed at any time.
  1758. If the destroy flag is explicitly unset, the resources used
  1759. by the task have to be freed by calling
  1760. @code{starpu_task_destroy}.
  1761. @end deftypefun
  1762. @deftypefun void starpu_task_clean ({struct starpu_task} *@var{task})
Release all the structures automatically allocated to execute @var{task}, but
not the task structure itself: values set by the user remain unchanged.
It is thus useful for statically allocated tasks, for instance.
It is also useful when the user wants to execute the same operation several
times with as little overhead as possible.
  1768. It is called automatically by @code{starpu_task_destroy}.
  1769. It has to be called only after explicitly waiting for the task or after
  1770. @code{starpu_shutdown} (waiting for the callback is not enough, since starpu
  1771. still manipulates the task after calling the callback).
  1772. @end deftypefun
  1773. @deftypefun void starpu_task_destroy ({struct starpu_task} *@var{task})
Free the resources allocated by @code{starpu_task_create} and
  1775. associated with @var{task}. This function is already called automatically
  1776. after the execution of a task when the @code{destroy} flag of the
  1777. @code{starpu_task} structure is set, which is the default for tasks created by
  1778. @code{starpu_task_create}. Calling this function on a statically allocated task
results in undefined behaviour.
  1780. @end deftypefun
  1781. @deftypefun int starpu_task_wait ({struct starpu_task} *@var{task})
  1782. This function blocks until @var{task} has been executed. It is not possible to
  1783. synchronize with a task more than once. It is not possible to wait for
  1784. synchronous or detached tasks.
  1785. Upon successful completion, this function returns 0. Otherwise, @code{-EINVAL}
  1786. indicates that the specified task was either synchronous or detached.
  1787. @end deftypefun
  1788. @deftypefun int starpu_task_submit ({struct starpu_task} *@var{task})
  1789. This function submits @var{task} to StarPU. Calling this function does
  1790. not mean that the task will be executed immediately as there can be data or task
  1791. (tag) dependencies that are not fulfilled yet: StarPU will take care of
  1792. scheduling this task with respect to such dependencies.
  1793. This function returns immediately if the @code{synchronous} field of the
@code{starpu_task} structure was set to 0, and blocks until the termination of
  1795. the task otherwise. It is also possible to synchronize the application with
  1796. asynchronous tasks by the means of tags, using the @code{starpu_tag_wait}
  1797. function for instance.
In case of success, this function returns 0; a return value of @code{-ENODEV}
  1799. means that there is no worker able to process this task (e.g. there is no GPU
  1800. available and this task is only implemented for CUDA devices).
  1801. starpu_task_submit() can be called from anywhere, including codelet
  1802. functions and callbacks, provided that the @code{synchronous} field of the
  1803. @code{starpu_task} structure is left to 0.
  1804. @end deftypefun
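The following sketch shows the usual dynamic allocation pattern; the codelet
@code{cl} and the data handle @code{handle} are assumed to be defined by the
application.
@cartouche
@smallexample
static float factor = 3.14;  /* must remain valid until the task executes */
struct starpu_task *task = starpu_task_create();

task->cl = &cl;                   /* hypothetical codelet */
task->handles[0] = handle;        /* hypothetical data handle */
task->cl_arg = &factor;           /* constant argument */
task->cl_arg_size = sizeof(factor);

int ret = starpu_task_submit(task);
if (ret == -ENODEV)
        fprintf(stderr, "no worker can execute this task\n");
@end smallexample
@end cartouche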
  1805. @deftypefun int starpu_task_wait_for_all (void)
  1806. This function blocks until all the tasks that were submitted are terminated. It
  1807. does not destroy these tasks.
  1808. @end deftypefun
  1811. @deftypefun int starpu_task_nsubmitted (void)
  1812. Return the number of submitted tasks which have not completed yet.
  1813. @end deftypefun
  1814. @deftypefun int starpu_task_nready (void)
Return the number of submitted tasks which are ready for execution or are already
  1816. executing. It thus does not include tasks waiting for dependencies.
  1817. @end deftypefun
  1818. @deftypefun {struct starpu_task *} starpu_task_get_current (void)
  1819. This function returns the task currently executed by the worker, or
@code{NULL} if it is called either from a thread that is not executing a task, or simply
  1821. because there is no task being executed at the moment.
  1822. @end deftypefun
  1823. @deftypefun void starpu_display_codelet_stats ({struct starpu_codelet} *@var{cl})
  1824. @anchor{starpu_display_codelet_stats}
  1825. Output on @code{stderr} some statistics on the codelet @var{cl}.
  1826. @end deftypefun
  1827. @deftypefun int starpu_task_wait_for_no_ready (void)
This function waits until there are no more ready tasks.
  1829. @end deftypefun
  1830. @c Callbacks: what can we put in callbacks ?
  1831. @node Insert Task
  1832. @section Insert Task
  1833. @deftypefun int starpu_insert_task (struct starpu_codelet *@var{cl}, ...)
  1834. Create and submit a task corresponding to @var{cl} with the following
  1835. arguments. The argument list must be zero-terminated.
The arguments following the codelet can be of the following types:
  1837. @itemize
  1838. @item
an access mode (@code{STARPU_R}, @code{STARPU_W}, @code{STARPU_RW}, @code{STARPU_SCRATCH} or @code{STARPU_REDUX}) followed by a data handle;
  1840. @item
  1841. @code{STARPU_DATA_ARRAY} followed by an array of data handles and its number of elements;
  1842. @item
  1843. the specific values @code{STARPU_VALUE}, @code{STARPU_CALLBACK},
  1844. @code{STARPU_CALLBACK_ARG}, @code{STARPU_CALLBACK_WITH_ARG},
@code{STARPU_PRIORITY}, @code{STARPU_TAG}, @code{STARPU_FLOPS}, followed by the appropriate objects
  1846. as defined below.
  1847. @end itemize
  1848. When using @code{STARPU_DATA_ARRAY}, the access mode of the data
  1849. handles is not defined.
  1850. Parameters to be passed to the codelet implementation are defined
  1851. through the type @code{STARPU_VALUE}. The function
  1852. @code{starpu_codelet_unpack_args} must be called within the codelet
  1853. implementation to retrieve them.
  1854. @end deftypefun
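As a minimal sketch, assuming a codelet @code{cl} expecting one buffer and a
previously registered handle @code{handle}, a call could look as follows:
@cartouche
@smallexample
float factor = 3.14;

starpu_insert_task(&cl,
                   STARPU_VALUE, &factor, sizeof(factor),
                   STARPU_RW, handle,
                   STARPU_PRIORITY, 1,
                   0);
@end smallexample
@end cartouche
The constant @code{factor} is packed at submission time and can be retrieved
from within the codelet implementation with @code{starpu_codelet_unpack_args}.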
  1855. @defmac STARPU_VALUE
  1856. this macro is used when calling @code{starpu_insert_task}, and must be
  1857. followed by a pointer to a constant value and the size of the constant
  1858. @end defmac
  1859. @defmac STARPU_CALLBACK
  1860. this macro is used when calling @code{starpu_insert_task}, and must be
  1861. followed by a pointer to a callback function
  1862. @end defmac
  1863. @defmac STARPU_CALLBACK_ARG
  1864. this macro is used when calling @code{starpu_insert_task}, and must be
  1865. followed by a pointer to be given as an argument to the callback
  1866. function
  1867. @end defmac
  1868. @defmac STARPU_CALLBACK_WITH_ARG
  1869. this macro is used when calling @code{starpu_insert_task}, and must be
  1870. followed by two pointers: one to a callback function, and the other to
  1871. be given as an argument to the callback function; this is equivalent
  1872. to using both @code{STARPU_CALLBACK} and
@code{STARPU_CALLBACK_ARG}
  1874. @end defmac
  1875. @defmac STARPU_PRIORITY
  1876. this macro is used when calling @code{starpu_insert_task}, and must be
followed by an integer defining a priority level
  1878. @end defmac
  1879. @defmac STARPU_TAG
  1880. this macro is used when calling @code{starpu_insert_task}, and must be
  1881. followed by a tag.
  1882. @end defmac
  1883. @defmac STARPU_FLOPS
  1884. this macro is used when calling @code{starpu_insert_task}, and must be followed
  1885. by an amount of floating point operations, as a double. The user may have to
explicitly cast the value to double, otherwise parameter passing will not work.
  1887. @end defmac
  1888. @deftypefun void starpu_codelet_pack_args ({char **}@var{arg_buffer}, {size_t *}@var{arg_buffer_size}, ...)
  1889. Pack arguments of type @code{STARPU_VALUE} into a buffer which can be
  1890. given to a codelet and later unpacked with the function
  1891. @code{starpu_codelet_unpack_args} defined below.
  1892. @end deftypefun
  1893. @deftypefun void starpu_codelet_unpack_args ({void *}@var{cl_arg}, ...)
  1894. Retrieve the arguments of type @code{STARPU_VALUE} associated to a
  1895. task automatically created using the function
  1896. @code{starpu_insert_task} defined above.
  1897. @end deftypefun
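For instance, a CPU implementation matching the @code{starpu_insert_task} call
sketched above could retrieve the packed value as follows; @code{scal_cpu_func}
is a hypothetical kernel name.
@cartouche
@smallexample
void scal_cpu_func(void *buffers[], void *cl_arg)
@{
        float factor;

        starpu_codelet_unpack_args(cl_arg, &factor);
        /* ... access the first buffer through its data interface ... */
@}
@end smallexample
@end cartouche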
  1898. @node Explicit Dependencies
  1899. @section Explicit Dependencies
  1900. @deftypefun void starpu_task_declare_deps_array ({struct starpu_task} *@var{task}, unsigned @var{ndeps}, {struct starpu_task} *@var{task_array}[])
  1901. Declare task dependencies between a @var{task} and an array of tasks of length
  1902. @var{ndeps}. This function must be called prior to the submission of the task,
but it may be called after the submission or the execution of the tasks in the
array, provided the tasks are still valid (i.e. they were not automatically
destroyed). Calling this function on a task that was already submitted or with
an entry of @var{task_array} that is not a valid task anymore results in
undefined behaviour. If @var{ndeps} is 0, no dependency is added. It is
possible to call @code{starpu_task_declare_deps_array} multiple times on the
same task; in this case, the dependencies are added. It is possible to have
  1910. redundancy in the task dependencies.
  1911. @end deftypefun
  1912. @deftp {Data Type} {starpu_tag_t}
This type defines a task logical identifier. It is possible to associate a task with a unique ``tag'' chosen by the application, and to express
  1914. dependencies between tasks by the means of those tags. To do so, fill the
  1915. @code{tag_id} field of the @code{starpu_task} structure with a tag number (can
  1916. be arbitrary) and set the @code{use_tag} field to 1.
  1917. If @code{starpu_tag_declare_deps} is called with this tag number, the task will
not be started until the tasks which hold the declared dependency tags are
  1919. completed.
  1920. @end deftp
  1921. @deftypefun void starpu_tag_declare_deps (starpu_tag_t @var{id}, unsigned @var{ndeps}, ...)
  1922. Specify the dependencies of the task identified by tag @var{id}. The first
  1923. argument specifies the tag which is configured, the second argument gives the
  1924. number of tag(s) on which @var{id} depends. The following arguments are the
  1925. tags which have to be terminated to unlock the task.
  1926. This function must be called before the associated task is submitted to StarPU
  1927. with @code{starpu_task_submit}.
  1928. Because of the variable arity of @code{starpu_tag_declare_deps}, note that the
  1929. last arguments @emph{must} be of type @code{starpu_tag_t}: constant values
typically need to be explicitly cast. Using the
  1931. @code{starpu_tag_declare_deps_array} function avoids this hazard.
  1932. @cartouche
  1933. @smallexample
  1934. /* Tag 0x1 depends on tags 0x32 and 0x52 */
  1935. starpu_tag_declare_deps((starpu_tag_t)0x1,
  1936. 2, (starpu_tag_t)0x32, (starpu_tag_t)0x52);
  1937. @end smallexample
  1938. @end cartouche
  1939. @end deftypefun
  1940. @deftypefun void starpu_tag_declare_deps_array (starpu_tag_t @var{id}, unsigned @var{ndeps}, {starpu_tag_t *}@var{array})
  1941. This function is similar to @code{starpu_tag_declare_deps}, except
that it does not take a variable number of arguments but an array of
  1943. tags of size @var{ndeps}.
  1944. @cartouche
  1945. @smallexample
  1946. /* Tag 0x1 depends on tags 0x32 and 0x52 */
  1947. starpu_tag_t tag_array[2] = @{0x32, 0x52@};
  1948. starpu_tag_declare_deps_array((starpu_tag_t)0x1, 2, tag_array);
  1949. @end smallexample
  1950. @end cartouche
  1951. @end deftypefun
  1952. @deftypefun int starpu_tag_wait (starpu_tag_t @var{id})
  1953. This function blocks until the task associated to tag @var{id} has been
  1954. executed. This is a blocking call which must therefore not be called within
  1955. tasks or callbacks, but only from the application directly. It is possible to
  1956. synchronize with the same tag multiple times, as long as the
  1957. @code{starpu_tag_remove} function is not called. Note that it is still
possible to synchronize with a tag associated with a task whose @code{starpu_task}
  1959. data structure was freed (e.g. if the @code{destroy} flag of the
  1960. @code{starpu_task} was enabled).
  1961. @end deftypefun
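A typical use, sketched below, associates a tag with a task at submission time
and waits for it later on; the codelet @code{cl} is assumed to exist.
@cartouche
@smallexample
struct starpu_task *task = starpu_task_create();

task->cl = &cl;          /* hypothetical codelet */
task->use_tag = 1;
task->tag_id = (starpu_tag_t)0x42;

starpu_task_submit(task);
/* ... submit other work ... */
starpu_tag_wait((starpu_tag_t)0x42);
@end smallexample
@end cartouche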
  1962. @deftypefun int starpu_tag_wait_array (unsigned @var{ntags}, starpu_tag_t *@var{id})
  1963. This function is similar to @code{starpu_tag_wait} except that it blocks until
  1964. @emph{all} the @var{ntags} tags contained in the @var{id} array are
  1965. terminated.
  1966. @end deftypefun
  1967. @deftypefun void starpu_tag_restart (starpu_tag_t @var{id})
  1968. This function can be used to clear the "already notified" status
  1969. of a tag which is not associated with a task. Before that, calling
  1970. @code{starpu_tag_notify_from_apps} again will not notify the successors. After
  1971. that, the next call to @code{starpu_tag_notify_from_apps} will notify the
  1972. successors.
  1973. @end deftypefun
  1974. @deftypefun void starpu_tag_remove (starpu_tag_t @var{id})
  1975. This function releases the resources associated to tag @var{id}. It can be
  1976. called once the corresponding task has been executed and when there is
no other tag that depends on this tag anymore.
  1978. @end deftypefun
  1979. @deftypefun void starpu_tag_notify_from_apps (starpu_tag_t @var{id})
  1980. This function explicitly unlocks tag @var{id}. It may be useful in the
  1981. case of applications which execute part of their computation outside StarPU
  1982. tasks (e.g. third-party libraries). It is also provided as a
  1983. convenient tool for the programmer, for instance to entirely construct the task
  1984. DAG before actually giving StarPU the opportunity to execute the tasks. When
called several times on the same tag, notification will be done only on the first
  1986. call, thus implementing "OR" dependencies, until the tag is restarted using
  1987. @code{starpu_tag_restart}.
  1988. @end deftypefun
  1989. @node Implicit Data Dependencies
  1990. @section Implicit Data Dependencies
  1991. In this section, we describe how StarPU makes it possible to insert implicit
  1992. task dependencies in order to enforce sequential data consistency. When this
  1993. data consistency is enabled on a specific data handle, any data access will
  1994. appear as sequentially consistent from the application. For instance, if the
  1995. application submits two tasks that access the same piece of data in read-only
mode, and then a third task that accesses it in write mode, dependencies will be
added between the first two tasks and the third one. Implicit data dependencies
  1998. are also inserted in the case of data accesses from the application.
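The scenario above can be sketched with @code{starpu_insert_task};
@code{read_cl} and @code{write_cl} are hypothetical codelets and @code{handle}
a registered data handle with sequential consistency enabled.
@cartouche
@smallexample
/* The two readers may run concurrently. */
starpu_insert_task(&read_cl, STARPU_R, handle, 0);
starpu_insert_task(&read_cl, STARPU_R, handle, 0);
/* Implicit dependency: starts only after both readers complete. */
starpu_insert_task(&write_cl, STARPU_W, handle, 0);
@end smallexample
@end cartouche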
  1999. @deftypefun void starpu_data_set_default_sequential_consistency_flag (unsigned @var{flag})
  2000. Set the default sequential consistency flag. If a non-zero value is passed, a
  2001. sequential data consistency will be enforced for all handles registered after
  2002. this function call, otherwise it is disabled. By default, StarPU enables
  2003. sequential data consistency. It is also possible to select the data consistency
  2004. mode of a specific data handle with the
  2005. @code{starpu_data_set_sequential_consistency_flag} function.
  2006. @end deftypefun
  2007. @deftypefun unsigned starpu_data_get_default_sequential_consistency_flag (void)
  2008. Return the default sequential consistency flag
  2009. @end deftypefun
  2010. @deftypefun void starpu_data_set_sequential_consistency_flag (starpu_data_handle_t @var{handle}, unsigned @var{flag})
Sets the data consistency mode associated with a data handle. The consistency
mode set using this function has priority over the default mode, which can
  2013. be set with @code{starpu_data_set_default_sequential_consistency_flag}.
  2014. @end deftypefun
  2015. @node Performance Model API
  2016. @section Performance Model API
  2017. @deftp {Data Type} {enum starpu_perf_archtype}
  2018. Enumerates the various types of architectures.
  2019. CPU types range within STARPU_CPU_DEFAULT (1 CPU), STARPU_CPU_DEFAULT+1 (2 CPUs), ... STARPU_CPU_DEFAULT + STARPU_MAXCPUS - 1 (STARPU_MAXCPUS CPUs).
  2020. CUDA types range within STARPU_CUDA_DEFAULT (GPU number 0), STARPU_CUDA_DEFAULT + 1 (GPU number 1), ..., STARPU_CUDA_DEFAULT + STARPU_MAXCUDADEVS - 1 (GPU number STARPU_MAXCUDADEVS - 1).
  2021. OpenCL types range within STARPU_OPENCL_DEFAULT (GPU number 0), STARPU_OPENCL_DEFAULT + 1 (GPU number 1), ..., STARPU_OPENCL_DEFAULT + STARPU_MAXOPENCLDEVS - 1 (GPU number STARPU_MAXOPENCLDEVS - 1).
  2022. @table @asis
  2023. @item @code{STARPU_CPU_DEFAULT}
  2024. @item @code{STARPU_CUDA_DEFAULT}
  2025. @item @code{STARPU_OPENCL_DEFAULT}
  2026. @end table
  2027. @end deftp
  2028. @deftp {Data Type} {enum starpu_perfmodel_type}
  2029. The possible values are:
  2030. @table @asis
  2031. @item @code{STARPU_PER_ARCH} for application-provided per-arch cost model functions.
  2032. @item @code{STARPU_COMMON} for application-provided common cost model function, with per-arch factor.
  2033. @item @code{STARPU_HISTORY_BASED} for automatic history-based cost model.
  2034. @item @code{STARPU_REGRESSION_BASED} for automatic linear regression-based cost model (alpha * size ^ beta).
@item @code{STARPU_NL_REGRESSION_BASED} for automatic non-linear regression-based cost model (a * size ^ b + c).
  2036. @end table
  2037. @end deftp
  2038. @deftp {Data Type} {struct starpu_perfmodel}
  2039. @anchor{struct starpu_perfmodel}
  2040. contains all information about a performance model. At least the
  2041. @code{type} and @code{symbol} fields have to be filled when defining a
performance model for a codelet. For compatibility, make sure to initialize
the whole structure to zero, either by using an explicit @code{memset}, or by
letting the compiler implicitly do it (as in the static storage case).
Fields which are not provided have to be left to zero.
  2046. @table @asis
  2047. @item @code{type}
  2048. is the type of performance model @code{enum starpu_perfmodel_type}:
  2049. @code{STARPU_HISTORY_BASED},
@code{STARPU_REGRESSION_BASED}, @code{STARPU_NL_REGRESSION_BASED}: no
other fields need to be provided, the model is built automatically from
execution measurements. @code{STARPU_PER_ARCH}:
  2052. @code{per_arch} has to be filled with functions which return the cost in
  2053. micro-seconds. @code{STARPU_COMMON}: @code{cost_function} has to be filled with
  2054. a function that returns the cost in micro-seconds on a CPU, timing on other
  2055. archs will be determined by multiplying by an arch-specific factor.
  2056. @item @code{const char *symbol}
  2057. is the symbol name for the performance model, which will be used as
  2058. file name to store the model. It must be set otherwise the model will
  2059. be ignored.
  2060. @item @code{double (*cost_model)(struct starpu_buffer_descr *)}
This field is deprecated. Use the @code{cost_function} field instead.
  2062. @item @code{double (*cost_function)(struct starpu_task *, unsigned nimpl)}
  2063. Used by @code{STARPU_COMMON}: takes a task and
  2064. implementation number, and must return a task duration estimation in micro-seconds.
  2065. @item @code{size_t (*size_base)(struct starpu_task *, unsigned nimpl)}
  2066. Used by @code{STARPU_HISTORY_BASED} and
  2067. @code{STARPU_*REGRESSION_BASED}. If not NULL, takes a task and
  2068. implementation number, and returns the size to be used as index for
  2069. history and regression.
  2070. @item @code{struct starpu_perfmodel_per_arch per_arch[STARPU_NARCH_VARIATIONS][STARPU_MAXIMPLEMENTATIONS]}
Used by @code{STARPU_PER_ARCH}: array of @code{struct
starpu_perfmodel_per_arch} structures.
  2073. @item @code{unsigned is_loaded}
  2074. Whether the performance model is already loaded from the disk.
  2075. @item @code{unsigned benchmarking}
  2076. Whether the performance model is still being calibrated.
  2077. @item @code{pthread_rwlock_t model_rwlock}
  2078. Lock to protect concurrency between loading from disk (W), updating the values
  2079. (W), and making a performance estimation (R).
  2080. @end table
  2081. @end deftp
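As a sketch, a history-based model can be declared with only these two fields
and attached to a codelet; the names used below are hypothetical.
@cartouche
@smallexample
static struct starpu_perfmodel my_perfmodel =
@{
        .type = STARPU_HISTORY_BASED,
        .symbol = "my_kernel"  /* used as the file name to store the model */
@};

static struct starpu_codelet cl =
@{
        /* ... implementations, nbuffers, modes ... */
        .model = &my_perfmodel
@};
@end smallexample
@end cartouche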
  2082. @deftp {Data Type} {struct starpu_perfmodel_regression_model}
  2083. @table @asis
  2084. @item @code{double sumlny} sum of ln(measured)
  2085. @item @code{double sumlnx} sum of ln(size)
  2086. @item @code{double sumlnx2} sum of ln(size)^2
  2087. @item @code{unsigned long minx} minimum size
  2088. @item @code{unsigned long maxx} maximum size
  2089. @item @code{double sumlnxlny} sum of ln(size)*ln(measured)
  2090. @item @code{double alpha} estimated = alpha * size ^ beta
  2091. @item @code{double beta}
  2092. @item @code{unsigned valid} whether the linear regression model is valid (i.e. enough measures)
@item @code{double a, b, c} estimated = a * size ^ b + c
  2094. @item @code{unsigned nl_valid} whether the non-linear regression model is valid (i.e. enough measures)
  2095. @item @code{unsigned nsample} number of sample values for non-linear regression
  2096. @end table
  2097. @end deftp
  2098. @deftp {Data Type} {struct starpu_perfmodel_per_arch}
  2099. contains information about the performance model of a given arch.
  2100. @table @asis
  2101. @item @code{double (*cost_model)(struct starpu_buffer_descr *t)}
This field is deprecated. Use the @code{cost_function} field instead.
  2103. @item @code{double (*cost_function)(struct starpu_task *task, enum starpu_perf_archtype arch, unsigned nimpl)}
  2104. Used by @code{STARPU_PER_ARCH}, must point to functions which take a task, the
target arch and implementation number (as a mere convenience, since the array
  2106. is already indexed by these), and must return a task duration estimation in
  2107. micro-seconds.
  2108. @item @code{size_t (*size_base)(struct starpu_task *, enum
  2109. starpu_perf_archtype arch, unsigned nimpl)}
  2110. Same as in @ref{struct starpu_perfmodel}, but per-arch, in
  2111. case it depends on the architecture-specific implementation.
  2112. @item @code{struct starpu_htbl32_node *history}
  2113. The history of performance measurements.
  2114. @item @code{struct starpu_perfmodel_history_list *list}
  2115. Used by @code{STARPU_HISTORY_BASED} and @code{STARPU_NL_REGRESSION_BASED},
  2116. records all execution history measures.
  2117. @item @code{struct starpu_perfmodel_regression_model regression}
Used by @code{STARPU_REGRESSION_BASED} and
  2119. @code{STARPU_NL_REGRESSION_BASED}, contains the estimated factors of the
  2120. regression.
  2121. @end table
  2122. @end deftp
  2123. @deftypefun int starpu_perfmodel_load_symbol ({const char} *@var{symbol}, {struct starpu_perfmodel} *@var{model})
  2124. loads a given performance model. The @var{model} structure has to be completely zero, and will be filled with the information saved in @code{$STARPU_HOME/.starpu}.
  2125. @end deftypefun
  2126. @deftypefun int starpu_perfmodel_unload_model ({struct starpu_perfmodel} *@var{model})
  2127. unloads the given model which has been previously loaded through the function @code{starpu_perfmodel_load_symbol}
  2128. @end deftypefun
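A sketch of loading and unloading a model by its symbol; the @code{"my_kernel"}
symbol is hypothetical.
@cartouche
@smallexample
struct starpu_perfmodel model;

memset(&model, 0, sizeof(model));  /* the structure has to be zero */
if (starpu_perfmodel_load_symbol("my_kernel", &model) == 0)
@{
        /* ... inspect the loaded per-arch data ... */
        starpu_perfmodel_unload_model(&model);
@}
@end smallexample
@end cartouche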
@deftypefun void starpu_perfmodel_debugfilepath ({struct starpu_perfmodel} *@var{model}, {enum starpu_perf_archtype} @var{arch}, char *@var{path}, size_t @var{maxlen}, unsigned @var{nimpl})
  2130. returns the path to the debugging information for the performance model.
  2131. @end deftypefun
@deftypefun void starpu_perfmodel_get_arch_name ({enum starpu_perf_archtype} @var{arch}, char *@var{archname}, size_t @var{maxlen}, unsigned @var{nimpl})
  2133. returns the architecture name for @var{arch}.
  2134. @end deftypefun
  2135. @deftypefun {enum starpu_perf_archtype} starpu_worker_get_perf_archtype (int @var{workerid})
  2136. returns the architecture type of a given worker.
  2137. @end deftypefun
  2138. @deftypefun int starpu_perfmodel_list ({FILE *}@var{output})
  2139. prints a list of all performance models on @var{output}.
  2140. @end deftypefun
@deftypefun void starpu_perfmodel_print ({struct starpu_perfmodel *}@var{model}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl}, {char *}@var{parameter}, {uint32_t *}@var{footprint}, {FILE *}@var{output})
  2142. todo
  2143. @end deftypefun
@deftypefun int starpu_perfmodel_print_all ({struct starpu_perfmodel *}@var{model}, {char *}@var{arch}, {char *}@var{parameter}, {uint32_t *}@var{footprint}, {FILE *}@var{output})
  2145. todo
  2146. @end deftypefun
  2147. @deftypefun void starpu_bus_print_bandwidth ({FILE *}@var{f})
  2148. prints a matrix of bus bandwidths on @var{f}.
  2149. @end deftypefun
  2150. @deftypefun void starpu_bus_print_affinity ({FILE *}@var{f})
  2151. prints the affinity devices on @var{f}.
  2152. @end deftypefun
  2153. @deftypefun void starpu_topology_print ({FILE *}@var{f})
  2154. prints a description of the topology on @var{f}.
  2155. @end deftypefun
  2156. @deftypefun void starpu_perfmodel_update_history ({struct starpu_perfmodel *}@var{model}, {struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{cpuid}, unsigned @var{nimpl}, double @var{measured});
  2157. This feeds the performance model @var{model} with an explicit measurement
  2158. @var{measured}, in addition to measurements done by StarPU itself. This can be
  2159. useful when the application already has an existing set of measurements done
  2160. in good conditions, that StarPU could benefit from instead of doing on-line
measurements. An example of use can be seen in @ref{Performance model example}.
  2162. @end deftypefun
  2163. @node Profiling API
  2164. @section Profiling API
  2165. @deftypefun int starpu_profiling_status_set (int @var{status})
This function sets the profiling status. Profiling is activated by passing
  2167. @code{STARPU_PROFILING_ENABLE} in @var{status}. Passing
  2168. @code{STARPU_PROFILING_DISABLE} disables profiling. Calling this function
  2169. resets all profiling measurements. When profiling is enabled, the
  2170. @code{profiling_info} field of the @code{struct starpu_task} structure points
  2171. to a valid @code{struct starpu_task_profiling_info} structure containing
  2172. information about the execution of the task.
  2173. Negative return values indicate an error, otherwise the previous status is
  2174. returned.
  2175. @end deftypefun
  2176. @deftypefun int starpu_profiling_status_get (void)
  2177. Return the current profiling status or a negative value in case there was an error.
  2178. @end deftypefun
  2179. @deftypefun void starpu_set_profiling_id (int @var{new_id})
This function sets the ID used for the profiling trace filename. It needs to
be called before @code{starpu_init}.
  2182. @end deftypefun
  2183. @deftp {Data Type} {struct starpu_task_profiling_info}
  2184. This structure contains information about the execution of a task. It is
  2185. accessible from the @code{.profiling_info} field of the @code{starpu_task}
  2186. structure if profiling was enabled. The different fields are:
  2187. @table @asis
  2188. @item @code{struct timespec submit_time}
  2189. Date of task submission (relative to the initialization of StarPU).
  2190. @item @code{struct timespec push_start_time}
  2191. Time when the task was submitted to the scheduler.
  2192. @item @code{struct timespec push_end_time}
  2193. Time when the scheduler finished with the task submission.
  2194. @item @code{struct timespec pop_start_time}
  2195. Time when the scheduler started to be requested for a task, and eventually gave
  2196. that task.
  2197. @item @code{struct timespec pop_end_time}
  2198. Time when the scheduler finished providing the task for execution.
  2199. @item @code{struct timespec acquire_data_start_time}
  2200. Time when the worker started fetching input data.
  2201. @item @code{struct timespec acquire_data_end_time}
  2202. Time when the worker finished fetching input data.
  2203. @item @code{struct timespec start_time}
  2204. Date of task execution beginning (relative to the initialization of StarPU).
  2205. @item @code{struct timespec end_time}
  2206. Date of task execution termination (relative to the initialization of StarPU).
  2207. @item @code{struct timespec release_data_start_time}
  2208. Time when the worker started releasing data.
  2209. @item @code{struct timespec release_data_end_time}
  2210. Time when the worker finished releasing data.
  2211. @item @code{struct timespec callback_start_time}
  2212. Time when the worker started the application callback for the task.
  2213. @item @code{struct timespec callback_end_time}
  2214. Time when the worker finished the application callback for the task.
  2215. @item @code{workerid}
  2216. Identifier of the worker which has executed the task.
  2217. @item @code{uint64_t used_cycles}
  2218. Number of cycles used by the task, only available in the MoviSim
  2219. @item @code{uint64_t stall_cycles}
  2220. Number of cycles stalled within the task, only available in the MoviSim
  2221. @item @code{double power_consumed}
  2222. Power consumed by the task, only available in the MoviSim
  2223. @end table
  2224. @end deftp
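For instance, assuming profiling was enabled with
@code{starpu_profiling_status_set}, the task duration can be computed from
these timestamps, e.g. in the task callback, using
@code{starpu_timing_timespec_delay_us} (documented below):
@cartouche
@smallexample
struct starpu_task_profiling_info *info = task->profiling_info;

if (info)
@{
        double length_us = starpu_timing_timespec_delay_us(&info->start_time,
                                                           &info->end_time);
        fprintf(stderr, "task ran for %.2f us on worker %d\n",
                length_us, info->workerid);
@}
@end smallexample
@end cartouche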
  2225. @deftp {Data Type} {struct starpu_worker_profiling_info}
  2226. This structure contains the profiling information associated to a
  2227. worker. The different fields are:
  2228. @table @asis
  2229. @item @code{struct timespec start_time}
  2230. Starting date for the reported profiling measurements.
  2231. @item @code{struct timespec total_time}
  2232. Duration of the profiling measurement interval.
  2233. @item @code{struct timespec executing_time}
  2234. Time spent by the worker to execute tasks during the profiling measurement interval.
  2235. @item @code{struct timespec sleeping_time}
  2236. Time spent idling by the worker during the profiling measurement interval.
  2237. @item @code{int executed_tasks}
  2238. Number of tasks executed by the worker during the profiling measurement interval.
  2239. @item @code{uint64_t used_cycles}
  2240. Number of cycles used by the worker, only available in the MoviSim
  2241. @item @code{uint64_t stall_cycles}
  2242. Number of cycles stalled within the worker, only available in the MoviSim
  2243. @item @code{double power_consumed}
  2244. Power consumed by the worker, only available in the MoviSim
  2245. @end table
  2246. @end deftp
  2247. @deftypefun int starpu_worker_get_profiling_info (int @var{workerid}, {struct starpu_worker_profiling_info *}@var{worker_info})
  2248. Get the profiling info associated to the worker identified by @var{workerid},
  2249. and reset the profiling measurements. If the @var{worker_info} argument is
  2250. NULL, only reset the counters associated to worker @var{workerid}.
  2251. Upon successful completion, this function returns 0. Otherwise, a negative
  2252. value is returned.
  2253. @end deftypefun
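A sketch of computing the activity ratio of a given worker; @var{workerid} is
assumed to be a valid worker identifier, and
@code{starpu_timing_timespec_to_us} is documented below.
@cartouche
@smallexample
struct starpu_worker_profiling_info info;

if (starpu_worker_get_profiling_info(workerid, &info) == 0)
@{
        double total = starpu_timing_timespec_to_us(&info.total_time);
        double exec  = starpu_timing_timespec_to_us(&info.executing_time);
        fprintf(stderr, "worker %d was executing tasks %.1f%% of the time\n",
                workerid, 100.0 * exec / total);
@}
@end smallexample
@end cartouche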
  2254. @deftp {Data Type} {struct starpu_bus_profiling_info}
  2255. The different fields are:
  2256. @table @asis
  2257. @item @code{struct timespec start_time}
  2258. Time of bus profiling startup.
  2259. @item @code{struct timespec total_time}
  2260. Total time of bus profiling.
  2261. @item @code{int long long transferred_bytes}
  2262. Number of bytes transferred during profiling.
  2263. @item @code{int transfer_count}
  2264. Number of transfers during profiling.
  2265. @end table
  2266. @end deftp
  2267. @deftypefun int starpu_bus_get_profiling_info (int @var{busid}, {struct starpu_bus_profiling_info *}@var{bus_info})
Get the profiling info associated with the bus designated by @var{busid}, and
reset the profiling measurements. If @var{bus_info} is NULL, only reset the
counters.
  2271. @end deftypefun
  2272. @deftypefun int starpu_bus_get_count (void)
  2273. Return the number of buses in the machine.
  2274. @end deftypefun
  2275. @deftypefun int starpu_bus_get_id (int @var{src}, int @var{dst})
  2276. Return the identifier of the bus between @var{src} and @var{dst}
  2277. @end deftypefun
  2278. @deftypefun int starpu_bus_get_src (int @var{busid})
  2279. Return the source point of bus @var{busid}
  2280. @end deftypefun
  2281. @deftypefun int starpu_bus_get_dst (int @var{busid})
  2282. Return the destination point of bus @var{busid}
  2283. @end deftypefun
  2284. @deftypefun double starpu_timing_timespec_delay_us ({struct timespec} *@var{start}, {struct timespec} *@var{end})
  2285. Returns the time elapsed between @var{start} and @var{end} in microseconds.
  2286. @end deftypefun
  2287. @deftypefun double starpu_timing_timespec_to_us ({struct timespec} *@var{ts})
  2288. Converts the given timespec @var{ts} into microseconds.
  2289. @end deftypefun
  2290. @deftypefun void starpu_bus_profiling_helper_display_summary (void)
Displays statistics about the bus on @code{stderr} if the environment
  2292. variable @code{STARPU_BUS_STATS} is defined. The function is called
  2293. automatically by @code{starpu_shutdown()}.
  2294. @end deftypefun
  2295. @deftypefun void starpu_worker_profiling_helper_display_summary (void)
  2296. Displays statistics about the workers on stderr if the environment
  2297. variable @code{STARPU_WORKER_STATS} is defined. The function is called
  2298. automatically by @code{starpu_shutdown()}.
  2299. @end deftypefun
  2300. @deftypefun void starpu_memory_display_stats ()
  2301. Display statistics about the current data handles registered within
  2302. StarPU. StarPU must have been configured with the option
@code{--enable-memory-stats} (@pxref{Memory feedback}).
  2304. @end deftypefun
  2305. @node Theoretical lower bound on execution time API
  2306. @section Theoretical lower bound on execution time
  2307. @deftypefun void starpu_bound_start (int @var{deps}, int @var{prio})
  2308. Start recording tasks (resets stats). @var{deps} tells whether
  2309. dependencies should be recorded too (this is quite expensive)
  2310. @end deftypefun
  2311. @deftypefun void starpu_bound_stop (void)
  2312. Stop recording tasks
  2313. @end deftypefun
  2314. @deftypefun void starpu_bound_print_dot ({FILE *}@var{output})
  2315. Print the DAG that was recorded
  2316. @end deftypefun
  2317. @deftypefun void starpu_bound_compute ({double *}@var{res}, {double *}@var{integer_res}, int @var{integer})
  2318. Get theoretical upper bound (in ms) (needs glpk support detected by @code{configure} script). It returns 0 if some performance models are not calibrated.
  2319. @end deftypefun
  2320. @deftypefun void starpu_bound_print_lp ({FILE *}@var{output})
  2321. Emit the Linear Programming system on @var{output} for the recorded tasks, in
  2322. the lp format
  2323. @end deftypefun
  2324. @deftypefun void starpu_bound_print_mps ({FILE *}@var{output})
  2325. Emit the Linear Programming system on @var{output} for the recorded tasks, in
  2326. the mps format
  2327. @end deftypefun
  2328. @deftypefun void starpu_bound_print ({FILE *}@var{output}, int @var{integer})
  2329. Emit statistics of actual execution vs theoretical upper bound. @var{integer}
permits choosing between integer solving (which takes a long time but is
  2331. correct), and relaxed solving (which provides an approximate solution).
  2332. @end deftypefun
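A typical session, sketched below, brackets the submission of the tasks of
interest with @code{starpu_bound_start} and @code{starpu_bound_stop}, then
computes and prints the bound:
@cartouche
@smallexample
starpu_bound_start(0, 0);      /* do not record dependencies (cheaper) */
/* ... submit the tasks to be analyzed ... */
starpu_task_wait_for_all();
starpu_bound_stop();

double res, integer_res;
starpu_bound_compute(&res, &integer_res, 0);  /* relaxed solving */
starpu_bound_print(stderr, 0);
@end smallexample
@end cartouche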
  2333. @node CUDA extensions
  2334. @section CUDA extensions
  2335. @defmac STARPU_USE_CUDA
  2336. This macro is defined when StarPU has been installed with CUDA
  2337. support. It should be used in your code to detect the availability of
  2338. CUDA as shown in @ref{Full source code for the 'Scaling a Vector' example}.
  2339. @end defmac
  2340. @deftypefun cudaStream_t starpu_cuda_get_local_stream (void)
  2341. This function gets the current worker's CUDA stream.
  2342. StarPU provides a stream for every CUDA device controlled by StarPU. This
  2343. function is only provided for convenience so that programmers can easily use
  2344. asynchronous operations within codelets without having to create a stream by
  2345. hand. Note that the application is not forced to use the stream provided by
  2346. @code{starpu_cuda_get_local_stream} and may also create its own streams.
  2347. Synchronizing with @code{cudaThreadSynchronize()} is allowed, but will reduce
  2348. the likelihood of having all transfers overlapped.
  2349. @end deftypefun
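For instance, a CUDA implementation of a codelet (the hypothetical
@code{scal_cuda_func} below) would typically enqueue its work on this stream
and synchronize with it before returning:
@cartouche
@smallexample
void scal_cuda_func(void *buffers[], void *cl_arg)
@{
        cudaStream_t stream = starpu_cuda_get_local_stream();

        /* ... launch the kernel or asynchronous copies on "stream" ... */

        cudaStreamSynchronize(stream);
@}
@end smallexample
@end cartouche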
  2350. @deftypefun {const struct cudaDeviceProp *} starpu_cuda_get_device_properties (unsigned @var{workerid})
  2351. This function returns a pointer to device properties for worker @var{workerid}
  2352. (assumed to be a CUDA worker).
  2353. @end deftypefun
  2354. @deftypefun void starpu_cuda_report_error ({const char *}@var{func}, {const char *}@var{file}, int @var{line}, cudaError_t @var{status})
  2355. Report a CUDA error.
  2356. @end deftypefun
  2357. @defmac STARPU_CUDA_REPORT_ERROR (cudaError_t @var{status})
  2358. Calls starpu_cuda_report_error, passing the current function, file and line
  2359. position.
  2360. @end defmac
  2361. @deftypefun int starpu_cuda_copy_async_sync ({void *}@var{src_ptr}, unsigned @var{src_node}, {void *}@var{dst_ptr}, unsigned @var{dst_node}, size_t @var{ssize}, cudaStream_t @var{stream}, {enum cudaMemcpyKind} @var{kind})
  2362. Copy @var{ssize} bytes from the pointer @var{src_ptr} on
  2363. @var{src_node} to the pointer @var{dst_ptr} on @var{dst_node}.
The function first tries to copy the data asynchronously (unless
@var{stream} is @code{NULL}). If the asynchronous copy fails or if
@var{stream} is @code{NULL}, it copies the data synchronously.
The function returns @code{-EAGAIN} if the asynchronous launch was
successful. It returns 0 if the synchronous copy was successful, and
fails otherwise.
  2370. @end deftypefun
  2371. @deftypefun void starpu_cuda_set_device (unsigned @var{devid})
  2372. Calls @code{cudaSetDevice(devid)} or @code{cudaGLSetGLDevice(devid)}, according to
whether @code{devid} appears in the @code{cuda_opengl_interoperability} field of
  2374. the @code{starpu_conf} structure.
  2375. @end deftypefun
  2376. @deftypefun void starpu_cublas_init (void)
  2377. This function initializes CUBLAS on every CUDA device.
  2378. The CUBLAS library must be initialized prior to any CUBLAS call. Calling
  2379. @code{starpu_cublas_init} will initialize CUBLAS on every CUDA device
  2380. controlled by StarPU. This call blocks until CUBLAS has been properly
  2381. initialized on every device.
  2382. @end deftypefun
  2383. @deftypefun void starpu_cublas_shutdown (void)
  2384. This function synchronously deinitializes the CUBLAS library on every CUDA device.
  2385. @end deftypefun
  2386. @deftypefun void starpu_cublas_report_error ({const char *}@var{func}, {const char *}@var{file}, int @var{line}, cublasStatus @var{status})
  2387. Report a cublas error.
  2388. @end deftypefun
  2389. @defmac STARPU_CUBLAS_REPORT_ERROR (cublasStatus @var{status})
  2390. Calls starpu_cublas_report_error, passing the current function, file and line
  2391. position.
  2392. @end defmac
  2393. @node OpenCL extensions
  2394. @section OpenCL extensions
  2395. @menu
  2396. * Writing OpenCL kernels:: Writing OpenCL kernels
  2397. * Compiling OpenCL kernels:: Compiling OpenCL kernels
  2398. * Loading OpenCL kernels:: Loading OpenCL kernels
  2399. * OpenCL statistics:: Collecting statistics from OpenCL
  2400. * OpenCL utilities:: Utilities for OpenCL
  2401. @end menu
  2402. @defmac STARPU_USE_OPENCL
  2403. This macro is defined when StarPU has been installed with OpenCL
  2404. support. It should be used in your code to detect the availability of
  2405. OpenCL as shown in @ref{Full source code for the 'Scaling a Vector' example}.
  2406. @end defmac
  2407. @node Writing OpenCL kernels
  2408. @subsection Writing OpenCL kernels
  2409. @deftypefun void starpu_opencl_get_context (int @var{devid}, {cl_context *}@var{context})
  2410. Places the OpenCL context of the device designated by @var{devid} into @var{context}.
  2411. @end deftypefun
  2412. @deftypefun void starpu_opencl_get_device (int @var{devid}, {cl_device_id *}@var{device})
  2413. Places the cl_device_id corresponding to @var{devid} in @var{device}.
  2414. @end deftypefun
  2415. @deftypefun void starpu_opencl_get_queue (int @var{devid}, {cl_command_queue *}@var{queue})
Places the command queue of the device designated by @var{devid} into @var{queue}.
  2417. @end deftypefun
  2418. @deftypefun void starpu_opencl_get_current_context ({cl_context *}@var{context})
  2419. Return the context of the current worker.
  2420. @end deftypefun
  2421. @deftypefun void starpu_opencl_get_current_queue ({cl_command_queue *}@var{queue})
  2422. Return the computation kernel command queue of the current worker.
  2423. @end deftypefun
  2424. @deftypefun int starpu_opencl_set_kernel_args ({cl_int *}@var{err}, {cl_kernel *}@var{kernel}, ...)
  2425. Sets the arguments of a given kernel. The list of arguments must be given as
  2426. (size_t @var{size_of_the_argument}, cl_mem * @var{pointer_to_the_argument}).
The last argument must be 0. In case of failure, this function returns the id
of the argument that could not be set, and @var{err} is set to the error
returned by OpenCL. Otherwise, it returns the number of arguments that were
successfully set.
  2431. @cartouche
  2432. @smallexample
  2433. int n;
  2434. cl_int err;
  2435. cl_kernel kernel;
n = starpu_opencl_set_kernel_args(&err, &kernel,
                                  sizeof(foo), &foo,
                                  sizeof(bar), &bar,
                                  0);
if (n != 2)
        fprintf(stderr, "Error : %d\n", err);
  2442. @end smallexample
  2443. @end cartouche
  2444. @end deftypefun
  2445. @node Compiling OpenCL kernels
  2446. @subsection Compiling OpenCL kernels
  2447. Source codes for OpenCL kernels can be stored in a file or in a
  2448. string. StarPU provides functions to build the program executable for
  2449. each available OpenCL device as a @code{cl_program} object. This
  2450. program executable can then be loaded within a specific queue as
explained in the next section. These are only helpers; applications
can also fill a @code{starpu_opencl_program} array by hand for more advanced
uses (e.g. different programs on the different OpenCL devices, for
relocation purposes for instance).
  2455. @deftp {Data Type} {struct starpu_opencl_program}
  2456. Stores the OpenCL programs as compiled for the different OpenCL
  2457. devices. The different fields are:
  2458. @table @asis
  2459. @item @code{cl_program programs[STARPU_MAXOPENCLDEVS]}
  2460. Stores each program for each OpenCL device.
  2461. @end table
  2462. @end deftp
  2463. @deftypefun int starpu_opencl_load_opencl_from_file ({const char} *@var{source_file_name}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
  2464. @anchor{starpu_opencl_load_opencl_from_file}
  2465. This function compiles an OpenCL source code stored in a file.
  2466. @end deftypefun
  2467. @deftypefun int starpu_opencl_load_opencl_from_string ({const char} *@var{opencl_program_source}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char}* @var{build_options})
  2468. This function compiles an OpenCL source code stored in a string.
  2469. @end deftypefun
  2470. @deftypefun int starpu_opencl_unload_opencl ({struct starpu_opencl_program} *@var{opencl_programs})
  2471. This function unloads an OpenCL compiled code.
  2472. @end deftypefun
  2473. @deftypefun void starpu_opencl_load_program_source ({const char *}@var{source_file_name}, char *@var{located_file_name}, char *@var{located_dir_name}, char *@var{opencl_program_source})
  2474. @anchor{starpu_opencl_load_program_source}
  2475. Store the contents of the file @var{source_file_name} in the buffer
  2476. @var{opencl_program_source}. The file @var{source_file_name} can be
  2477. located in the current directory, or in the directory specified by the
  2478. environment variable @code{STARPU_OPENCL_PROGRAM_DIR} (@pxref{STARPU_OPENCL_PROGRAM_DIR}), or in the
  2479. directory @code{share/starpu/opencl} of the installation directory of
  2480. StarPU, or in the source directory of StarPU.
  2481. When the file is found, @code{located_file_name} is the full name of
  2482. the file as it has been located on the system, @code{located_dir_name}
  2483. the directory where it has been located. Otherwise, they are both set
  2484. to the empty string.
  2485. @end deftypefun
@deftypefun int starpu_opencl_compile_opencl_from_file ({const char *}@var{source_file_name}, {const char *} @var{build_options})
Compile the OpenCL kernel stored in the file @code{source_file_name}
with the given options @code{build_options} and store the result in
the directory @code{$STARPU_HOME/.starpu/opencl} with the same
filename as @code{source_file_name}. The compilation is done for every
OpenCL device, and the filename is suffixed with the vendor id and the
device id of the OpenCL device.
@end deftypefun
@deftypefun int starpu_opencl_compile_opencl_from_string ({const char *}@var{opencl_program_source}, {const char *}@var{file_name}, {const char* }@var{build_options})
Compile the OpenCL kernel in the string @code{opencl_program_source}
with the given options @code{build_options} and store the result in
the directory @code{$STARPU_HOME/.starpu/opencl} with the filename
@code{file_name}. The compilation is done for every
OpenCL device, and the filename is suffixed with the vendor id and the
device id of the OpenCL device.
@end deftypefun
@deftypefun int starpu_opencl_load_binary_opencl ({const char *}@var{kernel_id}, {struct starpu_opencl_program *}@var{opencl_programs})
Load the binary OpenCL kernel identified by @var{kernel_id}. For every
OpenCL device, the binary OpenCL kernel will be loaded from the file
@code{$STARPU_HOME/.starpu/opencl/<kernel_id>.<device_type>.vendor_id_<vendor_id>_device_id_<device_id>}.
@end deftypefun
@node Loading OpenCL kernels
@subsection Loading OpenCL kernels
@deftypefun int starpu_opencl_load_kernel (cl_kernel *@var{kernel}, cl_command_queue *@var{queue}, {struct starpu_opencl_program} *@var{opencl_programs}, {const char} *@var{kernel_name}, int @var{devid})
Create a kernel @var{kernel} for device @var{devid}, on its computation command
queue returned in @var{queue}, using program @var{opencl_programs} and name
@var{kernel_name}.
@end deftypefun
@deftypefun int starpu_opencl_release_kernel (cl_kernel @var{kernel})
Release the given @var{kernel}, to be called after kernel execution.
@end deftypefun
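As an illustration, here is a sketch of the OpenCL implementation of a codelet
working on a vector handle. It assumes that a global @code{struct
starpu_opencl_program programs} has been built beforehand and that it contains
a (hypothetical) kernel named @code{vector_scal_opencl}.
@smallexample
void scal_opencl_func(void *buffers[], void *cl_arg)
@{
  float *factor = cl_arg;
  unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
  cl_mem val = (cl_mem) STARPU_VECTOR_GET_DEV_HANDLE(buffers[0]);

  int devid = starpu_worker_get_devid(starpu_worker_get_id());

  cl_kernel kernel;
  cl_command_queue queue;
  cl_event event;
  int err = starpu_opencl_load_kernel(&kernel, &queue, &programs,
                                      "vector_scal_opencl", devid);
  if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

  err = clSetKernelArg(kernel, 0, sizeof(val), &val);
  err |= clSetKernelArg(kernel, 1, sizeof(n), &n);
  err |= clSetKernelArg(kernel, 2, sizeof(*factor), factor);
  if (err) STARPU_OPENCL_REPORT_ERROR(err);

  size_t global = n, local = 1;
  err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local,
                               0, NULL, &event);
  if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

  clFinish(queue);
  starpu_opencl_collect_stats(event);
  clReleaseEvent(event);
  starpu_opencl_release_kernel(kernel);
@}
@end smallexample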
@node OpenCL statistics
@subsection OpenCL statistics
@deftypefun int starpu_opencl_collect_stats (cl_event @var{event})
This function collects statistics on a kernel execution.
After termination of the kernel, the OpenCL codelet should call this function
with the event returned by @code{clEnqueueNDRangeKernel}, to let StarPU
collect statistics about the kernel execution (used cycles, consumed power).
@end deftypefun
@node OpenCL utilities
@subsection OpenCL utilities
@deftypefun {const char *} starpu_opencl_error_string (cl_int @var{status})
Return the error message in English corresponding to @var{status}, an
OpenCL error code.
@end deftypefun
@deftypefun void starpu_opencl_display_error ({const char *}@var{func}, {const char *}@var{file}, int @var{line}, {const char *}@var{msg}, cl_int @var{status})
Given a valid error @var{status}, prints the corresponding error message on
stdout, along with the given function name @var{func}, the given filename
@var{file}, the given line number @var{line} and the given message @var{msg}.
@end deftypefun
@defmac STARPU_OPENCL_DISPLAY_ERROR (cl_int @var{status})
Call the function @code{starpu_opencl_display_error} with the given
error @var{status}, the current function name, current file and line
number, and an empty message.
@end defmac
@deftypefun void starpu_opencl_report_error ({const char *}@var{func}, {const char *}@var{file}, int @var{line}, {const char *}@var{msg}, cl_int @var{status})
Call the function @code{starpu_opencl_display_error} and abort.
@end deftypefun
@defmac STARPU_OPENCL_REPORT_ERROR (cl_int @var{status})
Call the function @code{starpu_opencl_report_error} with the given
error @var{status}, with the current function name, current file and
line number, and an empty message.
@end defmac
@defmac STARPU_OPENCL_REPORT_ERROR_WITH_MSG ({const char *}@var{msg}, cl_int @var{status})
Call the function @code{starpu_opencl_report_error} with the given
message and the given error @var{status}, with the current function
name, current file and line number.
@end defmac
@deftypefun cl_int starpu_opencl_allocate_memory ({cl_mem *}@var{addr}, size_t @var{size}, cl_mem_flags @var{flags})
Allocate @var{size} bytes of memory, stored in @var{addr}. @var{flags} must be a
valid combination of @code{cl_mem_flags} values.
@end deftypefun
@deftypefun cl_int starpu_opencl_copy_ram_to_opencl ({void *}@var{ptr}, unsigned @var{src_node}, cl_mem @var{buffer}, unsigned @var{dst_node}, size_t @var{size}, size_t @var{offset}, {cl_event *}@var{event}, {int *}@var{ret})
Copy @var{size} bytes from the given @var{ptr} on
RAM @var{src_node} to the given @var{buffer} on OpenCL @var{dst_node}.
@var{offset} is the offset, in bytes, in @var{buffer}.
If @var{event} is NULL, the copy is synchronous, i.e. the queue is
synchronised before returning. If non-NULL, @var{event} can be used
after the call to wait for this particular copy to complete.
This function returns CL_SUCCESS if the copy was successful, or a valid OpenCL error code
otherwise. The integer pointed to by @var{ret} is set to -EAGAIN if the asynchronous launch
was successful, or to 0 if @var{event} was NULL.
@end deftypefun
@deftypefun cl_int starpu_opencl_copy_opencl_to_ram (cl_mem @var{buffer}, unsigned @var{src_node}, void *@var{ptr}, unsigned @var{dst_node}, size_t @var{size}, size_t @var{offset}, {cl_event *}@var{event}, {int *}@var{ret})
Copy @var{size} bytes from the given @var{buffer} on
OpenCL @var{src_node} to the given @var{ptr} on RAM @var{dst_node}.
@var{offset} is the offset, in bytes, in @var{buffer}.
If @var{event} is NULL, the copy is synchronous, i.e. the queue is
synchronised before returning. If non-NULL, @var{event} can be used
after the call to wait for this particular copy to complete.
This function returns CL_SUCCESS if the copy was successful, or a valid OpenCL error code
otherwise. The integer pointed to by @var{ret} is set to -EAGAIN if the asynchronous launch
was successful, or to 0 if @var{event} was NULL.
@end deftypefun
@deftypefun cl_int starpu_opencl_copy_opencl_to_opencl (cl_mem @var{src}, unsigned @var{src_node}, size_t @var{src_offset}, cl_mem @var{dst}, unsigned @var{dst_node}, size_t @var{dst_offset}, size_t @var{size}, {cl_event *}@var{event}, {int *}@var{ret})
Copy @var{size} bytes from byte offset @var{src_offset} of
@var{src} on OpenCL @var{src_node} to byte offset @var{dst_offset} of @var{dst} on
OpenCL @var{dst_node}.
If @var{event} is NULL, the copy is synchronous, i.e. the queue is
synchronised before returning. If non-NULL, @var{event} can be used
after the call to wait for this particular copy to complete.
This function returns CL_SUCCESS if the copy was successful, or a valid OpenCL error code
otherwise. The integer pointed to by @var{ret} is set to -EAGAIN if the asynchronous launch
was successful, or to 0 if @var{event} was NULL.
@end deftypefun
@deftypefun cl_int starpu_opencl_copy_async_sync (uintptr_t @var{src}, size_t @var{src_offset}, unsigned @var{src_node}, uintptr_t @var{dst}, size_t @var{dst_offset}, unsigned @var{dst_node}, size_t @var{size}, {cl_event *}@var{event})
Copy @var{size} bytes from byte offset @var{src_offset} of
@var{src} on @var{src_node} to byte offset @var{dst_offset} of @var{dst} on
@var{dst_node}. If @var{event} is NULL, the copy is synchronous, i.e. the queue is
synchronised before returning. If non-NULL, @var{event} can be used
after the call to wait for this particular copy to complete.
The function returns @code{-EAGAIN} if the asynchronous launch was
successful. It returns 0 if the synchronous copy was successful, or
fails otherwise.
@end deftypefun
@node Miscellaneous helpers
@section Miscellaneous helpers
@deftypefun int starpu_data_cpy (starpu_data_handle_t @var{dst_handle}, starpu_data_handle_t @var{src_handle}, int @var{asynchronous}, void (*@var{callback_func})(void*), void *@var{callback_arg})
Copy the content of the @var{src_handle} into the @var{dst_handle} handle.
The @var{asynchronous} parameter indicates whether the function should
block or not. In the case of an asynchronous call, it is possible to
synchronize with the termination of this operation either by the means of
implicit dependencies (if enabled) or by calling
@code{starpu_task_wait_for_all()}. If @var{callback_func} is not @code{NULL},
this callback function is executed after the handle has been copied, and it is
given the @var{callback_arg} pointer as argument.
@end deftypefun
@deftypefun void starpu_execute_on_each_worker (void (*@var{func})(void *), void *@var{arg}, uint32_t @var{where})
This function executes the given function on a subset of workers.
When calling this method, the offloaded function specified by the first argument is
executed by every StarPU worker that may execute the function.
The second argument is passed to the offloaded function.
The last argument specifies on which types of processing units the function
should be executed. Similarly to the @var{where} field of the
@code{struct starpu_codelet} structure, it is possible to specify that the function
should be executed on every CUDA device and every CPU by passing
@code{STARPU_CPU|STARPU_CUDA}.
This function blocks until the function has been executed on every appropriate
processing unit, so it must not be called from a callback function, for
instance.
@end deftypefun
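For example, the following sketch uses it to run a (hypothetical) per-worker
initialization routine on every CPU and CUDA worker before any task is
submitted:
@smallexample
/* Executed once by each selected worker, in the worker's own thread. */
static void init_worker_state(void *arg)
@{
  const char *library_name = arg;
  printf("worker %d initializes %s\n", starpu_worker_get_id(), library_name);
  /* e.g. call the per-thread initialization routine of an external library */
@}

/* ... after starpu_init(), before submitting tasks ... */
starpu_execute_on_each_worker(init_worker_state, (void *) "libfoo",
                              STARPU_CPU|STARPU_CUDA);
@end smallexample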
@node FXT Support
@section FXT Support
@deftypefun void starpu_fxt_start_profiling (void)
Start recording the trace. The trace is by default started from the
@code{starpu_init()} call, but can be paused by using
@code{starpu_fxt_stop_profiling}, in which case
@code{starpu_fxt_start_profiling} should be called to specify when to resume
recording events.
@end deftypefun
@deftypefun void starpu_fxt_stop_profiling (void)
Stop recording the trace. The trace is by default stopped at the
@code{starpu_shutdown()} call. @code{starpu_fxt_stop_profiling} can however be
used to stop it earlier. @code{starpu_fxt_start_profiling} can then be called to
start recording it again, etc.
@end deftypefun
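A typical pattern is to exclude the initialization phase from the trace and
record only the computation phase; the application routines below are
hypothetical:
@smallexample
starpu_fxt_stop_profiling();   /* do not trace the input loading phase */
load_input_data();
starpu_fxt_start_profiling();  /* resume tracing for the computation phase */
submit_tasks();
starpu_task_wait_for_all();
@end smallexample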
@node MPI
@section MPI
@menu
* Initialisation::
* Communication::
* Communication Cache::
* MPI Insert Task::
* Collective Operations::
@end menu
@node Initialisation
@subsection Initialisation
@deftypefun int starpu_mpi_init (int *@var{argc}, char ***@var{argv}, int @var{initialize_mpi})
Initializes the starpumpi library. @code{initialize_mpi} indicates if
MPI should be initialized or not by StarPU. If the value is not @code{0},
MPI will be initialized by calling @code{MPI_Init_thread(argc, argv,
MPI_THREAD_SERIALIZED, ...)}.
@end deftypefun
@deftypefun int starpu_mpi_initialize (void)
This function is deprecated. One should instead use the function
@code{starpu_mpi_init()} defined above.
This function does not call @code{MPI_Init}; it must have been called beforehand.
@end deftypefun
@deftypefun int starpu_mpi_initialize_extended (int *@var{rank}, int *@var{world_size})
This function is deprecated. One should instead use the function
@code{starpu_mpi_init()} defined above.
MPI will be initialized by starpumpi by calling @code{MPI_Init_thread(argc, argv,
MPI_THREAD_SERIALIZED, ...)}.
@end deftypefun
@deftypefun int starpu_mpi_shutdown (void)
Cleans up the starpumpi library. This must be called after the last use of the
@code{starpu_mpi} functions and before @code{starpu_shutdown()}.
@code{MPI_Finalize()} will be called if StarPU-MPI has been initialized
by @code{starpu_mpi_init()}.
@end deftypefun
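A minimal StarPU-MPI program therefore looks as follows (error handling
omitted):
@smallexample
int main(int argc, char **argv)
@{
  int rank, world_size;

  starpu_init(NULL);
  starpu_mpi_init(&argc, &argv, 1);  /* let StarPU call MPI_Init_thread() */

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  /* ... register data handles, set their rank/tag, submit tasks ... */

  starpu_mpi_shutdown();  /* calls MPI_Finalize() since StarPU-MPI initialized MPI */
  starpu_shutdown();
  return 0;
@}
@end smallexample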
@deftypefun void starpu_mpi_comm_amounts_retrieve (size_t *@var{comm_amounts})
Retrieve the current amount of communications from the current node in
the array @code{comm_amounts}, which must have a size greater than or equal
to the world size. Communication statistics must be enabled
(@pxref{STARPU_COMM_STATS}).
@end deftypefun
@deftypefun void starpu_mpi_set_communication_tag (int @var{tag})
@anchor{starpu_mpi_set_communication_tag}
Tell StarPU-MPI which MPI tag to use for all its communications.
@end deftypefun
@deftypefun int starpu_mpi_get_communication_tag (void)
@anchor{starpu_mpi_get_communication_tag}
Returns the MPI tag which will be used for all StarPU-MPI communications.
@end deftypefun
@node Communication
@subsection Communication
@deftypefun int starpu_mpi_send (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Performs a standard-mode, blocking send of @var{data_handle} to the
node @var{dest} using the message tag @code{mpi_tag} within the
communicator @var{comm}.
@end deftypefun
@deftypefun int starpu_mpi_recv (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, MPI_Status *@var{status})
Performs a standard-mode, blocking receive in @var{data_handle} from the
node @var{source} using the message tag @code{mpi_tag} within the
communicator @var{comm}.
@end deftypefun
@deftypefun int starpu_mpi_isend (starpu_data_handle_t @var{data_handle}, starpu_mpi_req *@var{req}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @code{mpi_tag} within the
communicator @var{comm}. After the call, the pointer to the request
@var{req} can be used to test or to wait for the completion of the communication.
@end deftypefun
@deftypefun int starpu_mpi_irecv (starpu_data_handle_t @var{data_handle}, starpu_mpi_req *@var{req}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @code{mpi_tag} within the
communicator @var{comm}. After the call, the pointer to the request
@var{req} can be used to test or to wait for the completion of the communication.
@end deftypefun
@deftypefun int starpu_mpi_isend_detached (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @code{mpi_tag} within the
communicator @var{comm}. On completion, the @var{callback} function is
called with the argument @var{arg}. Similarly to the pthread detached
functionality, when a detached communication completes, its resources
are automatically released back to the system; there is no need to
test or to wait for the completion of the request.
@end deftypefun
@deftypefun int starpu_mpi_irecv_detached (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, void (*@var{callback})(void *), void *@var{arg})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @code{mpi_tag} within the
communicator @var{comm}. On completion, the @var{callback} function is
called with the argument @var{arg}. Similarly to the pthread detached
functionality, when a detached communication completes, its resources
are automatically released back to the system; there is no need to
test or to wait for the completion of the request.
@end deftypefun
@deftypefun int starpu_mpi_wait (starpu_mpi_req *@var{req}, MPI_Status *@var{status})
Returns when the operation identified by request @var{req} is complete.
@end deftypefun
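For instance, a data handle can be exchanged between two nodes with the
non-blocking calls and an explicit wait (@code{handle} and the tag value are
application-defined):
@smallexample
starpu_mpi_req req;
MPI_Status status;

if (rank == 0)
@{
  starpu_mpi_isend(handle, &req, 1 /* dest */, 42 /* tag */, MPI_COMM_WORLD);
  starpu_mpi_wait(&req, &status);
@}
else if (rank == 1)
@{
  starpu_mpi_irecv(handle, &req, 0 /* source */, 42 /* tag */, MPI_COMM_WORLD);
  starpu_mpi_wait(&req, &status);
@}
@end smallexample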
@deftypefun int starpu_mpi_test (starpu_mpi_req *@var{req}, int *@var{flag}, MPI_Status *@var{status})
If the operation identified by @var{req} is complete, set @var{flag}
to 1. The @var{status} object is set to contain information on the
completed operation.
@end deftypefun
@deftypefun int starpu_mpi_barrier (MPI_Comm @var{comm})
Blocks the caller until all group members of the communicator
@var{comm} have called it.
@end deftypefun
@deftypefun int starpu_mpi_isend_detached_unlock_tag (starpu_data_handle_t @var{data_handle}, int @var{dest}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a standard-mode, non-blocking send of @var{data_handle} to the
node @var{dest} using the message tag @code{mpi_tag} within the
communicator @var{comm}. On completion, @var{tag} is unlocked.
@end deftypefun
@deftypefun int starpu_mpi_irecv_detached_unlock_tag (starpu_data_handle_t @var{data_handle}, int @var{source}, int @var{mpi_tag}, MPI_Comm @var{comm}, starpu_tag_t @var{tag})
Posts a non-blocking receive in @var{data_handle} from the
node @var{source} using the message tag @code{mpi_tag} within the
communicator @var{comm}. On completion, @var{tag} is unlocked.
@end deftypefun
@deftypefun int starpu_mpi_isend_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle_t *@var{data_handle}, int *@var{dest}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Posts @var{array_size} standard-mode, non-blocking sends. Each post
sends the n-th data of the array @var{data_handle} to the n-th node of
the array @var{dest}
using the n-th message tag of the array @code{mpi_tag} within the n-th
communicator of the array
@var{comm}. On completion of all the requests, @var{tag} is unlocked.
@end deftypefun
@deftypefun int starpu_mpi_irecv_array_detached_unlock_tag (unsigned @var{array_size}, starpu_data_handle_t *@var{data_handle}, int *@var{source}, int *@var{mpi_tag}, MPI_Comm *@var{comm}, starpu_tag_t @var{tag})
Posts @var{array_size} non-blocking receives. Each post receives in the
n-th data of the array @var{data_handle} from the n-th
node of the array @var{source} using the n-th message tag of the array
@code{mpi_tag} within the n-th communicator of the array @var{comm}.
On completion of all the requests, @var{tag} is unlocked.
@end deftypefun
@node Communication Cache
@subsection Communication Cache
@deftypefun void starpu_mpi_cache_flush (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle})
Clear the send and receive communication cache for the data
@var{data_handle}. The function has to be called synchronously by all
the MPI nodes.
The function does nothing if the cache mechanism is disabled (@pxref{STARPU_MPI_CACHE}).
@end deftypefun
@deftypefun void starpu_mpi_cache_flush_all_data (MPI_Comm @var{comm})
Clear the send and receive communication cache for all data. The
function has to be called synchronously by all the MPI nodes.
The function does nothing if the cache mechanism is disabled (@pxref{STARPU_MPI_CACHE}).
@end deftypefun
@node MPI Insert Task
@subsection MPI Insert Task
@deftypefun int starpu_data_set_tag (starpu_data_handle_t @var{handle}, int @var{tag})
Tell StarPU-MPI which MPI tag to use when exchanging the data.
@end deftypefun
@deftypefun int starpu_data_get_tag (starpu_data_handle_t @var{handle})
Returns the MPI tag to be used when exchanging the data.
@end deftypefun
@deftypefun int starpu_data_set_rank (starpu_data_handle_t @var{handle}, int @var{rank})
Tell StarPU-MPI which MPI node ``owns'' a given data, that is, the node which will
always keep an up-to-date value, and will by default execute tasks which write
to it.
@end deftypefun
@deftypefun int starpu_data_get_rank (starpu_data_handle_t @var{handle})
Returns the last value set by @code{starpu_data_set_rank}.
@end deftypefun
@deftypefun starpu_data_handle_t starpu_data_get_data_handle_from_tag (int @var{tag})
Returns the data handle associated with the MPI tag, or NULL if there is none.
@end deftypefun
@defmac STARPU_EXECUTE_ON_NODE
This macro is used when calling @code{starpu_mpi_insert_task}, and
must be followed by an integer value which specifies the node on which
to execute the codelet.
@end defmac
@defmac STARPU_EXECUTE_ON_DATA
This macro is used when calling @code{starpu_mpi_insert_task}, and
must be followed by a data handle to specify that the node owning the
given data will execute the codelet.
@end defmac
@deftypefun int starpu_mpi_insert_task (MPI_Comm @var{comm}, struct starpu_codelet *@var{codelet}, ...)
Create and submit a task corresponding to @var{codelet} with the following
arguments. The argument list must be zero-terminated.
The arguments following the codelet are of the same types as for the
function @code{starpu_insert_task} defined in @ref{Insert Task Utility}.
The extra argument @code{STARPU_EXECUTE_ON_NODE} followed by an
integer allows specifying the MPI node to execute the codelet. It is also
possible to specify that the node owning a specific data will execute
the codelet, by using @code{STARPU_EXECUTE_ON_DATA} followed by a data
handle.
The internal algorithm is as follows:
@enumerate
@item Find out which MPI node is going to execute the codelet.
@enumerate
@item If there is only one node owning data in W mode, it will
be selected;
@item If several nodes own data in W mode, the one
selected will be the one having the least data in R mode, so as
to minimize the amount of data to be transferred;
@item The argument @code{STARPU_EXECUTE_ON_NODE} followed by an
integer can be used to specify the node;
@item The argument @code{STARPU_EXECUTE_ON_DATA} followed by a
data handle can be used to specify that the node owning the given
data will execute the codelet.
@end enumerate
@item Send and receive data as requested. Nodes owning data which need to be
read by the task send them to the MPI node which will execute it. The
latter receives them.
@item Execute the codelet. This is done by the MPI node selected in the
1st step of the algorithm.
@item If several MPI nodes own data to be written to, send written
data back to their owners.
@end enumerate
The algorithm also includes a communication cache mechanism that
avoids sending data twice to the same MPI node, unless the data
has been modified. The cache can be disabled
(@pxref{STARPU_MPI_CACHE}).
@c TODO: say more about the cache
@end deftypefun
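For illustration, assuming the handles below have been given an owner with
@code{starpu_data_set_rank} and a tag with @code{starpu_data_set_tag}, and
that @code{cl} is an application codelet, a task can be submitted from every
node with:
@smallexample
/* Executed collectively: the owner of the data written in W mode runs the
   codelet, the other nodes only take part in the required transfers. */
starpu_mpi_insert_task(MPI_COMM_WORLD, &cl,
                       STARPU_RW, A_handle,
                       STARPU_R,  B_handle,
                       0);

/* Same, but force the execution on MPI node 2. */
starpu_mpi_insert_task(MPI_COMM_WORLD, &cl,
                       STARPU_RW, A_handle,
                       STARPU_R,  B_handle,
                       STARPU_EXECUTE_ON_NODE, 2,
                       0);
starpu_task_wait_for_all();
@end smallexample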
@deftypefun void starpu_mpi_get_data_on_node (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle}, int @var{node})
Transfer data @var{data_handle} to MPI node @var{node}, sending it from its
owner if needed. At least the target node and the owner have to call the
function.
@end deftypefun
@deftypefun void starpu_mpi_get_data_on_node_detached (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle}, int @var{node}, {void (*}@var{callback})(void*), {void *}@var{arg})
Transfer data @var{data_handle} to MPI node @var{node}, sending it from its
owner if needed. At least the target node and the owner have to call the
function. On reception, the @var{callback} function is called with the
argument @var{arg}.
@end deftypefun
@node Collective Operations
@subsection Collective Operations
@deftypefun void starpu_mpi_redux_data (MPI_Comm @var{comm}, starpu_data_handle_t @var{data_handle})
Perform a reduction on the given data. All nodes send the data to its
owner node, which performs the reduction.
@end deftypefun
@deftypefun int starpu_mpi_scatter_detached (starpu_data_handle_t *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm}, {void (*}@var{scallback})(void *), {void *}@var{sarg}, {void (*}@var{rcallback})(void *), {void *}@var{rarg})
Scatter data among processes of the communicator based on the ownership of
the data. For each data of the array @var{data_handles}, the
process @var{root} sends the data to the process owning this data.
Processes receiving data must have valid data handles to receive them.
On completion of the collective communication, the @var{scallback} function is
called with the argument @var{sarg} on the process @var{root}, and the @var{rcallback} function is
called with the argument @var{rarg} on any other process.
@end deftypefun
@deftypefun int starpu_mpi_gather_detached (starpu_data_handle_t *@var{data_handles}, int @var{count}, int @var{root}, MPI_Comm @var{comm}, {void (*}@var{scallback})(void *), {void *}@var{sarg}, {void (*}@var{rcallback})(void *), {void *}@var{rarg})
Gather data from the different processes of the communicator onto the
process @var{root}. Each process owning a data handle in the array
@var{data_handles} will send it to the process @var{root}. The
process @var{root} must have valid data handles to receive the data.
On completion of the collective communication, the @var{rcallback} function is
called with the argument @var{rarg} on the process @var{root}, and the @var{scallback} function is
called with the argument @var{sarg} on any other process.
@end deftypefun
@node Task Bundles
@section Task Bundles
@deftp {Data Type} {starpu_task_bundle_t}
Opaque structure describing a list of tasks that should be scheduled
on the same worker whenever possible. It must be considered as a
hint given to the scheduler as there is no guarantee that they will be
executed on the same worker.
@end deftp
@deftypefun void starpu_task_bundle_create ({starpu_task_bundle_t *}@var{bundle})
Factory function creating and initializing @var{bundle}. When the call returns,
the needed memory is allocated and @var{bundle} is ready to use.
@end deftypefun
@deftypefun int starpu_task_bundle_insert (starpu_task_bundle_t @var{bundle}, {struct starpu_task *}@var{task})
Insert @var{task} in @var{bundle}. Until @var{task} is removed from @var{bundle}, its expected length and data transfer time will be considered along with those of the other tasks of @var{bundle}.
This function must not be called if @var{bundle} is already closed and/or @var{task} is already submitted.
@end deftypefun
@deftypefun int starpu_task_bundle_remove (starpu_task_bundle_t @var{bundle}, {struct starpu_task *}@var{task})
Remove @var{task} from @var{bundle}.
Of course @var{task} must have been previously inserted into @var{bundle}.
This function must not be called if @var{bundle} is already closed and/or @var{task} is already submitted. Doing so would result in undefined behaviour.
@end deftypefun
@deftypefun void starpu_task_bundle_close (starpu_task_bundle_t @var{bundle})
Inform the runtime that the user will not modify @var{bundle} anymore, i.e. no more tasks will be inserted or removed. The runtime can then destroy it when possible.
@end deftypefun
@deftypefun double starpu_task_bundle_expected_length (starpu_task_bundle_t @var{bundle}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Return the expected duration of the entire task bundle in µs.
@end deftypefun
@deftypefun double starpu_task_bundle_expected_power (starpu_task_bundle_t @var{bundle}, enum starpu_perf_archtype @var{arch}, unsigned @var{nimpl})
Return the expected power consumption of the entire task bundle in J.
@end deftypefun
@deftypefun double starpu_task_bundle_expected_data_transfer_time (starpu_task_bundle_t @var{bundle}, unsigned @var{memory_node})
Return the time (in µs) expected to transfer all data used within the bundle.
@end deftypefun
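A bundle is typically filled right before submitting the corresponding tasks,
and closed once the last one has been inserted (the codelet and handles are
hypothetical):
@smallexample
starpu_task_bundle_t bundle;
starpu_task_bundle_create(&bundle);

int i;
for (i = 0; i < 3; i++)
@{
  struct starpu_task *task = starpu_task_create();
  task->cl = &cl;
  task->handles[0] = handles[i];
  /* insert into the bundle before submission */
  starpu_task_bundle_insert(bundle, task);
  starpu_task_submit(task);
@}

starpu_task_bundle_close(bundle);  /* no more insertion or removal */
@end smallexample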
@node Task Lists
@section Task Lists
@deftp {Data Type} {struct starpu_task_list}
Stores a doubly-linked list of tasks.
@end deftp
@deftypefun void starpu_task_list_init ({struct starpu_task_list *}@var{list})
Initialize a list structure.
@end deftypefun
@deftypefun void starpu_task_list_push_front ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Push a task at the front of a list.
@end deftypefun
@deftypefun void starpu_task_list_push_back ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Push a task at the back of a list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_front ({struct starpu_task_list *}@var{list})
Get the task at the front of the list (without removing it).
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_back ({struct starpu_task_list *}@var{list})
Get the task at the back of the list (without removing it).
@end deftypefun
@deftypefun int starpu_task_list_empty ({struct starpu_task_list *}@var{list})
Test if a list is empty.
@end deftypefun
@deftypefun void starpu_task_list_erase ({struct starpu_task_list *}@var{list}, {struct starpu_task *}@var{task})
Remove an element from the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_pop_front ({struct starpu_task_list *}@var{list})
Remove the element at the front of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_pop_back ({struct starpu_task_list *}@var{list})
Remove the element at the back of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_begin ({struct starpu_task_list *}@var{list})
Get the first task of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_end ({struct starpu_task_list *}@var{list})
Get the end of the list.
@end deftypefun
@deftypefun {struct starpu_task *} starpu_task_list_next ({struct starpu_task *}@var{task})
Get the next task of the list. This is not erase-safe.
@end deftypefun
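These functions are typically combined as follows (the tasks themselves are
assumed to have been created elsewhere):
@smallexample
struct starpu_task_list list;
struct starpu_task *task;

starpu_task_list_init(&list);
starpu_task_list_push_back(&list, task_a);
starpu_task_list_push_front(&list, task_b);

/* Iterate without removing the elements (not erase-safe). */
for (task = starpu_task_list_begin(&list);
     task != starpu_task_list_end(&list);
     task = starpu_task_list_next(task))
  inspect(task);  /* hypothetical */

/* Drain the list in FIFO order. */
while (!starpu_task_list_empty(&list))
  task = starpu_task_list_pop_front(&list);
@end smallexample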
@node Using Parallel Tasks
@section Using Parallel Tasks
These are used by parallel tasks:
@deftypefun int starpu_combined_worker_get_size (void)
Return the size of the current combined worker, i.e. the total number of CPUs
running the same task in the case of SPMD parallel tasks, or the total number
of threads that the task is allowed to start in the case of FORKJOIN parallel
tasks.
@end deftypefun
@deftypefun int starpu_combined_worker_get_rank (void)
Return the rank of the current thread within the combined worker. Can only be
used in FORKJOIN parallel tasks, to know which part of the task to work on.
@end deftypefun
The following functions are mostly used by schedulers which support parallel tasks.
@deftypefun unsigned starpu_combined_worker_get_count (void)
Return the number of different combined workers.
@end deftypefun
@deftypefun int starpu_combined_worker_get_id (void)
Return the identifier of the current combined worker.
@end deftypefun
@deftypefun int starpu_combined_worker_assign_workerid (int @var{nworkers}, int @var{workerid_array}[])
Register a new combined worker and get its identifier.
@end deftypefun
@deftypefun int starpu_combined_worker_get_description (int @var{workerid}, {int *}@var{worker_size}, {int **}@var{combined_workerid})
Get the description of a combined worker.
@end deftypefun
@deftypefun int starpu_combined_worker_can_execute_task (unsigned @var{workerid}, {struct starpu_task *}@var{task}, unsigned @var{nimpl})
Variant of @code{starpu_worker_can_execute_task} compatible with combined workers.
@end deftypefun
@deftp {Data Type} {struct starpu_machine_topology}
@table @asis
@item @code{unsigned nworkers}
Total number of workers.
@item @code{unsigned ncombinedworkers}
Total number of combined workers.
@item @code{hwloc_topology_t hwtopology}
Topology as detected by hwloc.
To maintain ABI compatibility when hwloc is not available, the field
is replaced with @code{void *dummy}.
@item @code{unsigned nhwcpus}
Total number of CPUs, as detected by the topology code. May be different from
the actual number of CPU workers.
@item @code{unsigned nhwcudagpus}
Total number of CUDA devices, as detected. May be different from the actual
number of CUDA workers.
@item @code{unsigned nhwopenclgpus}
Total number of OpenCL devices, as detected. May be different from the actual
number of OpenCL workers.
@item @code{unsigned ncpus}
Actual number of CPU workers used by StarPU.
@item @code{unsigned ncudagpus}
Actual number of CUDA workers used by StarPU.
@item @code{unsigned nopenclgpus}
Actual number of OpenCL workers used by StarPU.
@item @code{unsigned workers_bindid[STARPU_NMAXWORKERS]}
Indicates the successive CPU identifiers that should be used to bind the
workers. It is either filled according to the user's explicit
parameters (from starpu_conf) or according to the @code{STARPU_WORKERS_CPUID}
environment variable. Otherwise, a round-robin policy is used to distribute
the workers over the CPUs.
@item @code{unsigned workers_cuda_gpuid[STARPU_NMAXWORKERS]}
Indicates the successive GPU identifiers that should be used by the CUDA
driver. It is either filled according to the user's explicit parameters (from
starpu_conf) or according to the @code{STARPU_WORKERS_CUDAID} environment variable. Otherwise,
they are taken in ID order.
@item @code{unsigned workers_opencl_gpuid[STARPU_NMAXWORKERS]}
Indicates the successive GPU identifiers that should be used by the OpenCL
driver. It is either filled according to the user's explicit parameters (from
starpu_conf) or according to the @code{STARPU_WORKERS_OPENCLID} environment variable. Otherwise,
they are taken in ID order.
@end table
@end deftp
@node Scheduling Contexts
@section Scheduling Contexts
StarPU permits, on the one hand, grouping workers into combined workers in order to execute a parallel task and, on the other hand, grouping tasks into bundles that will be executed by a single specified worker.
In contrast, when we group workers into scheduling contexts, we submit StarPU tasks to them and they are scheduled with the policy assigned to the context.
Scheduling contexts can be created, deleted and modified dynamically.
@deftypefun unsigned starpu_sched_ctx_create (const char *@var{policy_name}, int *@var{workerids_ctx}, int @var{nworkers_ctx}, const char *@var{sched_ctx_name})
This function creates a scheduling context which uses the scheduling policy indicated in the first argument and assigns the workers indicated in the second argument to execute the tasks submitted to it.
The return value represents the identifier of the context that has just been created. It will be further used to indicate the context the tasks will be submitted to. The return value should be at most @code{STARPU_NMAX_SCHED_CTXS}.
@end deftypefun
@deftypefun void starpu_sched_ctx_delete (unsigned @var{sched_ctx_id})
Delete scheduling context @var{sched_ctx_id} and transfer remaining workers to the inheritor scheduling context.
@end deftypefun
@deftypefun void starpu_sched_ctx_add_workers ({int *}@var{workerids_ctx}, int @var{nworkers_ctx}, unsigned @var{sched_ctx_id})
This function dynamically adds the workers indicated in the first argument to the context indicated in the last argument. The last argument cannot be greater than @code{STARPU_NMAX_SCHED_CTXS}.
@end deftypefun
@deftypefun void starpu_sched_ctx_remove_workers ({int *}@var{workerids_ctx}, int @var{nworkers_ctx}, unsigned @var{sched_ctx_id})
This function removes the workers indicated in the first argument from the context indicated in the last argument. The last argument cannot be greater than @code{STARPU_NMAX_SCHED_CTXS}.
@end deftypefun
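As a sketch, assuming a machine with at least five CPU workers whose
identifiers have been collected beforehand, two contexts using different
policies can be created and resized as follows:
@smallexample
int workers1[3] = @{0, 1, 2@};
unsigned ctx1 = starpu_sched_ctx_create("eager", workers1, 3, "ctx1");

int workers2[2] = @{3, 4@};
unsigned ctx2 = starpu_sched_ctx_create("dmda", workers2, 2, "ctx2");

/* Tasks submitted from now on go to ctx1. */
starpu_sched_ctx_set_context(&ctx1);
/* ... submit tasks ... */

/* Later, move one worker from ctx1 to ctx2. */
int moved[1] = @{2@};
starpu_sched_ctx_remove_workers(moved, 1, ctx1);
starpu_sched_ctx_add_workers(moved, 1, ctx2);
@end smallexample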
A scheduling context manages a collection of workers that can be stored in
different data structures. Thus, a generic structure is available in order to
simplify the choice of its type. Currently only the list data structure is
available, but other implementations (such as a tree) are foreseen.
@deftp {Data Type} {struct starpu_worker_collection}
@table @asis
@item @code{void *workerids}
The workerids managed by the collection.
@item @code{unsigned nworkers}
The number of workerids.
@item @code{pthread_key_t cursor_key} (optional)
The cursor needed to iterate over the collection (depending on the data structure).
@item @code{int type}
The type of structure (currently @code{STARPU_WORKER_LIST} is the only one available).
@item @code{unsigned (*has_next)(struct starpu_worker_collection *workers)}
Checks if there is a next worker.
@item @code{int (*get_next)(struct starpu_worker_collection *workers)}
Gets the next worker.
@item @code{int (*add)(struct starpu_worker_collection *workers, int worker)}
Adds a worker to the collection.
@item @code{int (*remove)(struct starpu_worker_collection *workers, int worker)}
Removes a worker from the collection.
@item @code{void* (*init)(struct starpu_worker_collection *workers)}
Initialize the collection.
@item @code{void (*deinit)(struct starpu_worker_collection *workers)}
Deinitialize the collection.
@item @code{void (*init_cursor)(struct starpu_worker_collection *workers)} (optional)
Initialize the cursor if there is one.
@item @code{void (*deinit_cursor)(struct starpu_worker_collection *workers)} (optional)
Deinitialize the cursor if there is one.
@end table
@end deftp
@deftypefun struct starpu_worker_collection* starpu_sched_ctx_create_worker_collection (unsigned @var{sched_ctx_id}, int @var{type})
Create a worker collection of the type indicated by the last parameter for the context specified through the first parameter.
@end deftypefun
@deftypefun void starpu_sched_ctx_delete_worker_collection (unsigned @var{sched_ctx_id})
Delete the worker collection of the specified scheduling context.
@end deftypefun
@deftypefun struct starpu_worker_collection* starpu_sched_ctx_get_worker_collection (unsigned @var{sched_ctx_id})
Return the worker collection managed by the indicated context.
@end deftypefun
@deftypefun pthread_mutex_t* starpu_sched_ctx_get_changing_ctx_mutex (unsigned @var{sched_ctx_id})
TODO
@end deftypefun
@deftypefun void starpu_sched_ctx_set_context (unsigned *@var{sched_ctx_id})
Set the scheduling context the subsequent tasks will be submitted to.
@end deftypefun
@deftypefun unsigned starpu_sched_ctx_get_context (void)
Return the scheduling context the tasks are currently submitted to.
@end deftypefun
@deftypefun unsigned starpu_sched_ctx_get_nworkers (unsigned @var{sched_ctx_id})
Return the number of workers managed by the specified context
(usually needed to check whether it manages any workers, or whether it should be blocked).
@end deftypefun
@deftypefun unsigned starpu_sched_ctx_get_nshared_workers (unsigned @var{sched_ctx_id}, unsigned @var{sched_ctx_id2})
Return the number of workers shared by two contexts.
@end deftypefun
@deftypefun int starpu_sched_ctx_set_min_priority (unsigned @var{sched_ctx_id}, int @var{min_prio})
Defines the minimum task priority level supported by the scheduling
policy of the given scheduler context. The
default minimum priority level is the same as the default priority level, which
is 0 by convention. The application may access that value by calling the
@code{starpu_sched_ctx_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly from the application.
@end deftypefun
@deftypefun int starpu_sched_ctx_set_max_priority (unsigned @var{sched_ctx_id}, int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy of the given scheduler context. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_ctx_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly from the application.
@end deftypefun
@deftypefun int starpu_sched_ctx_get_min_priority (unsigned @var{sched_ctx_id})
Returns the current minimum priority level supported by the
scheduling policy of the given scheduler context.
@end deftypefun
@deftypefun int starpu_sched_ctx_get_max_priority (unsigned @var{sched_ctx_id})
Returns the current maximum priority level supported by the
scheduling policy of the given scheduler context.
@end deftypefun
@node Scheduling Policy
@section Scheduling Policy
TODO
While StarPU comes with a variety of scheduling policies (@pxref{Task
scheduling policy}), it may sometimes be desirable to implement custom
policies to address specific problems. The API described below allows
users to write their own scheduling policy.
@deftp {Data Type} {struct starpu_sched_policy}
This structure contains all the methods that implement a scheduling policy. An
application may specify which scheduling strategy to use in the @code{sched_policy}
field of the @code{starpu_conf} structure passed to the @code{starpu_init}
function. The different fields are:
@table @asis
@item @code{void (*init_sched)(unsigned sched_ctx_id)}
Initialize the scheduling policy.
@item @code{void (*deinit_sched)(unsigned sched_ctx_id)}
Cleanup the scheduling policy.
@item @code{int (*push_task)(struct starpu_task *)}
Insert a task into the scheduler.
@item @code{void (*push_task_notify)(struct starpu_task *, int workerid)}
Notify the scheduler that a task was pushed on a given worker. This method is
called when a task that was explicitly assigned to a worker becomes ready and
is about to be executed by the worker. This method therefore permits keeping
the state of the scheduler coherent even when StarPU bypasses the scheduling
strategy.
@item @code{struct starpu_task *(*pop_task)(unsigned sched_ctx_id)} (optional)
Get a task from the scheduler. The mutex associated to the worker is already
taken when this method is called. If this method is defined as @code{NULL}, the
worker will only execute tasks from its local queue. In this case, the
@code{push_task} method should use the @code{starpu_push_local_task} method to
assign tasks to the different workers.
@item @code{struct starpu_task *(*pop_every_task)(unsigned sched_ctx_id)}
Remove all available tasks from the scheduler (tasks are chained by the means
of the prev and next fields of the starpu_task structure). The mutex associated
to the worker is already taken when this method is called. This is currently
not used.
@item @code{void (*pre_exec_hook)(struct starpu_task *)} (optional)
This method is called every time a task is starting.
@item @code{void (*post_exec_hook)(struct starpu_task *)} (optional)
This method is called every time a task has been executed.
@item @code{void (*add_workers)(unsigned sched_ctx_id, int *workerids, unsigned nworkers)}
Initialize scheduling structures corresponding to each worker used by the policy.
@item @code{void (*remove_workers)(unsigned sched_ctx_id, int *workerids, unsigned nworkers)}
Deinitialize scheduling structures corresponding to each worker used by the policy.
@item @code{const char *policy_name} (optional)
Name of the policy.
@item @code{const char *policy_description} (optional)
Description of the policy.
@end table
@end deftp
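A deliberately trivial sketch of such a structure is shown below: the policy
keeps no queue of its own and pushes every task to the local queue of worker 0
(a real policy would at least check @code{starpu_worker_can_execute_task} and
distribute the load):
@smallexample
static void trivial_init(unsigned sched_ctx_id) @{ (void) sched_ctx_id; @}
static void trivial_deinit(unsigned sched_ctx_id) @{ (void) sched_ctx_id; @}

static int trivial_push(struct starpu_task *task)
@{
  /* back = 0: keep a FIFO ordering in worker 0's local queue */
  return starpu_push_local_task(0, task, 0);
@}

static struct starpu_sched_policy trivial_policy =
@{
  .init_sched = trivial_init,
  .deinit_sched = trivial_deinit,
  .push_task = trivial_push,
  .pop_task = NULL,  /* workers execute tasks from their local queue */
  .policy_name = "trivial",
  .policy_description = "push every task to worker 0"
@};
@end smallexample
This structure would then be passed through the @code{sched_policy} field of
@code{starpu_conf} before calling @code{starpu_init}.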
@deftypefun {struct starpu_sched_policy **} starpu_sched_get_predefined_policies ()
Return a NULL-terminated array of all the predefined scheduling policies.
@end deftypefun
@deftypefun void starpu_sched_ctx_set_policy_data (unsigned @var{sched_ctx_id}, {void *} @var{policy_data})
Each scheduling policy uses some specific data (queues, variables, additional condition variables),
which is stored in a local structure. This function assigns it to a scheduling context.
@end deftypefun
@deftypefun void* starpu_sched_ctx_get_policy_data (unsigned @var{sched_ctx_id})
Returns the policy data previously assigned to a context.
@end deftypefun
@deftypefun int starpu_sched_set_min_priority (int @var{min_prio})
Defines the minimum task priority level supported by the scheduling policy. The
default minimum priority level is the same as the default priority level, which
is 0 by convention. The application may access that value by calling the
@code{starpu_sched_get_min_priority} function. This function should only be
called from the initialization method of the scheduling policy, and should not
be used directly from the application.
@end deftypefun
@deftypefun int starpu_sched_set_max_priority (int @var{max_prio})
Defines the maximum priority level supported by the scheduling policy. The
default maximum priority level is 1. The application may access that value by
calling the @code{starpu_sched_get_max_priority} function. This function should
only be called from the initialization method of the scheduling policy, and
should not be used directly from the application.
@end deftypefun
@deftypefun int starpu_sched_get_min_priority (void)
Returns the current minimum priority level supported by the
scheduling policy.
@end deftypefun
@deftypefun int starpu_sched_get_max_priority (void)
Returns the current maximum priority level supported by the
scheduling policy.
@end deftypefun
@deftypefun int starpu_push_local_task (int @var{workerid}, {struct starpu_task} *@var{task}, int @var{back})
The scheduling policy may put tasks directly into a worker's local queue so
that it is not always necessary to create its own queue when the local queue
is sufficient. If @var{back} is not zero, @var{task} is put at the back of the queue
where the worker will pop tasks first. Setting @var{back} to 0 therefore ensures
a FIFO ordering.
@end deftypefun
@deftypefun int starpu_push_task_end ({struct starpu_task} *@var{task})
This function must be called by a scheduler to notify that the given
task has just been pushed.
@end deftypefun
@deftypefun int starpu_worker_can_execute_task (unsigned @var{workerid}, {struct starpu_task *}@var{task}, unsigned @var{nimpl})
Check if the worker specified by @var{workerid} can execute the codelet. Schedulers need to call it before assigning a task to a worker, otherwise the task may fail to execute.
@end deftypefun
@deftypefun double starpu_timing_now (void)
Return the current date in µs.
@end deftypefun
@deftypefun uint32_t starpu_task_footprint ({struct starpu_perfmodel *}@var{model}, {struct starpu_task *} @var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the footprint for a given task.
@end deftypefun
@deftypefun double starpu_task_expected_length ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the expected task duration in µs.
@end deftypefun
@deftypefun double starpu_worker_get_relative_speedup ({enum starpu_perf_archtype} @var{perf_archtype})
Returns an estimated speedup factor relative to CPU speed.
@end deftypefun
@deftypefun double starpu_task_expected_data_transfer_time (unsigned @var{memory_node}, {struct starpu_task *}@var{task})
Returns the expected data transfer time in µs.
@end deftypefun
@deftypefun double starpu_data_expected_transfer_time (starpu_data_handle_t @var{handle}, unsigned @var{memory_node}, {enum starpu_access_mode} @var{mode})
Predict the transfer time (in µs) to move a handle to a memory node.
@end deftypefun
@deftypefun double starpu_task_expected_power ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the expected power consumption in J.
@end deftypefun
@deftypefun double starpu_task_expected_conversion_time ({struct starpu_task *}@var{task}, {enum starpu_perf_archtype} @var{arch}, unsigned @var{nimpl})
Returns the expected conversion time in ms (multiformat interface only).
@end deftypefun
@node Running drivers
@section Running drivers
@deftypefun int starpu_driver_run ({struct starpu_driver *}@var{d})
Initialize the given driver, run it until it receives a request to terminate,
deinitialize it and return 0 on success. It returns -EINVAL if @code{d->type}
is not a valid StarPU device type (STARPU_CPU_WORKER, STARPU_CUDA_WORKER or
STARPU_OPENCL_WORKER). This is the same as using the following
functions: calling @code{starpu_driver_init()}, then calling
@code{starpu_driver_run_once()} in a loop, and eventually
@code{starpu_driver_deinit()}.
@end deftypefun
@deftypefun int starpu_driver_init (struct starpu_driver *@var{d})
Initialize the given driver. Returns 0 on success, -EINVAL if
@code{d->type} is not a valid StarPU device type (STARPU_CPU_WORKER,
STARPU_CUDA_WORKER or STARPU_OPENCL_WORKER).
@end deftypefun
@deftypefun int starpu_driver_run_once (struct starpu_driver *@var{d})
Run the driver once, then return 0 on success, -EINVAL if
@code{d->type} is not a valid StarPU device type (STARPU_CPU_WORKER,
STARPU_CUDA_WORKER or STARPU_OPENCL_WORKER).
@end deftypefun
@deftypefun int starpu_driver_deinit (struct starpu_driver *@var{d})
Deinitialize the given driver. Returns 0 on success, -EINVAL if
@code{d->type} is not a valid StarPU device type (STARPU_CPU_WORKER,
STARPU_CUDA_WORKER or STARPU_OPENCL_WORKER).
@end deftypefun
@deftypefun void starpu_drivers_request_termination (void)
Notify all running drivers that they should terminate.
@end deftypefun
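The loop spelled out by @code{starpu_driver_run()} thus looks roughly like the
following sketch, which drives one CPU worker until the application decides to
stop. The structure initialization assumes @code{struct starpu_driver} exposes
a @code{type} field and a per-device identifier, and the stop condition is
application-defined:
@smallexample
struct starpu_driver d =
@{
  .type = STARPU_CPU_WORKER,
  .id.cpu_id = 0
@};

int ret = starpu_driver_init(&d);
if (ret != 0) return ret;

while (!application_requested_stop())  /* hypothetical condition */
  starpu_driver_run_once(&d);

starpu_driver_deinit(&d);
@end smallexample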
@node Expert mode
@section Expert mode
@deftypefun void starpu_wake_all_blocked_workers (void)
Wake all the workers, so they can inspect data requests and task submissions
again.
@end deftypefun
@deftypefun int starpu_progression_hook_register (unsigned (*@var{func})(void *arg), void *@var{arg})
Register a progression hook, to be called when workers are idle.
@end deftypefun
@deftypefun void starpu_progression_hook_deregister (int @var{hook_id})
Unregister a given progression hook.
@end deftypefun