workers.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2008-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2011 Télécom-SudParis
 * Copyright (C) 2013 Thibaut Lambert
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <stdlib.h>
#include <stdio.h>
#ifdef __linux__
#include <sys/utsname.h>
#endif
#include <common/config.h>
#include <common/utils.h>
#include <common/graph.h>
#include <core/progress_hook.h>
#include <core/idle_hook.h>
#include <core/workers.h>
#include <core/debug.h>
#include <core/disk.h>
#include <core/task.h>
#include <core/detect_combined_workers.h>
#include <datawizard/malloc.h>
#include <profiling/profiling.h>
#include <profiling/bound.h>
#include <sched_policies/sched_component.h>
#include <datawizard/memory_nodes.h>
#include <common/knobs.h>
#include <drivers/mp_common/sink_common.h>
#include <drivers/mpi/driver_mpi_common.h>
#include <drivers/cpu/driver_cpu.h>
#include <drivers/cuda/driver_cuda.h>
#include <drivers/opencl/driver_opencl.h>
#include <drivers/mic/driver_mic_source.h>
#include <drivers/mpi/driver_mpi_source.h>
#include <drivers/disk/driver_disk.h>

#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#endif

#if defined(_WIN32) && !defined(__CYGWIN__)
#include <windows.h>
#endif

/* global knobs */
static int __g_calibrate_knob;
static int __g_enable_catch_signal_knob;

/* per-worker knobs */
static int __w_bind_to_pu_knob;
static int __w_enable_worker_knob;

static struct starpu_perf_knob_group * __kg_starpu_global;
static struct starpu_perf_knob_group * __kg_starpu_worker__per_worker;

static void global_knobs__set(const struct starpu_perf_knob * const knob, void *context, const struct starpu_perf_knob_value * const value)
{
	/* context is not used for global knobs */
	STARPU_ASSERT(context == NULL);
	(void)context;

	if (knob->id == __g_calibrate_knob)
	{
		_starpu_set_calibrate_flag((unsigned)value->val_int32_t);
	}
	else if (knob->id == __g_enable_catch_signal_knob)
	{
		_starpu_set_catch_signals(!!value->val_int32_t);
	}
	else
	{
		STARPU_ASSERT(0);
		abort();
	}
}

static void global_knobs__get(const struct starpu_perf_knob * const knob, void *context, struct starpu_perf_knob_value * const value)
{
	/* context is not used for global knobs */
	STARPU_ASSERT(context == NULL);
	(void)context;

	if (knob->id == __g_calibrate_knob)
	{
		value->val_int32_t = (int32_t)_starpu_get_calibrate_flag();
	}
	else if (knob->id == __g_enable_catch_signal_knob)
	{
		value->val_int32_t = _starpu_get_catch_signals();
	}
	else
	{
		STARPU_ASSERT(0);
		abort();
	}
}

void worker_knobs__set(const struct starpu_perf_knob * const knob, void *context, const struct starpu_perf_knob_value * const value)
{
	const unsigned workerid = *(unsigned *)context;
	struct _starpu_worker * const worker = _starpu_get_worker_struct(workerid);

	if (knob->id == __w_bind_to_pu_knob)
	{
		STARPU_ASSERT(value->val_int32_t >= 0);
		worker->bindid_requested = value->val_int32_t;
	}
	else if (knob->id == __w_enable_worker_knob)
	{
		worker->enable_knob = !!value->val_int32_t;
	}
	else
	{
		STARPU_ASSERT(0);
		abort();
	}
}

void worker_knobs__get(const struct starpu_perf_knob * const knob, void *context, struct starpu_perf_knob_value * const value)
{
	const unsigned workerid = *(unsigned *)context;
	struct _starpu_worker * const worker = _starpu_get_worker_struct(workerid);

	if (knob->id == __w_bind_to_pu_knob)
	{
		value->val_int32_t = worker->bindid;
	}
	else if (knob->id == __w_enable_worker_knob)
	{
		value->val_int32_t = worker->enable_knob;
	}
	else
	{
		STARPU_ASSERT(0);
		abort();
	}
}

void _starpu__workers_c__register_knobs(void)
{
	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_global;
		__kg_starpu_global = _starpu_perf_knob_group_register(scope, global_knobs__set, global_knobs__get);

		__STARPU_PERF_KNOB_REG("starpu.global", __kg_starpu_global, g_calibrate_knob, int32, "enable or disable performance models calibration (override STARPU_CALIBRATE env var)");
		__STARPU_PERF_KNOB_REG("starpu.global", __kg_starpu_global, g_enable_catch_signal_knob, int32, "enable or disable signal catching (override STARPU_CATCH_SIGNALS env var)");
	}

	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_per_worker;
		__kg_starpu_worker__per_worker = _starpu_perf_knob_group_register(scope, worker_knobs__set, worker_knobs__get);

		__STARPU_PERF_KNOB_REG("starpu.worker", __kg_starpu_worker__per_worker, w_bind_to_pu_knob, int32, "bind worker to PU (PU logical number, override StarPU binding env vars)");
		__STARPU_PERF_KNOB_REG("starpu.worker", __kg_starpu_worker__per_worker, w_enable_worker_knob, int32, "enable assigning task to that worker (1:Enabled | [0:Disabled])");
	}

#if 0
	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_per_scheduler;
		__kg_starpu_worker__per_scheduler = _starpu_perf_knob_group_register(scope, sched_knobs__set, sched_knobs__get);
	}
#endif
}

void _starpu__workers_c__unregister_knobs(void)
{
	_starpu_perf_knob_group_unregister(__kg_starpu_global);
	_starpu_perf_knob_group_unregister(__kg_starpu_worker__per_worker);
	__kg_starpu_global = NULL;
	__kg_starpu_worker__per_worker = NULL;
}
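
/* Illustrative sketch (not compiled): how the per-worker knob callbacks above
 * are meant to be driven once a knob handle has been obtained.  The
 * 'enable_knob_handle' below is a hypothetical handle; real code would obtain
 * it through the starpu_perf_knob query API rather than construct it here. */
#if 0
static void example_disable_worker(const struct starpu_perf_knob *enable_knob_handle)
{
	unsigned workerid = 2;                  /* worker to act upon */
	struct starpu_perf_knob_value v;
	v.val_int32_t = 0;                      /* 0: disabled, 1: enabled */
	/* dispatches to worker->enable_knob through worker_knobs__set() above */
	worker_knobs__set(enable_knob_handle, &workerid, &v);
}
#endif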

/* acquire/release semantic for concurrent initialization/de-initialization */
static starpu_pthread_mutex_t init_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
static starpu_pthread_cond_t init_cond = STARPU_PTHREAD_COND_INITIALIZER;
static int init_count = 0;
static enum initialization initialized = UNINITIALIZED;

int _starpu_keys_initialized STARPU_ATTRIBUTE_INTERNAL;
starpu_pthread_key_t _starpu_worker_key STARPU_ATTRIBUTE_INTERNAL;
starpu_pthread_key_t _starpu_worker_set_key STARPU_ATTRIBUTE_INTERNAL;
struct _starpu_machine_config _starpu_config STARPU_ATTRIBUTE_INTERNAL;

static int check_entire_platform;
int _starpu_worker_parallel_blocks;

/* Pointers to argc and argv
 */
static int *my_argc = 0;
static char ***my_argv = NULL;

void _starpu__workers_c__register_kobs(void)
{
	/* TODO */
}

struct starpu_driver_info starpu_driver_info[STARPU_NARCH];

void starpu_driver_info_register(enum starpu_worker_archtype archtype, const struct starpu_driver_info *info)
{
	starpu_driver_info[archtype] = *info;
}

struct starpu_memory_driver_info starpu_memory_driver_info[STARPU_MAX_RAM+1];

void starpu_memory_driver_info_register(enum starpu_node_kind kind, const struct starpu_memory_driver_info *info)
{
	starpu_memory_driver_info[kind] = *info;
}

/* Initialize value of static argc and argv, called when the process begins
 */
void _starpu_set_argc_argv(int *argc_param, char ***argv_param)
{
	my_argc = argc_param;
	my_argv = argv_param;
}

int *_starpu_get_argc()
{
	return my_argc;
}

char ***_starpu_get_argv()
{
	return my_argv;
}

int starpu_is_initialized(void)
{
	return initialized != UNINITIALIZED;
}

void starpu_wait_initialized(void)
{
	STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
	while (initialized != INITIALIZED)
		STARPU_PTHREAD_COND_WAIT(&init_cond, &init_mutex);
	STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
}
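
/* Illustrative sketch (not compiled): a helper thread can use the two public
 * calls above to wait for another thread to complete starpu_init() before
 * submitting work.  'submit_work' is a hypothetical application function. */
#if 0
static void *helper_thread(void *arg)
{
	(void) arg;
	if (!starpu_is_initialized())
		starpu_wait_initialized();  /* blocks until initialized == INITIALIZED */
	submit_work();                      /* hypothetical: StarPU is usable from here */
	return NULL;
}
#endif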

/* Makes sure that at least one of the workers of type <arch> can execute
 * <task>, for at least one of its implementations. */
static uint32_t _starpu_worker_exists_and_can_execute(struct starpu_task *task,
						      enum starpu_worker_archtype arch)
{
	_starpu_codelet_check_deprecated_fields(task->cl);

	/* make sure there is a worker on the machine able to execute the
	   task, independently of the sched_ctx; the latter may receive the
	   necessary worker later on - the user or the hypervisor should make sure this happens */
	struct _starpu_sched_ctx *sched_ctx = check_entire_platform == 1 ? _starpu_get_initial_sched_ctx() : _starpu_get_sched_ctx_struct(task->sched_ctx);
	struct starpu_worker_collection *workers = sched_ctx->workers;
	struct starpu_sched_ctx_iterator it;

	workers->init_iterator(workers, &it);
	while(workers->has_next(workers, &it))
	{
		int i = workers->get_next(workers, &it);
		if (starpu_worker_get_type(i) != arch)
			continue;

		unsigned impl;
		for (impl = 0; impl < STARPU_MAXIMPLEMENTATIONS; impl++)
		{
			/* We could call task->cl->can_execute(i, task, impl)
			   here, it would definitely work. It is probably
			   cheaper to check whether it is necessary in order to
			   avoid a useless function call, though. */
			unsigned test_implementation = 0;
			switch (arch)
			{
			case STARPU_CPU_WORKER:
				if (task->cl->cpu_funcs[impl] != NULL)
					test_implementation = 1;
				break;
			case STARPU_CUDA_WORKER:
				if (task->cl->cuda_funcs[impl] != NULL)
					test_implementation = 1;
				break;
			case STARPU_OPENCL_WORKER:
				if (task->cl->opencl_funcs[impl] != NULL)
					test_implementation = 1;
				break;
			case STARPU_MIC_WORKER:
				if (task->cl->cpu_funcs_name[impl] != NULL || task->cl->mic_funcs[impl] != NULL)
					test_implementation = 1;
				break;
			case STARPU_MPI_MS_WORKER:
				if (task->cl->cpu_funcs_name[impl] != NULL || task->cl->mpi_ms_funcs[impl] != NULL)
					test_implementation = 1;
				break;
			default:
				STARPU_ABORT();
			}

			if (!test_implementation)
				continue;

			if (task->cl->can_execute)
				return task->cl->can_execute(i, task, impl);

			if(test_implementation)
				return 1;
		}
	}

	return 0;
}

/* in case a task is submitted, we may check whether there exists a worker
   that may execute the task or not */
uint32_t _starpu_worker_exists(struct starpu_task *task)
{
	_starpu_codelet_check_deprecated_fields(task->cl);
	if (task->where == STARPU_NOWHERE)
		return 1;

	/* if the task belongs to the initial context we can
	   check the whole worker mask of the machine;
	   otherwise we should iterate over the workers of the ctx
	   and check whether there exists a worker able to execute the task */
	if(task->sched_ctx == 0)
	{
		if (!(task->where & _starpu_config.worker_mask))
			return 0;

		if (!task->cl->can_execute)
			return 1;
	}

#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
	if ((task->where & STARPU_CPU) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_CPU_WORKER))
		return 1;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	if ((task->where & STARPU_CUDA) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_CUDA_WORKER))
		return 1;
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	if ((task->where & STARPU_OPENCL) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_OPENCL_WORKER))
		return 1;
#endif
#ifdef STARPU_USE_MIC
	if ((task->where & STARPU_MIC) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_MIC_WORKER))
		return 1;
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
	if ((task->where & STARPU_MPI_MS) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_MPI_MS_WORKER))
		return 1;
#endif

	return 0;
}

uint32_t _starpu_can_submit_cuda_task(void)
{
	return STARPU_CUDA & _starpu_config.worker_mask;
}

uint32_t _starpu_can_submit_cpu_task(void)
{
	return STARPU_CPU & _starpu_config.worker_mask;
}

uint32_t _starpu_can_submit_opencl_task(void)
{
	return STARPU_OPENCL & _starpu_config.worker_mask;
}

static inline int _starpu_can_use_nth_implementation(enum starpu_worker_archtype arch, struct starpu_codelet *cl, unsigned nimpl)
{
	switch(arch)
	{
	case STARPU_ANY_WORKER:
	{
		int cpu_func_enabled=1, cuda_func_enabled=1, opencl_func_enabled=1;
		/* TODO: MIC */

#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
		starpu_cpu_func_t cpu_func = _starpu_task_get_cpu_nth_implementation(cl, nimpl);
		cpu_func_enabled = cpu_func != NULL && starpu_cpu_worker_get_count();
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
		starpu_cuda_func_t cuda_func = _starpu_task_get_cuda_nth_implementation(cl, nimpl);
		cuda_func_enabled = cuda_func != NULL && starpu_cuda_worker_get_count();
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
		starpu_opencl_func_t opencl_func = _starpu_task_get_opencl_nth_implementation(cl, nimpl);
		opencl_func_enabled = opencl_func != NULL && starpu_opencl_worker_get_count();
#endif

		return cpu_func_enabled && cuda_func_enabled && opencl_func_enabled;
	}
	case STARPU_CPU_WORKER:
	{
		starpu_cpu_func_t func = _starpu_task_get_cpu_nth_implementation(cl, nimpl);
		return func != NULL;
	}
	case STARPU_CUDA_WORKER:
	{
		starpu_cuda_func_t func = _starpu_task_get_cuda_nth_implementation(cl, nimpl);
		return func != NULL;
	}
	case STARPU_OPENCL_WORKER:
	{
		starpu_opencl_func_t func = _starpu_task_get_opencl_nth_implementation(cl, nimpl);
		return func != NULL;
	}
	case STARPU_MIC_WORKER:
	{
		starpu_mic_func_t func = _starpu_task_get_mic_nth_implementation(cl, nimpl);
		const char *func_name = _starpu_task_get_cpu_name_nth_implementation(cl, nimpl);
		return func != NULL || func_name != NULL;
	}
	case STARPU_MPI_MS_WORKER:
	{
		starpu_mpi_ms_func_t func = _starpu_task_get_mpi_ms_nth_implementation(cl, nimpl);
		const char *func_name = _starpu_task_get_cpu_name_nth_implementation(cl, nimpl);
		return func != NULL || func_name != NULL;
	}
	default:
		STARPU_ASSERT_MSG(0, "Unknown arch type %d", arch);
	}
	return 0;
}

/* Test if this task can be processed on this worker, regardless of the implementation */
/* must be called with sched_mutex locked to protect state_blocked */
static inline int _starpu_can_execute_task_any_impl(unsigned workerid, struct starpu_task *task)
{
	if (!_starpu_config.workers[workerid].enable_knob)
		return 0;

	if (task->workerids_len)
	{
		size_t div = sizeof(*task->workerids) * 8;
		if (workerid / div >= task->workerids_len || ! (task->workerids[workerid / div] & (1UL << workerid % div)))
			return 0;
	}

	/* if the worker is blocked in a parallel ctx don't submit tasks on it */
#ifdef STARPU_DEVEL
#warning FIXME: this is very expensive, while can_execute is supposed to be not very costly so schedulers can call it a lot
#endif
	if(starpu_worker_is_blocked_in_parallel(workerid))
		return 0;

	if (!(task->where & _starpu_config.workers[workerid].worker_mask))
		return 0;

	return 1;
}
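
/* Illustrative sketch (not compiled): the workerids/workerids_len fields
 * tested above form a bitmask restricting which workers may run the task.
 * Assuming the mask words are 32-bit (as sizeof(*task->workerids) is used
 * above to stay generic), allowing only worker 3 would look like this: */
#if 0
	static uint32_t only_worker_3 = 1U << 3;
	struct starpu_task *task = starpu_task_create();
	task->workerids = &only_worker_3;
	task->workerids_len = 1;            /* one mask word */
#endif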

/* must be called with sched_mutex locked to protect state_blocked_in_parallel */
int starpu_worker_can_execute_task(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	/* TODO: check that the task operand sizes will fit on that device */
	return _starpu_can_execute_task_any_impl(workerid, task) &&
		_starpu_can_use_nth_implementation(_starpu_config.workers[workerid].arch, task->cl, nimpl) &&
		(!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl));
}
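
/* Illustrative sketch (not compiled): the can_execute hook consulted above
 * takes (workerid, task, nimpl) and lets a codelet refine the 'where' mask,
 * e.g. to reserve implementation 1 for CUDA workers.  The codelet and the
 * kernel functions below are hypothetical. */
#if 0
static int example_can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	(void) task;
	if (starpu_worker_get_type(workerid) == STARPU_CUDA_WORKER)
		return 1;                   /* CUDA workers may run any implementation */
	return nimpl == 0;                  /* other workers only run implementation 0 */
}

static struct starpu_codelet example_cl =
{
	.where = STARPU_CPU | STARPU_CUDA,
	.can_execute = example_can_execute,
	.cpu_funcs = { example_cpu_kernel },                          /* hypothetical kernels */
	.cuda_funcs = { example_cuda_kernel0, example_cuda_kernel1 },
	.nbuffers = 0,
};
#endif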

/* must be called with sched_mutex locked to protect state_blocked_in_parallel */
int starpu_worker_can_execute_task_impl(unsigned workerid, struct starpu_task *task, unsigned *impl_mask)
{
	if (!_starpu_can_execute_task_any_impl(workerid, task))
		return 0;

	unsigned mask;
	int i;
	enum starpu_worker_archtype arch;
	struct starpu_codelet *cl;

	/* TODO: check that the task operand sizes will fit on that device */
	cl = task->cl;

	mask = 0;
	arch = _starpu_config.workers[workerid].arch;
	if (!task->cl->can_execute)
	{
		for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
			if (_starpu_can_use_nth_implementation(arch, cl, i))
			{
				mask |= 1U << i;
				if (!impl_mask)
					break;
			}
	}
	else
	{
		for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
			if (_starpu_can_use_nth_implementation(arch, cl, i)
			    && (!task->cl->can_execute || task->cl->can_execute(workerid, task, i)))
			{
				mask |= 1U << i;
				if (!impl_mask)
					break;
			}
	}
	if (impl_mask)
		*impl_mask = mask;
	return mask != 0;
}

/* must be called with sched_mutex locked to protect state_blocked */
int starpu_worker_can_execute_task_first_impl(unsigned workerid, struct starpu_task *task, unsigned *nimpl)
{
	if (!_starpu_can_execute_task_any_impl(workerid, task))
		return 0;

	int i;
	enum starpu_worker_archtype arch;
	struct starpu_codelet *cl;

	/* TODO: check that the task operand sizes will fit on that device */
	cl = task->cl;
	arch = _starpu_config.workers[workerid].arch;
	if (!task->cl->can_execute)
	{
		for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
			if (_starpu_can_use_nth_implementation(arch, cl, i))
			{
				if (nimpl)
					*nimpl = i;
				return 1;
			}
	}
	else
	{
		for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
			if (_starpu_can_use_nth_implementation(arch, cl, i)
			    && (task->cl->can_execute(workerid, task, i)))
			{
				if (nimpl)
					*nimpl = i;
				return 1;
			}
	}
	return 0;
}

int starpu_combined_worker_can_execute_task(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	/* TODO: check that the task operand sizes will fit on that device */
	struct starpu_codelet *cl = task->cl;
	unsigned nworkers = _starpu_config.topology.nworkers;

	/* Is this a parallel worker ? */
	if (workerid < nworkers)
	{
		if (!_starpu_config.workers[workerid].enable_knob)
			return 0;

		return !!((task->where & _starpu_config.workers[workerid].worker_mask) &&
			  _starpu_can_use_nth_implementation(_starpu_config.workers[workerid].arch, task->cl, nimpl) &&
			  (!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl)));
	}
	else
	{
		if (cl->type == STARPU_SPMD
#ifdef STARPU_HAVE_HWLOC
		    || cl->type == STARPU_FORKJOIN
#else
#ifdef __GLIBC__
		    || cl->type == STARPU_FORKJOIN
#endif
#endif
		    )
		{
			/* TODO we should add other types of constraints */

			/* Is the worker larger than requested ? */
			int worker_size = (int)_starpu_config.combined_workers[workerid - nworkers].worker_size;
			int worker0 = _starpu_config.combined_workers[workerid - nworkers].combined_workerid[0];
			return !!((worker_size <= task->cl->max_parallelism) &&
				  _starpu_can_use_nth_implementation(_starpu_config.workers[worker0].arch, task->cl, nimpl) &&
				  (!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl)));
		}
		else
		{
			/* We have a sequential task but a parallel worker */
			return 0;
		}
	}
}
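
/* Illustrative sketch (not compiled): combined workers only accept parallel
 * codelets, i.e. type STARPU_SPMD or STARPU_FORKJOIN with a sufficient
 * max_parallelism, as checked above.  The kernel below is hypothetical and
 * queries its team with the public combined-worker helpers. */
#if 0
static void example_spmd_kernel(void *buffers[], void *cl_arg)
{
	(void) buffers; (void) cl_arg;
	int rank = starpu_combined_worker_get_rank();
	int size = starpu_combined_worker_get_size();
	/* ... split the work among 'size' workers according to 'rank' ... */
}

static struct starpu_codelet example_parallel_cl =
{
	.where = STARPU_CPU,
	.type = STARPU_SPMD,
	.max_parallelism = INT_MAX,         /* accept any combined worker size */
	.cpu_funcs = { example_spmd_kernel },
	.nbuffers = 0,
};
#endif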

/*
 * Runtime initialization methods
 */

static void _starpu_init_worker_queue(struct _starpu_worker *worker)
{
	_starpu_memory_node_register_condition(worker, &worker->sched_cond, worker->memory_node);
}

/*
 * Returns 0 if the given driver is one of the drivers that must be launched by
 * the application itself, and not by StarPU, 1 otherwise.
 */
static unsigned _starpu_may_launch_driver(struct starpu_conf *conf,
					  struct starpu_driver *d)
{
	if (conf->n_not_launched_drivers == 0 || conf->not_launched_drivers == NULL)
		return 1;

	/* Is <d> in conf->not_launched_drivers ? */
	unsigned i;
	for (i = 0; i < conf->n_not_launched_drivers; i++)
	{
		if (d->type != conf->not_launched_drivers[i].type)
			continue;

		switch (d->type)
		{
		case STARPU_CPU_WORKER:
			if (d->id.cpu_id == conf->not_launched_drivers[i].id.cpu_id)
				return 0;
			break;
		case STARPU_CUDA_WORKER:
			if (d->id.cuda_id == conf->not_launched_drivers[i].id.cuda_id)
				return 0;
			break;
#ifdef STARPU_USE_OPENCL
		case STARPU_OPENCL_WORKER:
			if (d->id.opencl_id == conf->not_launched_drivers[i].id.opencl_id)
				return 0;
			break;
#endif
		default:
			STARPU_ABORT();
		}
	}
	return 1;
}
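
/* Illustrative sketch (not compiled): declaring a driver as "not launched"
 * makes the function above return 0 for it, so StarPU does not spawn its
 * thread and the application runs it itself, e.g. with starpu_driver_run(). */
#if 0
	struct starpu_conf conf;
	struct starpu_driver d = { .type = STARPU_CPU_WORKER, .id.cpu_id = 0 };
	starpu_conf_init(&conf);
	conf.not_launched_drivers = &d;
	conf.n_not_launched_drivers = 1;
	starpu_init(&conf);
	starpu_driver_run(&d);              /* application-driven worker loop */
#endif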

#ifdef STARPU_PERF_DEBUG
struct itimerval prof_itimer;
#endif

void _starpu_worker_init(struct _starpu_worker *workerarg, struct _starpu_machine_config *pconfig)
{
	workerarg->config = pconfig;
	STARPU_PTHREAD_MUTEX_INIT(&workerarg->mutex, NULL);
	/* arch initialized by topology.c */
	/* worker_mask initialized by topology.c */
	/* perf_arch initialized by topology.c */
	/* worker_thread initialized by _starpu_launch_drivers */
	/* devid initialized by topology.c */
	/* subworkerid initialized by topology.c */
	/* bindid initialized by topology.c */
	/* workerid initialized by topology.c */
	workerarg->combined_workerid = workerarg->workerid;
	workerarg->current_rank = 0;
	workerarg->worker_size = 1;
	STARPU_PTHREAD_COND_INIT(&workerarg->started_cond, NULL);
	STARPU_PTHREAD_COND_INIT(&workerarg->ready_cond, NULL);
	/* memory_node initialized by topology.c */
	STARPU_PTHREAD_COND_INIT(&workerarg->sched_cond, NULL);
	STARPU_PTHREAD_MUTEX_INIT(&workerarg->sched_mutex, NULL);
	starpu_task_list_init(&workerarg->local_tasks);
	_starpu_ctx_change_list_init(&workerarg->ctx_change_list);
	workerarg->local_ordered_tasks = NULL;
	workerarg->local_ordered_tasks_size = 0;
	workerarg->current_ordered_task = 0;
	workerarg->current_ordered_task_order = 1;
	workerarg->current_task = NULL;
#ifdef STARPU_SIMGRID
	starpu_pthread_wait_init(&workerarg->wait);
	starpu_pthread_queue_register(&workerarg->wait, &_starpu_simgrid_task_queue[workerarg->workerid]);
#endif
	workerarg->task_transferring = NULL;
	workerarg->nb_buffers_transferred = 0;
	workerarg->nb_buffers_totransfer = 0;
	workerarg->first_task = 0;
	workerarg->ntasks = 0;
	/* set initialized by topology.c */
	workerarg->pipeline_length = 0;
	workerarg->pipeline_stuck = 0;
	workerarg->worker_is_running = 0;
	workerarg->worker_is_initialized = 0;
	workerarg->wait_for_worker_initialization = 0;
	workerarg->status = STATUS_INITIALIZING;
	workerarg->state_keep_awake = 0;
	/* name initialized by driver */
	/* short_name initialized by driver */
	workerarg->run_by_starpu = 1;
	workerarg->driver_ops = NULL;
	workerarg->sched_ctx_list = NULL;
	workerarg->tmp_sched_ctx = -1;
	workerarg->nsched_ctxs = 0;
	_starpu_barrier_counter_init(&workerarg->tasks_barrier, 0);
	workerarg->has_prev_init = 0;

	int ctx;
	for(ctx = 0; ctx < STARPU_NMAX_SCHED_CTXS; ctx++)
		workerarg->removed_from_ctx[ctx] = 0;

	workerarg->spinning_backoff = 1;

	for(ctx = 0; ctx < STARPU_NMAX_SCHED_CTXS; ctx++)
	{
		workerarg->shares_tasks_lists[ctx] = 0;
		workerarg->poped_in_ctx[ctx] = 0;
	}
	workerarg->reverse_phase[0] = 0;
	workerarg->reverse_phase[1] = 0;
	workerarg->pop_ctx_priority = 1;
	workerarg->is_slave_somewhere = 0;

	workerarg->state_relax_refcnt = 1;
#ifdef STARPU_SPINLOCK_CHECK
	workerarg->relax_on_file = __FILE__;
	workerarg->relax_on_line = __LINE__;
	workerarg->relax_on_func = __starpu_func__;
	workerarg->relax_off_file = NULL;
	workerarg->relax_off_line = 0;
	workerarg->relax_off_func = NULL;
#endif
	workerarg->state_sched_op_pending = 0;
	workerarg->state_changing_ctx_waiting = 0;
	workerarg->state_changing_ctx_notice = 0;
	workerarg->state_blocked_in_parallel_observed = 0;
	workerarg->state_blocked_in_parallel = 0;
	workerarg->state_block_in_parallel_req = 0;
	workerarg->state_block_in_parallel_ack = 0;
	workerarg->state_unblock_in_parallel_req = 0;
	workerarg->state_unblock_in_parallel_ack = 0;
	workerarg->block_in_parallel_ref_count = 0;
	_starpu_perf_counter_sample_init(&workerarg->perf_counter_sample, starpu_perf_counter_scope_per_worker);
	workerarg->enable_knob = 1;
	workerarg->bindid_requested = -1;

	/* cpu_set/hwloc_cpu_set/hwloc_obj initialized in topology.c */
}

static void _starpu_worker_deinit(struct _starpu_worker *workerarg)
{
	(void) workerarg;
#ifdef STARPU_SIMGRID
	starpu_pthread_queue_unregister(&workerarg->wait, &_starpu_simgrid_task_queue[workerarg->workerid]);
	starpu_pthread_wait_destroy(&workerarg->wait);
#endif
	_starpu_perf_counter_sample_exit(&workerarg->perf_counter_sample);
}

#ifdef STARPU_USE_FXT
void _starpu_worker_start(struct _starpu_worker *worker, enum starpu_worker_archtype archtype, unsigned sync)
{
	unsigned devid = worker->devid;
	unsigned memnode = worker->memory_node;
	_STARPU_TRACE_WORKER_INIT_START(archtype, worker->workerid, devid, memnode, worker->bindid, sync);
}
#endif

void _starpu_driver_start(struct _starpu_worker *worker, enum starpu_worker_archtype archtype, unsigned sync STARPU_ATTRIBUTE_UNUSED)
{
	(void) archtype;
	int devid = worker->devid;
	(void) devid;

#ifdef STARPU_USE_FXT
	_STARPU_TRACE_REGISTER_THREAD(worker->bindid);
	_starpu_worker_start(worker, archtype, sync);
#endif
	_starpu_set_local_worker_key(worker);

	STARPU_PTHREAD_MUTEX_LOCK(&worker->mutex);
	worker->worker_is_running = 1;
	STARPU_PTHREAD_COND_SIGNAL(&worker->started_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&worker->mutex);

	_starpu_bind_thread_on_cpu(worker->bindid, worker->workerid, NULL);

#if defined(STARPU_PERF_DEBUG) && !defined(STARPU_SIMGRID)
	setitimer(ITIMER_PROF, &prof_itimer, NULL);
#endif

	_STARPU_DEBUG("worker %p %d for dev %d is ready on logical cpu %d\n", worker, worker->workerid, devid, worker->bindid);
#ifdef STARPU_HAVE_HWLOC
	_STARPU_DEBUG("worker %p %d cpuset start at %d\n", worker, worker->workerid, hwloc_bitmap_first(worker->hwloc_cpu_set));
#endif
}

static void _starpu_launch_drivers(struct _starpu_machine_config *pconfig)
{
	pconfig->running = 1;
	pconfig->pause_depth = 0;
	pconfig->submitting = 1;
	STARPU_HG_DISABLE_CHECKING(pconfig->watchdog_ok);

	unsigned nworkers = pconfig->topology.nworkers;
	unsigned worker;

#if defined(STARPU_PERF_DEBUG) && !defined(STARPU_SIMGRID)
	/* Get itimer of the main thread, to set it for the worker threads */
	getitimer(ITIMER_PROF, &prof_itimer);
#endif

	STARPU_AYU_INIT();

	/* Launch workers asynchronously */
	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_worker *workerarg = &pconfig->workers[worker];
		unsigned devid = workerarg->devid;
		workerarg->wait_for_worker_initialization = 0;

		_STARPU_DEBUG("initialising worker %u/%u\n", worker, nworkers);

		_starpu_init_worker_queue(workerarg);

		struct starpu_driver driver;
		driver.type = workerarg->arch;
		switch (workerarg->arch)
		{
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
		case STARPU_CPU_WORKER:
		{
			driver.id.cpu_id = devid;
			workerarg->driver_ops = &_starpu_driver_cpu_ops;
			workerarg->wait_for_worker_initialization = 1;

			if (_starpu_may_launch_driver(&pconfig->conf, &driver))
			{
				STARPU_PTHREAD_CREATE_ON(
					"CPU",
					&workerarg->worker_thread,
					NULL,
					_starpu_cpu_worker,
					workerarg,
					_starpu_simgrid_get_host_by_worker(workerarg));
			}
			else
			{
				workerarg->run_by_starpu = 0;
			}
			break;
		}
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
		case STARPU_CUDA_WORKER:
		{
			driver.id.cuda_id = devid;
			workerarg->driver_ops = &_starpu_driver_cuda_ops;
			struct _starpu_worker_set *worker_set = workerarg->set;

			if (worker_set->workers != workerarg)
				/* We are not the first worker of the
				 * set, don't start a thread for it. */
				break;

			worker_set->set_is_initialized = 0;
			worker_set->wait_for_set_initialization = 1;
			workerarg->wait_for_worker_initialization = 0;

			if (_starpu_may_launch_driver(&pconfig->conf, &driver))
			{
				STARPU_PTHREAD_CREATE_ON(
					"CUDA",
					&worker_set->worker_thread,
					NULL,
					_starpu_cuda_worker,
					worker_set,
					_starpu_simgrid_get_host_by_worker(workerarg));
			}
			else
			{
				workerarg->run_by_starpu = 0;
			}
			break;
		}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
		case STARPU_OPENCL_WORKER:
		{
#ifndef STARPU_SIMGRID
			starpu_opencl_get_device(devid, &driver.id.opencl_id);
			workerarg->driver_ops = &_starpu_driver_opencl_ops;
			workerarg->wait_for_worker_initialization = 1;

			if (_starpu_may_launch_driver(&pconfig->conf, &driver))
			{
				STARPU_PTHREAD_CREATE_ON(
					"OpenCL",
					&workerarg->worker_thread,
					NULL,
					_starpu_opencl_worker,
					workerarg,
					_starpu_simgrid_get_host_by_worker(workerarg));
			}
			else
			{
				workerarg->run_by_starpu = 0;
			}
#endif
			break;
		}
#endif
#ifdef STARPU_USE_MIC
		case STARPU_MIC_WORKER:
		{
			/* We spawn only one thread
			 * per MIC device, which will control all MIC
			 * workers of this device. (by using a worker set). */
			struct _starpu_worker_set *worker_set = workerarg->set;

			if (worker_set->workers != workerarg)
				break;

			worker_set->set_is_initialized = 0;
			worker_set->wait_for_set_initialization = 1;
			workerarg->wait_for_worker_initialization = 0;

			STARPU_PTHREAD_CREATE_ON(
				"MIC",
				&worker_set->worker_thread,
				NULL,
				_starpu_mic_src_worker,
				worker_set,
				_starpu_simgrid_get_host_by_worker(workerarg));
			break;
		}
#endif /* STARPU_USE_MIC */
#ifdef STARPU_USE_MPI_MASTER_SLAVE
		case STARPU_MPI_MS_WORKER:
		{
			/* We spawn only one thread
			 * per MPI device, which will control all MPI
			 * workers of this device. (by using a worker set). */
			struct _starpu_worker_set *worker_set = workerarg->set;

			if (worker_set->workers != workerarg)
				break;

			worker_set->set_is_initialized = 0;
			worker_set->wait_for_set_initialization = 1;
			workerarg->wait_for_worker_initialization = 0;

#ifdef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
			/* if MPI has multiple-thread support,
			 * we launch one thread per device;
			 * otherwise we launch one thread for all devices
			 */
			STARPU_PTHREAD_CREATE_ON(
				"MPI MS",
				&worker_set->worker_thread,
				NULL,
				_starpu_mpi_src_worker,
				worker_set,
				_starpu_simgrid_get_host_by_worker(workerarg));
#endif /* STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD */
			break;
		}
#endif /* STARPU_USE_MPI_MASTER_SLAVE */
		default:
			STARPU_ABORT();
		}

#ifdef STARPU_USE_FXT
		/* In tracing mode, make sure the thread is really started
		 * before starting another one, to make sure they appear in
		 * order in the trace.
		 */
		if ((!workerarg->set || workerarg->set->workers == workerarg)
		    && workerarg->run_by_starpu == 1 && workerarg->arch != STARPU_MPI_MS_WORKER)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
			while (!workerarg->worker_is_running)
				STARPU_PTHREAD_COND_WAIT(&workerarg->started_cond, &workerarg->mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
		}
#endif
	}
#if defined(STARPU_USE_MPI_MASTER_SLAVE) && !defined(STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD)
	if (pconfig->topology.ndevices[STARPU_MPI_MS_WORKER] > 0)
	{
		struct _starpu_worker_set * worker_set_zero = &mpi_worker_set[0];
		struct _starpu_worker * worker_zero = &worker_set_zero->workers[0];

		STARPU_PTHREAD_CREATE_ON(
			"zero",
			&worker_set_zero->worker_thread,
			NULL,
			_starpu_mpi_src_worker,
			&mpi_worker_set,
			_starpu_simgrid_get_host_by_worker(worker_zero));

		/* We use the first worker to know whether everything is finished */
#ifdef STARPU_USE_FXT
		STARPU_PTHREAD_MUTEX_LOCK(&worker_zero->mutex);
		while (!worker_zero->worker_is_running)
			STARPU_PTHREAD_COND_WAIT(&worker_zero->started_cond, &worker_zero->mutex);
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_zero->mutex);
#endif

		STARPU_PTHREAD_MUTEX_LOCK(&worker_set_zero->mutex);
		while (!worker_set_zero->set_is_initialized)
			STARPU_PTHREAD_COND_WAIT(&worker_set_zero->ready_cond,
						 &worker_set_zero->mutex);
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_set_zero->mutex);

		worker_set_zero->started = 1;
		worker_set_zero->worker_thread = mpi_worker_set[0].worker_thread;
	}
#endif

	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_worker *workerarg = &pconfig->workers[worker];

		_STARPU_DEBUG("waiting for worker %u initialization\n", worker);
		if (!workerarg->run_by_starpu)
			break;

		struct _starpu_worker_set *worker_set = workerarg->set;

		if (worker_set && worker_set->wait_for_set_initialization == 1)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&worker_set->mutex);
			while (!worker_set->set_is_initialized)
				STARPU_PTHREAD_COND_WAIT(&worker_set->ready_cond,
							 &worker_set->mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker_set->mutex);
			worker_set->started = 1;
			worker_set->wait_for_set_initialization = 0;
		}
		else if (workerarg->wait_for_worker_initialization == 1)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
			while (!workerarg->worker_is_initialized)
				STARPU_PTHREAD_COND_WAIT(&workerarg->ready_cond, &workerarg->mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
			workerarg->wait_for_worker_initialization = 0;
		}
	}

	_STARPU_DEBUG("finished launching drivers\n");
}

/* Initialize the starpu_conf with default values */
int starpu_conf_init(struct starpu_conf *conf)
{
	if (!conf)
		return -EINVAL;

	memset(conf, 0, sizeof(*conf));
	conf->magic = 42;
	conf->will_use_mpi = 0;
	conf->sched_policy_name = starpu_getenv("STARPU_SCHED");
	conf->sched_policy = NULL;
	conf->global_sched_ctx_min_priority = starpu_get_env_number("STARPU_MIN_PRIO");
	conf->global_sched_ctx_max_priority = starpu_get_env_number("STARPU_MAX_PRIO");
	conf->catch_signals = starpu_get_env_number_default("STARPU_CATCH_SIGNALS", 1);

	/* Note that starpu_get_env_number returns -1 in case the variable is
	 * not defined */
	/* Backward compatibility: check the value of STARPU_NCPUS if
	 * STARPU_NCPU is not set. */
	conf->ncpus = starpu_get_env_number("STARPU_NCPU");
	if (conf->ncpus == -1)
		conf->ncpus = starpu_get_env_number("STARPU_NCPUS");
	conf->reserve_ncpus = starpu_get_env_number("STARPU_RESERVE_NCPU");
	int main_thread_bind = starpu_get_env_number_default("STARPU_MAIN_THREAD_BIND", 0);
	if (main_thread_bind)
		conf->reserve_ncpus++;
	conf->ncuda = starpu_get_env_number("STARPU_NCUDA");
	conf->nopencl = starpu_get_env_number("STARPU_NOPENCL");
	conf->nmic = starpu_get_env_number("STARPU_NMIC");
	conf->nmpi_ms = starpu_get_env_number("STARPU_NMPI_MS");
	conf->calibrate = starpu_get_env_number("STARPU_CALIBRATE");
	conf->bus_calibrate = starpu_get_env_number("STARPU_BUS_CALIBRATE");
	conf->mic_sink_program_path = starpu_getenv("STARPU_MIC_PROGRAM_PATH");

	if (conf->calibrate == -1)
		conf->calibrate = 0;

	if (conf->bus_calibrate == -1)
		conf->bus_calibrate = 0;

	conf->use_explicit_workers_bindid = 0; /* TODO */
	conf->use_explicit_workers_cuda_gpuid = 0; /* TODO */
	conf->use_explicit_workers_opencl_gpuid = 0; /* TODO */
	conf->use_explicit_workers_mic_deviceid = 0; /* TODO */
	conf->use_explicit_workers_mpi_ms_deviceid = 0; /* TODO */

	conf->single_combined_worker = starpu_get_env_number("STARPU_SINGLE_COMBINED_WORKER");
	if (conf->single_combined_worker == -1)
		conf->single_combined_worker = 0;

#if defined(STARPU_DISABLE_ASYNCHRONOUS_COPY)
	conf->disable_asynchronous_copy = 1;
#else
	conf->disable_asynchronous_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_COPY");
	if (conf->disable_asynchronous_copy == -1)
		conf->disable_asynchronous_copy = 0;
#endif

#if defined(STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY)
	conf->disable_asynchronous_cuda_copy = 1;
#else
	conf->disable_asynchronous_cuda_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY");
	if (conf->disable_asynchronous_cuda_copy == -1)
		conf->disable_asynchronous_cuda_copy = 0;
#endif

#if defined(STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY)
	conf->disable_asynchronous_opencl_copy = 1;
#else
	conf->disable_asynchronous_opencl_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY");
	if (conf->disable_asynchronous_opencl_copy == -1)
		conf->disable_asynchronous_opencl_copy = 0;
#endif

#if defined(STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY)
	conf->disable_asynchronous_mic_copy = 1;
#else
	conf->disable_asynchronous_mic_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY");
	if (conf->disable_asynchronous_mic_copy == -1)
		conf->disable_asynchronous_mic_copy = 0;
#endif

#if defined(STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY)
	conf->disable_asynchronous_mpi_ms_copy = 1;
#else
	conf->disable_asynchronous_mpi_ms_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY");
	if(conf->disable_asynchronous_mpi_ms_copy == -1)
		conf->disable_asynchronous_mpi_ms_copy = 0;
#endif

	/* 64MiB by default */
	conf->trace_buffer_size = ((uint64_t) starpu_get_env_number_default("STARPU_TRACE_BUFFER_SIZE", 64)) << 20;

	conf->driver_spinning_backoff_min = (unsigned) starpu_get_env_number_default("STARPU_BACKOFF_MIN", 1);
	conf->driver_spinning_backoff_max = (unsigned) starpu_get_env_number_default("STARPU_BACKOFF_MAX", 32);

	/* Do not start performance counter collection by default */
	conf->start_perf_counter_collection = 0;

	return 0;
}
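
/* Illustrative sketch (not compiled): typical application-side use of
 * starpu_conf_init(), overriding a few of the defaults set above before
 * calling starpu_init(). */
#if 0
	struct starpu_conf conf;
	int ret;
	starpu_conf_init(&conf);
	conf.ncpus = 4;                     /* limit to 4 CPU workers */
	conf.calibrate = 1;                 /* force performance-model calibration */
	conf.sched_policy_name = "dmda";
	ret = starpu_init(&conf);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
#endif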

int starpu_conf_noworker(struct starpu_conf *conf)
{
	conf->ncpus = 0;
	conf->ncuda = 0;
	conf->nopencl = 0;
	conf->nmic = 0;
	conf->nmpi_ms = 0;
	return 0;
}

static void _starpu_conf_set_value_against_environment(char *name, int *value, int precedence_over_env)
{
	if (precedence_over_env == 0)
	{
		int number;
		number = starpu_get_env_number(name);
		if (number != -1)
		{
			*value = number;
		}
	}
}
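
/* Illustrative sketch (not compiled): with precedence_over_env == 0 a defined
 * environment variable overrides the field, otherwise the field is kept. */
#if 0
	int ncpus = 4;                                                          /* value from the conf structure */
	_starpu_conf_set_value_against_environment("STARPU_NCPU", &ncpus, 0);  /* STARPU_NCPU=2 => ncpus becomes 2 */
	_starpu_conf_set_value_against_environment("STARPU_NCPU", &ncpus, 1);  /* conf has precedence => ncpus stays 4 */
#endif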

void _starpu_conf_check_environment(struct starpu_conf *conf)
{
	char *sched = starpu_getenv("STARPU_SCHED");
	if (sched)
	{
		conf->sched_policy_name = sched;
	}

	_starpu_conf_set_value_against_environment("STARPU_NCPUS", &conf->ncpus, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_NCPU", &conf->ncpus, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_RESERVE_NCPU", &conf->reserve_ncpus, conf->precedence_over_environment_variables);
	int main_thread_bind = starpu_get_env_number_default("STARPU_MAIN_THREAD_BIND", 0);
	if (main_thread_bind)
		conf->reserve_ncpus++;
	_starpu_conf_set_value_against_environment("STARPU_NCUDA", &conf->ncuda, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_NOPENCL", &conf->nopencl, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_CALIBRATE", &conf->calibrate, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_BUS_CALIBRATE", &conf->bus_calibrate, conf->precedence_over_environment_variables);
#ifdef STARPU_SIMGRID
	if (conf->calibrate == 2)
	{
		_STARPU_DISP("Warning: History will be cleared due to calibrate or STARPU_CALIBRATE being set to 2. This will prevent simgrid from having task simulation times!");
	}
	if (conf->bus_calibrate)
	{
		_STARPU_DISP("Warning: Bus calibration will be cleared due to bus_calibrate or STARPU_BUS_CALIBRATE being set. This will prevent simgrid from having data transfer simulation times!");
	}
#endif
	_starpu_conf_set_value_against_environment("STARPU_SINGLE_COMBINED_WORKER", &conf->single_combined_worker, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_COPY", &conf->disable_asynchronous_copy, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY", &conf->disable_asynchronous_cuda_copy, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY", &conf->disable_asynchronous_opencl_copy, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY", &conf->disable_asynchronous_mic_copy, conf->precedence_over_environment_variables);
	_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY", &conf->disable_asynchronous_mpi_ms_copy, conf->precedence_over_environment_variables);
}

struct starpu_tree* starpu_workers_get_tree(void)
{
	return _starpu_config.topology.tree;
}

#if HWLOC_API_VERSION >= 0x20000
#define NORMAL_CHILD(obj) 1
#else
#define NORMAL_CHILD(obj) ((obj)->type < HWLOC_OBJ_BRIDGE)
#endif

#ifdef STARPU_HAVE_HWLOC
static void _fill_tree(struct starpu_tree *tree, hwloc_obj_t curr_obj, unsigned depth, hwloc_topology_t topology, struct starpu_tree *father)
{
	unsigned i, j;
	unsigned arity;
#if HWLOC_API_VERSION >= 0x20000
	arity = curr_obj->arity;
#else
	arity = 0;
	for(i = 0; i < curr_obj->arity; i++)
	{
		if (!NORMAL_CHILD(curr_obj->children[i]))
			/* I/O stuff, stop caring */
			break;
		arity++;
	}
#endif

	if (arity == 1)
	{
		/* Nothing interesting here, skip this level */
		_fill_tree(tree, curr_obj->children[0], depth+1, topology, father);
		return;
	}

	starpu_tree_insert(tree, curr_obj->logical_index, depth, curr_obj->type == HWLOC_OBJ_PU, arity, father);
	starpu_tree_prepare_children(arity, tree);
	j = 0;
	for(i = 0; i < arity; i++)
	{
		hwloc_obj_t child = curr_obj->children[i];
		if (!NORMAL_CHILD(child))
			/* I/O stuff, stop caring (shouldn't happen, though) */
			break;
#if 0
		char string[128];
		hwloc_obj_snprintf(string, sizeof(string), topology, child, "#", 0);
		printf("%*s%s %d is_pu %d \n", 0, "", string, child->logical_index, child->type == HWLOC_OBJ_PU);
#endif
		_fill_tree(&tree->nodes[j], child, depth+1, topology, tree);
		j++;
	}
}
#endif
  1132. static void _starpu_build_tree(void)
  1133. {
  1134. #ifdef STARPU_HAVE_HWLOC
  1135. struct starpu_tree *tree;
  1136. _STARPU_MALLOC(tree, sizeof(struct starpu_tree));
  1137. _starpu_config.topology.tree = tree;
  1138. hwloc_obj_t root = hwloc_get_root_obj(_starpu_config.topology.hwtopology);
  1139. #if 0
  1140. char string[128];
  1141. hwloc_obj_snprintf(string, sizeof(string), topology, root, "#", 0);
  1142. printf("%*s%s %d is_pu = %d \n", 0, "", string, root->logical_index, root->type == HWLOC_OBJ_PU);
  1143. #endif
  1144. /* level, is_pu, is in the tree (it will be true only after add) */
  1145. _fill_tree(tree, root, 0, _starpu_config.topology.hwtopology, NULL);
  1146. #endif
  1147. }
  1148. static starpu_pthread_mutex_t sig_handlers_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
  1149. static void (*act_sigint)(int);
  1150. static void (*act_sigsegv)(int);
  1151. static void (*act_sigtrap)(int);
  1152. void _starpu_handler(int sig)
  1153. {
  1154. #ifdef STARPU_VERBOSE
  1155. _STARPU_MSG("Catching signal '%d'\n", sig);
  1156. #endif
  1157. #ifdef STARPU_USE_FXT
  1158. _starpu_fxt_dump_file();
  1159. #endif
  1160. if (sig == SIGINT)
  1161. {
  1162. void (*sig_act)(int) = act_sigint;
  1163. if (sig_act == NULL)
  1164. sig_act = SIG_DFL;
  1165. signal(SIGINT, sig_act);
  1166. }
  1167. if (sig == SIGSEGV)
  1168. {
  1169. void (*sig_act)(int) = act_sigsegv;
  1170. if (sig_act == NULL)
  1171. sig_act = SIG_DFL;
  1172. signal(SIGSEGV, sig_act);
  1173. }
  1174. #ifdef SIGTRAP
  1175. if (sig == SIGTRAP)
  1176. {
  1177. void (*sig_act)(int) = act_sigtrap;
  1178. if (sig_act == NULL)
  1179. sig_act = SIG_DFL;
  1180. signal(SIGTRAP, sig_act);
  1181. }
  1182. #endif
  1183. #ifdef STARPU_VERBOSE
  1184. _STARPU_MSG("Rearming signal '%d'\n", sig);
  1185. #endif
  1186. raise(sig);
  1187. }
  1188. void _starpu_catch_signals(void)
  1189. {
  1190. if (_starpu_config.conf.catch_signals == 1)
  1191. {
  1192. static void (*old_sig_act)(int);
  1193. old_sig_act = signal(SIGINT, _starpu_handler);
  1194. if (old_sig_act != _starpu_handler)
  1195. act_sigint = old_sig_act;
  1196. old_sig_act = signal(SIGSEGV, _starpu_handler);
  1197. if (old_sig_act != _starpu_handler)
  1198. act_sigsegv = old_sig_act;
  1199. #ifdef SIGTRAP
  1200. old_sig_act = signal(SIGTRAP, _starpu_handler);
  1201. if (old_sig_act != _starpu_handler)
  1202. act_sigtrap = old_sig_act;
  1203. #endif
  1204. }
  1205. else
  1206. {
  1207. if (act_sigint != NULL)
  1208. {
  1209. signal(SIGINT, act_sigint);
  1210. act_sigint = NULL;
  1211. }
  1212. if (act_sigsegv != NULL)
  1213. {
  1214. signal(SIGSEGV, act_sigsegv);
  1215. act_sigsegv = NULL;
  1216. }
  1217. #ifdef SIGTRAP
  1218. if (act_sigtrap != NULL)
  1219. {
  1220. signal(SIGTRAP, act_sigtrap);
  1221. act_sigtrap = NULL;
  1222. }
  1223. #endif
  1224. }
  1225. }
  1226. void _starpu_set_catch_signals(int do_catch_signal)
  1227. {
  1228. STARPU_PTHREAD_MUTEX_LOCK(&sig_handlers_mutex);
  1229. _starpu_config.conf.catch_signals = do_catch_signal;
  1230. _starpu_catch_signals();
  1231. STARPU_PTHREAD_MUTEX_UNLOCK(&sig_handlers_mutex);
  1232. }
  1233. int _starpu_get_catch_signals(void)
  1234. {
  1235. return _starpu_config.conf.catch_signals;
  1236. }
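/* Sketch: an application that installs its own SIGINT/SIGSEGV handlers can
 * keep them by clearing catch_signals in its starpu_conf before starpu_init()
 * (or by setting STARPU_CATCH_SIGNALS=0 in the environment): */
#if 0
struct starpu_conf conf;
starpu_conf_init(&conf);
conf.catch_signals = 0;	/* leave the application's handlers untouched */
starpu_init(&conf);
#endif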
  1237. void starpu_drivers_preinit(void)
  1238. {
  1239. _starpu_cpu_preinit();
  1240. _starpu_cuda_preinit();
  1241. _starpu_opencl_preinit();
  1242. _starpu_mic_preinit();
  1243. _starpu_mpi_ms_preinit();
  1244. _starpu_disk_preinit();
  1245. }
  1246. int starpu_init(struct starpu_conf *user_conf)
  1247. {
  1248. return starpu_initialize(user_conf, NULL, NULL);
  1249. }
  1250. int starpu_initialize(struct starpu_conf *user_conf, int *argc, char ***argv)
  1251. {
  1252. int is_a_sink = 0; /* Always defined. If the MP infrastructure is not
  1253. * used, we cannot be a sink. */
  1254. unsigned worker;
  1255. (void)argc;
  1256. (void)argv;
  1257. /* This initializes _starpu_silent, thus needs to be early */
  1258. _starpu_util_init();
  1259. STARPU_HG_DISABLE_CHECKING(_starpu_worker_parallel_blocks);
  1260. #ifdef STARPU_SIMGRID
  1261. /* This initializes the simgrid thread library, thus needs to be early */
  1262. _starpu_simgrid_init_early(argc, argv);
  1263. #endif
  1264. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1265. while (initialized == CHANGING)
  1266. /* Wait for the other one changing it */
  1267. STARPU_PTHREAD_COND_WAIT(&init_cond, &init_mutex);
  1268. init_count++;
  1269. if (initialized == INITIALIZED)
  1270. {
1271. /* Somebody else initialized it, don't do it again, and let the others get the mutex */
  1272. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1273. return 0;
  1274. }
  1275. /* initialized == UNINITIALIZED */
  1276. initialized = CHANGING;
  1277. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1278. #ifdef STARPU_USE_MP
  1279. _starpu_set_argc_argv(argc, argv);
  1280. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1281. if (_starpu_mpi_common_mp_init() == -ENODEV)
  1282. {
  1283. initialized = UNINITIALIZED;
  1284. return -ENODEV;
  1285. }
1286. /* In the MPI case we look at the rank to know whether we are a sink */
  1287. if (!_starpu_mpi_common_is_src_node())
  1288. setenv("STARPU_SINK", "STARPU_MPI_MS", 1);
  1289. # endif
1290. /* If StarPU was configured to use MP sinks, we have to check which
1291. * kind of node we are running on: host or sink? */
  1292. if (starpu_getenv("STARPU_SINK"))
  1293. is_a_sink = 1;
  1294. #endif /* STARPU_USE_MP */
  1295. int ret;
  1296. #ifdef STARPU_OPENMP
  1297. _starpu_omp_dummy_init();
  1298. #endif
  1299. #ifdef STARPU_SIMGRID
1300. /* Warn when the many stacks malloc()ed by simgrid for the transfer
1301. * processes will take a long time to get initialized */
  1302. char *perturb = starpu_getenv("MALLOC_PERTURB_");
  1303. if (perturb && perturb[0] && atoi(perturb) != 0)
  1304. _STARPU_DISP("Warning: MALLOC_PERTURB_ is set to non-zero, this makes simgrid run very slow\n");
  1305. #else
  1306. #ifdef __GNUC__
  1307. #ifndef __OPTIMIZE__
  1308. _STARPU_DISP("Warning: StarPU was configured with --enable-debug (-O0), and is thus not optimized\n");
  1309. #endif
  1310. #endif
  1311. #ifdef STARPU_SPINLOCK_CHECK
  1312. _STARPU_DISP("Warning: StarPU was configured with --enable-spinlock-check, which slows down a bit\n");
  1313. #endif
  1314. #if 0
  1315. #ifndef STARPU_NO_ASSERT
  1316. _STARPU_DISP("Warning: StarPU was configured without --enable-fast\n");
  1317. #endif
  1318. #endif
  1319. #ifdef STARPU_MEMORY_STATS
  1320. _STARPU_DISP("Warning: StarPU was configured with --enable-memory-stats, which slows down a bit\n");
  1321. #endif
  1322. #ifdef STARPU_VERBOSE
  1323. _STARPU_DISP("Warning: StarPU was configured with --enable-verbose, which slows down a bit\n");
  1324. #endif
  1325. #ifdef STARPU_USE_FXT
  1326. _STARPU_DISP("Warning: StarPU was configured with --with-fxt, which slows down a bit, limits scalability and makes worker initialization sequential\n");
  1327. #endif
  1328. #ifdef STARPU_FXT_LOCK_TRACES
  1329. _STARPU_DISP("Warning: StarPU was configured with --enable-fxt-lock, which slows down things a huge lot, and is really only meant for StarPU insides debugging. Did you really want to enable that?\n");
  1330. #endif
  1331. #ifdef STARPU_PERF_DEBUG
  1332. _STARPU_DISP("Warning: StarPU was configured with --enable-perf-debug, which slows down a bit\n");
  1333. #endif
  1334. #ifdef STARPU_MODEL_DEBUG
  1335. _STARPU_DISP("Warning: StarPU was configured with --enable-model-debug, which slows down a bit\n");
  1336. #endif
  1337. #ifdef __linux__
  1338. {
  1339. struct utsname buf;
  1340. if (uname(&buf) == 0
  1341. && (!strncmp(buf.release, "4.7.", 4)
  1342. || !strncmp(buf.release, "4.8.", 4)))
  1343. _STARPU_DISP("Warning: This system is running a 4.7 or 4.8 kernel. These have a severe scheduling performance regression issue, please upgrade to at least 4.9.\n");
  1344. }
  1345. #endif
  1346. #endif
  1347. if (starpu_getenv("STARPU_ENABLE_STATS"))
  1348. {
  1349. _STARPU_DISP("Warning: STARPU_ENABLE_STATS is enabled, which slows down a bit\n");
  1350. }
  1351. #if defined(_WIN32) && !defined(__CYGWIN__)
  1352. WSADATA wsadata;
  1353. WSAStartup(MAKEWORD(1,0), &wsadata);
  1354. #endif
  1355. STARPU_AYU_PREINIT();
1356. /* store the user's explicit configuration during the
1357. * initialization */
  1358. if (user_conf == NULL)
  1359. starpu_conf_init(&_starpu_config.conf);
  1360. else
  1361. {
  1362. if (user_conf->magic != 42)
  1363. {
  1364. _STARPU_DISP("starpu_conf structure needs to be initialized with starpu_conf_init\n");
  1365. return -EINVAL;
  1366. }
  1367. _starpu_config.conf = *user_conf;
  1368. }
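/* Sketch of the application-side pattern expected by the magic check above:
 * the starpu_conf must have been filled by starpu_conf_init() before any
 * field is overridden: */
#if 0
struct starpu_conf conf;
starpu_conf_init(&conf);	/* sets the magic number and default values */
conf.sched_policy_name = "dmda";	/* optional override */
int err = starpu_init(&conf);
if (err == -ENODEV)
	return 77;	/* no worker could be enabled */
#endif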
  1369. _starpu_conf_check_environment(&_starpu_config.conf);
  1370. /* Make a copy of arrays */
  1371. if (_starpu_config.conf.sched_policy_name)
  1372. _starpu_config.conf.sched_policy_name = strdup(_starpu_config.conf.sched_policy_name);
  1373. if (_starpu_config.conf.mic_sink_program_path)
  1374. _starpu_config.conf.mic_sink_program_path = strdup(_starpu_config.conf.mic_sink_program_path);
  1375. if (_starpu_config.conf.n_cuda_opengl_interoperability)
  1376. {
  1377. size_t size = _starpu_config.conf.n_cuda_opengl_interoperability * sizeof(*_starpu_config.conf.cuda_opengl_interoperability);
  1378. unsigned *copy;
  1379. _STARPU_MALLOC(copy, size);
  1380. memcpy(copy, _starpu_config.conf.cuda_opengl_interoperability, size);
  1381. _starpu_config.conf.cuda_opengl_interoperability = copy;
  1382. }
  1383. if (_starpu_config.conf.n_not_launched_drivers)
  1384. {
  1385. size_t size = _starpu_config.conf.n_not_launched_drivers * sizeof(*_starpu_config.conf.not_launched_drivers);
  1386. struct starpu_driver *copy;
  1387. _STARPU_MALLOC(copy, size);
  1388. memcpy(copy, _starpu_config.conf.not_launched_drivers, size);
  1389. _starpu_config.conf.not_launched_drivers = copy;
  1390. }
  1391. _starpu_sched_init();
  1392. _starpu_job_init();
  1393. _starpu_graph_init();
  1394. _starpu_init_all_sched_ctxs(&_starpu_config);
  1395. _starpu_init_progression_hooks();
  1396. _starpu_init_idle_hooks();
  1397. _starpu_init_tags();
  1398. #ifdef STARPU_USE_FXT
  1399. _starpu_fxt_init_profiling(_starpu_config.conf.trace_buffer_size);
  1400. #endif
  1401. _starpu_open_debug_logfile();
  1402. _starpu_data_interface_init();
  1403. _starpu_timing_init();
  1404. _starpu_load_bus_performance_files();
  1405. /* Let drivers register themselves */
  1406. starpu_drivers_preinit();
  1407. /* Note: nothing before here should be allocating anything, in case we
  1408. * actually return ENODEV here */
1409. /* Depending on whether we are an MP sink or not, we must build the
  1410. * topology with MP nodes or not. */
  1411. ret = _starpu_build_topology(&_starpu_config, is_a_sink);
1412. /* a sink does not exit even if no worker was discovered */
  1413. if (ret && !is_a_sink)
  1414. {
  1415. starpu_perfmodel_free_sampling();
  1416. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1417. init_count--;
  1418. _starpu_destroy_machine_config(&_starpu_config);
  1419. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1420. if (_starpu_mpi_common_is_mp_initialized())
  1421. _starpu_mpi_common_mp_deinit();
  1422. #endif
  1423. initialized = UNINITIALIZED;
  1424. /* Let somebody else try to do it */
  1425. STARPU_PTHREAD_COND_SIGNAL(&init_cond);
  1426. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1427. #ifdef STARPU_USE_FXT
  1428. _starpu_stop_fxt_profiling();
  1429. #endif
  1430. return ret;
  1431. }
  1432. _starpu_profiling_init();
  1433. _starpu_task_init();
  1434. for (worker = 0; worker < _starpu_config.topology.nworkers; worker++)
  1435. _starpu_worker_init(&_starpu_config.workers[worker], &_starpu_config);
  1436. //FIXME: find out if the variable STARPU_CHECK_ENTIRE_PLATFORM is really needed, for now, just set 1 as a default value
  1437. check_entire_platform = 1;//starpu_get_env_number("STARPU_CHECK_ENTIRE_PLATFORM");
  1438. _starpu_config.disable_kernels = starpu_get_env_number("STARPU_DISABLE_KERNELS");
  1439. STARPU_PTHREAD_KEY_CREATE(&_starpu_worker_key, NULL);
  1440. STARPU_PTHREAD_KEY_CREATE(&_starpu_worker_set_key, NULL);
  1441. _starpu_keys_initialized = 1;
  1442. STARPU_WMB();
  1443. _starpu_build_tree();
  1444. if (!is_a_sink)
  1445. {
  1446. struct starpu_sched_policy *selected_policy = _starpu_select_sched_policy(&_starpu_config, _starpu_config.conf.sched_policy_name);
  1447. _starpu_create_sched_ctx(selected_policy, NULL, -1, 1, "init", (_starpu_config.conf.global_sched_ctx_min_priority != -1), _starpu_config.conf.global_sched_ctx_min_priority, (_starpu_config.conf.global_sched_ctx_max_priority != -1), _starpu_config.conf.global_sched_ctx_max_priority, 1, _starpu_config.conf.sched_policy_init, NULL, 0, NULL, 0);
  1448. }
  1449. _starpu_initialize_registered_performance_models();
  1450. _starpu_perf_counter_init(&_starpu_config);
  1451. _starpu_perf_knob_init();
  1452. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1453. _starpu_cuda_init();
  1454. #endif
  1455. #ifdef STARPU_SIMGRID
  1456. _starpu_simgrid_init();
  1457. #endif
1458. /* Launch "basic" workers (i.e. non-combined workers) */
  1459. if (!is_a_sink)
  1460. _starpu_launch_drivers(&_starpu_config);
  1461. /* Allocate swap, if any */
  1462. if (!is_a_sink)
  1463. _starpu_swap_init();
  1464. _starpu_watchdog_init();
  1465. _starpu_profiling_start();
  1466. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1467. initialized = INITIALIZED;
  1468. /* Tell everybody that we initialized */
  1469. STARPU_PTHREAD_COND_BROADCAST(&init_cond);
  1470. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1471. int main_thread_cpuid = starpu_get_env_number_default("STARPU_MAIN_THREAD_CPUID", -1);
  1472. int main_thread_coreid = starpu_get_env_number_default("STARPU_MAIN_THREAD_COREID", -1);
  1473. if (main_thread_cpuid >= 0 && main_thread_coreid >= 0)
  1474. {
  1475. _STARPU_DISP("Warning: STARPU_MAIN_THREAD_CPUID and STARPU_MAIN_THREAD_COREID cannot be set at the same time. STARPU_MAIN_THREAD_CPUID will be used.\n");
  1476. }
  1477. if (main_thread_cpuid == -1 && main_thread_coreid >= 0)
  1478. {
  1479. main_thread_cpuid = main_thread_coreid * _starpu_get_nhyperthreads();
  1480. }
  1481. int main_thread_bind = starpu_get_env_number_default("STARPU_MAIN_THREAD_BIND", 0);
  1482. int main_thread_activity = STARPU_NONACTIVETHREAD;
  1483. if (main_thread_bind)
  1484. {
  1485. main_thread_activity = STARPU_ACTIVETHREAD;
  1486. if (main_thread_cpuid == -1)
  1487. main_thread_cpuid = starpu_get_next_bindid(STARPU_THREAD_ACTIVE, NULL, 0);
  1488. }
  1489. if (main_thread_cpuid >= 0)
  1490. _starpu_bind_thread_on_cpu(main_thread_cpuid, main_thread_activity, "main");
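/* Hedged illustration of the environment variables handled above: binding the
 * application's main thread on core 0 can be requested either from the shell
 * (STARPU_MAIN_THREAD_BIND=1 STARPU_MAIN_THREAD_COREID=0 ./application) or
 * programmatically, provided it is done before starpu_init(): */
#if 0
setenv("STARPU_MAIN_THREAD_BIND", "1", 1);	/* reserve a binding slot for main */
setenv("STARPU_MAIN_THREAD_COREID", "0", 1);	/* ...on core 0 */
starpu_init(NULL);
#endif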
  1491. _STARPU_DEBUG("Initialisation finished\n");
  1492. #ifdef STARPU_USE_MP
1493. /* Finally, if we are an MP sink, we never leave this function. Else,
1494. * we enter an infinite event loop which listens for MP commands from
1495. * the source. */
  1496. if (is_a_sink)
  1497. {
  1498. _starpu_sink_common_worker();
  1499. /* We should normally never leave the loop as we don't want to
  1500. * really initialize STARPU */
  1501. STARPU_ASSERT(0);
  1502. }
  1503. #endif
  1504. _starpu_catch_signals();
  1505. /* if MPI is enabled, binding display will be done later, after MPI initialization */
  1506. if (!_starpu_config.conf.will_use_mpi && starpu_get_env_number_default("STARPU_DISPLAY_BINDINGS", 0))
  1507. {
  1508. fprintf(stdout, "== Binding ==\n");
  1509. starpu_display_bindings();
  1510. fprintf(stdout, "== End of binding ==\n");
  1511. fflush(stdout);
  1512. }
  1513. return 0;
  1514. }
  1515. /*
  1516. * Handle runtime termination
  1517. */
  1518. static void _starpu_terminate_workers(struct _starpu_machine_config *pconfig)
  1519. {
  1520. int status = 0;
  1521. unsigned workerid;
  1522. unsigned n;
  1523. starpu_wake_all_blocked_workers();
  1524. for (workerid = 0; workerid < pconfig->topology.nworkers; workerid++)
  1525. {
  1526. _STARPU_DEBUG("wait for worker %u\n", workerid);
  1527. struct _starpu_worker_set *set = pconfig->workers[workerid].set;
  1528. struct _starpu_worker *worker = &pconfig->workers[workerid];
  1529. /* in case StarPU termination code is called from a callback,
  1530. * we have to check if starpu_pthread_self() is the worker itself */
  1531. if (set && set->nworkers > 0)
  1532. {
  1533. if (set->started)
  1534. {
  1535. if (!starpu_pthread_equal(starpu_pthread_self(), set->worker_thread))
  1536. status = starpu_pthread_join(set->worker_thread, NULL);
  1537. if (status)
  1538. {
  1539. #ifdef STARPU_VERBOSE
  1540. _STARPU_DEBUG("starpu_pthread_join -> %d\n", status);
  1541. #endif
  1542. }
  1543. set->started = 0;
  1544. }
  1545. }
  1546. else
  1547. {
  1548. if (!worker->run_by_starpu)
  1549. goto out;
  1550. if (!starpu_pthread_equal(starpu_pthread_self(), worker->worker_thread))
  1551. status = starpu_pthread_join(worker->worker_thread, NULL);
  1552. if (status)
  1553. {
  1554. #ifdef STARPU_VERBOSE
  1555. _STARPU_DEBUG("starpu_pthread_join -> %d\n", status);
  1556. #endif
  1557. }
  1558. }
  1559. out:
  1560. STARPU_ASSERT(starpu_task_list_empty(&worker->local_tasks));
  1561. for (n = 0; n < worker->local_ordered_tasks_size; n++)
  1562. STARPU_ASSERT(worker->local_ordered_tasks[n] == NULL);
  1563. _starpu_sched_ctx_list_delete(&worker->sched_ctx_list);
  1564. free(worker->local_ordered_tasks);
  1565. STARPU_ASSERT(_starpu_ctx_change_list_empty(&worker->ctx_change_list));
  1566. }
  1567. }
  1568. /* Condition variable and mutex used to pause/resume. */
  1569. static starpu_pthread_cond_t pause_cond = STARPU_PTHREAD_COND_INITIALIZER;
  1570. static starpu_pthread_mutex_t pause_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
  1571. void _starpu_may_pause(void)
  1572. {
  1573. /* pause_depth is just protected by a memory barrier */
  1574. STARPU_RMB();
  1575. if (STARPU_UNLIKELY(_starpu_config.pause_depth > 0))
  1576. {
  1577. STARPU_PTHREAD_MUTEX_LOCK(&pause_mutex);
  1578. if (_starpu_config.pause_depth > 0)
  1579. {
  1580. STARPU_PTHREAD_COND_WAIT(&pause_cond, &pause_mutex);
  1581. }
  1582. STARPU_PTHREAD_MUTEX_UNLOCK(&pause_mutex);
  1583. }
  1584. }
  1585. void starpu_pause()
  1586. {
  1587. STARPU_HG_DISABLE_CHECKING(_starpu_config.pause_depth);
  1588. _starpu_config.pause_depth += 1;
  1589. starpu_fxt_trace_user_event_string("starpu_pause");
  1590. }
  1591. void starpu_resume()
  1592. {
  1593. STARPU_PTHREAD_MUTEX_LOCK(&pause_mutex);
  1594. _starpu_config.pause_depth -= 1;
  1595. if (!_starpu_config.pause_depth)
  1596. {
  1597. STARPU_PTHREAD_COND_BROADCAST(&pause_cond);
  1598. }
  1599. STARPU_PTHREAD_MUTEX_UNLOCK(&pause_mutex);
  1600. starpu_fxt_trace_user_event_string("starpu_resume");
  1601. }
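/* Sketch: starpu_pause()/starpu_resume() bracketing a large submission burst,
 * so workers only start picking tasks once everything is queued (tasks[] and
 * ntasks are assumed to come from the application): */
#if 0
unsigned i;
starpu_pause();
for (i = 0; i < ntasks; i++)
	starpu_task_submit(tasks[i]);	/* workers stay blocked in _starpu_may_pause() */
starpu_resume();
starpu_task_wait_for_all();
#endif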
  1602. unsigned _starpu_worker_can_block(unsigned memnode STARPU_ATTRIBUTE_UNUSED, struct _starpu_worker *worker STARPU_ATTRIBUTE_UNUSED)
  1603. {
  1604. #ifdef STARPU_NON_BLOCKING_DRIVERS
  1605. return 0;
  1606. #else
  1607. /* do not block if a sched_ctx change operation is pending */
  1608. if (worker->state_changing_ctx_notice)
  1609. return 0;
  1610. unsigned can_block = 1;
  1611. struct starpu_driver driver;
  1612. driver.type = worker->arch;
  1613. switch (driver.type)
  1614. {
  1615. case STARPU_CPU_WORKER:
  1616. driver.id.cpu_id = worker->devid;
  1617. break;
  1618. case STARPU_CUDA_WORKER:
  1619. driver.id.cuda_id = worker->devid;
  1620. break;
  1621. #ifdef STARPU_USE_OPENCL
  1622. case STARPU_OPENCL_WORKER:
  1623. starpu_opencl_get_device(worker->devid, &driver.id.opencl_id);
  1624. break;
  1625. #endif
  1626. default:
  1627. goto always_launch;
  1628. }
  1629. if (!_starpu_may_launch_driver(&_starpu_config.conf, &driver))
  1630. return 0;
  1631. always_launch:
  1632. #ifndef STARPU_SIMGRID
  1633. if (!_starpu_check_that_no_data_request_exists(memnode))
  1634. can_block = 0;
  1635. #endif
  1636. if (!_starpu_machine_is_running())
  1637. can_block = 0;
  1638. if (!_starpu_execute_registered_progression_hooks())
  1639. can_block = 0;
  1640. return can_block;
  1641. #endif
  1642. }
  1643. static void _starpu_kill_all_workers(struct _starpu_machine_config *pconfig)
  1644. {
  1645. /* set the flag which will tell workers to stop */
  1646. ANNOTATE_HAPPENS_AFTER(&_starpu_config.running);
  1647. pconfig->running = 0;
  1648. /* running is just protected by a memory barrier */
  1649. ANNOTATE_HAPPENS_BEFORE(&_starpu_config.running);
  1650. STARPU_WMB();
  1651. starpu_wake_all_blocked_workers();
  1652. }
  1653. void starpu_display_stats()
  1654. {
  1655. starpu_profiling_bus_helper_display_summary();
  1656. starpu_profiling_worker_helper_display_summary();
  1657. }
  1658. void starpu_shutdown(void)
  1659. {
  1660. unsigned worker;
  1661. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1662. init_count--;
  1663. STARPU_ASSERT_MSG(init_count >= 0, "Number of calls to starpu_shutdown() can not be higher than the number of calls to starpu_init()\n");
  1664. if (init_count)
  1665. {
  1666. _STARPU_DEBUG("Still somebody needing StarPU, don't deinitialize\n");
  1667. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1668. return;
  1669. }
  1670. /* We're last */
  1671. initialized = CHANGING;
  1672. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1673. /* If the workers are frozen, no progress can be made. */
  1674. STARPU_ASSERT(_starpu_config.pause_depth <= 0);
  1675. starpu_task_wait_for_no_ready();
  1676. /* tell all workers to shutdown */
  1677. _starpu_kill_all_workers(&_starpu_config);
  1678. unsigned i;
  1679. unsigned nb_numa_nodes = starpu_memory_nodes_get_numa_count();
  1680. for (i=0; i<nb_numa_nodes; i++)
  1681. {
  1682. _starpu_free_all_automatically_allocated_buffers(i);
  1683. }
  1684. {
  1685. int stats = starpu_get_env_number("STARPU_STATS");
  1686. if (stats != 0)
  1687. {
  1688. _starpu_display_msi_stats(stderr);
  1689. _starpu_display_alloc_cache_stats(stderr);
  1690. }
  1691. }
  1692. starpu_profiling_bus_helper_display_summary();
  1693. starpu_profiling_worker_helper_display_summary();
  1694. starpu_bound_clear();
  1695. _starpu_deinitialize_registered_performance_models();
  1696. _starpu_watchdog_shutdown();
  1697. /* wait for their termination */
  1698. _starpu_terminate_workers(&_starpu_config);
  1699. {
  1700. int stats = starpu_get_env_number("STARPU_MEMORY_STATS");
  1701. if (stats != 0)
  1702. {
  1703. // Display statistics on data which have not been unregistered
  1704. starpu_data_display_memory_stats();
  1705. }
  1706. }
  1707. _starpu_delete_all_sched_ctxs();
  1708. _starpu_sched_component_workers_destroy();
  1709. for (worker = 0; worker < _starpu_config.topology.nworkers; worker++)
  1710. _starpu_worker_deinit(&_starpu_config.workers[worker]);
  1711. _starpu_profiling_terminate();
  1712. _starpu_disk_unregister();
  1713. #ifdef STARPU_HAVE_HWLOC
  1714. starpu_tree_free(_starpu_config.topology.tree);
  1715. free(_starpu_config.topology.tree);
  1716. #endif
  1717. _starpu_destroy_topology(&_starpu_config);
  1718. _starpu_initialized_combined_workers = 0;
  1719. #ifdef STARPU_USE_FXT
  1720. _starpu_stop_fxt_profiling();
  1721. #endif
  1722. _starpu_data_interface_shutdown();
  1723. _starpu_job_fini();
  1724. /* Drop all remaining tags */
  1725. _starpu_tag_clear();
  1726. #ifdef STARPU_OPENMP
  1727. _starpu_omp_dummy_shutdown();
  1728. #endif
  1729. _starpu_perf_knob_exit();
  1730. _starpu_perf_counter_exit();
  1731. _starpu_close_debug_logfile();
  1732. _starpu_keys_initialized = 0;
  1733. STARPU_PTHREAD_KEY_DELETE(_starpu_worker_key);
  1734. STARPU_PTHREAD_KEY_DELETE(_starpu_worker_set_key);
  1735. _starpu_task_deinit();
  1736. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1737. initialized = UNINITIALIZED;
1738. /* Let someone else who wants to initialize it again do it */
  1739. STARPU_PTHREAD_COND_SIGNAL(&init_cond);
  1740. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1741. /* Clear memory */
  1742. free((char*) _starpu_config.conf.sched_policy_name);
  1743. free(_starpu_config.conf.mic_sink_program_path);
  1744. if (_starpu_config.conf.n_cuda_opengl_interoperability)
  1745. free(_starpu_config.conf.cuda_opengl_interoperability);
  1746. if (_starpu_config.conf.n_not_launched_drivers)
  1747. free(_starpu_config.conf.not_launched_drivers);
  1748. STARPU_AYU_FINISH();
  1749. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1750. if (_starpu_mpi_common_is_mp_initialized())
  1751. _starpu_mpi_common_mp_deinit();
  1752. #endif
  1753. _starpu_print_idle_time();
  1754. _STARPU_DEBUG("Shutdown finished\n");
  1755. #ifdef STARPU_SIMGRID
  1756. /* This finalizes the simgrid thread library, thus needs to be late */
  1757. _starpu_simgrid_deinit();
  1758. #endif
  1759. }
  1760. #undef starpu_worker_get_count
  1761. unsigned starpu_worker_get_count(void)
  1762. {
  1763. return _starpu_config.topology.nworkers;
  1764. }
  1765. unsigned starpu_worker_is_blocked_in_parallel(int workerid)
  1766. {
  1767. if (!_starpu_worker_parallel_blocks)
  1768. return 0;
  1769. int relax_own_observation_state = 0;
  1770. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  1771. STARPU_ASSERT(worker != NULL);
  1772. STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
  1773. struct _starpu_worker *cur_worker = NULL;
  1774. int cur_workerid = starpu_worker_get_id();
  1775. if (workerid != cur_workerid)
  1776. {
  1777. /* in order to observe the 'blocked' state of a worker from
  1778. * another worker, we must avoid race conditions between
  1779. * 'blocked' state changes and state observations. This is the
  1780. * purpose of this 'if' block. */
  1781. cur_worker = cur_workerid >= 0 ? _starpu_get_worker_struct(cur_workerid) : NULL;
  1782. relax_own_observation_state = (cur_worker != NULL) && (cur_worker->state_relax_refcnt == 0);
  1783. if (relax_own_observation_state && !worker->state_relax_refcnt)
  1784. {
  1785. /* moreover, when a worker (cur_worker != NULL)
  1786. * observes another worker, we need to take special
  1787. * care to avoid live locks, thus the observing worker
  1788. * must enter the relaxed state (if not relaxed
  1789. * already) before doing the observation in mutual
  1790. * exclusion */
  1791. STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
  1792. STARPU_PTHREAD_MUTEX_LOCK_SCHED(&cur_worker->sched_mutex);
  1793. cur_worker->state_relax_refcnt = 1;
  1794. STARPU_PTHREAD_COND_BROADCAST(&cur_worker->sched_cond);
  1795. STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&cur_worker->sched_mutex);
  1796. STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
  1797. }
  1798. /* the observer waits for a safe window to observe the state,
  1799. * and also waits for any pending blocking state change
1800. * requests to be processed, in order not to obtain
1801. * ephemeral information */
  1802. while (!worker->state_relax_refcnt
  1803. || worker->state_block_in_parallel_req
  1804. || worker->state_unblock_in_parallel_req)
  1805. {
  1806. STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
  1807. }
  1808. }
  1809. unsigned ret = _starpu_config.workers[workerid].state_blocked_in_parallel;
1810. /* once a worker state has been observed, the worker is 'tainted' for one full sched_op,
  1811. * to avoid changing the observed worker state - on which the observer
  1812. * made a scheduling decision - after the fact. */
  1813. worker->state_blocked_in_parallel_observed = 1;
  1814. STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
  1815. if (relax_own_observation_state)
  1816. {
  1817. STARPU_PTHREAD_MUTEX_LOCK_SCHED(&cur_worker->sched_mutex);
  1818. cur_worker->state_relax_refcnt = 0;
  1819. STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&cur_worker->sched_mutex);
  1820. }
  1821. return ret;
  1822. }
  1823. unsigned starpu_worker_is_slave_somewhere(int workerid)
  1824. {
  1825. starpu_worker_lock(workerid);
  1826. unsigned ret = _starpu_config.workers[workerid].is_slave_somewhere;
  1827. starpu_worker_unlock(workerid);
  1828. return ret;
  1829. }
  1830. int starpu_worker_get_count_by_type(enum starpu_worker_archtype type)
  1831. {
  1832. unsigned n = 0;
  1833. if (type != STARPU_ANY_WORKER)
  1834. {
  1835. if (type >= STARPU_NARCH)
  1836. return -EINVAL;
  1837. unsigned i;
  1838. for (i = 0; i < _starpu_config.topology.ndevices[type]; i++)
  1839. n += _starpu_config.topology.nworker[type][i];
  1840. return n;
  1841. }
  1842. for (type = 0; type < STARPU_NARCH; type++)
  1843. n += starpu_worker_get_count_by_type(type);
  1844. return n;
  1845. }
  1846. unsigned starpu_combined_worker_get_count(void)
  1847. {
  1848. return _starpu_config.topology.ncombinedworkers;
  1849. }
  1850. unsigned starpu_cpu_worker_get_count(void)
  1851. {
  1852. return starpu_worker_get_count_by_type(STARPU_CPU_WORKER);
  1853. }
  1854. unsigned starpu_cuda_worker_get_count(void)
  1855. {
  1856. return starpu_worker_get_count_by_type(STARPU_CUDA_WORKER);
  1857. }
  1858. unsigned starpu_opencl_worker_get_count(void)
  1859. {
  1860. return starpu_worker_get_count_by_type(STARPU_OPENCL_WORKER);
  1861. }
  1862. int starpu_asynchronous_copy_disabled(void)
  1863. {
  1864. return _starpu_config.conf.disable_asynchronous_copy;
  1865. }
  1866. int starpu_asynchronous_cuda_copy_disabled(void)
  1867. {
  1868. return _starpu_config.conf.disable_asynchronous_cuda_copy;
  1869. }
  1870. int starpu_asynchronous_opencl_copy_disabled(void)
  1871. {
  1872. return _starpu_config.conf.disable_asynchronous_opencl_copy;
  1873. }
  1874. int starpu_asynchronous_mic_copy_disabled(void)
  1875. {
  1876. return _starpu_config.conf.disable_asynchronous_mic_copy;
  1877. }
  1878. int starpu_asynchronous_mpi_ms_copy_disabled(void)
  1879. {
  1880. return _starpu_config.conf.disable_asynchronous_mpi_ms_copy;
  1881. }
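/* Sketch: asynchronous copies can be disabled globally or per driver, either
 * from the configuration structure or from the corresponding environment
 * variables checked in _starpu_conf_check_environment(): */
#if 0
struct starpu_conf conf;
starpu_conf_init(&conf);
conf.disable_asynchronous_copy = 1;	/* same as STARPU_DISABLE_ASYNCHRONOUS_COPY=1 */
starpu_init(&conf);
if (starpu_asynchronous_copy_disabled())
	fprintf(stderr, "falling back to synchronous transfers\n");
#endif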
  1882. unsigned starpu_mic_worker_get_count(void)
  1883. {
  1884. return starpu_worker_get_count_by_type(STARPU_MIC_WORKER);
  1885. }
  1886. unsigned starpu_mpi_ms_worker_get_count(void)
  1887. {
  1888. return starpu_worker_get_count_by_type(STARPU_MPI_MS_WORKER);
  1889. }
  1890. /* When analyzing performance, it is useful to see what is the processing unit
  1891. * that actually performed the task. This function returns the id of the
  1892. * processing unit actually executing it, therefore it makes no sense to use it
  1893. * within the callbacks of SPU functions for instance. If called by some thread
  1894. * that is not controlled by StarPU, starpu_worker_get_id returns -1. */
  1895. #undef starpu_worker_get_id
  1896. int starpu_worker_get_id(void)
  1897. {
  1898. struct _starpu_worker * worker;
  1899. worker = _starpu_get_local_worker_key();
  1900. if (worker)
  1901. {
  1902. return worker->workerid;
  1903. }
  1904. else
  1905. {
  1906. /* there is no worker associated to that thread, perhaps it is
  1907. * a thread from the application or this is some SPU worker */
  1908. return -1;
  1909. }
  1910. }
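/* Sketch: typical use from inside a codelet implementation, to report which
 * processing unit actually executed the task (cpu_func is an application-side
 * example function): */
#if 0
void cpu_func(void *buffers[], void *cl_arg)
{
	int id = starpu_worker_get_id();	/* >= 0 here: we run on a StarPU worker */
	char name[64];
	starpu_worker_get_name(id, name, sizeof(name));
	fprintf(stderr, "task executed by worker %d (%s)\n", id, name);
}
#endif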
  1911. #define starpu_worker_get_id _starpu_worker_get_id
  1912. #undef _starpu_worker_get_id_check
  1913. unsigned _starpu_worker_get_id_check(const char *f, int l)
  1914. {
  1915. (void) f;
  1916. (void) l;
  1917. int id = _starpu_worker_get_id();
  1918. STARPU_ASSERT_MSG(id>=0, "%s:%d Cannot be called from outside a worker\n", f, l);
  1919. return id;
  1920. }
  1921. int starpu_combined_worker_get_id(void)
  1922. {
  1923. struct _starpu_worker *worker;
  1924. worker = _starpu_get_local_worker_key();
  1925. if (worker)
  1926. {
  1927. return worker->combined_workerid;
  1928. }
  1929. else
  1930. {
  1931. /* there is no worker associated to that thread, perhaps it is
  1932. * a thread from the application or this is some SPU worker */
  1933. return -1;
  1934. }
  1935. }
  1936. int starpu_combined_worker_get_size(void)
  1937. {
  1938. struct _starpu_worker *worker;
  1939. worker = _starpu_get_local_worker_key();
  1940. if (worker)
  1941. {
  1942. return worker->worker_size;
  1943. }
  1944. else
  1945. {
  1946. /* there is no worker associated to that thread, perhaps it is
  1947. * a thread from the application or this is some SPU worker */
  1948. return -1;
  1949. }
  1950. }
  1951. int starpu_combined_worker_get_rank(void)
  1952. {
  1953. struct _starpu_worker *worker;
  1954. worker = _starpu_get_local_worker_key();
  1955. if (worker)
  1956. {
  1957. return worker->current_rank;
  1958. }
  1959. else
  1960. {
  1961. /* there is no worker associated to that thread, perhaps it is
  1962. * a thread from the application or this is some SPU worker */
  1963. return -1;
  1964. }
  1965. }
  1966. int starpu_worker_get_subworkerid(int id)
  1967. {
  1968. return _starpu_config.workers[id].subworkerid;
  1969. }
  1970. int starpu_worker_get_devid(int id)
  1971. {
  1972. return _starpu_config.workers[id].devid;
  1973. }
  1974. unsigned starpu_worker_is_combined_worker(int id)
  1975. {
  1976. return id >= (int)_starpu_config.topology.nworkers;
  1977. }
  1978. struct _starpu_combined_worker *_starpu_get_combined_worker_struct(unsigned id)
  1979. {
  1980. unsigned basic_worker_count = starpu_worker_get_count();
  1981. //_STARPU_DEBUG("basic_worker_count:%d\n",basic_worker_count);
  1982. STARPU_ASSERT(id >= basic_worker_count);
  1983. return &_starpu_config.combined_workers[id - basic_worker_count];
  1984. }
  1985. enum starpu_worker_archtype starpu_worker_get_type(int id)
  1986. {
  1987. return _starpu_config.workers[id].arch;
  1988. }
  1989. unsigned starpu_worker_get_ids_by_type(enum starpu_worker_archtype type, int *workerids, unsigned maxsize)
  1990. {
  1991. unsigned nworkers = starpu_worker_get_count();
  1992. unsigned cnt = 0;
  1993. unsigned id;
  1994. for (id = 0; id < nworkers; id++)
  1995. {
  1996. if (type == STARPU_ANY_WORKER || starpu_worker_get_type(id) == type)
  1997. {
1998. /* Perhaps the array is too small? */
  1999. if (cnt >= maxsize)
  2000. return -ERANGE;
  2001. workerids[cnt++] = id;
  2002. }
  2003. }
  2004. return cnt;
  2005. }
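/* Sketch: enumerating the CPU workers; sizing the array with
 * starpu_worker_get_count() guarantees that -ERANGE cannot be returned: */
#if 0
unsigned total = starpu_worker_get_count();
int ids[total];
unsigned ncpus = starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, ids, total);
#endif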
  2006. int starpu_worker_get_by_type(enum starpu_worker_archtype type, int num)
  2007. {
  2008. unsigned nworkers = starpu_worker_get_count();
  2009. int cnt = 0;
  2010. unsigned id;
  2011. for (id = 0; id < nworkers; id++)
  2012. {
  2013. if (type == STARPU_ANY_WORKER || starpu_worker_get_type(id) == type)
  2014. {
  2015. if (num == cnt)
  2016. return id;
  2017. cnt++;
  2018. }
  2019. }
  2020. /* Not found */
  2021. return -1;
  2022. }
  2023. int starpu_worker_get_by_devid(enum starpu_worker_archtype type, int devid)
  2024. {
  2025. unsigned nworkers = starpu_worker_get_count();
  2026. unsigned id;
  2027. for (id = 0; id < nworkers; id++)
  2028. if (starpu_worker_get_type(id) == type && starpu_worker_get_devid(id) == devid)
  2029. return id;
  2030. /* Not found */
  2031. return -1;
  2032. }
  2033. int starpu_worker_get_devids(enum starpu_worker_archtype type, int *devids, int num)
  2034. {
  2035. unsigned nworkers = starpu_worker_get_count();
  2036. int workerids[nworkers];
  2037. unsigned ndevice_workers = starpu_worker_get_ids_by_type(type, workerids, nworkers);
  2038. unsigned ndevids = 0;
  2039. if(ndevice_workers > 0)
  2040. {
  2041. unsigned id, devid;
  2042. int cnt = 0;
  2043. unsigned found = 0;
  2044. for(id = 0; id < ndevice_workers; id++)
  2045. {
  2046. int curr_devid;
  2047. curr_devid = _starpu_config.workers[workerids[id]].devid;
  2048. for(devid = 0; devid < ndevids; devid++)
  2049. {
  2050. if(curr_devid == devids[devid])
  2051. {
  2052. found = 1;
  2053. break;
  2054. }
  2055. }
  2056. if(!found)
  2057. {
  2058. devids[ndevids++] = curr_devid;
  2059. cnt++;
  2060. }
  2061. else
  2062. found = 0;
  2063. if(cnt == num)
  2064. break;
  2065. }
  2066. }
  2067. return ndevids;
  2068. }
  2069. void starpu_worker_get_name(int id, char *dst, size_t maxlen)
  2070. {
  2071. char *name = _starpu_config.workers[id].name;
  2072. snprintf(dst, maxlen, "%s", name);
  2073. }
  2074. int starpu_worker_get_bindid(int workerid)
  2075. {
  2076. return _starpu_config.workers[workerid].bindid;
  2077. }
  2078. int starpu_bindid_get_workerids(int bindid, int **workerids)
  2079. {
  2080. if (bindid >= (int) _starpu_config.nbindid)
  2081. return 0;
  2082. *workerids = _starpu_config.bindid_workers[bindid].workerids;
  2083. return _starpu_config.bindid_workers[bindid].nworkers;
  2084. }
  2085. int starpu_worker_get_stream_workerids(unsigned devid, int *workerids, enum starpu_worker_archtype type)
  2086. {
  2087. unsigned nworkers = starpu_worker_get_count();
  2088. int nw = 0;
  2089. unsigned id;
  2090. for (id = 0; id < nworkers; id++)
  2091. {
  2092. if (_starpu_config.workers[id].devid == devid &&
  2093. (type == STARPU_ANY_WORKER || _starpu_config.workers[id].arch == type))
  2094. workerids[nw++] = id;
  2095. }
  2096. return nw;
  2097. }
  2098. void starpu_worker_get_sched_condition(int workerid, starpu_pthread_mutex_t **sched_mutex, starpu_pthread_cond_t **sched_cond)
  2099. {
  2100. STARPU_ASSERT(workerid >= 0 && workerid < STARPU_NMAXWORKERS);
  2101. *sched_cond = &_starpu_config.workers[workerid].sched_cond;
  2102. *sched_mutex = &_starpu_config.workers[workerid].sched_mutex;
  2103. }
  2104. /* returns 1 if the call results in initiating a transition of worker WORKERID
  2105. * from sleeping state to awake
  2106. * returns 0 if worker WORKERID is not sleeping or the wake-up transition
  2107. * already has been initiated
  2108. */
  2109. static int starpu_wakeup_worker_locked(int workerid, starpu_pthread_cond_t *sched_cond, starpu_pthread_mutex_t *mutex STARPU_ATTRIBUTE_UNUSED)
  2110. {
  2111. #ifdef STARPU_SIMGRID
  2112. starpu_pthread_queue_broadcast(&_starpu_simgrid_task_queue[workerid]);
  2113. #endif
  2114. if (_starpu_config.workers[workerid].status == STATUS_SCHEDULING || _starpu_config.workers[workerid].status == STATUS_SLEEPING_SCHEDULING)
  2115. {
  2116. _starpu_config.workers[workerid].state_keep_awake = 1;
  2117. return 0;
  2118. }
  2119. else if (_starpu_config.workers[workerid].status == STATUS_SLEEPING)
  2120. {
  2121. int ret = 0;
  2122. if (_starpu_config.workers[workerid].state_keep_awake != 1)
  2123. {
  2124. _starpu_config.workers[workerid].state_keep_awake = 1;
  2125. ret = 1;
  2126. }
  2127. /* cond_broadcast is required over cond_signal since
2128. * the condition is shared for multiple purposes */
  2129. STARPU_PTHREAD_COND_BROADCAST(sched_cond);
  2130. return ret;
  2131. }
  2132. return 0;
  2133. }
  2134. static int starpu_wakeup_worker_no_relax(int workerid, starpu_pthread_cond_t *sched_cond, starpu_pthread_mutex_t *sched_mutex)
  2135. {
  2136. int success;
  2137. STARPU_PTHREAD_MUTEX_LOCK_SCHED(sched_mutex);
  2138. success = starpu_wakeup_worker_locked(workerid, sched_cond, sched_mutex);
  2139. STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(sched_mutex);
  2140. return success;
  2141. }
  2142. int starpu_wake_worker_locked(int workerid)
  2143. {
  2144. starpu_pthread_mutex_t *sched_mutex;
  2145. starpu_pthread_cond_t *sched_cond;
  2146. starpu_worker_get_sched_condition(workerid, &sched_mutex, &sched_cond);
  2147. return starpu_wakeup_worker_locked(workerid, sched_cond, sched_mutex);
  2148. }
  2149. int starpu_wake_worker_no_relax(int workerid)
  2150. {
  2151. starpu_pthread_mutex_t *sched_mutex;
  2152. starpu_pthread_cond_t *sched_cond;
  2153. starpu_worker_get_sched_condition(workerid, &sched_mutex, &sched_cond);
  2154. return starpu_wakeup_worker_no_relax(workerid, sched_cond, sched_mutex);
  2155. }
  2156. int starpu_worker_get_nids_by_type(enum starpu_worker_archtype type, int *workerids, int maxsize)
  2157. {
  2158. unsigned nworkers = starpu_worker_get_count();
  2159. int cnt = 0;
  2160. unsigned id;
  2161. for (id = 0; id < nworkers; id++)
  2162. {
  2163. if (type == STARPU_ANY_WORKER || starpu_worker_get_type(id) == type)
  2164. {
2165. /* Perhaps the array is too small? */
  2166. if (cnt >= maxsize)
  2167. return cnt;
  2168. workerids[cnt++] = id;
  2169. }
  2170. }
  2171. return cnt;
  2172. }
  2173. int starpu_worker_get_nids_ctx_free_by_type(enum starpu_worker_archtype type, int *workerids, int maxsize)
  2174. {
  2175. unsigned nworkers = starpu_worker_get_count();
  2176. int cnt = 0;
  2177. unsigned id;
  2178. for (id = 0; id < nworkers; id++)
  2179. {
  2180. if (type == STARPU_ANY_WORKER || starpu_worker_get_type(id) == type)
  2181. {
2182. /* Perhaps the array is too small? */
  2183. if (cnt >= maxsize)
  2184. return cnt;
  2185. unsigned found = 0;
  2186. int s;
  2187. for(s = 1; s < STARPU_NMAX_SCHED_CTXS; s++)
  2188. {
  2189. if(_starpu_config.sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
  2190. {
  2191. struct starpu_worker_collection *workers = _starpu_config.sched_ctxs[s].workers;
  2192. struct starpu_sched_ctx_iterator it;
  2193. workers->init_iterator(workers, &it);
  2194. while(workers->has_next(workers, &it))
  2195. {
  2196. unsigned worker = workers->get_next(workers, &it);
  2197. if(worker == id)
  2198. {
  2199. found = 1;
  2200. break;
  2201. }
  2202. }
  2203. if(found)
  2204. break;
  2205. }
  2206. }
  2207. if(!found)
  2208. workerids[cnt++] = id;
  2209. }
  2210. }
  2211. return cnt;
  2212. }
  2213. void starpu_get_version(int *major, int *minor, int *release)
  2214. {
  2215. *major = STARPU_MAJOR_VERSION;
  2216. *minor = STARPU_MINOR_VERSION;
  2217. *release = STARPU_RELEASE_VERSION;
  2218. }
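/* Sketch: querying the runtime version from an application, e.g. to check it
 * against the headers used at compile time: */
#if 0
int major, minor, release;
starpu_get_version(&major, &minor, &release);
if (major != STARPU_MAJOR_VERSION)
	fprintf(stderr, "header/library version mismatch\n");
#endif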
  2219. unsigned starpu_worker_get_sched_ctx_list(int workerid, unsigned **sched_ctxs)
  2220. {
  2221. unsigned s = 0;
  2222. unsigned nsched_ctxs = _starpu_worker_get_nsched_ctxs(workerid);
  2223. _STARPU_MALLOC(*sched_ctxs, nsched_ctxs*sizeof(unsigned));
  2224. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  2225. struct _starpu_sched_ctx_elt *e = NULL;
  2226. struct _starpu_sched_ctx_list_iterator list_it;
  2227. _starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
  2228. while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
  2229. {
  2230. e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
  2231. (*sched_ctxs)[s++] = e->sched_ctx;
  2232. }
  2233. return nsched_ctxs;
  2234. }
  2235. const char *starpu_worker_get_type_as_string(enum starpu_worker_archtype type)
  2236. {
  2237. const char *ret = starpu_driver_info[type].name_upper;
  2238. if (!ret)
  2239. ret = "unknown";
  2240. return ret;
  2241. }
  2242. const char *starpu_worker_get_type_as_env_var(enum starpu_worker_archtype type)
  2243. {
  2244. const char *ret = starpu_driver_info[type].name_var;
  2245. if (!ret)
  2246. ret = "UNKNOWN";
  2247. return ret;
  2248. }
  2249. void _starpu_worker_set_stream_ctx(unsigned workerid, struct _starpu_sched_ctx *sched_ctx)
  2250. {
  2251. STARPU_ASSERT(workerid < starpu_worker_get_count());
  2252. struct _starpu_worker *w = _starpu_get_worker_struct(workerid);
  2253. w->stream_ctx = sched_ctx;
  2254. }
  2255. struct _starpu_sched_ctx* _starpu_worker_get_ctx_stream(unsigned stream_workerid)
  2256. {
  2257. if (stream_workerid >= starpu_worker_get_count())
  2258. return NULL;
  2259. struct _starpu_worker *w = _starpu_get_worker_struct(stream_workerid);
  2260. return w->stream_ctx;
  2261. }
  2262. unsigned starpu_worker_get_sched_ctx_id_stream(unsigned stream_workerid)
  2263. {
  2264. if (stream_workerid >= starpu_worker_get_count())
  2265. return STARPU_NMAX_SCHED_CTXS;
  2266. struct _starpu_worker *w = _starpu_get_worker_struct(stream_workerid);
  2267. return w->stream_ctx != NULL ? w->stream_ctx->id : STARPU_NMAX_SCHED_CTXS;
  2268. }
  2269. void starpu_worker_display_names(FILE *output, enum starpu_worker_archtype type)
  2270. {
  2271. int nworkers = starpu_worker_get_count_by_type(type);
  2272. if (nworkers <= 0)
  2273. {
  2274. fprintf(output, "No %s worker\n", starpu_worker_get_type_as_string(type));
  2275. }
  2276. else
  2277. {
  2278. int i, ids[nworkers];
  2279. starpu_worker_get_ids_by_type(type, ids, nworkers);
  2280. fprintf(output, "%d %s worker%s:\n", nworkers, starpu_worker_get_type_as_string(type), nworkers==1?"":"s");
  2281. for(i = 0; i < nworkers; i++)
  2282. {
  2283. char name[256];
  2284. starpu_worker_get_name(ids[i], name, 256);
  2285. fprintf(output, "\t%s\n", name);
  2286. }
  2287. }
  2288. }
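/* Sketch: printing the detected workers of each type at application start-up: */
#if 0
starpu_worker_display_names(stdout, STARPU_CPU_WORKER);
starpu_worker_display_names(stdout, STARPU_CUDA_WORKER);
starpu_worker_display_names(stdout, STARPU_OPENCL_WORKER);
#endif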
  2289. void _starpu_worker_refuse_task(struct _starpu_worker *worker, struct starpu_task *task)
  2290. {
  2291. if (worker->pipeline_length || worker->arch == STARPU_OPENCL_WORKER)
  2292. {
  2293. int j;
  2294. for (j = 0; j < worker->ntasks; j++)
  2295. {
  2296. const int j_mod = (j+worker->first_task)%STARPU_MAX_PIPELINE;
  2297. if (task == worker->current_tasks[j_mod])
  2298. {
  2299. worker->current_tasks[j_mod] = NULL;
  2300. if (j == 0)
  2301. {
  2302. worker->first_task = (worker->first_task + 1) % STARPU_MAX_PIPELINE;
  2303. worker->current_task = NULL;
  2304. _starpu_set_current_task(NULL);
  2305. }
  2306. break;
  2307. }
  2308. }
  2309. STARPU_ASSERT(j<worker->ntasks);
  2310. }
  2311. else
  2312. {
  2313. worker->current_task = NULL;
  2314. _starpu_set_current_task(NULL);
  2315. }
  2316. worker->ntasks--;
  2317. task->prefetched = 0;
  2318. int res = _starpu_push_task_to_workers(task);
  2319. STARPU_ASSERT_MSG(res == 0, "_starpu_push_task_to_workers() unexpectedly returned = %d\n", res);
  2320. }
  2321. int starpu_worker_sched_op_pending(void)
  2322. {
  2323. return _starpu_worker_sched_op_pending();
  2324. }
  2325. #undef starpu_worker_relax_on
  2326. void starpu_worker_relax_on(void)
  2327. {
  2328. _starpu_worker_relax_on();
  2329. }
  2330. #undef starpu_worker_relax_off
  2331. void starpu_worker_relax_off(void)
  2332. {
  2333. _starpu_worker_relax_off();
  2334. }
  2335. #undef starpu_worker_get_relax_state
  2336. int starpu_worker_get_relax_state(void)
  2337. {
  2338. return _starpu_worker_get_relax_state();
  2339. }
  2340. #undef starpu_worker_lock
  2341. void starpu_worker_lock(int workerid)
  2342. {
  2343. _starpu_worker_lock(workerid);
  2344. }
  2345. #undef starpu_worker_trylock
  2346. int starpu_worker_trylock(int workerid)
  2347. {
  2348. return _starpu_worker_trylock(workerid);
  2349. }
  2350. #undef starpu_worker_unlock
  2351. void starpu_worker_unlock(int workerid)
  2352. {
  2353. _starpu_worker_unlock(workerid);
  2354. }
  2355. #undef starpu_worker_lock_self
  2356. void starpu_worker_lock_self(void)
  2357. {
  2358. _starpu_worker_lock_self();
  2359. }
  2360. #undef starpu_worker_unlock_self
  2361. void starpu_worker_unlock_self(void)
  2362. {
  2363. _starpu_worker_unlock_self();
  2364. }
  2365. #undef starpu_wake_worker_relax
  2366. int starpu_wake_worker_relax(int workerid)
  2367. {
  2368. return _starpu_wake_worker_relax(workerid);
  2369. }
  2370. #ifdef STARPU_HAVE_HWLOC
  2371. hwloc_cpuset_t starpu_worker_get_hwloc_cpuset(int workerid)
  2372. {
  2373. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  2374. return hwloc_bitmap_dup(worker->hwloc_cpu_set);
  2375. }
  2376. hwloc_obj_t starpu_worker_get_hwloc_obj(int workerid)
  2377. {
  2378. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  2379. return worker->hwloc_obj;
  2380. }
  2381. #endif
  2382. /* Light version of _starpu_wake_worker_relax, which, when possible,
2383. * speculatively sets keep_awake on the target worker without waiting for that
  2384. * worker to enter the relaxed state.
  2385. */
  2386. int starpu_wake_worker_relax_light(int workerid)
  2387. {
  2388. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  2389. STARPU_ASSERT(worker != NULL);
  2390. int cur_workerid = starpu_worker_get_id();
  2391. if (workerid != cur_workerid)
  2392. {
  2393. starpu_worker_relax_on();
  2394. STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
  2395. while (!worker->state_relax_refcnt)
  2396. {
  2397. /* Attempt a fast path if the worker is not really asleep */
  2398. if (_starpu_config.workers[workerid].status == STATUS_SCHEDULING
  2399. || _starpu_config.workers[workerid].status == STATUS_SLEEPING_SCHEDULING)
  2400. {
  2401. _starpu_config.workers[workerid].state_keep_awake = 1;
  2402. STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
  2403. starpu_worker_relax_off();
  2404. return 1;
  2405. }
  2406. STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
  2407. }
  2408. }
  2409. else
  2410. {
  2411. STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
  2412. }
  2413. int ret = starpu_wake_worker_locked(workerid);
  2414. STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
  2415. if (workerid != cur_workerid)
  2416. {
  2417. starpu_worker_relax_off();
  2418. }
  2419. return ret;
  2420. }
  2421. #ifdef STARPU_WORKER_CALLBACKS
  2422. void starpu_worker_set_going_to_sleep_callback(void (*callback)(unsigned workerid))
  2423. {
  2424. STARPU_ASSERT(_starpu_config.conf.callback_worker_going_to_sleep);
  2425. _starpu_config.conf.callback_worker_going_to_sleep = callback;
  2426. }
  2427. void starpu_worker_set_waking_up_callback(void (*callback)(unsigned workerid))
  2428. {
  2429. STARPU_ASSERT(_starpu_config.conf.callback_worker_waking_up);
  2430. _starpu_config.conf.callback_worker_waking_up = callback;
  2431. }
  2432. #endif
  2433. enum starpu_node_kind starpu_worker_get_memory_node_kind(enum starpu_worker_archtype type)
  2434. {
  2435. enum starpu_node_kind kind = starpu_driver_info[type].memory_kind;
  2436. STARPU_ASSERT_MSG(kind != (enum starpu_node_kind) -1, "no memory for archtype %d", type);
  2437. return kind;
  2438. }