workers.c 79 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2008-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. * Copyright (C) 2011 Télécom-SudParis
  5. * Copyright (C) 2013 Thibaut Lambert
  6. * Copyright (C) 2016 Uppsala University
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #include <stdlib.h>
  20. #include <stdio.h>
  21. #ifdef __linux__
  22. #include <sys/utsname.h>
  23. #endif
  24. #include <common/config.h>
  25. #include <common/utils.h>
  26. #include <common/graph.h>
  27. #include <core/progress_hook.h>
  28. #include <core/idle_hook.h>
  29. #include <core/workers.h>
  30. #include <core/debug.h>
  31. #include <core/disk.h>
  32. #include <core/task.h>
  33. #include <core/detect_combined_workers.h>
  34. #include <datawizard/malloc.h>
  35. #include <profiling/profiling.h>
  36. #include <profiling/bound.h>
  37. #include <sched_policies/sched_component.h>
  38. #include <datawizard/memory_nodes.h>
  39. #include <common/knobs.h>
  40. #include <drivers/mp_common/sink_common.h>
  41. #include <drivers/mpi/driver_mpi_common.h>
  42. #include <drivers/cpu/driver_cpu.h>
  43. #include <drivers/cuda/driver_cuda.h>
  44. #include <drivers/opencl/driver_opencl.h>
  45. #ifdef STARPU_SIMGRID
  46. #include <core/simgrid.h>
  47. #endif
  48. #if defined(_WIN32) && !defined(__CYGWIN__)
  49. #include <windows.h>
  50. #endif
/* global knobs */
static int __g_calibrate_knob;            /* knob id: toggle perf-model calibration */
static int __g_enable_catch_signal_knob;  /* knob id: toggle signal catching */
/* per-worker knobs */
static int __w_bind_to_pu_knob;           /* knob id: request re-binding a worker to a PU */
static int __w_enable_worker_knob;        /* knob id: enable/disable task assignment to a worker */
/* knob groups registered in _starpu__workers_c__register_knobs() below */
static struct starpu_perf_knob_group * __kg_starpu_global;
static struct starpu_perf_knob_group * __kg_starpu_worker__per_worker;
  59. static void global_knobs__set(const struct starpu_perf_knob * const knob, void *context, const struct starpu_perf_knob_value * const value)
  60. {
  61. /* context is not used for global knobs */
  62. STARPU_ASSERT(context == NULL);
  63. (void)context;
  64. if (knob->id == __g_calibrate_knob)
  65. {
  66. _starpu_set_calibrate_flag((unsigned)value->val_int32_t);
  67. }
  68. else if (knob->id == __g_enable_catch_signal_knob)
  69. {
  70. _starpu_set_catch_signals(!!value->val_int32_t);
  71. }
  72. else
  73. {
  74. STARPU_ASSERT(0);
  75. abort();
  76. }
  77. }
  78. static void global_knobs__get(const struct starpu_perf_knob * const knob, void *context, struct starpu_perf_knob_value * const value)
  79. {
  80. /* context is not used for global knobs */
  81. STARPU_ASSERT(context == NULL);
  82. (void)context;
  83. if (knob->id == __g_calibrate_knob)
  84. {
  85. value->val_int32_t = (int32_t)_starpu_get_calibrate_flag();
  86. }
  87. else if (knob->id == __g_enable_catch_signal_knob)
  88. {
  89. value->val_int32_t = _starpu_get_catch_signals();
  90. }
  91. else
  92. {
  93. STARPU_ASSERT(0);
  94. abort();
  95. }
  96. }
  97. void worker_knobs__set(const struct starpu_perf_knob * const knob, void *context, const struct starpu_perf_knob_value * const value)
  98. {
  99. const unsigned workerid = *(unsigned *)context;
  100. struct _starpu_worker * const worker = _starpu_get_worker_struct(workerid);
  101. if (knob->id == __w_bind_to_pu_knob)
  102. {
  103. STARPU_ASSERT(value->val_int32_t >= 0);
  104. worker->bindid_requested = value->val_int32_t;
  105. }
  106. else if (knob->id == __w_enable_worker_knob)
  107. {
  108. worker->enable_knob = !!value->val_int32_t;
  109. }
  110. else
  111. {
  112. STARPU_ASSERT(0);
  113. abort();
  114. }
  115. }
  116. void worker_knobs__get(const struct starpu_perf_knob * const knob, void *context, struct starpu_perf_knob_value * const value)
  117. {
  118. const unsigned workerid = *(unsigned *)context;
  119. struct _starpu_worker * const worker = _starpu_get_worker_struct(workerid);
  120. if (knob->id == __w_bind_to_pu_knob)
  121. {
  122. value->val_int32_t = worker->bindid;
  123. }
  124. else if (knob->id == __w_enable_worker_knob)
  125. {
  126. value->val_int32_t = worker->enable_knob;
  127. }
  128. else
  129. {
  130. STARPU_ASSERT(0);
  131. abort();
  132. }
  133. }
/* Register the global and per-worker performance-knob groups.
 * NOTE(review): __STARPU_PERF_KNOB_REG presumably registers the knob and
 * stores its id into the matching __g_* / __w_* variable above — confirm
 * against the macro definition in common/knobs.h. */
void _starpu__workers_c__register_knobs(void)
{
	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_global;
		__kg_starpu_global = _starpu_perf_knob_group_register(scope, global_knobs__set, global_knobs__get);
		__STARPU_PERF_KNOB_REG("starpu.global", __kg_starpu_global, g_calibrate_knob, int32, "enable or disable performance models calibration (override STARPU_CALIBRATE env var)");
		__STARPU_PERF_KNOB_REG("starpu.global", __kg_starpu_global, g_enable_catch_signal_knob, int32, "enable or disable signal catching (override STARPU_CATCH_SIGNALS env var)");
	}
	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_per_worker;
		__kg_starpu_worker__per_worker = _starpu_perf_knob_group_register(scope, worker_knobs__set, worker_knobs__get);
		__STARPU_PERF_KNOB_REG("starpu.worker", __kg_starpu_worker__per_worker, w_bind_to_pu_knob, int32, "bind worker to PU (PU logical number, override StarPU binding env vars)");
		__STARPU_PERF_KNOB_REG("starpu.worker", __kg_starpu_worker__per_worker, w_enable_worker_knob, int32, "enable assigning task to that worker (1:Enabled | [0:Disabled])");
	}
	/* per-scheduler knob group: not implemented yet */
#if 0
	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_per_scheduler;
		__kg_starpu_worker__per_scheduler = _starpu_perf_knob_group_register(scope, sched_knobs__set, sched_knobs__get);
	}
#endif
}
  155. void _starpu__workers_c__unregister_knobs(void)
  156. {
  157. _starpu_perf_knob_group_unregister(__kg_starpu_global);
  158. _starpu_perf_knob_group_unregister(__kg_starpu_worker__per_worker);
  159. __kg_starpu_global = NULL;
  160. __kg_starpu_worker__per_worker = NULL;
  161. }
/* acquire/release semantic for concurrent initialization/de-initialization */
static starpu_pthread_mutex_t init_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
static starpu_pthread_cond_t init_cond = STARPU_PTHREAD_COND_INITIALIZER;
/* number of callers currently inside init (guarded by init_mutex) */
static int init_count = 0;
/* current initialization state (see enum initialization) */
static enum initialization initialized = UNINITIALIZED;
int _starpu_keys_initialized STARPU_ATTRIBUTE_INTERNAL;
starpu_pthread_key_t _starpu_worker_key STARPU_ATTRIBUTE_INTERNAL;
starpu_pthread_key_t _starpu_worker_set_key STARPU_ATTRIBUTE_INTERNAL;
/* the machine configuration shared by the whole runtime */
struct _starpu_machine_config _starpu_config STARPU_ATTRIBUTE_INTERNAL;
/* when 1, _starpu_worker_exists_and_can_execute() checks the whole platform
   rather than the task's scheduling context */
static int check_entire_platform;
int _starpu_worker_parallel_blocks;
  173. /* Pointers to argc and argv
  174. */
  175. static int *my_argc = 0;
  176. static char ***my_argv = NULL;
  177. void _starpu__workers_c__register_kobs(void)
  178. {
  179. /* TODO */
  180. }
  181. /* Initialize value of static argc and argv, called when the process begins
  182. */
  183. void _starpu_set_argc_argv(int *argc_param, char ***argv_param)
  184. {
  185. my_argc = argc_param;
  186. my_argv = argv_param;
  187. }
  188. int *_starpu_get_argc()
  189. {
  190. return my_argc;
  191. }
  192. char ***_starpu_get_argv()
  193. {
  194. return my_argv;
  195. }
  196. int starpu_is_initialized(void)
  197. {
  198. return initialized == INITIALIZED;
  199. }
/* Block the calling thread until StarPU initialization completes. */
void starpu_wait_initialized(void)
{
	STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
	/* standard condition-variable wait loop: re-check the predicate to
	   guard against spurious wakeups */
	while (initialized != INITIALIZED)
		STARPU_PTHREAD_COND_WAIT(&init_cond, &init_mutex);
	STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
}
  207. /* Makes sure that at least one of the workers of type <arch> can execute
  208. * <task>, for at least one of its implementations. */
  209. static uint32_t _starpu_worker_exists_and_can_execute(struct starpu_task *task,
  210. enum starpu_worker_archtype arch)
  211. {
  212. _starpu_codelet_check_deprecated_fields(task->cl);
  213. /* make sure there is a worker on the machine able to execute the
  214. task, independent of the sched_ctx, this latter may receive latter on
  215. the necessary worker - the user or the hypervisor should take care this happens */
  216. struct _starpu_sched_ctx *sched_ctx = check_entire_platform == 1 ? _starpu_get_initial_sched_ctx() : _starpu_get_sched_ctx_struct(task->sched_ctx);
  217. struct starpu_worker_collection *workers = sched_ctx->workers;
  218. struct starpu_sched_ctx_iterator it;
  219. workers->init_iterator(workers, &it);
  220. while(workers->has_next(workers, &it))
  221. {
  222. int i = workers->get_next(workers, &it);
  223. if (starpu_worker_get_type(i) != arch)
  224. continue;
  225. unsigned impl;
  226. for (impl = 0; impl < STARPU_MAXIMPLEMENTATIONS; impl++)
  227. {
  228. /* We could call task->cl->can_execute(i, task, impl)
  229. here, it would definitely work. It is probably
  230. cheaper to check whether it is necessary in order to
  231. avoid a useless function call, though. */
  232. unsigned test_implementation = 0;
  233. switch (arch)
  234. {
  235. case STARPU_CPU_WORKER:
  236. if (task->cl->cpu_funcs[impl] != NULL)
  237. test_implementation = 1;
  238. break;
  239. case STARPU_CUDA_WORKER:
  240. if (task->cl->cuda_funcs[impl] != NULL)
  241. test_implementation = 1;
  242. break;
  243. case STARPU_OPENCL_WORKER:
  244. if (task->cl->opencl_funcs[impl] != NULL)
  245. test_implementation = 1;
  246. break;
  247. case STARPU_MIC_WORKER:
  248. if (task->cl->cpu_funcs_name[impl] != NULL || task->cl->mic_funcs[impl] != NULL)
  249. test_implementation = 1;
  250. break;
  251. case STARPU_MPI_MS_WORKER:
  252. if (task->cl->cpu_funcs_name[impl] != NULL || task->cl->mpi_ms_funcs[impl] != NULL)
  253. test_implementation = 1;
  254. break;
  255. default:
  256. STARPU_ABORT();
  257. }
  258. if (!test_implementation)
  259. continue;
  260. if (task->cl->can_execute)
  261. return task->cl->can_execute(i, task, impl);
  262. if(test_implementation)
  263. return 1;
  264. }
  265. }
  266. return 0;
  267. }
/* in case a task is submitted, we may check whether there exists a worker
   that may execute the task or not */
uint32_t _starpu_worker_exists(struct starpu_task *task)
{
	_starpu_codelet_check_deprecated_fields(task->cl);
	/* a NOWHERE task needs no worker at all */
	if (task->where == STARPU_NOWHERE)
		return 1;

	/* if the task belongs to the init context we can
	   check out all the worker mask of the machine
	   if not we should iterate on the workers of the ctx
	   and verify if it exists a worker able to exec the task */
	if(task->sched_ctx == 0)
	{
		if (!(task->where & _starpu_config.worker_mask))
			return 0;
		/* no user filter: the mask match above is enough */
		if (!task->cl->can_execute)
			return 1;
	}

	/* per-architecture checks, each compiled only when the
	   corresponding driver (or simgrid) is available */
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
	if ((task->where & STARPU_CPU) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_CPU_WORKER))
		return 1;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	if ((task->where & STARPU_CUDA) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_CUDA_WORKER))
		return 1;
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
	if ((task->where & STARPU_OPENCL) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_OPENCL_WORKER))
		return 1;
#endif
#ifdef STARPU_USE_MIC
	if ((task->where & STARPU_MIC) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_MIC_WORKER))
		return 1;
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
	if ((task->where & STARPU_MPI_MS) &&
	    _starpu_worker_exists_and_can_execute(task, STARPU_MPI_MS_WORKER))
		return 1;
#endif
	return 0;
}
  313. uint32_t _starpu_can_submit_cuda_task(void)
  314. {
  315. return STARPU_CUDA & _starpu_config.worker_mask;
  316. }
  317. uint32_t _starpu_can_submit_cpu_task(void)
  318. {
  319. return STARPU_CPU & _starpu_config.worker_mask;
  320. }
  321. uint32_t _starpu_can_submit_opencl_task(void)
  322. {
  323. return STARPU_OPENCL & _starpu_config.worker_mask;
  324. }
/* Tell whether codelet <cl> provides implementation number <nimpl> for the
 * given worker architecture.  For STARPU_ANY_WORKER the implementation must
 * be available (or irrelevant) on every architecture that has workers. */
static inline int _starpu_can_use_nth_implementation(enum starpu_worker_archtype arch, struct starpu_codelet *cl, unsigned nimpl)
{
	switch(arch)
	{
	case STARPU_ANY_WORKER:
	{
		/* each *_func_enabled defaults to 1 so that an architecture
		   compiled out of this build does not veto the combination */
		int cpu_func_enabled=1, cuda_func_enabled=1, opencl_func_enabled=1;
		/* TODO: MIC */
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
		starpu_cpu_func_t cpu_func = _starpu_task_get_cpu_nth_implementation(cl, nimpl);
		cpu_func_enabled = cpu_func != NULL && starpu_cpu_worker_get_count();
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
		starpu_cuda_func_t cuda_func = _starpu_task_get_cuda_nth_implementation(cl, nimpl);
		cuda_func_enabled = cuda_func != NULL && starpu_cuda_worker_get_count();
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
		starpu_opencl_func_t opencl_func = _starpu_task_get_opencl_nth_implementation(cl, nimpl);
		opencl_func_enabled = opencl_func != NULL && starpu_opencl_worker_get_count();
#endif
		return cpu_func_enabled && cuda_func_enabled && opencl_func_enabled;
	}
	case STARPU_CPU_WORKER:
	{
		starpu_cpu_func_t func = _starpu_task_get_cpu_nth_implementation(cl, nimpl);
		return func != NULL;
	}
	case STARPU_CUDA_WORKER:
	{
		starpu_cuda_func_t func = _starpu_task_get_cuda_nth_implementation(cl, nimpl);
		return func != NULL;
	}
	case STARPU_OPENCL_WORKER:
	{
		starpu_opencl_func_t func = _starpu_task_get_opencl_nth_implementation(cl, nimpl);
		return func != NULL;
	}
	case STARPU_MIC_WORKER:
	{
		/* MIC sinks can also run a named CPU function compiled on the
		   device side, hence the func_name fallback */
		starpu_mic_func_t func = _starpu_task_get_mic_nth_implementation(cl, nimpl);
		const char *func_name = _starpu_task_get_cpu_name_nth_implementation(cl, nimpl);
		return func != NULL || func_name != NULL;
	}
	case STARPU_MPI_MS_WORKER:
	{
		/* same fallback logic as MIC for MPI master-slave slaves */
		starpu_mpi_ms_func_t func = _starpu_task_get_mpi_ms_nth_implementation(cl, nimpl);
		const char *func_name = _starpu_task_get_cpu_name_nth_implementation(cl, nimpl);
		return func != NULL || func_name != NULL;
	}
	default:
		STARPU_ASSERT_MSG(0, "Unknown arch type %d", arch);
	}
	return 0;
}
/* Test if this task can be processed on this worker, regardless of the implementation */
/* must be called with sched_mutex locked to protect state_blocked */
static inline int _starpu_can_execute_task_any_impl(unsigned workerid, struct starpu_task *task)
{
	/* worker disabled through the per-worker "enable" knob? */
	if (!_starpu_config.workers[workerid].enable_knob)
		return 0;
	/* explicit restriction: task->workerids is a bitmap of allowed workers */
	if (task->workerids_len)
	{
		/* bits per bitmap word */
		size_t div = sizeof(*task->workerids) * 8;
		if (workerid / div >= task->workerids_len || ! (task->workerids[workerid / div] & (1UL << workerid % div)))
			return 0;
	}
	/* if the worker is blocked in a parallel ctx don't submit tasks on it */
#ifdef STARPU_DEVEL
#warning FIXME: this is very expensive, while can_execute is supposed to be not very costly so schedulers can call it a lot
#endif
	if(starpu_worker_is_blocked_in_parallel(workerid))
		return 0;
	/* finally, the task must target this worker's architecture */
	if (!(task->where & _starpu_config.workers[workerid].worker_mask))
		return 0;
	return 1;
}
  401. /* must be called with sched_mutex locked to protect state_blocked_in_parallel */
  402. int starpu_worker_can_execute_task(unsigned workerid, struct starpu_task *task, unsigned nimpl)
  403. {
  404. /* TODO: check that the task operand sizes will fit on that device */
  405. return _starpu_can_execute_task_any_impl(workerid, task) &&
  406. _starpu_can_use_nth_implementation(_starpu_config.workers[workerid].arch, task->cl, nimpl) &&
  407. (!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl));
  408. }
  409. /* must be called with sched_mutex locked to protect state_blocked_in_parallel */
  410. int starpu_worker_can_execute_task_impl(unsigned workerid, struct starpu_task *task, unsigned *impl_mask)
  411. {
  412. if (!_starpu_can_execute_task_any_impl(workerid, task))
  413. return 0;
  414. unsigned mask;
  415. int i;
  416. enum starpu_worker_archtype arch;
  417. struct starpu_codelet *cl;
  418. /* TODO: check that the task operand sizes will fit on that device */
  419. cl = task->cl;
  420. mask = 0;
  421. arch = _starpu_config.workers[workerid].arch;
  422. if (!task->cl->can_execute)
  423. {
  424. for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
  425. if (_starpu_can_use_nth_implementation(arch, cl, i))
  426. {
  427. mask |= 1U << i;
  428. if (!impl_mask)
  429. break;
  430. }
  431. }
  432. else
  433. {
  434. for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
  435. if (_starpu_can_use_nth_implementation(arch, cl, i)
  436. && (!task->cl->can_execute || task->cl->can_execute(workerid, task, i)))
  437. {
  438. mask |= 1U << i;
  439. if (!impl_mask)
  440. break;
  441. }
  442. }
  443. if (impl_mask)
  444. *impl_mask = mask;
  445. return mask != 0;
  446. }
  447. /* must be called with sched_mutex locked to protect state_blocked */
  448. int starpu_worker_can_execute_task_first_impl(unsigned workerid, struct starpu_task *task, unsigned *nimpl)
  449. {
  450. if (!_starpu_can_execute_task_any_impl(workerid, task))
  451. return 0;
  452. int i;
  453. enum starpu_worker_archtype arch;
  454. struct starpu_codelet *cl;
  455. /* TODO: check that the task operand sizes will fit on that device */
  456. cl = task->cl;
  457. arch = _starpu_config.workers[workerid].arch;
  458. if (!task->cl->can_execute)
  459. {
  460. for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
  461. if (_starpu_can_use_nth_implementation(arch, cl, i))
  462. {
  463. if (nimpl)
  464. *nimpl = i;
  465. return 1;
  466. }
  467. }
  468. else
  469. {
  470. for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
  471. if (_starpu_can_use_nth_implementation(arch, cl, i)
  472. && (task->cl->can_execute(workerid, task, i)))
  473. {
  474. if (nimpl)
  475. *nimpl = i;
  476. return 1;
  477. }
  478. }
  479. return 0;
  480. }
/* Tell whether a (possibly combined) worker can run implementation <nimpl>
 * of <task>.  Ids below topology.nworkers denote plain workers; larger ids
 * index the combined_workers table. */
int starpu_combined_worker_can_execute_task(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	/* TODO: check that the task operand sizes will fit on that device */
	struct starpu_codelet *cl = task->cl;
	unsigned nworkers = _starpu_config.topology.nworkers;
	/* Is this a parallel worker ? */
	if (workerid < nworkers)
	{
		/* plain worker: same checks as starpu_worker_can_execute_task,
		   minus the blocked-in-parallel test */
		if (!_starpu_config.workers[workerid].enable_knob)
			return 0;
		return !!((task->where & _starpu_config.workers[workerid].worker_mask) &&
			  _starpu_can_use_nth_implementation(_starpu_config.workers[workerid].arch, task->cl, nimpl) &&
			  (!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl)));
	}
	else
	{
		/* FORKJOIN is only supported with hwloc, or glibc (for its
		   scheduling primitives, presumably) — hence the nested #ifdefs */
		if (cl->type == STARPU_SPMD
#ifdef STARPU_HAVE_HWLOC
		    || cl->type == STARPU_FORKJOIN
#else
#ifdef __GLIBC__
		    || cl->type == STARPU_FORKJOIN
#endif
#endif
		    )
		{
			/* TODO we should add other types of constraints */
			/* Is the worker larger than requested ? */
			int worker_size = (int)_starpu_config.combined_workers[workerid - nworkers].worker_size;
			/* the first member's arch stands for the whole combined worker */
			int worker0 = _starpu_config.combined_workers[workerid - nworkers].combined_workerid[0];
			return !!((worker_size <= task->cl->max_parallelism) &&
				  _starpu_can_use_nth_implementation(_starpu_config.workers[worker0].arch, task->cl, nimpl) &&
				  (!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl)));
		}
		else
		{
			/* We have a sequential task but a parallel worker */
			return 0;
		}
	}
}
  522. /*
  523. * Runtime initialization methods
  524. */
static void _starpu_init_worker_queue(struct _starpu_worker *worker)
{
	/* Register the worker's scheduling condition with its memory node —
	   presumably so activity on that node can signal the worker; see
	   _starpu_memory_node_register_condition for the exact semantics. */
	_starpu_memory_node_register_condition(worker, &worker->sched_cond, worker->memory_node);
}
  529. /*
  530. * Returns 0 if the given driver is one of the drivers that must be launched by
  531. * the application itself, and not by StarPU, 1 otherwise.
  532. */
  533. static unsigned _starpu_may_launch_driver(struct starpu_conf *conf,
  534. struct starpu_driver *d)
  535. {
  536. if (conf->n_not_launched_drivers == 0 || conf->not_launched_drivers == NULL)
  537. return 1;
  538. /* Is <d> in conf->not_launched_drivers ? */
  539. unsigned i;
  540. for (i = 0; i < conf->n_not_launched_drivers; i++)
  541. {
  542. if (d->type != conf->not_launched_drivers[i].type)
  543. continue;
  544. switch (d->type)
  545. {
  546. case STARPU_CPU_WORKER:
  547. if (d->id.cpu_id == conf->not_launched_drivers[i].id.cpu_id)
  548. return 0;
  549. break;
  550. case STARPU_CUDA_WORKER:
  551. if (d->id.cuda_id == conf->not_launched_drivers[i].id.cuda_id)
  552. return 0;
  553. break;
  554. case STARPU_OPENCL_WORKER:
  555. if (d->id.opencl_id == conf->not_launched_drivers[i].id.opencl_id)
  556. return 0;
  557. break;
  558. default:
  559. STARPU_ABORT();
  560. }
  561. }
  562. return 1;
  563. }
#ifdef STARPU_PERF_DEBUG
/* interval timer used for profiling when STARPU_PERF_DEBUG builds are enabled */
struct itimerval prof_itimer;
#endif
/* Initialize every field of a worker descriptor to its default value.
 * Fields annotated "initialized by topology.c" or "by driver" are deliberately
 * left untouched here and filled in elsewhere. */
void _starpu_worker_init(struct _starpu_worker *workerarg, struct _starpu_machine_config *pconfig)
{
	workerarg->config = pconfig;
	STARPU_PTHREAD_MUTEX_INIT(&workerarg->mutex, NULL);
	/* arch initialized by topology.c */
	/* worker_mask initialized by topology.c */
	/* perf_arch initialized by topology.c */
	/* worker_thread initialized by _starpu_launch_drivers */
	/* devid initialized by topology.c */
	/* subworkerid initialized by topology.c */
	/* bindid initialized by topology.c */
	/* workerid initialized by topology.c */
	/* By default the worker forms its own trivial combined worker of size 1 */
	workerarg->combined_workerid = workerarg->workerid;
	workerarg->current_rank = 0;
	workerarg->worker_size = 1;
	STARPU_PTHREAD_COND_INIT(&workerarg->started_cond, NULL);
	STARPU_PTHREAD_COND_INIT(&workerarg->ready_cond, NULL);
	/* memory_node initialized by topology.c */
	STARPU_PTHREAD_COND_INIT(&workerarg->sched_cond, NULL);
	STARPU_PTHREAD_MUTEX_INIT(&workerarg->sched_mutex, NULL);
	starpu_task_list_init(&workerarg->local_tasks);
	_starpu_ctx_change_list_init(&workerarg->ctx_change_list);
	workerarg->local_ordered_tasks = NULL;
	workerarg->local_ordered_tasks_size = 0;
	workerarg->current_ordered_task = 0;
	/* Ordered tasks are numbered starting from 1 */
	workerarg->current_ordered_task_order = 1;
	workerarg->current_task = NULL;
#ifdef STARPU_SIMGRID
	starpu_pthread_wait_init(&workerarg->wait);
	starpu_pthread_queue_register(&workerarg->wait, &_starpu_simgrid_task_queue[workerarg->workerid]);
#endif
	workerarg->task_transferring = NULL;
	workerarg->nb_buffers_transferred = 0;
	workerarg->nb_buffers_totransfer = 0;
	workerarg->first_task = 0;
	workerarg->ntasks = 0;
	/* set initialized by topology.c */
	workerarg->pipeline_length = 0;
	workerarg->pipeline_stuck = 0;
	workerarg->worker_is_running = 0;
	workerarg->worker_is_initialized = 0;
	workerarg->wait_for_worker_initialization = 0;
	workerarg->status = STATUS_INITIALIZING;
	workerarg->state_keep_awake = 0;
	/* name initialized by driver */
	/* short_name initialized by driver */
	workerarg->run_by_starpu = 1;
	workerarg->driver_ops = NULL;
	workerarg->sched_ctx_list = NULL;
	workerarg->tmp_sched_ctx = -1;
	workerarg->nsched_ctxs = 0;
	_starpu_barrier_counter_init(&workerarg->tasks_barrier, 0);
	workerarg->has_prev_init = 0;
	/* Per-scheduling-context bookkeeping starts cleared */
	int ctx;
	for(ctx = 0; ctx < STARPU_NMAX_SCHED_CTXS; ctx++)
		workerarg->removed_from_ctx[ctx] = 0;
	workerarg->spinning_backoff = 1;
	for(ctx = 0; ctx < STARPU_NMAX_SCHED_CTXS; ctx++)
	{
		workerarg->shares_tasks_lists[ctx] = 0;
		workerarg->poped_in_ctx[ctx] = 0;
	}
	workerarg->reverse_phase[0] = 0;
	workerarg->reverse_phase[1] = 0;
	workerarg->pop_ctx_priority = 1;
	workerarg->is_slave_somewhere = 0;
	/* Start in the "relaxed" state (refcount 1) */
	workerarg->state_relax_refcnt = 1;
#ifdef STARPU_SPINLOCK_CHECK
	/* Record where the relaxed state was entered, for spinlock debugging */
	workerarg->relax_on_file = __FILE__;
	workerarg->relax_on_line = __LINE__;
	workerarg->relax_on_func = __starpu_func__;
	workerarg->relax_off_file = NULL;
	workerarg->relax_off_line = 0;
	workerarg->relax_off_func = NULL;
#endif
	workerarg->state_sched_op_pending = 0;
	workerarg->state_changing_ctx_waiting = 0;
	workerarg->state_changing_ctx_notice = 0;
	workerarg->state_blocked_in_parallel_observed = 0;
	workerarg->state_blocked_in_parallel = 0;
	workerarg->state_block_in_parallel_req = 0;
	workerarg->state_block_in_parallel_ack = 0;
	workerarg->state_unblock_in_parallel_req = 0;
	workerarg->state_unblock_in_parallel_ack = 0;
	workerarg->block_in_parallel_ref_count = 0;
	_starpu_perf_counter_sample_init(&workerarg->perf_counter_sample, starpu_perf_counter_scope_per_worker);
	/* Performance knobs are enabled by default */
	workerarg->enable_knob = 1;
	workerarg->bindid_requested = -1;
	/* cpu_set/hwloc_cpu_set/hwloc_obj initialized in topology.c */
}
/* Release the per-worker resources allocated by _starpu_worker_init(). */
static void _starpu_worker_deinit(struct _starpu_worker *workerarg)
{
	/* Cast kept to silence "unused parameter" in configurations where the
	 * conditional code below is compiled out */
	(void) workerarg;
#ifdef STARPU_SIMGRID
	starpu_pthread_queue_unregister(&workerarg->wait, &_starpu_simgrid_task_queue[workerarg->workerid]);
	starpu_pthread_wait_destroy(&workerarg->wait);
#endif
	_starpu_perf_counter_sample_exit(&workerarg->perf_counter_sample);
}
  666. #ifdef STARPU_USE_FXT
  667. void _starpu_worker_start(struct _starpu_worker *worker, unsigned fut_key, unsigned sync)
  668. {
  669. unsigned devid = worker->devid;
  670. unsigned memnode = worker->memory_node;
  671. _STARPU_TRACE_WORKER_INIT_START(fut_key, worker->workerid, devid, memnode, worker->bindid, sync);
  672. }
  673. #endif
/* Common startup code run at the beginning of every driver thread: register
 * the thread for tracing, announce that the worker is running, bind the thread
 * to its CPU and arm the profiling timer. */
void _starpu_driver_start(struct _starpu_worker *worker, unsigned fut_key, unsigned sync STARPU_ATTRIBUTE_UNUSED)
{
	(void) fut_key; /* only used when FxT tracing is compiled in */
	int devid = worker->devid;
	(void) devid; /* only used by the debug output below */
#ifdef STARPU_USE_FXT
	_STARPU_TRACE_REGISTER_THREAD(worker->bindid);
	_starpu_worker_start(worker, fut_key, sync);
#endif
	_starpu_set_local_worker_key(worker);
	/* Signal _starpu_launch_drivers(), which may be blocked on started_cond
	 * (it waits for this flag when FxT serializes worker startup) */
	STARPU_PTHREAD_MUTEX_LOCK(&worker->mutex);
	worker->worker_is_running = 1;
	STARPU_PTHREAD_COND_SIGNAL(&worker->started_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&worker->mutex);
	_starpu_bind_thread_on_cpu(worker->bindid, worker->workerid, NULL);
#if defined(STARPU_PERF_DEBUG) && !defined(STARPU_SIMGRID)
	/* Inherit the profiling itimer captured from the main thread */
	setitimer(ITIMER_PROF, &prof_itimer, NULL);
#endif
	_STARPU_DEBUG("worker %p %d for dev %d is ready on logical cpu %d\n", worker, worker->workerid, devid, worker->bindid);
#ifdef STARPU_HAVE_HWLOC
	_STARPU_DEBUG("worker %p %d cpuset start at %d\n", worker, worker->workerid, hwloc_bitmap_first(worker->hwloc_cpu_set));
#endif
}
/* Spawn the driver thread(s) for every worker discovered by the topology, then
 * wait until each launched worker (or worker set) reports that it has finished
 * initializing. Workers listed in conf->not_launched_drivers are left for the
 * application to run (run_by_starpu is cleared for them). */
static void _starpu_launch_drivers(struct _starpu_machine_config *pconfig)
{
	pconfig->running = 1;
	pconfig->pause_depth = 0;
	pconfig->submitting = 1;
	STARPU_HG_DISABLE_CHECKING(pconfig->watchdog_ok);
	unsigned nworkers = pconfig->topology.nworkers;
	unsigned worker;
#if defined(STARPU_PERF_DEBUG) && !defined(STARPU_SIMGRID)
	/* Get itimer of the main thread, to set it for the worker threads */
	getitimer(ITIMER_PROF, &prof_itimer);
#endif
	STARPU_AYU_INIT();
	/* Launch workers asynchronously */
	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_worker *workerarg = &pconfig->workers[worker];
		unsigned devid = workerarg->devid;
		workerarg->wait_for_worker_initialization = 0;
		_STARPU_DEBUG("initialising worker %u/%u\n", worker, nworkers);
		_starpu_init_worker_queue(workerarg);
		/* Describe this worker as a starpu_driver so it can be matched
		 * against the "not launched" list */
		struct starpu_driver driver;
		driver.type = workerarg->arch;
		switch (workerarg->arch)
		{
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
		case STARPU_CPU_WORKER:
		{
			driver.id.cpu_id = devid;
			workerarg->driver_ops = &_starpu_driver_cpu_ops;
			/* One thread per CPU worker; wait for it individually */
			workerarg->wait_for_worker_initialization = 1;
			if (_starpu_may_launch_driver(&pconfig->conf, &driver))
			{
				STARPU_PTHREAD_CREATE_ON(
					"CPU",
					&workerarg->worker_thread,
					NULL,
					_starpu_cpu_worker,
					workerarg,
					_starpu_simgrid_get_host_by_worker(workerarg));
			}
			else
			{
				/* The application will drive this worker itself */
				workerarg->run_by_starpu = 0;
			}
			break;
		}
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
		case STARPU_CUDA_WORKER:
		{
			driver.id.cuda_id = devid;
			workerarg->driver_ops = &_starpu_driver_cuda_ops;
			struct _starpu_worker_set *worker_set = workerarg->set;
			if (worker_set->workers != workerarg)
				/* We are not the first worker of the
				 * set, don't start a thread for it. */
				break;
			/* One thread drives the whole set; wait on the set instead
			 * of the individual worker */
			worker_set->set_is_initialized = 0;
			worker_set->wait_for_set_initialization = 1;
			workerarg->wait_for_worker_initialization = 0;
			if (_starpu_may_launch_driver(&pconfig->conf, &driver))
			{
				STARPU_PTHREAD_CREATE_ON(
					"CUDA",
					&worker_set->worker_thread,
					NULL,
					_starpu_cuda_worker,
					worker_set,
					_starpu_simgrid_get_host_by_worker(workerarg));
			}
			else
			{
				workerarg->run_by_starpu = 0;
			}
			break;
		}
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
		case STARPU_OPENCL_WORKER:
		{
#ifndef STARPU_SIMGRID
			starpu_opencl_get_device(devid, &driver.id.opencl_id);
			workerarg->driver_ops = &_starpu_driver_opencl_ops;
			workerarg->wait_for_worker_initialization = 1;
			if (_starpu_may_launch_driver(&pconfig->conf, &driver))
			{
				STARPU_PTHREAD_CREATE_ON(
					"OpenCL",
					&workerarg->worker_thread,
					NULL,
					_starpu_opencl_worker,
					workerarg,
					_starpu_simgrid_get_host_by_worker(workerarg));
			}
			else
			{
				workerarg->run_by_starpu = 0;
			}
#endif
			break;
		}
#endif
#ifdef STARPU_USE_MIC
		case STARPU_MIC_WORKER:
		{
			/* We spawn only one thread
			 * per MIC device, which will control all MIC
			 * workers of this device. (by using a worker set). */
			struct _starpu_worker_set *worker_set = workerarg->set;
			if (worker_set->workers != workerarg)
				break;
			worker_set->set_is_initialized = 0;
			worker_set->wait_for_set_initialization = 1;
			workerarg->wait_for_worker_initialization = 0;
			STARPU_PTHREAD_CREATE_ON(
				"MIC",
				&worker_set->worker_thread,
				NULL,
				_starpu_mic_src_worker,
				worker_set,
				_starpu_simgrid_get_host_by_worker(workerarg));
			break;
		}
#endif /* STARPU_USE_MIC */
#ifdef STARPU_USE_MPI_MASTER_SLAVE
		case STARPU_MPI_MS_WORKER:
		{
			/* We spawn only one thread
			 * per MPI device, which will control all MPI
			 * workers of this device. (by using a worker set). */
			struct _starpu_worker_set *worker_set = workerarg->set;
			if (worker_set->workers != workerarg)
				break;
			worker_set->set_is_initialized = 0;
			worker_set->wait_for_set_initialization = 1;
			workerarg->wait_for_worker_initialization = 0;
#ifdef STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD
			/* if MPI has multiple threads supports
			 * we launch 1 thread per device
			 * else
			 * we launch one thread for all devices
			 */
			STARPU_PTHREAD_CREATE_ON(
				"MPI MS",
				&worker_set->worker_thread,
				NULL,
				_starpu_mpi_src_worker,
				worker_set,
				_starpu_simgrid_get_host_by_worker(workerarg));
#endif /* STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD */
			break;
		}
#endif /* STARPU_USE_MPI_MASTER_SLAVE */
		default:
			STARPU_ABORT();
		}
#ifdef STARPU_USE_FXT
		/* In tracing mode, make sure the thread is really started
		 * before starting another one, to make sure they appear in
		 * order in the trace.
		 */
		if ((!workerarg->set || workerarg->set->workers == workerarg)
			&& workerarg->run_by_starpu == 1 && workerarg->arch != STARPU_MPI_MS_WORKER)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
			while (!workerarg->worker_is_running)
				STARPU_PTHREAD_COND_WAIT(&workerarg->started_cond, &workerarg->mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
		}
#endif
	}
#if defined(STARPU_USE_MPI_MASTER_SLAVE) && !defined(STARPU_MPI_MASTER_SLAVE_MULTIPLE_THREAD)
	/* Single-threaded MPI: one thread drives all MPI devices */
	if (pconfig->topology.nmpidevices > 0)
	{
		struct _starpu_worker_set * worker_set_zero = &mpi_worker_set[0];
		struct _starpu_worker * worker_zero = &worker_set_zero->workers[0];
		STARPU_PTHREAD_CREATE_ON(
			"zero",
			&worker_set_zero->worker_thread,
			NULL,
			_starpu_mpi_src_worker,
			&mpi_worker_set,
			_starpu_simgrid_get_host_by_worker(worker_zero));
		/* We use the first worker to know if everything are finished */
#ifdef STARPU_USE_FXT
		STARPU_PTHREAD_MUTEX_LOCK(&worker_zero->mutex);
		while (!worker_zero->worker_is_running)
			STARPU_PTHREAD_COND_WAIT(&worker_zero->started_cond, &worker_zero->mutex);
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_zero->mutex);
#endif
		STARPU_PTHREAD_MUTEX_LOCK(&worker_set_zero->mutex);
		while (!worker_set_zero->set_is_initialized)
			STARPU_PTHREAD_COND_WAIT(&worker_set_zero->ready_cond,
						 &worker_set_zero->mutex);
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_set_zero->mutex);
		worker_set_zero->started = 1;
		/* NOTE(review): worker_set_zero already points at &mpi_worker_set[0],
		 * so this assigns the field to itself — confirm whether another
		 * source/destination was intended. */
		worker_set_zero->worker_thread = mpi_worker_set[0].worker_thread;
	}
#endif
	/* Wait for every launched worker / worker set to finish initializing */
	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_worker *workerarg = &pconfig->workers[worker];
		_STARPU_DEBUG("waiting for worker %u initialization\n", worker);
		/* NOTE(review): this `break` stops waiting for ALL remaining
		 * workers as soon as one application-driven worker is met;
		 * `continue` looks intended — a later worker may still be run
		 * by StarPU. Confirm against upstream. */
		if (!workerarg->run_by_starpu)
			break;
		struct _starpu_worker_set *worker_set = workerarg->set;
		if (worker_set && worker_set->wait_for_set_initialization == 1)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&worker_set->mutex);
			while (!worker_set->set_is_initialized)
				STARPU_PTHREAD_COND_WAIT(&worker_set->ready_cond,
							 &worker_set->mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&worker_set->mutex);
			worker_set->started = 1;
			worker_set->wait_for_set_initialization = 0;
		}
		else if (workerarg->wait_for_worker_initialization == 1)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
			while (!workerarg->worker_is_initialized)
				STARPU_PTHREAD_COND_WAIT(&workerarg->ready_cond, &workerarg->mutex);
			STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
			workerarg->wait_for_worker_initialization = 0;
		}
	}
	_STARPU_DEBUG("finished launching drivers\n");
}
/* Initialize the starpu_conf with default values, reading the corresponding
 * environment variables where they exist.
 * Returns 0 on success, -EINVAL when conf is NULL. */
int starpu_conf_init(struct starpu_conf *conf)
{
	if (!conf)
		return -EINVAL;
	memset(conf, 0, sizeof(*conf));
	/* Magic value checked by starpu_init() to detect uninitialized confs */
	conf->magic = 42;
	conf->sched_policy_name = starpu_getenv("STARPU_SCHED");
	conf->sched_policy = NULL;
	conf->global_sched_ctx_min_priority = starpu_get_env_number("STARPU_MIN_PRIO");
	conf->global_sched_ctx_max_priority = starpu_get_env_number("STARPU_MAX_PRIO");
	conf->catch_signals = starpu_get_env_number_default("STARPU_CATCH_SIGNALS", 1);
	/* Note that starpu_get_env_number returns -1 in case the variable is
	 * not defined */
	/* Backward compatibility: check the value of STARPU_NCPUS if
	 * STARPU_NCPU is not set. */
	conf->ncpus = starpu_get_env_number("STARPU_NCPU");
	if (conf->ncpus == -1)
		conf->ncpus = starpu_get_env_number("STARPU_NCPUS");
	conf->reserve_ncpus = starpu_get_env_number("STARPU_RESERVE_NCPU");
	/* One extra core is set apart when the main thread gets its own binding */
	int main_thread_bind = starpu_get_env_number_default("STARPU_MAIN_THREAD_BIND", 0);
	if (main_thread_bind)
		conf->reserve_ncpus++;
	conf->ncuda = starpu_get_env_number("STARPU_NCUDA");
	conf->nopencl = starpu_get_env_number("STARPU_NOPENCL");
	conf->nmic = starpu_get_env_number("STARPU_NMIC");
	conf->nmpi_ms = starpu_get_env_number("STARPU_NMPI_MS");
	conf->calibrate = starpu_get_env_number("STARPU_CALIBRATE");
	conf->bus_calibrate = starpu_get_env_number("STARPU_BUS_CALIBRATE");
	conf->mic_sink_program_path = starpu_getenv("STARPU_MIC_PROGRAM_PATH");
	/* -1 means the variable was unset: default to disabled */
	if (conf->calibrate == -1)
		conf->calibrate = 0;
	if (conf->bus_calibrate == -1)
		conf->bus_calibrate = 0;
	conf->use_explicit_workers_bindid = 0; /* TODO */
	conf->use_explicit_workers_cuda_gpuid = 0; /* TODO */
	conf->use_explicit_workers_opencl_gpuid = 0; /* TODO */
	conf->use_explicit_workers_mic_deviceid = 0; /* TODO */
	conf->use_explicit_workers_mpi_ms_deviceid = 0; /* TODO */
	conf->single_combined_worker = starpu_get_env_number("STARPU_SINGLE_COMBINED_WORKER");
	if (conf->single_combined_worker == -1)
		conf->single_combined_worker = 0;
	/* Asynchronous copies may be disabled globally or per driver kind,
	 * either at configure time or through the environment */
#if defined(STARPU_DISABLE_ASYNCHRONOUS_COPY)
	conf->disable_asynchronous_copy = 1;
#else
	conf->disable_asynchronous_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_COPY");
	if (conf->disable_asynchronous_copy == -1)
		conf->disable_asynchronous_copy = 0;
#endif
#if defined(STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY)
	conf->disable_asynchronous_cuda_copy = 1;
#else
	conf->disable_asynchronous_cuda_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY");
	if (conf->disable_asynchronous_cuda_copy == -1)
		conf->disable_asynchronous_cuda_copy = 0;
#endif
#if defined(STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY)
	conf->disable_asynchronous_opencl_copy = 1;
#else
	conf->disable_asynchronous_opencl_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY");
	if (conf->disable_asynchronous_opencl_copy == -1)
		conf->disable_asynchronous_opencl_copy = 0;
#endif
#if defined(STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY)
	conf->disable_asynchronous_mic_copy = 1;
#else
	conf->disable_asynchronous_mic_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY");
	if (conf->disable_asynchronous_mic_copy == -1)
		conf->disable_asynchronous_mic_copy = 0;
#endif
#if defined(STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY)
	conf->disable_asynchronous_mpi_ms_copy = 1;
#else
	conf->disable_asynchronous_mpi_ms_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY");
	if(conf->disable_asynchronous_mpi_ms_copy == -1)
		conf->disable_asynchronous_mpi_ms_copy = 0;
#endif
	/* 64MiB by default */
	conf->trace_buffer_size = ((uint64_t) starpu_get_env_number_default("STARPU_TRACE_BUFFER_SIZE", 64)) << 20;
	/* Do not start performance counter collection by default */
	conf->start_perf_counter_collection = 0;
	return 0;
}
  1008. static void _starpu_conf_set_value_against_environment(char *name, int *value, int precedence_over_env)
  1009. {
  1010. if (precedence_over_env == 0)
  1011. {
  1012. int number;
  1013. number = starpu_get_env_number(name);
  1014. if (number != -1)
  1015. {
  1016. *value = number;
  1017. }
  1018. }
  1019. }
  1020. void _starpu_conf_check_environment(struct starpu_conf *conf)
  1021. {
  1022. char *sched = starpu_getenv("STARPU_SCHED");
  1023. if (sched)
  1024. {
  1025. conf->sched_policy_name = sched;
  1026. }
  1027. _starpu_conf_set_value_against_environment("STARPU_NCPUS", &conf->ncpus, conf->precedence_over_environment_variables);
  1028. _starpu_conf_set_value_against_environment("STARPU_NCPU", &conf->ncpus, conf->precedence_over_environment_variables);
  1029. _starpu_conf_set_value_against_environment("STARPU_RESERVE_NCPU", &conf->reserve_ncpus, conf->precedence_over_environment_variables);
  1030. int main_thread_bind = starpu_get_env_number_default("STARPU_MAIN_THREAD_BIND", 0);
  1031. if (main_thread_bind)
  1032. conf->reserve_ncpus++;
  1033. _starpu_conf_set_value_against_environment("STARPU_NCUDA", &conf->ncuda, conf->precedence_over_environment_variables);
  1034. _starpu_conf_set_value_against_environment("STARPU_NOPENCL", &conf->nopencl, conf->precedence_over_environment_variables);
  1035. _starpu_conf_set_value_against_environment("STARPU_CALIBRATE", &conf->calibrate, conf->precedence_over_environment_variables);
  1036. _starpu_conf_set_value_against_environment("STARPU_BUS_CALIBRATE", &conf->bus_calibrate, conf->precedence_over_environment_variables);
  1037. #ifdef STARPU_SIMGRID
  1038. if (conf->calibrate == 2)
  1039. {
  1040. _STARPU_DISP("Warning: History will be cleared due to calibrate or STARPU_CALIBRATE being set to 2. This will prevent simgrid from having task simulation times!");
  1041. }
  1042. if (conf->bus_calibrate)
  1043. {
  1044. _STARPU_DISP("Warning: Bus calibration will be cleared due to bus_calibrate or STARPU_BUS_CALIBRATE being set. This will prevent simgrid from having data transfer simulation times!");
  1045. }
  1046. #endif
  1047. _starpu_conf_set_value_against_environment("STARPU_SINGLE_COMBINED_WORKER", &conf->single_combined_worker, conf->precedence_over_environment_variables);
  1048. _starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_COPY", &conf->disable_asynchronous_copy, conf->precedence_over_environment_variables);
  1049. _starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY", &conf->disable_asynchronous_cuda_copy, conf->precedence_over_environment_variables);
  1050. _starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY", &conf->disable_asynchronous_opencl_copy, conf->precedence_over_environment_variables);
  1051. _starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY", &conf->disable_asynchronous_mic_copy, conf->precedence_over_environment_variables);
  1052. _starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_MPI_MS_COPY", &conf->disable_asynchronous_mpi_ms_copy, conf->precedence_over_environment_variables);
  1053. }
/* Return the topology tree built by _starpu_build_tree().
 * NOTE(review): the tree is only allocated under STARPU_HAVE_HWLOC — without
 * hwloc this presumably returns NULL/unset; confirm the field's default. */
struct starpu_tree* starpu_workers_get_tree(void)
{
	return _starpu_config.topology.tree;
}
/* With hwloc >= 2.0 the children array only contains "normal" objects, so
 * every child qualifies; with older hwloc, I/O and Misc objects (types from
 * HWLOC_OBJ_BRIDGE onwards) are mixed in and must be filtered out. */
#if HWLOC_API_VERSION >= 0x20000
#define NORMAL_CHILD(obj) 1
#else
#define NORMAL_CHILD(obj) ((obj)->type < HWLOC_OBJ_BRIDGE)
#endif
#ifdef STARPU_HAVE_HWLOC
/* Recursively mirror the hwloc topology below <curr_obj> into the StarPU
 * <tree> node; <father> is the already-inserted parent (NULL at the root).
 * Levels with a single (normal) child are collapsed into their child. */
static void _fill_tree(struct starpu_tree *tree, hwloc_obj_t curr_obj, unsigned depth, hwloc_topology_t topology, struct starpu_tree *father)
{
	unsigned i, j;
	unsigned arity;
#if HWLOC_API_VERSION >= 0x20000
	/* hwloc >= 2.0: the children array only holds normal objects */
	arity = curr_obj->arity;
#else
	/* Older hwloc mixes I/O objects into the children array; count only
	 * the leading normal children (assumes I/O children come last, which
	 * the break relies on) */
	arity = 0;
	for(i = 0; i < curr_obj->arity; i++)
	{
		if (!NORMAL_CHILD(curr_obj->children[i]))
			/* I/O stuff, stop caring */
			break;
		arity++;
	}
#endif
	if (arity == 1)
	{
		/* Nothing interesting here, skip level */
		_fill_tree(tree, curr_obj->children[0], depth+1, topology, father);
		return;
	}
	starpu_tree_insert(tree, curr_obj->logical_index, depth, curr_obj->type == HWLOC_OBJ_PU, arity, father);
	starpu_tree_prepare_children(arity, tree);
	j = 0;
	for(i = 0; i < arity; i++)
	{
		hwloc_obj_t child = curr_obj->children[i];
		if (!NORMAL_CHILD(child))
			/* I/O stuff, stop caring (shouldn't happen, though) */
			break;
#if 0
		char string[128];
		hwloc_obj_snprintf(string, sizeof(string), topology, child, "#", 0);
		printf("%*s%s %d is_pu %d \n", 0, "", string, child->logical_index, child->type == HWLOC_OBJ_PU);
#endif
		_fill_tree(&tree->nodes[j], child, depth+1, topology, tree);
		j++;
	}
}
#endif
/* Allocate and populate _starpu_config.topology.tree from the hwloc topology.
 * No-op when StarPU was built without hwloc support. */
static void _starpu_build_tree(void)
{
#ifdef STARPU_HAVE_HWLOC
	struct starpu_tree *tree;
	_STARPU_MALLOC(tree, sizeof(struct starpu_tree));
	_starpu_config.topology.tree = tree;
	hwloc_obj_t root = hwloc_get_root_obj(_starpu_config.topology.hwtopology);
#if 0
	char string[128];
	hwloc_obj_snprintf(string, sizeof(string), topology, root, "#", 0);
	printf("%*s%s %d is_pu = %d \n", 0, "", string, root->logical_index, root->type == HWLOC_OBJ_PU);
#endif
	/* level, is_pu, is in the tree (it will be true only after add) */
	_fill_tree(tree, root, 0, _starpu_config.topology.hwtopology, NULL);
#endif
}
/* Serializes changes to the signal-catching policy and the saved handlers below */
static starpu_pthread_mutex_t sig_handlers_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
/* Handlers that were installed before StarPU took over the corresponding
 * signal; restored when catching is disabled (NULL when none was saved). */
static void (*act_sigint)(int);
static void (*act_sigsegv)(int);
static void (*act_sigtrap)(int);
  1125. void _starpu_handler(int sig)
  1126. {
  1127. #ifdef STARPU_VERBOSE
  1128. _STARPU_MSG("Catching signal '%d'\n", sig);
  1129. #endif
  1130. #ifdef STARPU_USE_FXT
  1131. _starpu_fxt_dump_file();
  1132. #endif
  1133. if (sig == SIGINT)
  1134. {
  1135. void (*sig_act)(int) = act_sigint;
  1136. if (sig_act == NULL)
  1137. sig_act = SIG_DFL;
  1138. signal(SIGINT, sig_act);
  1139. }
  1140. if (sig == SIGSEGV)
  1141. {
  1142. void (*sig_act)(int) = act_sigsegv;
  1143. if (sig_act == NULL)
  1144. sig_act = SIG_DFL;
  1145. signal(SIGSEGV, sig_act);
  1146. }
  1147. #ifdef SIGTRAP
  1148. if (sig == SIGTRAP)
  1149. {
  1150. void (*sig_act)(int) = act_sigtrap;
  1151. if (sig_act == NULL)
  1152. sig_act = SIG_DFL;
  1153. signal(SIGTRAP, sig_act);
  1154. }
  1155. #endif
  1156. #ifdef STARPU_VERBOSE
  1157. _STARPU_MSG("Rearming signal '%d'\n", sig);
  1158. #endif
  1159. raise(sig);
  1160. }
  1161. void _starpu_catch_signals(void)
  1162. {
  1163. if (_starpu_config.conf.catch_signals == 1)
  1164. {
  1165. static void (*old_sig_act)(int);
  1166. old_sig_act = signal(SIGINT, _starpu_handler);
  1167. if (old_sig_act != _starpu_handler)
  1168. act_sigint = old_sig_act;
  1169. old_sig_act = signal(SIGSEGV, _starpu_handler);
  1170. if (old_sig_act != _starpu_handler)
  1171. act_sigsegv = old_sig_act;
  1172. #ifdef SIGTRAP
  1173. old_sig_act = signal(SIGTRAP, _starpu_handler);
  1174. if (old_sig_act != _starpu_handler)
  1175. act_sigtrap = old_sig_act;
  1176. #endif
  1177. }
  1178. else
  1179. {
  1180. if (act_sigint != NULL)
  1181. {
  1182. signal(SIGINT, act_sigint);
  1183. act_sigint = NULL;
  1184. }
  1185. if (act_sigsegv != NULL)
  1186. {
  1187. signal(SIGSEGV, act_sigsegv);
  1188. act_sigsegv = NULL;
  1189. }
  1190. #ifdef SIGTRAP
  1191. if (act_sigtrap != NULL)
  1192. {
  1193. signal(SIGTRAP, act_sigtrap);
  1194. act_sigtrap = NULL;
  1195. }
  1196. #endif
  1197. }
  1198. }
/* Atomically change the signal-catching policy and (un)install the handlers
 * accordingly. */
void _starpu_set_catch_signals(int do_catch_signal)
{
	STARPU_PTHREAD_MUTEX_LOCK(&sig_handlers_mutex);
	_starpu_config.conf.catch_signals = do_catch_signal;
	_starpu_catch_signals();
	STARPU_PTHREAD_MUTEX_UNLOCK(&sig_handlers_mutex);
}
/* Return the current signal-catching policy from the active configuration. */
int _starpu_get_catch_signals(void)
{
	return _starpu_config.conf.catch_signals;
}
/* Public entry point: initialize StarPU without command-line arguments.
 * Thin wrapper around starpu_initialize(). */
int starpu_init(struct starpu_conf *user_conf)
{
	return starpu_initialize(user_conf, NULL, NULL);
}
  1214. int starpu_initialize(struct starpu_conf *user_conf, int *argc, char ***argv)
  1215. {
  1216. int is_a_sink = 0; /* Always defined. If the MP infrastructure is not
  1217. * used, we cannot be a sink. */
  1218. unsigned worker;
  1219. (void)argc;
  1220. (void)argv;
  1221. /* This initializes _starpu_silent, thus needs to be early */
  1222. _starpu_util_init();
  1223. STARPU_HG_DISABLE_CHECKING(_starpu_worker_parallel_blocks);
  1224. #ifdef STARPU_SIMGRID
  1225. /* This initializes the simgrid thread library, thus needs to be early */
  1226. _starpu_simgrid_init_early(argc, argv);
  1227. #endif
  1228. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1229. while (initialized == CHANGING)
  1230. /* Wait for the other one changing it */
  1231. STARPU_PTHREAD_COND_WAIT(&init_cond, &init_mutex);
  1232. init_count++;
  1233. if (initialized == INITIALIZED)
  1234. {
  1235. /* He initialized it, don't do it again, and let the others get the mutex */
  1236. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1237. return 0;
  1238. }
  1239. /* initialized == UNINITIALIZED */
  1240. initialized = CHANGING;
  1241. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1242. #ifdef STARPU_USE_MP
  1243. _starpu_set_argc_argv(argc, argv);
  1244. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1245. if (_starpu_mpi_common_mp_init() == -ENODEV)
  1246. {
  1247. initialized = UNINITIALIZED;
  1248. return -ENODEV;
  1249. }
  1250. /* In MPI case we look at the rank to know if we are a sink */
  1251. if (!_starpu_mpi_common_is_src_node())
  1252. setenv("STARPU_SINK", "STARPU_MPI_MS", 1);
  1253. # endif
  1254. /* If StarPU was configured to use MP sinks, we have to control the
  1255. * kind on node we are running on : host or sink ? */
  1256. if (starpu_getenv("STARPU_SINK"))
  1257. is_a_sink = 1;
  1258. #endif /* STARPU_USE_MP */
  1259. int ret;
  1260. #ifdef STARPU_OPENMP
  1261. _starpu_omp_dummy_init();
  1262. #endif
  1263. #ifdef STARPU_SIMGRID
  1264. /* Warn when the lots of stacks malloc()-ated by simgrid for transfer
  1265. * processes will take a long time to get initialized */
  1266. char *perturb = starpu_getenv("MALLOC_PERTURB_");
  1267. if (perturb && perturb[0] && atoi(perturb) != 0)
  1268. _STARPU_DISP("Warning: MALLOC_PERTURB_ is set to non-zero, this makes simgrid run very slow\n");
  1269. #else
  1270. #ifdef __GNUC__
  1271. #ifndef __OPTIMIZE__
  1272. _STARPU_DISP("Warning: StarPU was configured with --enable-debug (-O0), and is thus not optimized\n");
  1273. #endif
  1274. #endif
  1275. #ifdef STARPU_SPINLOCK_CHECK
  1276. _STARPU_DISP("Warning: StarPU was configured with --enable-spinlock-check, which slows down a bit\n");
  1277. #endif
  1278. #if 0
  1279. #ifndef STARPU_NO_ASSERT
  1280. _STARPU_DISP("Warning: StarPU was configured without --enable-fast\n");
  1281. #endif
  1282. #endif
  1283. #ifdef STARPU_MEMORY_STATS
  1284. _STARPU_DISP("Warning: StarPU was configured with --enable-memory-stats, which slows down a bit\n");
  1285. #endif
  1286. #ifdef STARPU_VERBOSE
  1287. _STARPU_DISP("Warning: StarPU was configured with --enable-verbose, which slows down a bit\n");
  1288. #endif
  1289. #ifdef STARPU_USE_FXT
  1290. _STARPU_DISP("Warning: StarPU was configured with --with-fxt, which slows down a bit, limits scalability and makes worker initialization sequential\n");
  1291. #endif
  1292. #ifdef STARPU_FXT_LOCK_TRACES
  1293. _STARPU_DISP("Warning: StarPU was configured with --enable-fxt-lock, which slows down things a huge lot, and is really only meant for StarPU insides debugging. Did you really want to enable that?\n");
  1294. #endif
  1295. #ifdef STARPU_PERF_DEBUG
  1296. _STARPU_DISP("Warning: StarPU was configured with --enable-perf-debug, which slows down a bit\n");
  1297. #endif
  1298. #ifdef STARPU_MODEL_DEBUG
  1299. _STARPU_DISP("Warning: StarPU was configured with --enable-model-debug, which slows down a bit\n");
  1300. #endif
  1301. #ifdef __linux__
  1302. {
  1303. struct utsname buf;
  1304. if (uname(&buf) == 0
  1305. && (!strncmp(buf.release, "4.7.", 4)
  1306. || !strncmp(buf.release, "4.8.", 4)))
  1307. _STARPU_DISP("Warning: This system is running a 4.7 or 4.8 kernel. These have a severe scheduling performance regression issue, please upgrade to at least 4.9.\n");
  1308. }
  1309. #endif
  1310. #endif
  1311. if (starpu_getenv("STARPU_ENABLE_STATS"))
  1312. {
  1313. _STARPU_DISP("Warning: STARPU_ENABLE_STATS is enabled, which slows down a bit\n");
  1314. }
  1315. #if defined(_WIN32) && !defined(__CYGWIN__)
  1316. WSADATA wsadata;
  1317. WSAStartup(MAKEWORD(1,0), &wsadata);
  1318. #endif
  1319. STARPU_AYU_PREINIT();
  1320. /* store the pointer to the user explicit configuration during the
  1321. * initialization */
  1322. if (user_conf == NULL)
  1323. starpu_conf_init(&_starpu_config.conf);
  1324. else
  1325. {
  1326. if (user_conf->magic != 42)
  1327. {
  1328. _STARPU_DISP("starpu_conf structure needs to be initialized with starpu_conf_init\n");
  1329. return -EINVAL;
  1330. }
  1331. _starpu_config.conf = *user_conf;
  1332. }
  1333. _starpu_conf_check_environment(&_starpu_config.conf);
  1334. /* Make a copy of arrays */
  1335. if (_starpu_config.conf.sched_policy_name)
  1336. _starpu_config.conf.sched_policy_name = strdup(_starpu_config.conf.sched_policy_name);
  1337. if (_starpu_config.conf.mic_sink_program_path)
  1338. _starpu_config.conf.mic_sink_program_path = strdup(_starpu_config.conf.mic_sink_program_path);
  1339. if (_starpu_config.conf.n_cuda_opengl_interoperability)
  1340. {
  1341. size_t size = _starpu_config.conf.n_cuda_opengl_interoperability * sizeof(*_starpu_config.conf.cuda_opengl_interoperability);
  1342. unsigned *copy;
  1343. _STARPU_MALLOC(copy, size);
  1344. memcpy(copy, _starpu_config.conf.cuda_opengl_interoperability, size);
  1345. _starpu_config.conf.cuda_opengl_interoperability = copy;
  1346. }
  1347. if (_starpu_config.conf.n_not_launched_drivers)
  1348. {
  1349. size_t size = _starpu_config.conf.n_not_launched_drivers * sizeof(*_starpu_config.conf.not_launched_drivers);
  1350. struct starpu_driver *copy;
  1351. _STARPU_MALLOC(copy, size);
  1352. memcpy(copy, _starpu_config.conf.not_launched_drivers, size);
  1353. _starpu_config.conf.not_launched_drivers = copy;
  1354. }
  1355. _starpu_sched_init();
  1356. _starpu_job_init();
  1357. _starpu_graph_init();
  1358. _starpu_init_all_sched_ctxs(&_starpu_config);
  1359. _starpu_init_progression_hooks();
  1360. _starpu_init_idle_hooks();
  1361. _starpu_init_tags();
  1362. #ifdef STARPU_USE_FXT
  1363. _starpu_fxt_init_profiling(_starpu_config.conf.trace_buffer_size);
  1364. #endif
  1365. _starpu_open_debug_logfile();
  1366. _starpu_data_interface_init();
  1367. _starpu_timing_init();
  1368. _starpu_profiling_init();
  1369. _starpu_load_bus_performance_files();
  1370. /* Depending on whether we are a MP sink or not, we must build the
  1371. * topology with MP nodes or not. */
  1372. ret = _starpu_build_topology(&_starpu_config, is_a_sink);
  1373. /* sink doesn't exit even if no worker discorvered */
  1374. if (ret && !is_a_sink)
  1375. {
  1376. starpu_perfmodel_free_sampling();
  1377. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1378. init_count--;
  1379. _starpu_destroy_machine_config(&_starpu_config);
  1380. #ifdef STARPU_USE_MPI_MASTER_SLAVE
  1381. if (_starpu_mpi_common_is_mp_initialized())
  1382. _starpu_mpi_common_mp_deinit();
  1383. #endif
  1384. initialized = UNINITIALIZED;
  1385. /* Let somebody else try to do it */
  1386. STARPU_PTHREAD_COND_SIGNAL(&init_cond);
  1387. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1388. #ifdef STARPU_USE_FXT
  1389. _starpu_stop_fxt_profiling();
  1390. #endif
  1391. return ret;
  1392. }
  1393. _starpu_task_init();
  1394. for (worker = 0; worker < _starpu_config.topology.nworkers; worker++)
  1395. _starpu_worker_init(&_starpu_config.workers[worker], &_starpu_config);
  1396. //FIXME: find out if the variable STARPU_CHECK_ENTIRE_PLATFORM is really needed, for now, just set 1 as a default value
  1397. check_entire_platform = 1;//starpu_get_env_number("STARPU_CHECK_ENTIRE_PLATFORM");
  1398. _starpu_config.disable_kernels = starpu_get_env_number("STARPU_DISABLE_KERNELS");
  1399. STARPU_PTHREAD_KEY_CREATE(&_starpu_worker_key, NULL);
  1400. STARPU_PTHREAD_KEY_CREATE(&_starpu_worker_set_key, NULL);
  1401. _starpu_keys_initialized = 1;
  1402. STARPU_WMB();
  1403. _starpu_build_tree();
  1404. if (!is_a_sink)
  1405. {
  1406. struct starpu_sched_policy *selected_policy = _starpu_select_sched_policy(&_starpu_config, _starpu_config.conf.sched_policy_name);
  1407. _starpu_create_sched_ctx(selected_policy, NULL, -1, 1, "init", (_starpu_config.conf.global_sched_ctx_min_priority != -1), _starpu_config.conf.global_sched_ctx_min_priority, (_starpu_config.conf.global_sched_ctx_max_priority != -1), _starpu_config.conf.global_sched_ctx_max_priority, 1, _starpu_config.conf.sched_policy_init, NULL, 0, NULL, 0);
  1408. }
  1409. _starpu_initialize_registered_performance_models();
  1410. _starpu_perf_counter_init(&_starpu_config);
  1411. _starpu_perf_knob_init();
  1412. #if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
  1413. _starpu_cuda_init();
  1414. #endif
  1415. #ifdef STARPU_SIMGRID
  1416. _starpu_simgrid_init();
  1417. #endif
  1418. /* Launch "basic" workers (ie. non-combined workers) */
  1419. if (!is_a_sink)
  1420. _starpu_launch_drivers(&_starpu_config);
  1421. /* Allocate swap, if any */
  1422. if (!is_a_sink)
  1423. _starpu_swap_init();
  1424. _starpu_watchdog_init();
  1425. _starpu_profiling_start();
  1426. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1427. initialized = INITIALIZED;
  1428. /* Tell everybody that we initialized */
  1429. STARPU_PTHREAD_COND_BROADCAST(&init_cond);
  1430. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1431. int main_thread_cpuid = starpu_get_env_number_default("STARPU_MAIN_THREAD_CPUID", -1);
  1432. int main_thread_bind = starpu_get_env_number_default("STARPU_MAIN_THREAD_BIND", 0);
  1433. int main_thread_activity = STARPU_NONACTIVETHREAD;
  1434. if (main_thread_bind)
  1435. {
  1436. main_thread_activity = STARPU_ACTIVETHREAD;
  1437. if (main_thread_cpuid == -1)
  1438. main_thread_cpuid = starpu_get_next_bindid(STARPU_THREAD_ACTIVE, NULL, 0);
  1439. }
  1440. if (main_thread_cpuid >= 0)
  1441. _starpu_bind_thread_on_cpu(main_thread_cpuid, main_thread_activity, "main");
  1442. _STARPU_DEBUG("Initialisation finished\n");
  1443. #ifdef STARPU_USE_MP
  1444. /* Finally, if we are a MP sink, we never leave this function. Else,
  1445. * we enter an infinite event loop which listen for MP commands from
  1446. * the source. */
  1447. if (is_a_sink)
  1448. {
  1449. _starpu_sink_common_worker();
  1450. /* We should normally never leave the loop as we don't want to
  1451. * really initialize STARPU */
  1452. STARPU_ASSERT(0);
  1453. }
  1454. #endif
  1455. _starpu_catch_signals();
  1456. return 0;
  1457. }
  1458. /*
  1459. * Handle runtime termination
  1460. */
/* Join every worker thread and release its per-worker resources.
 * Must be called after the "running" flag has been cleared (see
 * _starpu_kill_all_workers) so the drivers actually exit their loops.
 * After the join, each worker must have drained its local queues. */
static void _starpu_terminate_workers(struct _starpu_machine_config *pconfig)
{
	int status = 0;
	unsigned workerid;
	unsigned n;

	/* Make sure no worker stays blocked on a condition variable,
	 * otherwise the joins below would hang. */
	starpu_wake_all_blocked_workers();

	for (workerid = 0; workerid < pconfig->topology.nworkers; workerid++)
	{
		_STARPU_DEBUG("wait for worker %u\n", workerid);
		struct _starpu_worker_set *set = pconfig->workers[workerid].set;
		struct _starpu_worker *worker = &pconfig->workers[workerid];
		/* in case StarPU termination code is called from a callback,
		 * we have to check if starpu_pthread_self() is the worker itself */
		if (set && set->nworkers > 0)
		{
			/* Several workers share one thread (worker set): join
			 * the shared thread once, then clear 'started' so the
			 * other members of the set skip the join. */
			if (set->started)
			{
				if (!starpu_pthread_equal(starpu_pthread_self(), set->worker_thread))
					status = starpu_pthread_join(set->worker_thread, NULL);
				if (status)
				{
#ifdef STARPU_VERBOSE
					_STARPU_DEBUG("starpu_pthread_join -> %d\n", status);
#endif
				}
				set->started = 0;
			}
		}
		else
		{
			/* Driver thread provided by the application (not
			 * spawned by StarPU): nothing to join here. */
			if (!worker->run_by_starpu)
				goto out;

			if (!starpu_pthread_equal(starpu_pthread_self(), worker->worker_thread))
				status = starpu_pthread_join(worker->worker_thread, NULL);
			if (status)
			{
#ifdef STARPU_VERBOSE
				_STARPU_DEBUG("starpu_pthread_join -> %d\n", status);
#endif
			}
		}

out:
		/* The worker must not leave any pending local work behind. */
		STARPU_ASSERT(starpu_task_list_empty(&worker->local_tasks));
		for (n = 0; n < worker->local_ordered_tasks_size; n++)
			STARPU_ASSERT(worker->local_ordered_tasks[n] == NULL);
		_starpu_sched_ctx_list_delete(&worker->sched_ctx_list);
		free(worker->local_ordered_tasks);
		STARPU_ASSERT(_starpu_ctx_change_list_empty(&worker->ctx_change_list));
	}
}
  1511. /* Condition variable and mutex used to pause/resume. */
  1512. static starpu_pthread_cond_t pause_cond = STARPU_PTHREAD_COND_INITIALIZER;
  1513. static starpu_pthread_mutex_t pause_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
  1514. void _starpu_may_pause(void)
  1515. {
  1516. /* pause_depth is just protected by a memory barrier */
  1517. STARPU_RMB();
  1518. if (STARPU_UNLIKELY(_starpu_config.pause_depth > 0))
  1519. {
  1520. STARPU_PTHREAD_MUTEX_LOCK(&pause_mutex);
  1521. if (_starpu_config.pause_depth > 0)
  1522. {
  1523. STARPU_PTHREAD_COND_WAIT(&pause_cond, &pause_mutex);
  1524. }
  1525. STARPU_PTHREAD_MUTEX_UNLOCK(&pause_mutex);
  1526. }
  1527. }
  1528. void starpu_pause()
  1529. {
  1530. STARPU_HG_DISABLE_CHECKING(_starpu_config.pause_depth);
  1531. _starpu_config.pause_depth += 1;
  1532. }
  1533. void starpu_resume()
  1534. {
  1535. STARPU_PTHREAD_MUTEX_LOCK(&pause_mutex);
  1536. _starpu_config.pause_depth -= 1;
  1537. if (!_starpu_config.pause_depth)
  1538. {
  1539. STARPU_PTHREAD_COND_BROADCAST(&pause_cond);
  1540. }
  1541. STARPU_PTHREAD_MUTEX_UNLOCK(&pause_mutex);
  1542. }
/* Tell whether the worker is allowed to go to sleep on its condition
 * variable rather than keep polling.  Always 0 when StarPU was built with
 * non-blocking drivers; otherwise, blocking is refused whenever some
 * condition requires the worker to keep making progress. */
unsigned _starpu_worker_can_block(unsigned memnode STARPU_ATTRIBUTE_UNUSED, struct _starpu_worker *worker STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_NON_BLOCKING_DRIVERS
	return 0;
#else
	/* do not block if a sched_ctx change operation is pending */
	if (worker->state_changing_ctx_notice)
		return 0;

	unsigned can_block = 1;

	/* Build a starpu_driver descriptor for this worker so we can check
	 * whether StarPU launched this driver itself. */
	struct starpu_driver driver;
	driver.type = worker->arch;
	switch (driver.type)
	{
		case STARPU_CPU_WORKER:
			driver.id.cpu_id = worker->devid;
			break;
		case STARPU_CUDA_WORKER:
			driver.id.cuda_id = worker->devid;
			break;
#ifdef STARPU_USE_OPENCL
		case STARPU_OPENCL_WORKER:
			starpu_opencl_get_device(worker->devid, &driver.id.opencl_id);
			break;
#endif
		default:
			/* Other architectures have no user-run driver mode. */
			goto always_launch;
	}
	/* A driver run by the application must not be blocked by StarPU. */
	if (!_starpu_may_launch_driver(&_starpu_config.conf, &driver))
		return 0;

always_launch:
#ifndef STARPU_SIMGRID
	/* Pending data requests on this memory node still need servicing. */
	if (!_starpu_check_that_no_data_request_exists(memnode))
		can_block = 0;
#endif
	if (!_starpu_machine_is_running())
		can_block = 0;

	if (!_starpu_execute_registered_progression_hooks())
		can_block = 0;

	return can_block;
#endif
}
  1584. static void _starpu_kill_all_workers(struct _starpu_machine_config *pconfig)
  1585. {
  1586. /* set the flag which will tell workers to stop */
  1587. ANNOTATE_HAPPENS_AFTER(&_starpu_config.running);
  1588. pconfig->running = 0;
  1589. /* running is just protected by a memory barrier */
  1590. ANNOTATE_HAPPENS_BEFORE(&_starpu_config.running);
  1591. STARPU_WMB();
  1592. starpu_wake_all_blocked_workers();
  1593. }
/* Print the profiling summaries for both the buses and the workers. */
void starpu_display_stats()
{
	starpu_profiling_bus_helper_display_summary();
	starpu_profiling_worker_helper_display_summary();
}
/* Tear StarPU down.  Only the last of the nested starpu_init() callers
 * actually deinitializes; earlier callers just decrement the counter.
 * The teardown order below matters: workers are told to stop, buffers
 * and statistics are flushed, workers are joined, and only then are the
 * topology and the remaining subsystems destroyed. */
void starpu_shutdown(void)
{
	unsigned worker;
	STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
	init_count--;
	STARPU_ASSERT_MSG(init_count >= 0, "Number of calls to starpu_shutdown() can not be higher than the number of calls to starpu_init()\n");
	if (init_count)
	{
		_STARPU_DEBUG("Still somebody needing StarPU, don't deinitialize\n");
		STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
		return;
	}

	/* We're last */
	initialized = CHANGING;
	STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);

	/* If the workers are frozen, no progress can be made. */
	STARPU_ASSERT(_starpu_config.pause_depth <= 0);
	starpu_task_wait_for_no_ready();

	/* tell all workers to shutdown */
	_starpu_kill_all_workers(&_starpu_config);

	/* Release the automatically-allocated buffers of every NUMA node. */
	unsigned i;
	unsigned nb_numa_nodes = starpu_memory_nodes_get_numa_count();
	for (i=0; i<nb_numa_nodes; i++)
	{
		_starpu_free_all_automatically_allocated_buffers(i);
	}

	{
		int stats = starpu_get_env_number("STARPU_STATS");
		if (stats != 0)
		{
			_starpu_display_msi_stats(stderr);
			_starpu_display_alloc_cache_stats(stderr);
		}
	}

	starpu_profiling_bus_helper_display_summary();
	starpu_profiling_worker_helper_display_summary();
	starpu_bound_clear();
	_starpu_deinitialize_registered_performance_models();
	_starpu_watchdog_shutdown();

	/* wait for their termination */
	_starpu_terminate_workers(&_starpu_config);

	{
		int stats = starpu_get_env_number("STARPU_MEMORY_STATS");
		if (stats != 0)
		{
			// Display statistics on data which have not been unregistered
			starpu_data_display_memory_stats();
		}
	}

	_starpu_delete_all_sched_ctxs();
	_starpu_sched_component_workers_destroy();

	for (worker = 0; worker < _starpu_config.topology.nworkers; worker++)
		_starpu_worker_deinit(&_starpu_config.workers[worker]);

	_starpu_profiling_terminate();
	_starpu_disk_unregister();
#ifdef STARPU_HAVE_HWLOC
	starpu_tree_free(_starpu_config.topology.tree);
	free(_starpu_config.topology.tree);
#endif
	_starpu_destroy_topology(&_starpu_config);
	_starpu_initialized_combined_workers = 0;
#ifdef STARPU_USE_FXT
	_starpu_stop_fxt_profiling();
#endif
	_starpu_data_interface_shutdown();
	_starpu_job_fini();

	/* Drop all remaining tags */
	_starpu_tag_clear();

#ifdef STARPU_OPENMP
	_starpu_omp_dummy_shutdown();
#endif
	_starpu_perf_knob_exit();
	_starpu_perf_counter_exit();
	_starpu_close_debug_logfile();
	_starpu_keys_initialized = 0;
	STARPU_PTHREAD_KEY_DELETE(_starpu_worker_key);
	STARPU_PTHREAD_KEY_DELETE(_starpu_worker_set_key);
	_starpu_task_deinit();

	STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
	initialized = UNINITIALIZED;
	/* Let someone else that wants to initialize it again do it */
	STARPU_PTHREAD_COND_SIGNAL(&init_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);

	/* Clear memory: these were strdup'd/copied at init time. */
	free((char*) _starpu_config.conf.sched_policy_name);
	free(_starpu_config.conf.mic_sink_program_path);
	if (_starpu_config.conf.n_cuda_opengl_interoperability)
		free(_starpu_config.conf.cuda_opengl_interoperability);
	if (_starpu_config.conf.n_not_launched_drivers)
		free(_starpu_config.conf.not_launched_drivers);
	STARPU_AYU_FINISH();

#ifdef STARPU_USE_MPI_MASTER_SLAVE
	if (_starpu_mpi_common_is_mp_initialized())
		_starpu_mpi_common_mp_deinit();
#endif
	_starpu_print_idle_time();
	_STARPU_DEBUG("Shutdown finished\n");

#ifdef STARPU_SIMGRID
	/* This finalizes the simgrid thread library, thus needs to be late */
	_starpu_simgrid_deinit();
#endif
}
/* Number of basic (non-combined) workers currently configured. */
#undef starpu_worker_get_count
unsigned starpu_worker_get_count(void)
{
	return _starpu_config.topology.nworkers;
}
/* Return whether worker WORKERID is currently blocked inside a parallel
 * section.  When the observer is itself another worker, a careful lock
 * protocol is followed to avoid racing with blocking-state transitions
 * and to avoid live locks between mutually-observing workers. */
unsigned starpu_worker_is_blocked_in_parallel(int workerid)
{
	/* Fast path: feature not in use at all. */
	if (!_starpu_worker_parallel_blocks)
		return 0;

	int relax_own_observation_state = 0;
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
	STARPU_ASSERT(worker != NULL);
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
	struct _starpu_worker *cur_worker = NULL;
	int cur_workerid = starpu_worker_get_id();
	if (workerid != cur_workerid)
	{
		/* in order to observe the 'blocked' state of a worker from
		 * another worker, we must avoid race conditions between
		 * 'blocked' state changes and state observations. This is the
		 * purpose of this 'if' block. */
		cur_worker = cur_workerid >= 0 ? _starpu_get_worker_struct(cur_workerid) : NULL;

		relax_own_observation_state = (cur_worker != NULL) && (cur_worker->state_relax_refcnt == 0);
		if (relax_own_observation_state && !worker->state_relax_refcnt)
		{
			/* moreover, when a worker (cur_worker != NULL)
			 * observes another worker, we need to take special
			 * care to avoid live locks, thus the observing worker
			 * must enter the relaxed state (if not relaxed
			 * already) before doing the observation in mutual
			 * exclusion */
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);

			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&cur_worker->sched_mutex);
			cur_worker->state_relax_refcnt = 1;
			STARPU_PTHREAD_COND_BROADCAST(&cur_worker->sched_cond);
			STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&cur_worker->sched_mutex);

			STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
		}
		/* the observer waits for a safe window to observe the state,
		 * and also waits for any pending blocking state change
		 * requests to be processed, in order to not obtain an
		 * ephemeral information */
		while (!worker->state_relax_refcnt
				|| worker->state_block_in_parallel_req
				|| worker->state_unblock_in_parallel_req)
		{
			STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
		}
	}
	unsigned ret = _starpu_config.workers[workerid].state_blocked_in_parallel;
	/* once a worker state has been observed, the worker is 'tainted' for the next one full sched_op,
	 * to avoid changing the observed worker state - on which the observer
	 * made a scheduling decision - after the fact. */
	worker->state_blocked_in_parallel_observed = 1;
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
	if (relax_own_observation_state)
	{
		/* Leave the temporary relaxed state entered above. */
		STARPU_PTHREAD_MUTEX_LOCK_SCHED(&cur_worker->sched_mutex);
		cur_worker->state_relax_refcnt = 0;
		STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&cur_worker->sched_mutex);
	}
	return ret;
}
  1764. unsigned starpu_worker_is_slave_somewhere(int workerid)
  1765. {
  1766. starpu_worker_lock(workerid);
  1767. unsigned ret = _starpu_config.workers[workerid].is_slave_somewhere;
  1768. starpu_worker_unlock(workerid);
  1769. return ret;
  1770. }
  1771. int starpu_worker_get_count_by_type(enum starpu_worker_archtype type)
  1772. {
  1773. switch (type)
  1774. {
  1775. case STARPU_CPU_WORKER:
  1776. return _starpu_config.topology.ncpus;
  1777. case STARPU_CUDA_WORKER:
  1778. return _starpu_config.topology.ncudagpus * _starpu_config.topology.nworkerpercuda;
  1779. case STARPU_OPENCL_WORKER:
  1780. return _starpu_config.topology.nopenclgpus;
  1781. case STARPU_MIC_WORKER:
  1782. return _starpu_config.topology.nmicdevices;
  1783. case STARPU_MPI_MS_WORKER:
  1784. return _starpu_config.topology.nmpidevices;
  1785. case STARPU_ANY_WORKER:
  1786. return _starpu_config.topology.ncpus+
  1787. _starpu_config.topology.ncudagpus * _starpu_config.topology.nworkerpercuda+
  1788. _starpu_config.topology.nopenclgpus+
  1789. _starpu_config.topology.nmicdevices+
  1790. _starpu_config.topology.nmpidevices;
  1791. default:
  1792. return -EINVAL;
  1793. }
  1794. }
/* Number of combined (parallel) workers. */
unsigned starpu_combined_worker_get_count(void)
{
	return _starpu_config.topology.ncombinedworkers;
}
/* Number of CPU workers. */
unsigned starpu_cpu_worker_get_count(void)
{
	return _starpu_config.topology.ncpus;
}
/* Number of CUDA workers (devices times workers per device). */
unsigned starpu_cuda_worker_get_count(void)
{
	return _starpu_config.topology.ncudagpus * _starpu_config.topology.nworkerpercuda;
}
/* Number of OpenCL workers. */
unsigned starpu_opencl_worker_get_count(void)
{
	return _starpu_config.topology.nopenclgpus;
}
/* Whether asynchronous data copies are globally disabled (configuration flag). */
int starpu_asynchronous_copy_disabled(void)
{
	return _starpu_config.conf.disable_asynchronous_copy;
}
/* Whether asynchronous CUDA copies are disabled (configuration flag). */
int starpu_asynchronous_cuda_copy_disabled(void)
{
	return _starpu_config.conf.disable_asynchronous_cuda_copy;
}
/* Whether asynchronous OpenCL copies are disabled (configuration flag). */
int starpu_asynchronous_opencl_copy_disabled(void)
{
	return _starpu_config.conf.disable_asynchronous_opencl_copy;
}
/* Whether asynchronous MIC copies are disabled (configuration flag). */
int starpu_asynchronous_mic_copy_disabled(void)
{
	return _starpu_config.conf.disable_asynchronous_mic_copy;
}
/* Whether asynchronous MPI master-slave copies are disabled (configuration flag). */
int starpu_asynchronous_mpi_ms_copy_disabled(void)
{
	return _starpu_config.conf.disable_asynchronous_mpi_ms_copy;
}
  1831. unsigned starpu_mic_worker_get_count(void)
  1832. {
  1833. int i = 0, count = 0;
  1834. for (i = 0; i < STARPU_MAXMICDEVS; i++)
  1835. count += _starpu_config.topology.nmiccores[i];
  1836. return count;
  1837. }
/* Number of MPI master-slave devices. */
unsigned starpu_mpi_ms_worker_get_count(void)
{
	return _starpu_config.topology.nmpidevices;
}
  1842. /* When analyzing performance, it is useful to see what is the processing unit
  1843. * that actually performed the task. This function returns the id of the
  1844. * processing unit actually executing it, therefore it makes no sense to use it
  1845. * within the callbacks of SPU functions for instance. If called by some thread
  1846. * that is not controlled by StarPU, starpu_worker_get_id returns -1. */
  1847. #undef starpu_worker_get_id
  1848. int starpu_worker_get_id(void)
  1849. {
  1850. struct _starpu_worker * worker;
  1851. worker = _starpu_get_local_worker_key();
  1852. if (worker)
  1853. {
  1854. return worker->workerid;
  1855. }
  1856. else
  1857. {
  1858. /* there is no worker associated to that thread, perhaps it is
  1859. * a thread from the application or this is some SPU worker */
  1860. return -1;
  1861. }
  1862. }
#define starpu_worker_get_id _starpu_worker_get_id
#undef _starpu_worker_get_id_check
/* Same as _starpu_worker_get_id, but asserts that the caller really is a
 * StarPU worker; F and L identify the call site for the error message. */
unsigned _starpu_worker_get_id_check(const char *f, int l)
{
	(void) f;
	(void) l;
	int id = _starpu_worker_get_id();
	STARPU_ASSERT_MSG(id>=0, "%s:%d Cannot be called from outside a worker\n", f, l);
	return id;
}
  1873. int starpu_combined_worker_get_id(void)
  1874. {
  1875. struct _starpu_worker *worker;
  1876. worker = _starpu_get_local_worker_key();
  1877. if (worker)
  1878. {
  1879. return worker->combined_workerid;
  1880. }
  1881. else
  1882. {
  1883. /* there is no worker associated to that thread, perhaps it is
  1884. * a thread from the application or this is some SPU worker */
  1885. return -1;
  1886. }
  1887. }
  1888. int starpu_combined_worker_get_size(void)
  1889. {
  1890. struct _starpu_worker *worker;
  1891. worker = _starpu_get_local_worker_key();
  1892. if (worker)
  1893. {
  1894. return worker->worker_size;
  1895. }
  1896. else
  1897. {
  1898. /* there is no worker associated to that thread, perhaps it is
  1899. * a thread from the application or this is some SPU worker */
  1900. return -1;
  1901. }
  1902. }
  1903. int starpu_combined_worker_get_rank(void)
  1904. {
  1905. struct _starpu_worker *worker;
  1906. worker = _starpu_get_local_worker_key();
  1907. if (worker)
  1908. {
  1909. return worker->current_rank;
  1910. }
  1911. else
  1912. {
  1913. /* there is no worker associated to that thread, perhaps it is
  1914. * a thread from the application or this is some SPU worker */
  1915. return -1;
  1916. }
  1917. }
/* Sub-worker index of worker ID within its device. */
int starpu_worker_get_subworkerid(int id)
{
	return _starpu_config.workers[id].subworkerid;
}
/* Device id of worker ID. */
int starpu_worker_get_devid(int id)
{
	return _starpu_config.workers[id].devid;
}
/* Combined workers are numbered after the basic ones, so any id at or
 * beyond nworkers designates a combined worker. */
unsigned starpu_worker_is_combined_worker(int id)
{
	return id >= (int)_starpu_config.topology.nworkers;
}
/* Map a global worker id (which must designate a combined worker, i.e.
 * be at least the basic worker count) to its combined-worker structure. */
struct _starpu_combined_worker *_starpu_get_combined_worker_struct(unsigned id)
{
	unsigned basic_worker_count = starpu_worker_get_count();

	//_STARPU_DEBUG("basic_worker_count:%d\n",basic_worker_count);

	STARPU_ASSERT(id >= basic_worker_count);
	return &_starpu_config.combined_workers[id - basic_worker_count];
}
/* Architecture type of worker ID. */
enum starpu_worker_archtype starpu_worker_get_type(int id)
{
	return _starpu_config.workers[id].arch;
}
/* Fill WORKERIDS (capacity MAXSIZE) with the ids of all workers of the
 * given type (or all workers for STARPU_ANY_WORKER) and return how many
 * were stored.
 * NOTE(review): on overflow this returns -ERANGE through an *unsigned*
 * return type, i.e. a huge positive value; callers comparing the result
 * with "> 0" will not detect the error — confirm intended semantics
 * before relying on it (see also starpu_worker_get_devids). */
unsigned starpu_worker_get_ids_by_type(enum starpu_worker_archtype type, int *workerids, unsigned maxsize)
{
	unsigned nworkers = starpu_worker_get_count();

	unsigned cnt = 0;

	unsigned id;
	for (id = 0; id < nworkers; id++)
	{
		if (type == STARPU_ANY_WORKER || starpu_worker_get_type(id) == type)
		{
			/* Perhaps the array is too small ? */
			if (cnt >= maxsize)
				return -ERANGE;

			workerids[cnt++] = id;
		}
	}

	return cnt;
}
  1958. int starpu_worker_get_by_type(enum starpu_worker_archtype type, int num)
  1959. {
  1960. unsigned nworkers = starpu_worker_get_count();
  1961. int cnt = 0;
  1962. unsigned id;
  1963. for (id = 0; id < nworkers; id++)
  1964. {
  1965. if (type == STARPU_ANY_WORKER || starpu_worker_get_type(id) == type)
  1966. {
  1967. if (num == cnt)
  1968. return id;
  1969. cnt++;
  1970. }
  1971. }
  1972. /* Not found */
  1973. return -1;
  1974. }
  1975. int starpu_worker_get_by_devid(enum starpu_worker_archtype type, int devid)
  1976. {
  1977. unsigned nworkers = starpu_worker_get_count();
  1978. unsigned id;
  1979. for (id = 0; id < nworkers; id++)
  1980. if (starpu_worker_get_type(id) == type && starpu_worker_get_devid(id) == devid)
  1981. return id;
  1982. /* Not found */
  1983. return -1;
  1984. }
/* Fill DEVIDS with up to NUM distinct device ids used by workers of the
 * given type, and return how many were stored.
 * NOTE(review): starpu_worker_get_ids_by_type returns -ERANGE as an
 * unsigned on overflow; with maxsize == nworkers that cannot happen
 * here, but the "> 0" check below would not catch it if it did. */
int starpu_worker_get_devids(enum starpu_worker_archtype type, int *devids, int num)
{
	unsigned nworkers = starpu_worker_get_count();
	int workerids[nworkers];
	unsigned ndevice_workers = starpu_worker_get_ids_by_type(type, workerids, nworkers);

	unsigned ndevids = 0;

	if(ndevice_workers > 0)
	{
		unsigned id, devid;
		int cnt = 0;
		unsigned found = 0;
		for(id = 0; id < ndevice_workers; id++)
		{
			int curr_devid;
			curr_devid = _starpu_config.workers[workerids[id]].devid;
			/* Skip device ids already recorded (several workers may
			 * share one device). */
			for(devid = 0; devid < ndevids; devid++)
			{
				if(curr_devid == devids[devid])
				{
					found = 1;
					break;
				}
			}
			if(!found)
			{
				devids[ndevids++] = curr_devid;
				cnt++;
			}
			else
				found = 0;

			/* Stop once NUM distinct devices have been collected. */
			if(cnt == num)
				break;
		}
	}

	return ndevids;
}
/* Copy the name of worker ID into DST (truncated to MAXLEN, always
 * NUL-terminated by snprintf). */
void starpu_worker_get_name(int id, char *dst, size_t maxlen)
{
	char *name = _starpu_config.workers[id].name;

	snprintf(dst, maxlen, "%s", name);
}
/* CPU binding id of worker WORKERID. */
int starpu_worker_get_bindid(int workerid)
{
	return _starpu_config.workers[workerid].bindid;
}
  2030. int starpu_bindid_get_workerids(int bindid, int **workerids)
  2031. {
  2032. if (bindid >= (int) _starpu_config.nbindid)
  2033. return 0;
  2034. *workerids = _starpu_config.bindid_workers[bindid].workerids;
  2035. return _starpu_config.bindid_workers[bindid].nworkers;
  2036. }
  2037. int starpu_worker_get_stream_workerids(unsigned devid, int *workerids, enum starpu_worker_archtype type)
  2038. {
  2039. unsigned nworkers = starpu_worker_get_count();
  2040. int nw = 0;
  2041. unsigned id;
  2042. for (id = 0; id < nworkers; id++)
  2043. {
  2044. if (_starpu_config.workers[id].devid == devid &&
  2045. (type == STARPU_ANY_WORKER || _starpu_config.workers[id].arch == type))
  2046. workerids[nw++] = id;
  2047. }
  2048. return nw;
  2049. }
/* Give back pointers to the scheduling mutex and condition variable of
 * worker WORKERID (used by schedulers to sleep/wake the worker). */
void starpu_worker_get_sched_condition(int workerid, starpu_pthread_mutex_t **sched_mutex, starpu_pthread_cond_t **sched_cond)
{
	STARPU_ASSERT(workerid >= 0 && workerid < STARPU_NMAXWORKERS);
	*sched_cond = &_starpu_config.workers[workerid].sched_cond;
	*sched_mutex = &_starpu_config.workers[workerid].sched_mutex;
}
/* returns 1 if the call results in initiating a transition of worker WORKERID
 * from sleeping state to awake
 * returns 0 if worker WORKERID is not sleeping or the wake-up transition
 * already has been initiated
 *
 * Caller must hold the worker's sched mutex. */
static int starpu_wakeup_worker_locked(int workerid, starpu_pthread_cond_t *sched_cond, starpu_pthread_mutex_t *mutex STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_SIMGRID
	starpu_pthread_queue_broadcast(&_starpu_simgrid_task_queue[workerid]);
#endif
	/* Worker is busy scheduling: just ask it to stay awake afterwards,
	 * no wake-up transition is needed. */
	if (_starpu_config.workers[workerid].status == STATUS_SCHEDULING || _starpu_config.workers[workerid].status == STATUS_SLEEPING_SCHEDULING)
	{
		_starpu_config.workers[workerid].state_keep_awake = 1;
		return 0;
	}
	else if (_starpu_config.workers[workerid].status == STATUS_SLEEPING)
	{
		int ret = 0;
		/* Only report 1 when we are the ones initiating the wake-up;
		 * keep_awake already set means someone else did. */
		if (_starpu_config.workers[workerid].state_keep_awake != 1)
		{
			_starpu_config.workers[workerid].state_keep_awake = 1;
			ret = 1;
		}
		/* cond_broadcast is required over cond_signal since
		 * the condition is share for multiple purpose */
		STARPU_PTHREAD_COND_BROADCAST(sched_cond);
		return ret;
	}
	return 0;
}
/* Same as starpu_wakeup_worker_locked, but takes/releases the worker's
 * sched mutex itself. */
static int starpu_wakeup_worker_no_relax(int workerid, starpu_pthread_cond_t *sched_cond, starpu_pthread_mutex_t *sched_mutex)
{
	int success;
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(sched_mutex);
	success = starpu_wakeup_worker_locked(workerid, sched_cond, sched_mutex);
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(sched_mutex);
	return success;
}
/* Public wrapper: wake worker WORKERID, caller already holds its sched
 * mutex.  Returns 1 when this call initiated the wake-up. */
int starpu_wake_worker_locked(int workerid)
{
	starpu_pthread_mutex_t *sched_mutex;
	starpu_pthread_cond_t *sched_cond;
	starpu_worker_get_sched_condition(workerid, &sched_mutex, &sched_cond);
	return starpu_wakeup_worker_locked(workerid, sched_cond, sched_mutex);
}
/* Public wrapper: wake worker WORKERID, taking its sched mutex
 * internally.  Returns 1 when this call initiated the wake-up. */
int starpu_wake_worker_no_relax(int workerid)
{
	starpu_pthread_mutex_t *sched_mutex;
	starpu_pthread_cond_t *sched_cond;
	starpu_worker_get_sched_condition(workerid, &sched_mutex, &sched_cond);
	return starpu_wakeup_worker_no_relax(workerid, sched_cond, sched_mutex);
}
  2108. int starpu_worker_get_nids_by_type(enum starpu_worker_archtype type, int *workerids, int maxsize)
  2109. {
  2110. unsigned nworkers = starpu_worker_get_count();
  2111. int cnt = 0;
  2112. unsigned id;
  2113. for (id = 0; id < nworkers; id++)
  2114. {
  2115. if (type == STARPU_ANY_WORKER || starpu_worker_get_type(id) == type)
  2116. {
  2117. /* Perhaps the array is too small ? */
  2118. if (cnt >= maxsize)
  2119. return cnt;
  2120. workerids[cnt++] = id;
  2121. }
  2122. }
  2123. return cnt;
  2124. }
/* Fill WORKERIDS (up to MAXSIZE) with the ids of workers of the given
 * type that do not belong to any scheduling context (contexts are
 * scanned starting at index 1, skipping the default context 0), and
 * return how many were stored. */
int starpu_worker_get_nids_ctx_free_by_type(enum starpu_worker_archtype type, int *workerids, int maxsize)
{
	unsigned nworkers = starpu_worker_get_count();
	int cnt = 0;
	unsigned id;

	for (id = 0; id < nworkers; id++)
	{
		if (type == STARPU_ANY_WORKER || starpu_worker_get_type(id) == type)
		{
			/* Perhaps the array is too small ? */
			if (cnt >= maxsize)
				return cnt;
			unsigned found = 0;
			int s;
			/* Look for this worker in every active context's
			 * worker collection. */
			for(s = 1; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if(_starpu_config.sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
				{
					struct starpu_worker_collection *workers = _starpu_config.sched_ctxs[s].workers;
					struct starpu_sched_ctx_iterator it;

					workers->init_iterator(workers, &it);
					while(workers->has_next(workers, &it))
					{
						unsigned worker = workers->get_next(workers, &it);
						if(worker == id)
						{
							found = 1;
							break;
						}
					}
					if(found)
						break;
				}
			}
			/* Only workers belonging to no context are reported. */
			if(!found)
				workerids[cnt++] = id;
		}
	}
	return cnt;
}
/* Report the StarPU version the library was compiled as.
 * Writes through all three pointers; callers must pass valid storage. */
void starpu_get_version(int *major, int *minor, int *release)
{
	*major = STARPU_MAJOR_VERSION;
	*minor = STARPU_MINOR_VERSION;
	*release = STARPU_RELEASE_VERSION;
}
  2171. unsigned starpu_worker_get_sched_ctx_list(int workerid, unsigned **sched_ctxs)
  2172. {
  2173. unsigned s = 0;
  2174. unsigned nsched_ctxs = _starpu_worker_get_nsched_ctxs(workerid);
  2175. _STARPU_MALLOC(*sched_ctxs, nsched_ctxs*sizeof(unsigned));
  2176. struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
  2177. struct _starpu_sched_ctx_elt *e = NULL;
  2178. struct _starpu_sched_ctx_list_iterator list_it;
  2179. _starpu_sched_ctx_list_iterator_init(worker->sched_ctx_list, &list_it);
  2180. while (_starpu_sched_ctx_list_iterator_has_next(&list_it))
  2181. {
  2182. e = _starpu_sched_ctx_list_iterator_get_next(&list_it);
  2183. (*sched_ctxs)[s++] = e->sched_ctx;
  2184. }
  2185. return nsched_ctxs;
  2186. }
  2187. char *starpu_worker_get_type_as_string(enum starpu_worker_archtype type)
  2188. {
  2189. if (type == STARPU_CPU_WORKER) return "STARPU_CPU_WORKER";
  2190. if (type == STARPU_CUDA_WORKER) return "STARPU_CUDA_WORKER";
  2191. if (type == STARPU_OPENCL_WORKER) return "STARPU_OPENCL_WORKER";
  2192. if (type == STARPU_MIC_WORKER) return "STARPU_MIC_WORKER";
  2193. if (type == STARPU_MPI_MS_WORKER) return "STARPU_MPI_MS_WORKER";
  2194. if (type == STARPU_ANY_WORKER) return "STARPU_ANY_WORKER";
  2195. return "STARPU_unknown_WORKER";
  2196. }
  2197. void _starpu_worker_set_stream_ctx(unsigned workerid, struct _starpu_sched_ctx *sched_ctx)
  2198. {
  2199. STARPU_ASSERT(workerid < starpu_worker_get_count());
  2200. struct _starpu_worker *w = _starpu_get_worker_struct(workerid);
  2201. w->stream_ctx = sched_ctx;
  2202. }
  2203. struct _starpu_sched_ctx* _starpu_worker_get_ctx_stream(unsigned stream_workerid)
  2204. {
  2205. if (stream_workerid >= starpu_worker_get_count())
  2206. return NULL;
  2207. struct _starpu_worker *w = _starpu_get_worker_struct(stream_workerid);
  2208. return w->stream_ctx;
  2209. }
  2210. unsigned starpu_worker_get_sched_ctx_id_stream(unsigned stream_workerid)
  2211. {
  2212. if (stream_workerid >= starpu_worker_get_count())
  2213. return STARPU_NMAX_SCHED_CTXS;
  2214. struct _starpu_worker *w = _starpu_get_worker_struct(stream_workerid);
  2215. return w->stream_ctx != NULL ? w->stream_ctx->id : STARPU_NMAX_SCHED_CTXS;
  2216. }
  2217. void starpu_worker_display_names(FILE *output, enum starpu_worker_archtype type)
  2218. {
  2219. int nworkers = starpu_worker_get_count_by_type(type);
  2220. if (nworkers <= 0)
  2221. {
  2222. fprintf(output, "No %s worker\n", starpu_worker_get_type_as_string(type));
  2223. }
  2224. else
  2225. {
  2226. int i, ids[nworkers];
  2227. starpu_worker_get_ids_by_type(type, ids, nworkers);
  2228. fprintf(output, "%d %s worker%s:\n", nworkers, starpu_worker_get_type_as_string(type), nworkers==1?"":"s");
  2229. for(i = 0; i < nworkers; i++)
  2230. {
  2231. char name[256];
  2232. starpu_worker_get_name(ids[i], name, 256);
  2233. fprintf(output, "\t%s\n", name);
  2234. }
  2235. }
  2236. }
/* Give up executing <task> on <worker>: detach it from the worker's
 * current-task bookkeeping, then push it back to the scheduler so another
 * worker may pick it up. */
void _starpu_worker_refuse_task(struct _starpu_worker *worker, struct starpu_task *task)
{
	if (worker->pipeline_length || worker->arch == STARPU_OPENCL_WORKER)
	{
		/* Pipelined worker: the task sits somewhere in the circular
		 * current_tasks[] buffer starting at first_task; locate and
		 * clear its slot. */
		int j;
		for (j = 0; j < worker->ntasks; j++)
		{
			const int j_mod = (j+worker->first_task)%STARPU_MAX_PIPELINE;
			if (task == worker->current_tasks[j_mod])
			{
				worker->current_tasks[j_mod] = NULL;
				if (j == 0)
				{
					/* The refused task was at the head of the
					 * pipeline: advance the head and drop the
					 * "current task" markers. */
					worker->first_task = (worker->first_task + 1) % STARPU_MAX_PIPELINE;
					worker->current_task = NULL;
					_starpu_set_current_task(NULL);
				}
				break;
			}
		}
		/* The task must have been present in the pipeline. */
		STARPU_ASSERT(j<worker->ntasks);
	}
	else
	{
		/* Non-pipelined worker: just clear the current task markers. */
		worker->current_task = NULL;
		_starpu_set_current_task(NULL);
	}
	worker->ntasks--;
	/* Reset the prefetch mark — presumably so prefetch accounting is
	 * redone for whichever worker takes the task next; confirm. */
	task->prefetched = 0;
	int res = _starpu_push_task_to_workers(task);
	STARPU_ASSERT_MSG(res == 0, "_starpu_push_task_to_workers() unexpectedly returned = %d\n", res);
}
/* Public wrapper: whether the calling worker has a scheduling operation
 * in progress. */
int starpu_worker_sched_op_pending(void)
{
	return _starpu_worker_sched_op_pending();
}
/* The three functions below carry an #undef first: a macro of the same
 * name exists (presumably an inline fast path in a public header), and
 * the #undef ensures a real external-linkage symbol is emitted here. */
#undef starpu_worker_relax_on
void starpu_worker_relax_on(void)
{
	_starpu_worker_relax_on();
}
#undef starpu_worker_relax_off
void starpu_worker_relax_off(void)
{
	_starpu_worker_relax_off();
}
#undef starpu_worker_get_relax_state
int starpu_worker_get_relax_state(void)
{
	return _starpu_worker_get_relax_state();
}
/* Public wrappers around the internal per-worker lock operations. */
void starpu_worker_lock(int workerid)
{
	_starpu_worker_lock(workerid);
}
int starpu_worker_trylock(int workerid)
{
	return _starpu_worker_trylock(workerid);
}
void starpu_worker_unlock(int workerid)
{
	_starpu_worker_unlock(workerid);
}
void starpu_worker_lock_self(void)
{
	_starpu_worker_lock_self();
}
void starpu_worker_unlock_self(void)
{
	_starpu_worker_unlock_self();
}
/* Public wrapper: wake a worker through the relax protocol. */
int starpu_wake_worker_relax(int workerid)
{
	return _starpu_wake_worker_relax(workerid);
}
#ifdef STARPU_HAVE_HWLOC
/* Return a duplicate of the worker's hwloc CPU set; the caller owns the
 * returned bitmap and must release it with hwloc_bitmap_free(). */
hwloc_cpuset_t starpu_worker_get_hwloc_cpuset(int workerid)
{
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
	return hwloc_bitmap_dup(worker->hwloc_cpu_set);
}
/* Return the hwloc topology object recorded for the worker (no copy is
 * made; the object belongs to the topology). */
hwloc_obj_t starpu_worker_get_hwloc_obj(int workerid)
{
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
	return worker->hwloc_obj;
}
#endif
/* Light version of _starpu_wake_worker_relax, which, when possible,
 * speculatively sets keep_awake on the target worker without waiting that
 * worker to enter the relaxed state.
 */
int starpu_wake_worker_relax_light(int workerid)
{
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
	STARPU_ASSERT(worker != NULL);
	int cur_workerid = starpu_worker_get_id();
	if (workerid != cur_workerid)
	{
		/* Waking another worker: put ourselves in the relaxed state
		 * before blocking on its sched_mutex. */
		starpu_worker_relax_on();
		STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
		while (!worker->state_relax_refcnt)
		{
			/* Attempt a fast path if the worker is not really asleep */
			if (_starpu_config.workers[workerid].status == STATUS_SCHEDULING
					|| _starpu_config.workers[workerid].status == STATUS_SLEEPING_SCHEDULING)
			{
				/* Target is busy scheduling: flag it to stay awake and
				 * return without waiting for it to become relaxed. */
				_starpu_config.workers[workerid].state_keep_awake = 1;
				STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
				starpu_worker_relax_off();
				return 1;
			}
			/* Slow path: wait (on its sched_cond) for the target worker
			 * to enter the relaxed state. */
			STARPU_PTHREAD_COND_WAIT(&worker->sched_cond, &worker->sched_mutex);
		}
	}
	else
	{
		/* Waking ourselves: just take our own lock, no relax needed. */
		STARPU_PTHREAD_MUTEX_LOCK_SCHED(&worker->sched_mutex);
	}
	int ret = starpu_wake_worker_locked(workerid);
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(&worker->sched_mutex);
	if (workerid != cur_workerid)
	{
		starpu_worker_relax_off();
	}
	return ret;
}
#ifdef STARPU_WORKER_CALLBACKS
/* Replace the callback invoked when a worker goes to sleep.
 * NOTE(review): the assertion checks that a callback was already installed
 * (i.e. the feature was enabled in the configuration) before allowing the
 * replacement — confirm this is intended rather than asserting the new
 * <callback> argument. */
void starpu_worker_set_going_to_sleep_callback(void (*callback)(unsigned workerid))
{
	STARPU_ASSERT(_starpu_config.conf.callback_worker_going_to_sleep);
	_starpu_config.conf.callback_worker_going_to_sleep = callback;
}
/* Replace the callback invoked when a worker wakes up; same pre-existing
 * callback requirement as above. */
void starpu_worker_set_waking_up_callback(void (*callback)(unsigned workerid))
{
	STARPU_ASSERT(_starpu_config.conf.callback_worker_waking_up);
	_starpu_config.conf.callback_worker_waking_up = callback;
}
#endif
  2375. enum starpu_node_kind _starpu_worker_get_node_kind(enum starpu_worker_archtype type)
  2376. {
  2377. switch(type)
  2378. {
  2379. case STARPU_CPU_WORKER:
  2380. return STARPU_CPU_RAM;
  2381. case STARPU_CUDA_WORKER:
  2382. return STARPU_CUDA_RAM;
  2383. case STARPU_OPENCL_WORKER:
  2384. return STARPU_OPENCL_RAM;
  2385. break;
  2386. case STARPU_MIC_WORKER:
  2387. return STARPU_MIC_RAM;
  2388. case STARPU_MPI_MS_WORKER:
  2389. return STARPU_MPI_MS_RAM;
  2390. default:
  2391. STARPU_ABORT();
  2392. }
  2393. }