/* StarPU --- Runtime system for heterogeneous multicore architectures.
*
* Copyright (C) 2009-2015 Université de Bordeaux
* Copyright (C) 2010, 2011, 2012, 2013, 2014 Centre National de la Recherche Scientifique
* Copyright (C) 2010, 2011 Institut National de Recherche en Informatique et Automatique
* Copyright (C) 2011 Télécom-SudParis
* Copyright (C) 2011-2012 INRIA
*
* StarPU is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or (at
* your option) any later version.
*
* StarPU is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU Lesser General Public License in COPYING.LGPL for more details.
*/
#include <stdlib.h>
#include <stdio.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/progress_hook.h>
#include <core/workers.h>
#include <core/debug.h>
#include <core/disk.h>
#include <core/task.h>
#include <datawizard/malloc.h>
#include <profiling/profiling.h>
#include <starpu_task_list.h>
#include <sched_policies/sched_component.h>
#include <drivers/mp_common/sink_common.h>
#include <drivers/scc/driver_scc_common.h>
#include <drivers/cpu/driver_cpu.h>
#include <drivers/cuda/driver_cuda.h>
#include <drivers/opencl/driver_opencl.h>
#ifdef STARPU_SIMGRID
#include <msg/msg.h>
#include <core/simgrid.h>
#endif
#if defined(_WIN32) && !defined(__CYGWIN__)
#include <windows.h>
#endif
/* acquire/release semantic for concurrent initialization/de-initialization */
static starpu_pthread_mutex_t init_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
static starpu_pthread_cond_t init_cond = STARPU_PTHREAD_COND_INITIALIZER;
static int init_count = 0;
static enum { UNINITIALIZED, CHANGING, INITIALIZED } initialized = UNINITIALIZED;
static starpu_pthread_key_t worker_key;
static starpu_pthread_key_t worker_set_key;
static struct _starpu_machine_config config;
/* Pointers to argc and argv
*/
static int *my_argc = 0;
static char ***my_argv = NULL;
/* Initialize value of static argc and argv, called when the process begins
*/
void _starpu_set_argc_argv(int *argc_param, char ***argv_param)
{
my_argc = argc_param;
my_argv = argv_param;
}
int *_starpu_get_argc()
{
return my_argc;
}
char ***_starpu_get_argv()
{
return my_argv;
}
int _starpu_is_initialized(void)
{
return initialized == INITIALIZED;
}
struct _starpu_machine_config *_starpu_get_machine_config(void)
{
return &config;
}
/* Makes sure that at least one of the workers of type <arch> can execute
* <task>, for at least one of its implementations. */
static uint32_t _starpu_worker_exists_and_can_execute(struct starpu_task *task,
enum starpu_worker_archtype arch)
{
int i;
_starpu_codelet_check_deprecated_fields(task->cl);
struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
struct starpu_worker_collection *workers = sched_ctx->workers;
struct starpu_sched_ctx_iterator it;
workers->init_iterator(workers, &it);
while(workers->has_next(workers, &it))
{
i = workers->get_next(workers, &it);
if (starpu_worker_get_type(i) != arch)
continue;
unsigned impl;
for (impl = 0; impl < STARPU_MAXIMPLEMENTATIONS; impl++)
{
/* We could call task->cl->can_execute(i, task, impl)
here, it would definitely work. It is probably
cheaper to check whether it is necessary in order to
avoid a useless function call, though. */
unsigned test_implementation = 0;
switch (arch)
{
case STARPU_CPU_WORKER:
if (task->cl->cpu_funcs[impl] != NULL)
test_implementation = 1;
break;
case STARPU_CUDA_WORKER:
if (task->cl->cuda_funcs[impl] != NULL)
test_implementation = 1;
break;
case STARPU_OPENCL_WORKER:
if (task->cl->opencl_funcs[impl] != NULL)
test_implementation = 1;
break;
case STARPU_MIC_WORKER:
if (task->cl->cpu_funcs_name[impl] != NULL || task->cl->mic_funcs[impl] != NULL)
test_implementation = 1;
break;
case STARPU_SCC_WORKER:
if (task->cl->cpu_funcs_name[impl] != NULL || task->cl->scc_funcs[impl] != NULL)
test_implementation = 1;
break;
default:
STARPU_ABORT();
}
if (!test_implementation)
continue;
if (task->cl->can_execute)
return task->cl->can_execute(i, task, impl);
if(test_implementation)
return 1;
}
}
return 0;
}
/* When a task is submitted, check whether there exists a worker
that can execute it. */
uint32_t _starpu_worker_exists(struct starpu_task *task)
{
_starpu_codelet_check_deprecated_fields(task->cl);
/* If the task belongs to the initial context, we can simply check
the worker mask of the whole machine; otherwise we have to iterate
over the workers of the context and check whether one of them can
execute the task. */
if(task->sched_ctx == 0)
{
if (!(task->cl->where & config.worker_mask))
return 0;
if (!task->cl->can_execute)
return 1;
}
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
if ((task->cl->where & STARPU_CPU) &&
_starpu_worker_exists_and_can_execute(task, STARPU_CPU_WORKER))
return 1;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
if ((task->cl->where & STARPU_CUDA) &&
_starpu_worker_exists_and_can_execute(task, STARPU_CUDA_WORKER))
return 1;
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
if ((task->cl->where & STARPU_OPENCL) &&
_starpu_worker_exists_and_can_execute(task, STARPU_OPENCL_WORKER))
return 1;
#endif
#ifdef STARPU_USE_MIC
if ((task->cl->where & STARPU_MIC) &&
_starpu_worker_exists_and_can_execute(task, STARPU_MIC_WORKER))
return 1;
#endif
#ifdef STARPU_USE_SCC
if ((task->cl->where & STARPU_SCC) &&
_starpu_worker_exists_and_can_execute(task, STARPU_SCC_WORKER))
return 1;
#endif
return 0;
}
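/* Illustrative sketch (not part of this file): how a codelet's `where' mask and
 * `can_execute' hook drive the checks above. The codelet fields and the
 * starpu_worker_get_type()/starpu_worker_get_devid() helpers are the public
 * StarPU API; the predicate and the implementation names (example_cpu_impl,
 * example_cuda_impl) are hypothetical. */
#if 0
static int example_can_execute(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
	(void) task;
	(void) nimpl;
	/* Hypothetical restriction: only accept the CUDA worker driving device 0 */
	if (starpu_worker_get_type(workerid) == STARPU_CUDA_WORKER)
		return starpu_worker_get_devid(workerid) == 0;
	return 1; /* CPU workers are always fine */
}

static struct starpu_codelet example_cl =
{
	.where = STARPU_CPU | STARPU_CUDA,
	.cpu_funcs = { example_cpu_impl },
	.cuda_funcs = { example_cuda_impl },
	.can_execute = example_can_execute,
	.nbuffers = 1,
};
#endif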
uint32_t _starpu_can_submit_cuda_task(void)
{
return (STARPU_CUDA & config.worker_mask);
}
uint32_t _starpu_can_submit_cpu_task(void)
{
return (STARPU_CPU & config.worker_mask);
}
uint32_t _starpu_can_submit_opencl_task(void)
{
return (STARPU_OPENCL & config.worker_mask);
}
uint32_t _starpu_can_submit_scc_task(void)
{
return (STARPU_SCC & config.worker_mask);
}
static inline int _starpu_can_use_nth_implementation(enum starpu_worker_archtype arch, struct starpu_codelet *cl, unsigned nimpl)
{
switch(arch)
{
case STARPU_ANY_WORKER:
{
int cpu_func_enabled=1, cuda_func_enabled=1, opencl_func_enabled=1;
/* TODO: MIC/SCC */
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
starpu_cpu_func_t cpu_func = _starpu_task_get_cpu_nth_implementation(cl, nimpl);
cpu_func_enabled = cpu_func != NULL && starpu_cpu_worker_get_count();
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
starpu_cuda_func_t cuda_func = _starpu_task_get_cuda_nth_implementation(cl, nimpl);
cuda_func_enabled = cuda_func != NULL && starpu_cuda_worker_get_count();
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
starpu_opencl_func_t opencl_func = _starpu_task_get_opencl_nth_implementation(cl, nimpl);
opencl_func_enabled = opencl_func != NULL && starpu_opencl_worker_get_count();
#endif
return (cpu_func_enabled && cuda_func_enabled && opencl_func_enabled);
}
case STARPU_CPU_WORKER:
{
starpu_cpu_func_t func = _starpu_task_get_cpu_nth_implementation(cl, nimpl);
return func != NULL;
}
case STARPU_CUDA_WORKER:
{
starpu_cuda_func_t func = _starpu_task_get_cuda_nth_implementation(cl, nimpl);
return func != NULL;
}
case STARPU_OPENCL_WORKER:
{
starpu_opencl_func_t func = _starpu_task_get_opencl_nth_implementation(cl, nimpl);
return func != NULL;
}
case STARPU_MIC_WORKER:
{
starpu_mic_func_t func = _starpu_task_get_mic_nth_implementation(cl, nimpl);
char *func_name = _starpu_task_get_cpu_name_nth_implementation(cl, nimpl);
return func != NULL || func_name != NULL;
}
case STARPU_SCC_WORKER:
{
starpu_scc_func_t func = _starpu_task_get_scc_nth_implementation(cl, nimpl);
char *func_name = _starpu_task_get_cpu_name_nth_implementation(cl, nimpl);
return func != NULL || func_name != NULL;
}
default:
STARPU_ASSERT_MSG(0, "Unknown arch type %d", arch);
}
return 0;
}
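/* Illustrative sketch (not part of this file): a codelet providing two CPU
 * implementations. With such a codelet, _starpu_can_use_nth_implementation()
 * returns 1 for nimpl 0 and 1 on CPU workers and 0 beyond that. The
 * implementation names are hypothetical. */
#if 0
static struct starpu_codelet example_multi_impl_cl =
{
	.where = STARPU_CPU,
	.cpu_funcs = { example_scalar_impl, example_vectorized_impl },
	.nbuffers = 2,
};
#endif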
int starpu_worker_can_execute_task(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
/* if the task can't be parallel don't submit it to a ctx */
unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx->id);
if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
if(!task->possibly_parallel) return 0;
/* if the worker is blocked in a parallel ctx don't submit tasks on it */
if(sched_ctx->parallel_sect[workerid] ) return 0;
/* TODO: check that the task operand sizes will fit on that device */
return (task->cl->where & config.workers[workerid].worker_mask) &&
_starpu_can_use_nth_implementation(config.workers[workerid].arch, task->cl, nimpl) &&
(!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl));
}
int starpu_worker_can_execute_task_impl(unsigned workerid, struct starpu_task *task, unsigned *impl_mask)
{
struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
unsigned mask;
int i;
enum starpu_worker_archtype arch;
struct starpu_codelet *cl;
if(sched_ctx->parallel_sect[workerid]) return 0;
/* TODO: check that the task operand sizes will fit on that device */
cl = task->cl;
if (!(cl->where & config.workers[workerid].worker_mask)) return 0;
mask = 0;
arch = config.workers[workerid].arch;
if (!task->cl->can_execute)
{
for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
if (_starpu_can_use_nth_implementation(arch, cl, i)) {
mask |= 1U << i;
if (!impl_mask)
break;
}
} else {
for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
if (_starpu_can_use_nth_implementation(arch, cl, i)
&& (!task->cl->can_execute || task->cl->can_execute(workerid, task, i))) {
mask |= 1U << i;
if (!impl_mask)
break;
}
}
if (impl_mask)
*impl_mask = mask;
return mask != 0;
}
int starpu_worker_can_execute_task_first_impl(unsigned workerid, struct starpu_task *task, unsigned *nimpl)
{
struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
int i;
enum starpu_worker_archtype arch;
struct starpu_codelet *cl;
if(sched_ctx->parallel_sect[workerid]) return 0;
/* TODO: check that the task operand sizes will fit on that device */
cl = task->cl;
if (!(cl->where & config.workers[workerid].worker_mask)) return 0;
arch = config.workers[workerid].arch;
if (!task->cl->can_execute)
{
for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
if (_starpu_can_use_nth_implementation(arch, cl, i)) {
if (nimpl)
*nimpl = i;
return 1;
}
} else {
for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
if (_starpu_can_use_nth_implementation(arch, cl, i)
&& (!task->cl->can_execute || task->cl->can_execute(workerid, task, i))) {
if (nimpl)
*nimpl = i;
return 1;
}
}
return 0;
}
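/* Illustrative sketch (not part of this file): how a scheduler might use
 * starpu_worker_can_execute_task_impl() to enumerate the implementations a
 * given worker supports for a task in one call, instead of probing each
 * implementation separately with starpu_worker_can_execute_task(). */
#if 0
static int example_pick_first_impl(unsigned workerid, struct starpu_task *task)
{
	unsigned impl_mask;
	if (!starpu_worker_can_execute_task_impl(workerid, task, &impl_mask))
		return -1; /* this worker cannot run the task at all */
	unsigned i;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (impl_mask & (1U << i))
			return (int) i; /* first supported implementation */
	return -1;
}
#endif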
int starpu_combined_worker_can_execute_task(unsigned workerid, struct starpu_task *task, unsigned nimpl)
{
/* TODO: check that the task operand sizes will fit on that device */
struct starpu_codelet *cl = task->cl;
unsigned nworkers = config.topology.nworkers;
/* Is this a parallel worker ? */
if (workerid < nworkers)
{
return !!((task->cl->where & config.workers[workerid].worker_mask) &&
_starpu_can_use_nth_implementation(config.workers[workerid].arch, task->cl, nimpl) &&
(!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl)));
}
else
{
if ((cl->type == STARPU_SPMD)
#ifdef STARPU_HAVE_HWLOC
|| (cl->type == STARPU_FORKJOIN)
#else
#ifdef __GLIBC__
|| (cl->type == STARPU_FORKJOIN)
#endif
#endif
)
{
/* TODO we should add other types of constraints */
/* Is the worker larger than requested ? */
int worker_size = (int)config.combined_workers[workerid - nworkers].worker_size;
int worker0 = config.combined_workers[workerid - nworkers].combined_workerid[0];
return !!((worker_size <= task->cl->max_parallelism) &&
_starpu_can_use_nth_implementation(config.workers[worker0].arch, task->cl, nimpl) &&
(!task->cl->can_execute || task->cl->can_execute(workerid, task, nimpl)));
}
else
{
/* We have a sequential task but a parallel worker */
return 0;
}
}
}
/*
* Runtime initialization methods
*/
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
static struct _starpu_worker_set cuda_worker_set[STARPU_MAXCUDADEVS];
#endif
#ifdef STARPU_USE_MIC
static struct _starpu_worker_set mic_worker_set[STARPU_MAXMICDEVS];
#endif
static void _starpu_init_worker_queue(struct _starpu_worker *workerarg)
{
starpu_pthread_cond_t *cond = &workerarg->sched_cond;
starpu_pthread_mutex_t *mutex = &workerarg->sched_mutex;
unsigned memory_node = workerarg->memory_node;
_starpu_memory_node_register_condition(cond, mutex, memory_node);
}
/*
* Returns 0 if the given driver is one of the drivers that must be launched by
* the application itself, and not by StarPU, 1 otherwise.
*/
static unsigned _starpu_may_launch_driver(struct starpu_conf *conf,
struct starpu_driver *d)
{
if (conf->n_not_launched_drivers == 0 ||
conf->not_launched_drivers == NULL)
return 1;
/* Is <d> in conf->not_launched_drivers ? */
unsigned i;
for (i = 0; i < conf->n_not_launched_drivers; i++)
{
if (d->type != conf->not_launched_drivers[i].type)
continue;
switch (d->type)
{
case STARPU_CPU_WORKER:
if (d->id.cpu_id == conf->not_launched_drivers[i].id.cpu_id)
return 0;
break;
case STARPU_CUDA_WORKER:
if (d->id.cuda_id == conf->not_launched_drivers[i].id.cuda_id)
return 0;
break;
#ifdef STARPU_USE_OPENCL
case STARPU_OPENCL_WORKER:
if (d->id.opencl_id == conf->not_launched_drivers[i].id.opencl_id)
return 0;
break;
#endif
default:
STARPU_ABORT();
}
}
return 1;
}
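/* Illustrative sketch (not part of this file): declaring a driver that the
 * application will run itself, which is what _starpu_may_launch_driver()
 * checks for. This relies on the public starpu_driver API (starpu_driver_run()
 * in particular); error handling is omitted and the device id is arbitrary. */
#if 0
static void example_run_cpu_driver_myself(void)
{
	struct starpu_conf conf;
	starpu_conf_init(&conf);
	struct starpu_driver d =
	{
		.type = STARPU_CPU_WORKER,
		.id.cpu_id = 0,
	};
	conf.not_launched_drivers = &d;
	conf.n_not_launched_drivers = 1;
	starpu_init(&conf);
	/* StarPU will not spawn a thread for this driver; the application runs it: */
	starpu_driver_run(&d);
	starpu_shutdown();
}
#endif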
#ifdef STARPU_PERF_DEBUG
struct itimerval prof_itimer;
#endif
static void _starpu_worker_init(struct _starpu_worker *workerarg, struct _starpu_machine_config *pconfig)
{
workerarg->config = pconfig;
STARPU_PTHREAD_MUTEX_INIT(&workerarg->mutex, NULL);
/* arch initialized by topology.c */
/* worker_mask initialized by topology.c */
/* perf_arch initialized by topology.c */
/* worker_thread initialized by _starpu_launch_drivers */
/* devid initialized by topology.c */
/* subworkerid initialized by topology.c */
/* bindid initialized by topology.c */
/* workerid initialized by topology.c */
workerarg->combined_workerid = workerarg->workerid;
workerarg->current_rank = 0;
workerarg->worker_size = 1;
STARPU_PTHREAD_COND_INIT(&workerarg->started_cond, NULL);
STARPU_PTHREAD_COND_INIT(&workerarg->ready_cond, NULL);
/* memory_node initialized by topology.c */
STARPU_PTHREAD_COND_INIT(&workerarg->sched_cond, NULL);
STARPU_PTHREAD_MUTEX_INIT(&workerarg->sched_mutex, NULL);
starpu_task_list_init(&workerarg->local_tasks);
workerarg->local_ordered_tasks = NULL;
workerarg->local_ordered_tasks_size = 0;
workerarg->current_ordered_task = 0;
workerarg->current_ordered_task_order = 1;
workerarg->current_task = NULL;
workerarg->first_task = 0;
workerarg->ntasks = 0;
workerarg->pipeline_length = 0;
workerarg->pipeline_stuck = 0;
workerarg->set = NULL;
/* if some codelet's termination cannot be handled directly :
* for instance in the Gordon driver, Gordon tasks' callbacks
* may be executed by another thread than that of the Gordon
* driver so that we cannot call the push_codelet_output method
* directly */
workerarg->terminated_jobs = _starpu_job_list_new();
workerarg->worker_is_running = 0;
workerarg->worker_is_initialized = 0;
workerarg->status = STATUS_INITIALIZING;
/* name initialized by driver */
/* short_name initialized by driver */
workerarg->run_by_starpu = 1;
workerarg->sched_ctx_list = NULL;
workerarg->tmp_sched_ctx = -1;
workerarg->nsched_ctxs = 0;
_starpu_barrier_counter_init(&workerarg->tasks_barrier, 0);
workerarg->has_prev_init = 0;
int ctx;
for(ctx = 0; ctx < STARPU_NMAX_SCHED_CTXS; ctx++)
workerarg->removed_from_ctx[ctx] = 0;
workerarg->spinning_backoff = 1;
for(ctx = 0; ctx < STARPU_NMAX_SCHED_CTXS; ctx++)
{
workerarg->shares_tasks_lists[ctx] = 0;
workerarg->poped_in_ctx[ctx] = 0;
}
workerarg->reverse_phase[0] = 0;
workerarg->reverse_phase[1] = 0;
workerarg->pop_ctx_priority = 1;
workerarg->sched_mutex_locked = 0;
workerarg->slave = 0;
/* cpu_set/hwloc_cpu_set initialized in topology.c */
}
void _starpu_worker_start(struct _starpu_worker *worker, unsigned fut_key)
{
(void) fut_key;
int devid = worker->devid;
(void) devid;
#if defined(STARPU_PERF_DEBUG) && !defined(STARPU_SIMGRID)
setitimer(ITIMER_PROF, &prof_itimer, NULL);
#endif
#ifdef STARPU_USE_FXT
_starpu_fxt_register_thread(worker->bindid);
unsigned memnode = worker->memory_node;
_STARPU_TRACE_WORKER_INIT_START(fut_key, worker->workerid, devid, memnode);
#endif
_starpu_bind_thread_on_cpu(worker->config, worker->bindid);
_STARPU_DEBUG("worker %p %d for dev %d is ready on logical cpu %d\n", worker, worker->workerid, devid, worker->bindid);
#ifdef STARPU_HAVE_HWLOC
_STARPU_DEBUG("worker %p %d cpuset start at %d\n", worker, worker->workerid, hwloc_bitmap_first(worker->hwloc_cpu_set));
#endif
_starpu_memory_node_set_local_key(&worker->memory_node);
_starpu_set_local_worker_key(worker);
STARPU_PTHREAD_MUTEX_LOCK(&worker->mutex);
worker->worker_is_running = 1;
STARPU_PTHREAD_COND_SIGNAL(&worker->started_cond);
STARPU_PTHREAD_MUTEX_UNLOCK(&worker->mutex);
}
static void _starpu_launch_drivers(struct _starpu_machine_config *pconfig)
{
pconfig->running = 1;
pconfig->pause_depth = 0;
pconfig->submitting = 1;
STARPU_HG_DISABLE_CHECKING(pconfig->watchdog_ok);
unsigned nworkers = pconfig->topology.nworkers;
/* Launch workers asynchronously */
unsigned worker, i;
#if defined(STARPU_PERF_DEBUG) && !defined(STARPU_SIMGRID)
/* Get itimer of the main thread, to set it for the worker threads */
getitimer(ITIMER_PROF, &prof_itimer);
#endif
#ifdef HAVE_AYUDAME_H
if (AYU_event) AYU_event(AYU_INIT, 0, NULL);
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
for (i = 0; i < sizeof(cuda_worker_set)/sizeof(cuda_worker_set[0]); i++)
cuda_worker_set[i].workers = NULL;
#endif
#ifdef STARPU_USE_MIC
for (i = 0; i < sizeof(mic_worker_set)/sizeof(mic_worker_set[0]); i++)
mic_worker_set[i].workers = NULL;
#endif
for (worker = 0; worker < nworkers; worker++)
{
struct _starpu_worker *workerarg = &pconfig->workers[worker];
#if defined(STARPU_USE_MIC) || defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
unsigned devid = workerarg->devid;
#endif
#ifdef STARPU_USE_CUDA
struct _starpu_worker_set *worker_set;
#endif
_STARPU_DEBUG("initialising worker %u/%u\n", worker, nworkers);
_starpu_init_worker_queue(workerarg);
struct starpu_driver driver;
driver.type = workerarg->arch;
switch (workerarg->arch)
{
#if defined(STARPU_USE_CPU) || defined(STARPU_SIMGRID)
case STARPU_CPU_WORKER:
driver.id.cpu_id = workerarg->devid;
if (_starpu_may_launch_driver(pconfig->conf, &driver))
{
STARPU_PTHREAD_CREATE_ON(
workerarg->name,
&workerarg->worker_thread,
NULL,
_starpu_cpu_worker,
workerarg,
_starpu_simgrid_get_host_by_worker(workerarg));
#ifdef STARPU_USE_FXT
/* In tracing mode, make sure the
* thread is really started before
* starting another one, to make sure
* they appear in order in the trace.
*/
STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
while (!workerarg->worker_is_running)
STARPU_PTHREAD_COND_WAIT(&workerarg->started_cond, &workerarg->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
#endif
}
else
{
workerarg->run_by_starpu = 0;
}
break;
#endif
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
case STARPU_CUDA_WORKER:
driver.id.cuda_id = workerarg->devid;
worker_set = &cuda_worker_set[devid];
workerarg->set = worker_set;
/* We spawn only one thread per CUDA driver,
* which will control all CUDA workers of this
* driver. (by using a worker set). */
if (worker_set->workers)
break;
worker_set->nworkers = starpu_get_env_number_default("STARPU_NWORKER_PER_CUDA", 1);
#ifndef STARPU_NON_BLOCKING_DRIVERS
if (worker_set->nworkers > 1)
{
_STARPU_DISP("Warning: reducing STARPU_NWORKER_PER_CUDA to 1 because blocking drivers are enabled\n");
worker_set->nworkers = 1;
}
#endif
worker_set->workers = workerarg;
worker_set->set_is_initialized = 0;
if (!_starpu_may_launch_driver(pconfig->conf, &driver))
{
workerarg->run_by_starpu = 0;
break;
}
STARPU_PTHREAD_CREATE_ON(
workerarg->name,
&worker_set->worker_thread,
NULL,
_starpu_cuda_worker,
worker_set,
_starpu_simgrid_get_host_by_worker(workerarg));
#ifdef STARPU_USE_FXT
STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
while (!workerarg->worker_is_running)
STARPU_PTHREAD_COND_WAIT(&workerarg->started_cond, &workerarg->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
#endif
STARPU_PTHREAD_MUTEX_LOCK(&worker_set->mutex);
while (!worker_set->set_is_initialized)
STARPU_PTHREAD_COND_WAIT(&worker_set->ready_cond,
&worker_set->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&worker_set->mutex);
worker_set->started = 1;
break;
#endif
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
case STARPU_OPENCL_WORKER:
#ifndef STARPU_SIMGRID
starpu_opencl_get_device(workerarg->devid, &driver.id.opencl_id);
if (!_starpu_may_launch_driver(pconfig->conf, &driver))
{
workerarg->run_by_starpu = 0;
break;
}
#endif
STARPU_PTHREAD_CREATE_ON(
workerarg->name,
&workerarg->worker_thread,
NULL,
_starpu_opencl_worker,
workerarg,
_starpu_simgrid_get_host_by_worker(workerarg));
#ifdef STARPU_USE_FXT
STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
while (!workerarg->worker_is_running)
STARPU_PTHREAD_COND_WAIT(&workerarg->started_cond, &workerarg->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
#endif
break;
#endif
#ifdef STARPU_USE_MIC
case STARPU_MIC_WORKER:
workerarg->set = &mic_worker_set[devid];
/* We spawn only one thread
* per MIC device, which will control all MIC
* workers of this device. (by using a worker set). */
if (mic_worker_set[devid].workers)
break;
mic_worker_set[devid].nworkers = pconfig->topology.nmiccores[devid];
/* We assume all MIC workers of a given MIC
* device are contiguous so that we can
* address them with the first one only. */
mic_worker_set[devid].workers = workerarg;
mic_worker_set[devid].set_is_initialized = 0;
STARPU_PTHREAD_CREATE_ON(
workerarg->name,
&mic_worker_set[devid].worker_thread,
NULL,
_starpu_mic_src_worker,
&mic_worker_set[devid],
_starpu_simgrid_get_host_by_worker(workerarg));
#ifdef STARPU_USE_FXT
STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
while (!workerarg->worker_is_running)
STARPU_PTHREAD_COND_WAIT(&workerarg->started_cond, &workerarg->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
#endif
STARPU_PTHREAD_MUTEX_LOCK(&mic_worker_set[devid].mutex);
while (!mic_worker_set[devid].set_is_initialized)
STARPU_PTHREAD_COND_WAIT(&mic_worker_set[devid].ready_cond,
&mic_worker_set[devid].mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&mic_worker_set[devid].mutex);
mic_worker_set[devid].started = 1;
break;
#endif /* STARPU_USE_MIC */
#ifdef STARPU_USE_SCC
case STARPU_SCC_WORKER:
workerarg->worker_is_initialized = 0;
STARPU_PTHREAD_CREATE_ON(
workerarg->name,
&workerarg->worker_thread,
NULL,
_starpu_scc_src_worker,
workerarg,
_starpu_simgrid_get_host_by_worker(workerarg));
#ifdef STARPU_USE_FXT
STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
while (!workerarg->worker_is_running)
STARPU_PTHREAD_COND_WAIT(&workerarg->started_cond, &workerarg->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
#endif
break;
#endif
default:
STARPU_ABORT();
}
}
for (worker = 0; worker < nworkers; worker++)
{
struct _starpu_worker *workerarg = &pconfig->workers[worker];
struct starpu_driver driver;
driver.type = workerarg->arch;
switch (workerarg->arch)
{
case STARPU_CPU_WORKER:
driver.id.cpu_id = workerarg->devid;
if (!_starpu_may_launch_driver(pconfig->conf, &driver))
break;
_STARPU_DEBUG("waiting for worker %u initialization\n", worker);
STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
while (!workerarg->worker_is_initialized)
STARPU_PTHREAD_COND_WAIT(&workerarg->ready_cond, &workerarg->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
break;
case STARPU_CUDA_WORKER:
/* Already waited above */
break;
#if defined(STARPU_USE_OPENCL) || defined(STARPU_SIMGRID)
case STARPU_OPENCL_WORKER:
#ifndef STARPU_SIMGRID
starpu_opencl_get_device(workerarg->devid, &driver.id.opencl_id);
if (!_starpu_may_launch_driver(pconfig->conf, &driver))
break;
#endif
_STARPU_DEBUG("waiting for worker %u initialization\n", worker);
STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
while (!workerarg->worker_is_initialized)
STARPU_PTHREAD_COND_WAIT(&workerarg->ready_cond, &workerarg->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
break;
#endif
case STARPU_MIC_WORKER:
/* Already waited above */
break;
case STARPU_SCC_WORKER:
/* TODO: implement may_launch? */
_STARPU_DEBUG("waiting for worker %u initialization\n", worker);
STARPU_PTHREAD_MUTEX_LOCK(&workerarg->mutex);
while (!workerarg->worker_is_initialized)
STARPU_PTHREAD_COND_WAIT(&workerarg->ready_cond, &workerarg->mutex);
STARPU_PTHREAD_MUTEX_UNLOCK(&workerarg->mutex);
break;
default:
STARPU_ABORT();
}
}
_STARPU_DEBUG("finished launching drivers\n");
}
void _starpu_set_local_worker_key(struct _starpu_worker *worker)
{
STARPU_PTHREAD_SETSPECIFIC(worker_key, worker);
}
struct _starpu_worker *_starpu_get_local_worker_key(void)
{
return (struct _starpu_worker *) STARPU_PTHREAD_GETSPECIFIC(worker_key);
}
void _starpu_set_local_worker_set_key(struct _starpu_worker_set *worker)
{
STARPU_PTHREAD_SETSPECIFIC(worker_set_key, worker);
}
struct _starpu_worker_set *_starpu_get_local_worker_set_key(void)
{
return (struct _starpu_worker_set *) STARPU_PTHREAD_GETSPECIFIC(worker_set_key);
}
/* Initialize the starpu_conf with default values */
int starpu_conf_init(struct starpu_conf *conf)
{
if (!conf)
return -EINVAL;
memset(conf, 0, sizeof(*conf));
conf->magic = 42;
conf->sched_policy_name = getenv("STARPU_SCHED");
conf->sched_policy = NULL;
/* Note that starpu_get_env_number returns -1 in case the variable is
* not defined */
/* Backward compatibility: check the value of STARPU_NCPUS if
* STARPU_NCPU is not set. */
conf->ncpus = starpu_get_env_number("STARPU_NCPU");
if (conf->ncpus == -1)
conf->ncpus = starpu_get_env_number("STARPU_NCPUS");
conf->ncuda = starpu_get_env_number("STARPU_NCUDA");
conf->nopencl = starpu_get_env_number("STARPU_NOPENCL");
conf->nmic = starpu_get_env_number("STARPU_NMIC");
conf->nscc = starpu_get_env_number("STARPU_NSCC");
conf->calibrate = starpu_get_env_number("STARPU_CALIBRATE");
conf->bus_calibrate = starpu_get_env_number("STARPU_BUS_CALIBRATE");
conf->mic_sink_program_path = getenv("STARPU_MIC_PROGRAM_PATH");
if (conf->calibrate == -1)
conf->calibrate = 0;
if (conf->bus_calibrate == -1)
conf->bus_calibrate = 0;
conf->use_explicit_workers_bindid = 0; /* TODO */
conf->use_explicit_workers_cuda_gpuid = 0; /* TODO */
conf->use_explicit_workers_opencl_gpuid = 0; /* TODO */
conf->use_explicit_workers_mic_deviceid = 0; /* TODO */
conf->use_explicit_workers_scc_deviceid = 0; /* TODO */
conf->single_combined_worker = starpu_get_env_number("STARPU_SINGLE_COMBINED_WORKER");
if (conf->single_combined_worker == -1)
conf->single_combined_worker = 0;
#if defined(STARPU_DISABLE_ASYNCHRONOUS_COPY)
conf->disable_asynchronous_copy = 1;
#else
conf->disable_asynchronous_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_COPY");
if (conf->disable_asynchronous_copy == -1)
conf->disable_asynchronous_copy = 0;
#endif
#if defined(STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY)
conf->disable_asynchronous_cuda_copy = 1;
#else
conf->disable_asynchronous_cuda_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY");
if (conf->disable_asynchronous_cuda_copy == -1)
conf->disable_asynchronous_cuda_copy = 0;
#endif
#if defined(STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY)
conf->disable_asynchronous_opencl_copy = 1;
#else
conf->disable_asynchronous_opencl_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY");
if (conf->disable_asynchronous_opencl_copy == -1)
conf->disable_asynchronous_opencl_copy = 0;
#endif
#if defined(STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY)
conf->disable_asynchronous_mic_copy = 1;
#else
conf->disable_asynchronous_mic_copy = starpu_get_env_number("STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY");
if (conf->disable_asynchronous_mic_copy == -1)
conf->disable_asynchronous_mic_copy = 0;
#endif
/* 64MiB by default */
conf->trace_buffer_size = starpu_get_env_number_default("STARPU_TRACE_BUFFER_SIZE", 64) << 20;
return 0;
}
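/* Illustrative sketch (not part of this file): the intended application-side
 * use of starpu_conf_init(): initialize the structure to the defaults above,
 * tweak a few fields, then pass it to starpu_init(). The field values are
 * examples only. */
#if 0
int main(void)
{
	struct starpu_conf conf;
	starpu_conf_init(&conf);
	conf.ncpus = 4;      /* cap the number of CPU workers */
	conf.ncuda = 0;      /* do not use CUDA devices */
	if (starpu_init(&conf) != 0)
		return 1;
	/* ... submit tasks ... */
	starpu_shutdown();
	return 0;
}
#endif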
static void _starpu_conf_set_value_against_environment(char *name, int *value)
{
int number;
number = starpu_get_env_number(name);
if (number != -1)
{
*value = number;
}
}
void _starpu_conf_check_environment(struct starpu_conf *conf)
{
char *sched = getenv("STARPU_SCHED");
if (sched)
{
conf->sched_policy_name = sched;
}
_starpu_conf_set_value_against_environment("STARPU_NCPUS", &conf->ncpus);
_starpu_conf_set_value_against_environment("STARPU_NCPU", &conf->ncpus);
_starpu_conf_set_value_against_environment("STARPU_NCUDA", &conf->ncuda);
_starpu_conf_set_value_against_environment("STARPU_NOPENCL", &conf->nopencl);
_starpu_conf_set_value_against_environment("STARPU_CALIBRATE", &conf->calibrate);
_starpu_conf_set_value_against_environment("STARPU_BUS_CALIBRATE", &conf->bus_calibrate);
_starpu_conf_set_value_against_environment("STARPU_SINGLE_COMBINED_WORKER", &conf->single_combined_worker);
_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_COPY", &conf->disable_asynchronous_copy);
_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_CUDA_COPY", &conf->disable_asynchronous_cuda_copy);
_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_OPENCL_COPY", &conf->disable_asynchronous_opencl_copy);
_starpu_conf_set_value_against_environment("STARPU_DISABLE_ASYNCHRONOUS_MIC_COPY", &conf->disable_asynchronous_mic_copy);
}
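/* Illustrative note (not part of this file): because starpu_initialize() calls
 * _starpu_conf_check_environment() on the configuration it keeps, environment
 * variables take precedence over the corresponding starpu_conf fields, e.g.
 * running an application with "STARPU_NCUDA=0" overrides a conf->ncuda value
 * set in the code. */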
struct starpu_tree* starpu_workers_get_tree(void)
{
return config.topology.tree;
}
#ifdef STARPU_HAVE_HWLOC
static void _fill_tree(struct starpu_tree *tree, hwloc_obj_t curr_obj, unsigned depth, hwloc_topology_t topology)
{
unsigned i;
for(i = 0; i < curr_obj->arity; i++)
{
starpu_tree_insert(tree->nodes[i], curr_obj->children[i]->logical_index, depth, curr_obj->children[i]->type == HWLOC_OBJ_PU, curr_obj->children[i]->arity, tree);
/* char string[128]; */
/* hwloc_obj_snprintf(string, sizeof(string), topology, curr_obj->children[i], "#", 0); */
/* printf("%*s%s %d is_pu %d \n", 0, "", string, curr_obj->children[i]->logical_index, curr_obj->children[i]->type == HWLOC_OBJ_PU); */
_fill_tree(tree->nodes[i], curr_obj->children[i], depth+1, topology);
}
}
#endif
static void _starpu_build_tree(void)
{
#ifdef STARPU_HAVE_HWLOC
struct starpu_tree* tree = (struct starpu_tree*)malloc(sizeof(struct starpu_tree));
config.topology.tree = tree;
hwloc_obj_t root = hwloc_get_root_obj(config.topology.hwtopology);
/* char string[128]; */
/* hwloc_obj_snprintf(string, sizeof(string), topology, root, "#", 0); */
/* printf("%*s%s %d is_pu = %d \n", 0, "", string, root->logical_index, root->type == HWLOC_OBJ_PU); */
/* level, is_pu, is in the tree (it will be true only after add*/
starpu_tree_insert(tree, root->logical_index, 0, root->type == HWLOC_OBJ_PU, root->arity, NULL);
_fill_tree(tree, root, 1, config.topology.hwtopology);
#endif
}
int starpu_init(struct starpu_conf *user_conf)
{
return starpu_initialize(user_conf, NULL, NULL);
}
int starpu_initialize(struct starpu_conf *user_conf, int *argc, char ***argv)
{
int is_a_sink = 0; /* Always defined. If the MP infrastructure is not
* used, we cannot be a sink. */
unsigned worker;
#ifdef STARPU_USE_MP
_starpu_set_argc_argv(argc, argv);
# ifdef STARPU_USE_SCC
/* In the SCC case we look at the rank to know if we are a sink */
if (_starpu_scc_common_mp_init() && !_starpu_scc_common_is_src_node())
setenv("STARPU_SINK", "STARPU_SCC", 1);
# endif
/* If StarPU was configured to use MP sinks, we have to determine the
* kind of node we are running on: host or sink? */
if (getenv("STARPU_SINK"))
is_a_sink = 1;
#else
(void)argc;
(void)argv;
#endif /* STARPU_USE_MP */
int ret;
#ifdef STARPU_OPENMP
_starpu_omp_dummy_init();
#endif
#ifdef STARPU_SIMGRID
_starpu_simgrid_init();
/* Warn when the many stacks malloc()ed by simgrid for the transfer
* processes will take a long time to get initialized */
if (getenv("MALLOC_PERTURB_"))
_STARPU_DISP("Warning: MALLOC_PERTURB_ is set, this makes simgrid runs very slow\n");
#else
#ifdef __GNUC__
#ifndef __OPTIMIZE__
_STARPU_DISP("Warning: StarPU was configured with --enable-debug (-O0), and is thus not optimized\n");
#endif
#endif
#ifdef STARPU_SPINLOCK_CHECK
_STARPU_DISP("Warning: StarPU was configured with --enable-spinlock-check, which slows down a bit\n");
#endif
#if 0
#ifndef STARPU_NO_ASSERT
_STARPU_DISP("Warning: StarPU was configured without --enable-fast\n");
#endif
#endif
#ifdef STARPU_MEMORY_STATS
_STARPU_DISP("Warning: StarPU was configured with --enable-memory-stats, which slows down a bit\n");
#endif
#ifdef STARPU_VERBOSE
_STARPU_DISP("Warning: StarPU was configured with --enable-verbose, which slows down a bit\n");
#endif
#ifdef STARPU_USE_FXT
_STARPU_DISP("Warning: StarPU was configured with --with-fxt, which slows down a bit\n");
#endif
#ifdef STARPU_PERF_DEBUG
_STARPU_DISP("Warning: StarPU was configured with --enable-perf-debug, which slows down a bit\n");
#endif
#ifdef STARPU_MODEL_DEBUG
_STARPU_DISP("Warning: StarPU was configured with --enable-model-debug, which slows down a bit\n");
#endif
#ifdef STARPU_ENABLE_STATS
_STARPU_DISP("Warning: StarPU was configured with --enable-stats, which slows down a bit\n");
#endif
#endif
STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
while (initialized == CHANGING)
/* Wait for the other one changing it */
STARPU_PTHREAD_COND_WAIT(&init_cond, &init_mutex);
init_count++;
if (initialized == INITIALIZED)
{
/* Somebody else already initialized it; don't do it again, and let
* the others get the mutex */
STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
return 0;
}
/* initialized == UNINITIALIZED */
initialized = CHANGING;
STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
#if defined(_WIN32) && !defined(__CYGWIN__)
WSADATA wsadata;
WSAStartup(MAKEWORD(1,0), &wsadata);
#endif
srand(2008);
#ifdef HAVE_AYUDAME_H
#ifndef AYU_RT_STARPU
/* Dumb value for now */
#define AYU_RT_STARPU 32
#endif
if (AYU_event)
{
enum ayu_runtime_t ayu_rt = AYU_RT_STARPU;
AYU_event(AYU_PREINIT, 0, (void*) &ayu_rt);
}
#endif
/* store the pointer to the user explicit configuration during the
* initialization */
if (user_conf == NULL)
{
struct starpu_conf *conf = malloc(sizeof(struct starpu_conf));
starpu_conf_init(conf);
config.conf = conf;
config.default_conf = 1;
}
else
{
if (user_conf->magic != 42)
{
_STARPU_DISP("starpu_conf structure needs to be initialized with starpu_conf_init\n");
return -EINVAL;
}
config.conf = user_conf;
config.default_conf = 0;
}
_starpu_conf_check_environment(config.conf);
_starpu_init_all_sched_ctxs(&config);
_starpu_init_progression_hooks();
_starpu_init_tags();
#ifdef STARPU_USE_FXT
_starpu_init_fxt_profiling(config.conf->trace_buffer_size);
#endif
_starpu_open_debug_logfile();
_starpu_data_interface_init();
_starpu_timing_init();
_starpu_profiling_init();
_starpu_load_bus_performance_files();
/* Depending on whether we are an MP sink or not, we must build the
* topology with MP nodes or not. */
ret = _starpu_build_topology(&config, is_a_sink);
if (ret)
{
starpu_perfmodel_free_sampling_directories();
STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
init_count--;
#ifdef STARPU_USE_SCC
if (_starpu_scc_common_is_mp_initialized())
_starpu_scc_src_mp_deinit();
#endif
initialized = UNINITIALIZED;
/* Let somebody else try to do it */
STARPU_PTHREAD_COND_SIGNAL(&init_cond);
STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
return ret;
}
/* Allocate swap, if any */
_starpu_swap_init();
/* We need to store the current task handled by the different
* threads */
_starpu_initialize_current_task_key();
for (worker = 0; worker < config.topology.nworkers; worker++)
_starpu_worker_init(&config.workers[worker], &config);
STARPU_PTHREAD_KEY_CREATE(&worker_key, NULL);
STARPU_PTHREAD_KEY_CREATE(&worker_set_key, NULL);
_starpu_build_tree();
if (!is_a_sink)
{
struct starpu_sched_policy *selected_policy = _starpu_select_sched_policy(&config, config.conf->sched_policy_name);
_starpu_create_sched_ctx(selected_policy, NULL, -1, 1, "init", 0, 0, 0, 0, 1);
}
_starpu_initialize_registered_performance_models();
/* Launch "basic" workers (ie. non-combined workers) */
if (!is_a_sink)
_starpu_launch_drivers(&config);
_starpu_watchdog_init();
STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
initialized = INITIALIZED;
/* Tell everybody that we initialized */
STARPU_PTHREAD_COND_BROADCAST(&init_cond);
STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
_STARPU_DEBUG("Initialisation finished\n");
#ifdef STARPU_USE_MP
/* Finally, if we are an MP sink, we never leave this function: we
* enter an infinite event loop which listens for MP commands from
* the source. */
if (is_a_sink) {
_starpu_sink_common_worker();
/* We should normally never leave the loop as we don't want to
* really initialize StarPU */
STARPU_ASSERT(0);
}
#endif
return 0;
}
/*
* Handle runtime termination
*/
static void _starpu_terminate_workers(struct _starpu_machine_config *pconfig)
{
int status = 0;
unsigned workerid;
unsigned n;
for (workerid = 0; workerid < pconfig->topology.nworkers; workerid++)
{
starpu_wake_all_blocked_workers();
_STARPU_DEBUG("wait for worker %u\n", workerid);
struct _starpu_worker_set *set = pconfig->workers[workerid].set;
struct _starpu_worker *worker = &pconfig->workers[workerid];
/* in case StarPU termination code is called from a callback,
* we have to check if pthread_self() is the worker itself */
if (set)
{
if (set->started)
{
#ifdef STARPU_SIMGRID
status = starpu_pthread_join(set->worker_thread, NULL);
#else
if (!pthread_equal(pthread_self(), set->worker_thread))
status = starpu_pthread_join(set->worker_thread, NULL);
#endif
if (status)
{
#ifdef STARPU_VERBOSE
_STARPU_DEBUG("starpu_pthread_join -> %d\n", status);
#endif
}
set->started = 0;
}
}
else
{
if (!worker->run_by_starpu)
goto out;
#ifdef STARPU_SIMGRID
status = starpu_pthread_join(worker->worker_thread, NULL);
#else
if (!pthread_equal(pthread_self(), worker->worker_thread))
status = starpu_pthread_join(worker->worker_thread, NULL);
#endif
if (status)
{
#ifdef STARPU_VERBOSE
_STARPU_DEBUG("starpu_pthread_join -> %d\n", status);
#endif
}
}
out:
STARPU_ASSERT(starpu_task_list_empty(&worker->local_tasks));
for (n = 0; n < worker->local_ordered_tasks_size; n++)
STARPU_ASSERT(worker->local_ordered_tasks[n] == NULL);
_starpu_sched_ctx_list_delete(&worker->sched_ctx_list);
_starpu_job_list_delete(worker->terminated_jobs);
free(worker->local_ordered_tasks);
}
}
/* Condition variable and mutex used to pause/resume. */
static starpu_pthread_cond_t pause_cond = STARPU_PTHREAD_COND_INITIALIZER;
static starpu_pthread_mutex_t pause_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
void _starpu_may_pause(void)
{
/* pause_depth is just protected by a memory barrier */
STARPU_RMB();
if (STARPU_UNLIKELY(config.pause_depth > 0)) {
STARPU_PTHREAD_MUTEX_LOCK(&pause_mutex);
if (config.pause_depth > 0) {
STARPU_PTHREAD_COND_WAIT(&pause_cond, &pause_mutex);
}
STARPU_PTHREAD_MUTEX_UNLOCK(&pause_mutex);
}
}
unsigned _starpu_machine_is_running(void)
{
unsigned ret;
/* running is just protected by a memory barrier */
STARPU_RMB();
ANNOTATE_HAPPENS_AFTER(&config.running);
ret = config.running;
ANNOTATE_HAPPENS_BEFORE(&config.running);
return ret;
}
void starpu_pause()
{
STARPU_HG_DISABLE_CHECKING(config.pause_depth);
config.pause_depth += 1;
}
void starpu_resume()
{
STARPU_PTHREAD_MUTEX_LOCK(&pause_mutex);
config.pause_depth -= 1;
if (!config.pause_depth) {
STARPU_PTHREAD_COND_BROADCAST(&pause_cond);
}
STARPU_PTHREAD_MUTEX_UNLOCK(&pause_mutex);
}
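/* Illustrative sketch (not part of this file): the intended use of
 * starpu_pause()/starpu_resume(): bracket a burst of task submissions so that
 * workers only start executing once everything has been queued; they block in
 * _starpu_may_pause() above until starpu_resume() broadcasts pause_cond. The
 * task setup itself is elided. */
#if 0
static void example_submit_burst(struct starpu_task *tasks[], unsigned n)
{
	unsigned i;
	starpu_pause();
	for (i = 0; i < n; i++)
		starpu_task_submit(tasks[i]);
	starpu_resume();
}
#endif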
unsigned _starpu_worker_can_block(unsigned memnode STARPU_ATTRIBUTE_UNUSED, struct _starpu_worker *worker STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_NON_BLOCKING_DRIVERS
return 0;
#else
unsigned can_block = 1;
struct starpu_driver driver;
driver.type = worker->arch;
switch (driver.type)
{
case STARPU_CPU_WORKER:
driver.id.cpu_id = worker->devid;
break;
case STARPU_CUDA_WORKER:
driver.id.cuda_id = worker->devid;
break;
#ifdef STARPU_USE_OPENCL
case STARPU_OPENCL_WORKER:
starpu_opencl_get_device(worker->devid, &driver.id.opencl_id);
break;
#endif
default:
goto always_launch;
}
if (!_starpu_may_launch_driver(config.conf, &driver))
return 0;
always_launch:
#ifndef STARPU_SIMGRID
if (!_starpu_check_that_no_data_request_exists(memnode))
can_block = 0;
#endif
if (!_starpu_machine_is_running())
can_block = 0;
if (!_starpu_execute_registered_progression_hooks())
can_block = 0;
return can_block;
#endif
}
static void _starpu_kill_all_workers(struct _starpu_machine_config *pconfig)
{
/* set the flag which will tell workers to stop */
ANNOTATE_HAPPENS_AFTER(&config.running);
pconfig->running = 0;
/* running is just protected by a memory barrier */
ANNOTATE_HAPPENS_BEFORE(&config.running);
STARPU_WMB();
starpu_wake_all_blocked_workers();
}
void starpu_display_stats()
{
starpu_profiling_bus_helper_display_summary();
starpu_profiling_worker_helper_display_summary();
}
  1243. void starpu_shutdown(void)
  1244. {
  1245. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1246. init_count--;
  1247. STARPU_ASSERT_MSG(init_count >= 0, "Number of calls to starpu_shutdown() can not be higher than the number of calls to starpu_init()\n");
  1248. if (init_count)
  1249. {
  1250. _STARPU_DEBUG("Still somebody needing StarPU, don't deinitialize\n");
  1251. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1252. return;
  1253. }
  1254. /* We're last */
  1255. initialized = CHANGING;
  1256. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1257. /* If the workers are frozen, no progress can be made. */
  1258. STARPU_ASSERT(config.pause_depth <= 0);
  1259. starpu_task_wait_for_no_ready();
  1260. /* tell all workers to shutdown */
  1261. _starpu_kill_all_workers(&config);
  1262. {
  1263. int stats = starpu_get_env_number("STARPU_STATS");
  1264. if (stats != 0)
  1265. {
  1266. _starpu_display_msi_stats();
  1267. _starpu_display_alloc_cache_stats();
  1268. _starpu_display_comm_amounts();
  1269. }
  1270. }
  1271. starpu_profiling_bus_helper_display_summary();
  1272. starpu_profiling_worker_helper_display_summary();
  1273. _starpu_deinitialize_current_task_key();
  1274. _starpu_deinitialize_registered_performance_models();
  1275. _starpu_watchdog_shutdown();
  1276. /* wait for their termination */
  1277. _starpu_terminate_workers(&config);
  1278. {
  1279. int stats = starpu_get_env_number("STARPU_MEMORY_STATS");
  1280. if (stats != 0)
  1281. {
  1282. // Display statistics on data which have not been unregistered
  1283. starpu_data_display_memory_stats();
  1284. }
  1285. }
  1286. _starpu_delete_all_sched_ctxs();
  1287. _starpu_sched_component_workers_destroy();
  1288. _starpu_disk_unregister();
  1289. #ifdef STARPU_HAVE_HWLOC
  1290. starpu_tree_free(config.topology.tree);
  1291. #endif
  1292. _starpu_destroy_topology(&config);
  1293. #ifdef STARPU_USE_FXT
  1294. _starpu_stop_fxt_profiling();
  1295. #endif
  1296. _starpu_data_interface_shutdown();
  1297. /* Drop all remaining tags */
  1298. _starpu_tag_clear();
  1299. #ifdef STARPU_OPENMP
  1300. _starpu_omp_dummy_shutdown();
  1301. #endif
  1302. _starpu_close_debug_logfile();
  1303. STARPU_PTHREAD_MUTEX_LOCK(&init_mutex);
  1304. initialized = UNINITIALIZED;
  1305. /* Let someone else that wants to initialize it again do it */
  1306. STARPU_PTHREAD_COND_SIGNAL(&init_cond);
  1307. STARPU_PTHREAD_MUTEX_UNLOCK(&init_mutex);
  1308. /* Clear memory if it was allocated by StarPU */
  1309. if (config.default_conf)
  1310. free(config.conf);
  1311. #ifdef HAVE_AYUDAME_H
  1312. if (AYU_event) AYU_event(AYU_FINISH, 0, NULL);
  1313. #endif
  1314. #ifdef STARPU_USE_SCC
  1315. if (_starpu_scc_common_is_mp_initialized())
  1316. _starpu_scc_src_mp_deinit();
  1317. #endif
  1318. _starpu_print_idle_time();
  1319. _STARPU_DEBUG("Shutdown finished\n");
  1320. }
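
/* Illustration (editor's sketch, not part of the implementation): init and
 * shutdown calls must be balanced, since only the last starpu_shutdown()
 * actually tears the runtime down:
 *
 *   if (starpu_init(NULL) != 0)        // reference count goes to 1
 *       return 1;
 *   ...submit and wait for tasks...
 *   starpu_shutdown();                 // reference count back to 0: deinit
 *
 * Setting the STARPU_STATS or STARPU_MEMORY_STATS environment variables makes
 * this last call print the corresponding statistics, as handled above. */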
unsigned starpu_worker_get_count(void)
{
	return config.topology.nworkers;
}

unsigned starpu_worker_is_slave(int workerid)
{
	return config.workers[workerid].slave;
}

int starpu_worker_get_count_by_type(enum starpu_worker_archtype type)
{
	switch (type)
	{
	case STARPU_CPU_WORKER:
		return config.topology.ncpus;
	case STARPU_CUDA_WORKER:
		return config.topology.ncudagpus;
	case STARPU_OPENCL_WORKER:
		return config.topology.nopenclgpus;
	case STARPU_MIC_WORKER:
		return config.topology.nmicdevices;
	case STARPU_SCC_WORKER:
		return config.topology.nsccdevices;
	default:
		return -EINVAL;
	}
}
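
/* Illustration (editor's sketch): the per-type counters above can be used to
 * size per-device resources before submitting work, e.g.:
 *
 *   int ncuda = starpu_worker_get_count_by_type(STARPU_CUDA_WORKER);
 *   if (ncuda < 0)      // -EINVAL: unknown archtype
 *       ncuda = 0;
 *
 * The convenience wrappers below (starpu_cpu_worker_get_count(), etc.) return
 * the same topology fields directly. */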
unsigned starpu_combined_worker_get_count(void)
{
	return config.topology.ncombinedworkers;
}

unsigned starpu_cpu_worker_get_count(void)
{
	return config.topology.ncpus;
}

unsigned starpu_cuda_worker_get_count(void)
{
	return config.topology.ncudagpus;
}

unsigned starpu_opencl_worker_get_count(void)
{
	return config.topology.nopenclgpus;
}

int starpu_asynchronous_copy_disabled(void)
{
	return config.conf->disable_asynchronous_copy;
}

int starpu_asynchronous_cuda_copy_disabled(void)
{
	return config.conf->disable_asynchronous_cuda_copy;
}

int starpu_asynchronous_opencl_copy_disabled(void)
{
	return config.conf->disable_asynchronous_opencl_copy;
}

int starpu_asynchronous_mic_copy_disabled(void)
{
	return config.conf->disable_asynchronous_mic_copy;
}

unsigned starpu_mic_worker_get_count(void)
{
	int i = 0, count = 0;

	for (i = 0; i < STARPU_MAXMICDEVS; i++)
		count += config.topology.nmiccores[i];

	return count;
}

unsigned starpu_scc_worker_get_count(void)
{
	return config.topology.nsccdevices;
}

/* When analyzing performance, it is useful to see which processing unit
 * actually performed the task. This function returns the id of the processing
 * unit currently executing it; it therefore makes no sense to call it from
 * the callbacks of SPU functions, for instance. If called by some thread that
 * is not controlled by StarPU, starpu_worker_get_id returns -1. */
int starpu_worker_get_id(void)
{
	struct _starpu_worker *worker;

	worker = _starpu_get_local_worker_key();
	if (worker)
	{
		return worker->workerid;
	}
	else
	{
		/* there is no worker associated with that thread, perhaps it is
		 * a thread from the application or this is some SPU worker */
		return -1;
	}
}
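
/* Illustration (editor's sketch): codelet implementations commonly use this
 * call to find out where they are running, e.g.:
 *
 *   void cpu_kernel(void *buffers[], void *cl_arg)   // hypothetical codelet
 *   {
 *       int id = starpu_worker_get_id();
 *       if (id >= 0)
 *           fprintf(stderr, "running on worker %d\n", id);
 *   }
 *
 * A negative return value means the calling thread is not a StarPU worker. */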
int starpu_combined_worker_get_id(void)
{
	struct _starpu_worker *worker;

	worker = _starpu_get_local_worker_key();
	if (worker)
	{
		return worker->combined_workerid;
	}
	else
	{
		/* there is no worker associated with that thread, perhaps it is
		 * a thread from the application or this is some SPU worker */
		return -1;
	}
}

int starpu_combined_worker_get_size(void)
{
	struct _starpu_worker *worker;

	worker = _starpu_get_local_worker_key();
	if (worker)
	{
		return worker->worker_size;
	}
	else
	{
		/* there is no worker associated with that thread, perhaps it is
		 * a thread from the application or this is some SPU worker */
		return -1;
	}
}

int starpu_combined_worker_get_rank(void)
{
	struct _starpu_worker *worker;

	worker = _starpu_get_local_worker_key();
	if (worker)
	{
		return worker->current_rank;
	}
	else
	{
		/* there is no worker associated with that thread, perhaps it is
		 * a thread from the application or this is some SPU worker */
		return -1;
	}
}

int starpu_worker_get_subworkerid(int id)
{
	return config.workers[id].subworkerid;
}

int starpu_worker_get_devid(int id)
{
	return config.workers[id].devid;
}

struct _starpu_worker *_starpu_get_worker_struct(unsigned id)
{
	return &config.workers[id];
}

unsigned starpu_worker_is_combined_worker(int id)
{
	return id >= (int)config.topology.nworkers;
}

struct _starpu_sched_ctx *_starpu_get_sched_ctx_struct(unsigned id)
{
	if(id == STARPU_NMAX_SCHED_CTXS) return NULL;
	return &config.sched_ctxs[id];
}

struct _starpu_combined_worker *_starpu_get_combined_worker_struct(unsigned id)
{
	unsigned basic_worker_count = starpu_worker_get_count();

	//_STARPU_DEBUG("basic_worker_count:%d\n",basic_worker_count);

	STARPU_ASSERT(id >= basic_worker_count);
	return &config.combined_workers[id - basic_worker_count];
}

enum starpu_worker_archtype starpu_worker_get_type(int id)
{
	return config.workers[id].arch;
}

int starpu_worker_get_ids_by_type(enum starpu_worker_archtype type, int *workerids, int maxsize)
{
	unsigned nworkers = starpu_worker_get_count();
	int cnt = 0;
	unsigned id;

	for (id = 0; id < nworkers; id++)
	{
		if (starpu_worker_get_type(id) == type)
		{
			/* Perhaps the array is too small ? */
			if (cnt >= maxsize)
				return -ERANGE;

			workerids[cnt++] = id;
		}
	}

	return cnt;
}
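
/* Illustration (editor's sketch): callers pass a fixed-size array and must
 * check for -ERANGE when it turns out to be too small, e.g.:
 *
 *   int cpu_workers[STARPU_NMAXWORKERS];
 *   int ncpu = starpu_worker_get_ids_by_type(STARPU_CPU_WORKER,
 *                                            cpu_workers, STARPU_NMAXWORKERS);
 *   if (ncpu < 0)
 *       ...array too small...
 *
 * STARPU_NMAXWORKERS bounds the total number of workers, so it is always a
 * safe size here. */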
int starpu_worker_get_by_type(enum starpu_worker_archtype type, int num)
{
	unsigned nworkers = starpu_worker_get_count();
	int cnt = 0;
	unsigned id;

	for (id = 0; id < nworkers; id++)
	{
		if (starpu_worker_get_type(id) == type)
		{
			if (num == cnt)
				return id;
			cnt++;
		}
	}

	/* Not found */
	return -1;
}

int starpu_worker_get_by_devid(enum starpu_worker_archtype type, int devid)
{
	unsigned nworkers = starpu_worker_get_count();
	unsigned id;

	for (id = 0; id < nworkers; id++)
		if (starpu_worker_get_type(id) == type && starpu_worker_get_devid(id) == devid)
			return id;

	/* Not found */
	return -1;
}

void starpu_worker_get_name(int id, char *dst, size_t maxlen)
{
	char *name = config.workers[id].name;

	snprintf(dst, maxlen, "%s", name);
}

int starpu_worker_get_bindid(int workerid)
{
	return config.workers[workerid].bindid;
}

int _starpu_worker_get_workerids(int bindid, int *workerids)
{
	unsigned nworkers = starpu_worker_get_count();
	int nw = 0;
	unsigned id;

	for (id = 0; id < nworkers; id++)
		if (config.workers[id].bindid == bindid)
			workerids[nw++] = id;
	return nw;
}

/* Retrieve the status which indicates what the worker is currently doing. */
enum _starpu_worker_status _starpu_worker_get_status(int workerid)
{
	return config.workers[workerid].status;
}

/* Change the status of the worker which indicates what the worker is currently
 * doing (e.g. executing a callback). */
void _starpu_worker_set_status(int workerid, enum _starpu_worker_status status)
{
	config.workers[workerid].status = status;
}

void starpu_worker_get_sched_condition(int workerid, starpu_pthread_mutex_t **sched_mutex, starpu_pthread_cond_t **sched_cond)
{
	*sched_cond = &config.workers[workerid].sched_cond;
	*sched_mutex = &config.workers[workerid].sched_mutex;
}

int starpu_wakeup_worker(int workerid, starpu_pthread_cond_t *cond, starpu_pthread_mutex_t *mutex)
{
	int success = 0;

	STARPU_PTHREAD_MUTEX_LOCK(mutex);
	if (config.workers[workerid].status == STATUS_SLEEPING)
	{
		config.workers[workerid].status = STATUS_WAKING_UP;
		STARPU_PTHREAD_COND_SIGNAL(cond);
		success = 1;
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(mutex);
	return success;
}

int starpu_wake_worker(int workerid)
{
	starpu_pthread_mutex_t *sched_mutex;
	starpu_pthread_cond_t *sched_cond;

	starpu_worker_get_sched_condition(workerid, &sched_mutex, &sched_cond);
	return starpu_wakeup_worker(workerid, sched_cond, sched_mutex);
}
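
/* Illustration (editor's sketch): a scheduler that pushes a task onto a
 * worker's queue typically wakes the worker afterwards so that it notices the
 * new work, e.g.:
 *
 *   ...enqueue the task in the per-worker list...      // scheduler-specific
 *   starpu_wake_worker(workerid);
 *
 * starpu_wakeup_worker() only signals workers that are currently in the
 * STATUS_SLEEPING state, and its return value reports whether a wake-up was
 * actually performed. */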
int starpu_worker_get_nids_by_type(enum starpu_worker_archtype type, int *workerids, int maxsize)
{
	unsigned nworkers = starpu_worker_get_count();
	int cnt = 0;
	unsigned id;

	for (id = 0; id < nworkers; id++)
	{
		if (starpu_worker_get_type(id) == type)
		{
			/* Perhaps the array is too small ? */
			if (cnt >= maxsize)
				return cnt;

			workerids[cnt++] = id;
		}
	}

	return cnt;
}
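
/* Editor's note: the function below fills workerids with at most maxsize
 * workers of the requested type that do not currently belong to any
 * scheduling context (contexts 1..STARPU_NMAX_SCHED_CTXS-1 are scanned, and a
 * worker found in one of them is skipped). It returns the number of such
 * "context-free" workers recorded. */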
int starpu_worker_get_nids_ctx_free_by_type(enum starpu_worker_archtype type, int *workerids, int maxsize)
{
	unsigned nworkers = starpu_worker_get_count();
	int cnt = 0;
	unsigned id, worker;
	unsigned found = 0;

	for (id = 0; id < nworkers; id++)
	{
		found = 0;
		if (starpu_worker_get_type(id) == type)
		{
			/* Perhaps the array is too small ? */
			if (cnt >= maxsize)
				return cnt;

			int s;
			for(s = 1; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if(config.sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
				{
					struct starpu_worker_collection *workers = config.sched_ctxs[s].workers;
					struct starpu_sched_ctx_iterator it;

					workers->init_iterator(workers, &it);
					while(workers->has_next(workers, &it))
					{
						worker = workers->get_next(workers, &it);
						if(worker == id)
						{
							found = 1;
							break;
						}
					}

					if(found) break;
				}
			}
			if(!found)
				workerids[cnt++] = id;
		}
	}

	return cnt;
}

struct _starpu_sched_ctx* _starpu_get_initial_sched_ctx(void)
{
	return &config.sched_ctxs[STARPU_GLOBAL_SCHED_CTX];
}

int _starpu_worker_get_nsched_ctxs(int workerid)
{
	return config.workers[workerid].nsched_ctxs;
}

static void *
_starpu_get_worker_from_driver(struct starpu_driver *d)
{
	unsigned nworkers = starpu_worker_get_count();
	unsigned workerid;

#ifdef STARPU_USE_CUDA
	if (d->type == STARPU_CUDA_WORKER)
		return &cuda_worker_set[d->id.cuda_id];
#endif

	for (workerid = 0; workerid < nworkers; workerid++)
	{
		if (starpu_worker_get_type(workerid) == d->type)
		{
			struct _starpu_worker *worker;
			worker = _starpu_get_worker_struct(workerid);
			switch (d->type)
			{
#ifdef STARPU_USE_CPU
			case STARPU_CPU_WORKER:
				if (worker->devid == d->id.cpu_id)
					return worker;
				break;
#endif
#ifdef STARPU_USE_OPENCL
			case STARPU_OPENCL_WORKER:
			{
				cl_device_id device;
				starpu_opencl_get_device(worker->devid, &device);
				if (device == d->id.opencl_id)
					return worker;
				break;
			}
#endif
			default:
				_STARPU_DEBUG("Invalid device type\n");
				return NULL;
			}
		}
	}

	return NULL;
}

int
starpu_driver_run(struct starpu_driver *d)
{
	if (!d)
	{
		_STARPU_DEBUG("Invalid argument\n");
		return -EINVAL;
	}

	void *worker = _starpu_get_worker_from_driver(d);

	switch (d->type)
	{
#ifdef STARPU_USE_CPU
	case STARPU_CPU_WORKER:
		return _starpu_run_cpu(worker);
#endif
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_WORKER:
		return _starpu_run_cuda(worker);
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_WORKER:
		return _starpu_run_opencl(worker);
#endif
	default:
		_STARPU_DEBUG("Invalid device type\n");
		return -EINVAL;
	}
}

int
starpu_driver_init(struct starpu_driver *d)
{
	STARPU_ASSERT(d);
	void *worker = _starpu_get_worker_from_driver(d);

	switch (d->type)
	{
#ifdef STARPU_USE_CPU
	case STARPU_CPU_WORKER:
		return _starpu_cpu_driver_init(worker);
#endif
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_WORKER:
		return _starpu_cuda_driver_init(worker);
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_WORKER:
		return _starpu_opencl_driver_init(worker);
#endif
	default:
		return -EINVAL;
	}
}

int
starpu_driver_run_once(struct starpu_driver *d)
{
	STARPU_ASSERT(d);
	void *worker = _starpu_get_worker_from_driver(d);

	switch (d->type)
	{
#ifdef STARPU_USE_CPU
	case STARPU_CPU_WORKER:
		return _starpu_cpu_driver_run_once(worker);
#endif
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_WORKER:
		return _starpu_cuda_driver_run_once(worker);
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_WORKER:
		return _starpu_opencl_driver_run_once(worker);
#endif
	default:
		return -EINVAL;
	}
}

int
starpu_driver_deinit(struct starpu_driver *d)
{
	STARPU_ASSERT(d);
	void *worker = _starpu_get_worker_from_driver(d);

	switch (d->type)
	{
#ifdef STARPU_USE_CPU
	case STARPU_CPU_WORKER:
		return _starpu_cpu_driver_deinit(worker);
#endif
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_WORKER:
		return _starpu_cuda_driver_deinit(worker);
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_WORKER:
		return _starpu_opencl_driver_deinit(worker);
#endif
	default:
		return -EINVAL;
	}
}
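
/* Illustration (editor's sketch): an application thread that has reserved a
 * driver (e.g. through the not_launched_drivers field of struct starpu_conf)
 * can either run it in one call with starpu_driver_run(), or drive it step by
 * step:
 *
 *   struct starpu_driver d = { .type = STARPU_CPU_WORKER, .id.cpu_id = 0 };
 *   starpu_driver_init(&d);
 *   while (!application_is_done())      // hypothetical termination test
 *       starpu_driver_run_once(&d);
 *   starpu_driver_deinit(&d);
 *
 * starpu_driver_run() amounts to init, a run loop driven by StarPU until
 * termination is requested, and deinit. */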
void starpu_get_version(int *major, int *minor, int *release)
{
	*major = STARPU_MAJOR_VERSION;
	*minor = STARPU_MINOR_VERSION;
	*release = STARPU_RELEASE_VERSION;
}
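
/* Editor's note on the helpers below: a worker may already hold its own
 * sched_mutex when it enters code that needs to take other locks.
 * _starpu_unlock_mutex_if_prev_locked() temporarily releases it while keeping
 * the sched_mutex_locked flag set, so that the previous ownership is
 * remembered, and _starpu_relock_mutex_if_prev_locked() takes the mutex back
 * afterwards. The flag itself is maintained with
 * _starpu_worker_set_flag_sched_mutex_locked(). */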
void _starpu_unlock_mutex_if_prev_locked()
{
	int workerid = starpu_worker_get_id();
	if(workerid != -1)
	{
		struct _starpu_worker *w = _starpu_get_worker_struct(workerid);
		if(w->sched_mutex_locked)
		{
			STARPU_PTHREAD_MUTEX_UNLOCK(&w->sched_mutex);
			_starpu_worker_set_flag_sched_mutex_locked(workerid, 1);
		}
	}
	return;
}

void _starpu_relock_mutex_if_prev_locked()
{
	int workerid = starpu_worker_get_id();
	if(workerid != -1)
	{
		struct _starpu_worker *w = _starpu_get_worker_struct(workerid);
		if(w->sched_mutex_locked)
			STARPU_PTHREAD_MUTEX_LOCK(&w->sched_mutex);
	}
	return;
}

void _starpu_worker_set_flag_sched_mutex_locked(int workerid, unsigned flag)
{
	struct _starpu_worker *w = _starpu_get_worker_struct(workerid);
	w->sched_mutex_locked = flag;
}

unsigned _starpu_worker_mutex_is_sched_mutex(int workerid, starpu_pthread_mutex_t *mutex)
{
	struct _starpu_worker *w = _starpu_get_worker_struct(workerid);
	return &w->sched_mutex == mutex;
}

unsigned starpu_worker_get_sched_ctx_list(int workerid, unsigned **sched_ctxs)
{
	unsigned s = 0;
	unsigned nsched_ctxs = _starpu_worker_get_nsched_ctxs(workerid);
	*sched_ctxs = (unsigned*)malloc(nsched_ctxs*sizeof(unsigned));
	struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
	struct _starpu_sched_ctx_list *l = NULL;

	for (l = worker->sched_ctx_list; l; l = l->next)
	{
		(*sched_ctxs)[s++] = l->sched_ctx;
	}
	return nsched_ctxs;
}
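
/* Illustration (editor's sketch): the array returned through sched_ctxs is
 * allocated with malloc() inside starpu_worker_get_sched_ctx_list(), so the
 * caller is responsible for freeing it:
 *
 *   unsigned *ctxs;
 *   unsigned nctxs = starpu_worker_get_sched_ctx_list(workerid, &ctxs);
 *   ...use ctxs[0 .. nctxs-1]...
 *   free(ctxs);
 */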
char *starpu_worker_get_type_as_string(enum starpu_worker_archtype type)
{
	if (type == STARPU_CPU_WORKER) return "STARPU_CPU_WORKER";
	if (type == STARPU_CUDA_WORKER) return "STARPU_CUDA_WORKER";
	if (type == STARPU_OPENCL_WORKER) return "STARPU_OPENCL_WORKER";
	if (type == STARPU_MIC_WORKER) return "STARPU_MIC_WORKER";
	if (type == STARPU_SCC_WORKER) return "STARPU_SCC_WORKER";
	if (type == STARPU_ANY_WORKER) return "STARPU_ANY_WORKER";
	return "STARPU_unknown_WORKER";
}