openmp_runtime_support.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2014 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>

#ifdef STARPU_OPENMP
/*
 * locally disable -Wdeprecated-declarations to avoid
 * lots of deprecated warnings for ucontext related functions
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

#include <util/openmp_runtime_support.h>
#include <core/task.h>
#include <core/workers.h>
#include <common/list.h>
#include <common/starpu_spinlock.h>
#include <common/uthash.h>
#include <datawizard/interfaces/data_interface.h>
#include <stdlib.h>
#include <ctype.h>
#include <strings.h>

#define _STARPU_INITIAL_THREAD_STACKSIZE 2097152

static struct starpu_omp_global _global_state;
static starpu_pthread_key_t omp_thread_key;
static starpu_pthread_key_t omp_task_key;
static struct starpu_conf omp_starpu_conf;
static int omp_dummy_init = 0;

struct starpu_omp_global *_starpu_omp_global_state = NULL;
double _starpu_omp_clock_ref = 0.0; /* clock reference for starpu_omp_get_wtick */

static struct starpu_omp_critical *create_omp_critical_struct(void);
static void destroy_omp_critical_struct(struct starpu_omp_critical *critical);
static struct starpu_omp_device *create_omp_device_struct(void);
static void destroy_omp_device_struct(struct starpu_omp_device *device);
static struct starpu_omp_region *create_omp_region_struct(struct starpu_omp_region *parent_region, struct starpu_omp_device *owner_device);
static void destroy_omp_region_struct(struct starpu_omp_region *region);
static struct starpu_omp_thread *create_omp_thread_struct(struct starpu_omp_region *owner_region);
static void destroy_omp_thread_struct(struct starpu_omp_thread *thread);
static struct starpu_omp_task *create_omp_task_struct(struct starpu_omp_task *parent_task,
		struct starpu_omp_thread *owner_thread, struct starpu_omp_region *owner_region, int is_implicit);
static void destroy_omp_task_struct(struct starpu_omp_task *task);
static void wake_up_and_unlock_task(struct starpu_omp_task *task);
static void wake_up_barrier(struct starpu_omp_region *parallel_region);
static void starpu_omp_task_preempt(void);

struct starpu_omp_thread *_starpu_omp_get_thread(void)
{
	struct starpu_omp_thread *thread = STARPU_PTHREAD_GETSPECIFIC(omp_thread_key);
	return thread;
}

struct starpu_omp_task *_starpu_omp_get_task(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	return task;
}
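
/*
 * Weak task locking protocol: a task that is about to block sets
 * transaction_pending to 1 before preempting itself; the continuation
 * callback (transaction_callback below) resets the flag once the
 * blocking transaction has settled. weak_task_lock() therefore spins
 * until no transaction is pending, so that a waker can never observe,
 * and wake, a task whose sleep transaction is still in progress.
 */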
static void weak_task_lock(struct starpu_omp_task *task)
{
	_starpu_spin_lock(&task->lock);
	while (task->transaction_pending)
	{
		_starpu_spin_unlock(&task->lock);
		STARPU_UYIELD();
		_starpu_spin_lock(&task->lock);
	}
}

static void weak_task_unlock(struct starpu_omp_task *task)
{
	_starpu_spin_unlock(&task->lock);
}

static void wake_up_and_unlock_task(struct starpu_omp_task *task)
{
	STARPU_ASSERT(task->transaction_pending == 0);
	if (task->wait_on == 0)
	{
		weak_task_unlock(task);
		int ret = starpu_task_submit(task->starpu_task);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}
	else
	{
		weak_task_unlock(task);
	}
}

static void transaction_callback(void *_task)
{
	struct starpu_omp_task *task = _task;
	_starpu_spin_lock(&task->lock);
	STARPU_ASSERT(task->transaction_pending != 0);
	task->transaction_pending = 0;
	_starpu_spin_unlock(&task->lock);
}
static void condition_init(struct starpu_omp_condition *condition)
{
	condition->contention_list_head = NULL;
}

static void condition_exit(struct starpu_omp_condition *condition)
{
	STARPU_ASSERT(condition->contention_list_head == NULL);
	condition->contention_list_head = NULL;
}

static void condition_wait(struct starpu_omp_condition *condition, struct _starpu_spinlock *lock)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_task_link link;
	_starpu_spin_lock(&task->lock);
	task->wait_on |= starpu_omp_task_wait_on_condition;
	link.task = task;
	link.next = condition->contention_list_head;
	condition->contention_list_head = &link;
	task->transaction_pending = 1;
	_starpu_spin_unlock(&task->lock);
	_starpu_spin_unlock(lock);
	_starpu_task_prepare_for_continuation_ext(0, transaction_callback, task);
	starpu_omp_task_preempt();
	/* re-acquire the lock released before preempting */
	_starpu_spin_lock(lock);
}

#if 0
/* unused for now */
static void condition_signal(struct starpu_omp_condition *condition)
{
	if (condition->contention_list_head != NULL)
	{
		struct starpu_omp_task *next_task = condition->contention_list_head->task;
		weak_task_lock(next_task);
		condition->contention_list_head = condition->contention_list_head->next;
		STARPU_ASSERT(next_task->wait_on & starpu_omp_task_wait_on_condition);
		next_task->wait_on &= ~starpu_omp_task_wait_on_condition;
		wake_up_and_unlock_task(next_task);
	}
}
#endif

static void condition_broadcast(struct starpu_omp_condition *condition)
{
	while (condition->contention_list_head != NULL)
	{
		struct starpu_omp_task *next_task = condition->contention_list_head->task;
		weak_task_lock(next_task);
		condition->contention_list_head = condition->contention_list_head->next;
		STARPU_ASSERT(next_task->wait_on & starpu_omp_task_wait_on_condition);
		next_task->wait_on &= ~starpu_omp_task_wait_on_condition;
		wake_up_and_unlock_task(next_task);
	}
}
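
/*
 * Illustrative sketch, not part of the runtime: the condition primitives
 * above follow the pthread_cond_t protocol, with a _starpu_spinlock
 * protecting a predicate, and must be used from within an OpenMP task
 * context since condition_wait() preempts the current task. The names
 * example_lock, example_cond and example_ready are hypothetical.
 */
#if 0
static struct _starpu_spinlock example_lock;
static struct starpu_omp_condition example_cond;
static int example_ready;

static void example_waiter(void)
{
	_starpu_spin_lock(&example_lock);
	while (!example_ready)
	{
		/* releases example_lock while blocked, re-acquires it on wake-up */
		condition_wait(&example_cond, &example_lock);
	}
	_starpu_spin_unlock(&example_lock);
}

static void example_waker(void)
{
	_starpu_spin_lock(&example_lock);
	example_ready = 1;
	condition_broadcast(&example_cond);
	_starpu_spin_unlock(&example_lock);
}
#endif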
static void register_thread_worker(struct starpu_omp_thread *thread)
{
	STARPU_ASSERT(thread->worker != NULL);
	_starpu_spin_lock(&_global_state.hash_workers_lock);
	struct _starpu_worker *check = thread->worker;
	struct starpu_omp_thread *tmp = NULL;
	HASH_FIND_PTR(_global_state.hash_workers, &check, tmp);
	STARPU_ASSERT(tmp == NULL);
	HASH_ADD_PTR(_global_state.hash_workers, worker, thread);
	_starpu_spin_unlock(&_global_state.hash_workers_lock);
}

static struct starpu_omp_thread *get_worker_thread(struct _starpu_worker *starpu_worker)
{
	struct starpu_omp_thread *thread = NULL;
	_starpu_spin_lock(&_global_state.hash_workers_lock);
	HASH_FIND_PTR(_global_state.hash_workers, &starpu_worker, thread);
	_starpu_spin_unlock(&_global_state.hash_workers_lock);
	return thread;
}

static struct starpu_omp_thread *get_local_thread(void)
{
	struct starpu_omp_thread *thread = STARPU_PTHREAD_GETSPECIFIC(omp_thread_key);
	if (thread == NULL)
	{
		struct _starpu_worker *starpu_worker = _starpu_get_local_worker_key();
		STARPU_ASSERT(starpu_worker != NULL);
		_starpu_spin_lock(&_global_state.hash_workers_lock);
		HASH_FIND_PTR(_global_state.hash_workers, &starpu_worker, thread);
		_starpu_spin_unlock(&_global_state.hash_workers_lock);
		if (
#if STARPU_USE_CUDA
				(starpu_worker->arch != STARPU_CUDA_WORKER)
				&&
#endif
#if STARPU_USE_OPENCL
				(starpu_worker->arch != STARPU_OPENCL_WORKER)
				&&
#endif
				1
		   )
		{
			STARPU_ASSERT(thread != NULL);
		}
		if (thread != NULL)
		{
			STARPU_PTHREAD_SETSPECIFIC(omp_thread_key, thread);
		}
	}
	return thread;
}
static struct starpu_omp_critical *create_omp_critical_struct(void)
{
	struct starpu_omp_critical *critical = malloc(sizeof(*critical));
	if (critical == NULL)
		_STARPU_ERROR("memory allocation failed");
	memset(critical, 0, sizeof(*critical));
	_starpu_spin_init(&critical->lock);
	return critical;
}
static void destroy_omp_critical_struct(struct starpu_omp_critical *critical)
{
	STARPU_ASSERT(critical->state == 0);
	STARPU_ASSERT(critical->contention_list_head == NULL);
	_starpu_spin_destroy(&critical->lock);
	critical->name = NULL;
	free(critical);
}

static struct starpu_omp_device *create_omp_device_struct(void)
{
	struct starpu_omp_device *device = malloc(sizeof(*device));
	if (device == NULL)
		_STARPU_ERROR("memory allocation failed");
	memset(device, 0, sizeof(*device));
	_starpu_spin_init(&device->atomic_lock);
	return device;
}

static void destroy_omp_device_struct(struct starpu_omp_device *device)
{
	_starpu_spin_destroy(&device->atomic_lock);
	memset(device, 0, sizeof(*device));
	free(device);
}

static struct starpu_omp_device *get_caller_device(void)
{
	struct starpu_omp_task *task = _starpu_omp_get_task();
	struct starpu_omp_device *device;
	if (task)
	{
		STARPU_ASSERT(task->owner_region != NULL);
		device = task->owner_region->owner_device;
	}
	else
	{
		device = _global_state.initial_device;
	}
	STARPU_ASSERT(device != NULL);
	return device;
}

static struct starpu_omp_region *create_omp_region_struct(struct starpu_omp_region *parent_region, struct starpu_omp_device *owner_device)
{
	struct starpu_omp_region *region = malloc(sizeof(*region));
	if (region == NULL)
		_STARPU_ERROR("memory allocation failed");
	memset(region, 0, sizeof(*region));
	region->parent_region = parent_region;
	region->owner_device = owner_device;
	starpu_omp_thread_list_init(&region->thread_list);
	starpu_omp_task_list_init(&region->implicit_task_list);
	_starpu_spin_init(&region->lock);
	_starpu_spin_init(&region->registered_handles_lock);
	region->level = (parent_region != NULL)?parent_region->level+1:0;
	return region;
}

static void destroy_omp_region_struct(struct starpu_omp_region *region)
{
	STARPU_ASSERT(region->nb_threads == 0);
	STARPU_ASSERT(starpu_omp_thread_list_empty(&region->thread_list));
	STARPU_ASSERT(starpu_omp_task_list_empty(&region->implicit_task_list));
	STARPU_ASSERT(region->continuation_starpu_task == NULL);
	_starpu_spin_destroy(&region->registered_handles_lock);
	_starpu_spin_destroy(&region->lock);
	memset(region, 0, sizeof(*region));
	free(region);
}

static void omp_initial_thread_func(void)
{
	struct starpu_omp_thread *initial_thread = _global_state.initial_thread;
	struct starpu_omp_task *initial_task = _global_state.initial_task;
	while (1)
	{
		struct starpu_task *continuation_starpu_task = initial_task->nested_region->continuation_starpu_task;
		starpu_driver_run_once(&initial_thread->starpu_driver);
		/*
		 * if we are leaving the first nested region, give control back to the
		 * initial task; otherwise, continue to execute work
		 */
		if (_starpu_task_test_termination(continuation_starpu_task))
		{
			initial_task->nested_region->continuation_starpu_task = NULL;
			STARPU_PTHREAD_SETSPECIFIC(omp_task_key, initial_task);
			swapcontext(&initial_thread->ctx, &initial_task->ctx);
		}
	}
}
static struct starpu_omp_thread *create_omp_thread_struct(struct starpu_omp_region *owner_region)
{
	struct starpu_omp_thread *thread = starpu_omp_thread_new();
	if (thread == NULL)
		_STARPU_ERROR("memory allocation failed");
	memset(thread, 0, sizeof(*thread));
	thread->owner_region = owner_region;
	return thread;
}

static void destroy_omp_thread_struct(struct starpu_omp_thread *thread)
{
	STARPU_ASSERT(thread->current_task == NULL);
	memset(thread, 0, sizeof(*thread));
	starpu_omp_thread_delete(thread);
}

static void starpu_omp_explicit_task_entry(struct starpu_omp_task *task)
{
	STARPU_ASSERT(!task->is_implicit);
	struct _starpu_worker *starpu_worker = _starpu_get_local_worker_key();
	if (starpu_worker->arch == STARPU_CPU_WORKER)
	{
		task->cpu_f(task->starpu_buffers, task->starpu_cl_arg);
	}
#if STARPU_USE_CUDA
	else if (starpu_worker->arch == STARPU_CUDA_WORKER)
	{
		task->cuda_f(task->starpu_buffers, task->starpu_cl_arg);
	}
#endif
#if STARPU_USE_OPENCL
	else if (starpu_worker->arch == STARPU_OPENCL_WORKER)
	{
		task->opencl_f(task->starpu_buffers, task->starpu_cl_arg);
	}
#endif
	else
		_STARPU_ERROR("invalid worker architecture");
	_starpu_omp_unregister_task_handles(task);
	_starpu_spin_lock(&task->lock);
	task->state = starpu_omp_task_state_terminated;
	task->transaction_pending = 1;
	_starpu_spin_unlock(&task->lock);
	struct starpu_omp_thread *thread = STARPU_PTHREAD_GETSPECIFIC(omp_thread_key);
	/*
	 * the task reached the terminated state, definitively hand control back
	 * to the worker code.
	 *
	 * about to run on the worker stack...
	 */
	setcontext(&thread->ctx);
	STARPU_ASSERT(0); /* unreachable code */
}

static void starpu_omp_implicit_task_entry(struct starpu_omp_task *task)
{
	struct starpu_omp_thread *thread = STARPU_PTHREAD_GETSPECIFIC(omp_thread_key);
	STARPU_ASSERT(task->is_implicit);
	task->cpu_f(task->starpu_buffers, task->starpu_cl_arg);
	starpu_omp_barrier();
	if (thread == task->owner_region->master_thread)
	{
		_starpu_omp_unregister_region_handles(task->owner_region);
	}
	task->state = starpu_omp_task_state_terminated;
	/*
	 * the task reached the terminated state, definitively hand control back
	 * to the worker code.
	 *
	 * about to run on the worker stack...
	 */
	setcontext(&thread->ctx);
	STARPU_ASSERT(0); /* unreachable code */
}

/*
 * stop executing a task that is about to block
 * and hand control back to the thread
 */
static void starpu_omp_task_preempt(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_thread *thread = STARPU_PTHREAD_GETSPECIFIC(omp_thread_key);
	task->state = starpu_omp_task_state_preempted;
	/*
	 * the task reached a blocked state, hand control back to the worker code.
	 *
	 * about to run on the worker stack...
	 */
	swapcontext(&task->ctx, &thread->ctx);
	/* now running on the task stack again */
}
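
/*
 * Standalone sketch, not part of the runtime: the ucontext pattern used
 * for task preemption above, reduced to its essentials. Compile it as a
 * separate program; a "task" context is created on its own stack with
 * makecontext(), entered and left with swapcontext(), exactly as the
 * worker/task context switches are performed in this file.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t worker_ctx, task_ctx;

static void task_entry(void)
{
	printf("task: running on its own stack\n");
	/* "preempt": hand control back to the worker */
	swapcontext(&task_ctx, &worker_ctx);
	printf("task: resumed\n");
	/* "terminate": hand control back to the worker for good */
	swapcontext(&task_ctx, &worker_ctx);
}

int main(void)
{
	char *stack = malloc(65536);
	getcontext(&task_ctx);
	task_ctx.uc_link = NULL; /* task_entry never returns, as in the runtime */
	task_ctx.uc_stack.ss_sp = stack;
	task_ctx.uc_stack.ss_size = 65536;
	makecontext(&task_ctx, task_entry, 0);
	swapcontext(&worker_ctx, &task_ctx); /* start the task */
	printf("worker: task preempted, doing other work\n");
	swapcontext(&worker_ctx, &task_ctx); /* resume the task */
	free(stack);
	return 0;
}
#endif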
/*
 * wrap a task function to allow the task to be preempted
 */
static void starpu_omp_implicit_task_exec(void *buffers[], void *cl_arg)
{
	struct starpu_omp_task *task = starpu_task_get_current()->omp_task;
	STARPU_ASSERT(task->is_implicit);
	STARPU_PTHREAD_SETSPECIFIC(omp_task_key, task);
	struct starpu_omp_thread *thread = get_local_thread();
	if (task->state != starpu_omp_task_state_preempted)
	{
		task->starpu_buffers = buffers;
		task->starpu_cl_arg = cl_arg;
		STARPU_ASSERT(task->stack == NULL);
		STARPU_ASSERT(task->stacksize > 0);
		task->stack = malloc(task->stacksize);
		if (task->stack == NULL)
			_STARPU_ERROR("memory allocation failed");
		getcontext(&task->ctx);
		/*
		 * we do not use uc_link, starpu_omp_implicit_task_entry will handle
		 * the end of the task
		 */
		task->ctx.uc_link = NULL;
		task->ctx.uc_stack.ss_sp = task->stack;
		task->ctx.uc_stack.ss_size = task->stacksize;
		task->stack_vg_id = VALGRIND_STACK_REGISTER(task->stack, task->stack+task->stacksize);
		makecontext(&task->ctx, (void (*) ()) starpu_omp_implicit_task_entry, 1, task);
	}
	task->state = starpu_omp_task_state_clear;
	/*
	 * start the task execution, or restore a previously preempted task.
	 * about to run on the task stack...
	 */
	swapcontext(&thread->ctx, &task->ctx);
	/* now running on the worker stack again */
	STARPU_ASSERT(task->state == starpu_omp_task_state_preempted
			|| task->state == starpu_omp_task_state_terminated);
	STARPU_PTHREAD_SETSPECIFIC(omp_task_key, NULL);
	/* TODO: analyse the cause of the return and take appropriate steps */
	if (task->state == starpu_omp_task_state_terminated)
	{
		task->starpu_task->omp_task = NULL;
		task->starpu_task = NULL;
		VALGRIND_STACK_DEREGISTER(task->stack_vg_id);
		task->stack_vg_id = 0;
		free(task->stack);
		task->stack = NULL;
		memset(&task->ctx, 0, sizeof(task->ctx));
	}
	else if (task->state != starpu_omp_task_state_preempted)
		_STARPU_ERROR("invalid omp task state");
}
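
/*
 * Accounting performed when an explicit task terminates: decrement the
 * parent's child count, either reaping a zombie parent or waking it if
 * it was waiting on its children; decrement the owner region's bound
 * explicit task count, waking the region's waiting task if any; and
 * decrement the enclosing task group's descendant count, waking the
 * group leader if it was waiting on the group.
 */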
static void starpu_omp_task_completion_accounting(struct starpu_omp_task *task)
{
	struct starpu_omp_task *parent_task = task->parent_task;
	struct starpu_omp_region *parallel_region = task->owner_region;
	weak_task_lock(parent_task);
	if (STARPU_ATOMIC_ADD(&parent_task->child_task_count, -1) == 0)
	{
		if (parent_task->state == starpu_omp_task_state_zombie)
		{
			STARPU_ASSERT(!parent_task->is_implicit);
			weak_task_unlock(parent_task);
			destroy_omp_task_struct(parent_task);
		}
		else if (parent_task->wait_on & starpu_omp_task_wait_on_task_childs)
		{
			parent_task->wait_on &= ~starpu_omp_task_wait_on_task_childs;
			wake_up_and_unlock_task(parent_task);
		}
		else
		{
			weak_task_unlock(parent_task);
		}
	}
	else
	{
		weak_task_unlock(parent_task);
	}
	_starpu_spin_lock(&parallel_region->lock);
	if (STARPU_ATOMIC_ADD(&parallel_region->bound_explicit_task_count, -1) == 0)
	{
		struct starpu_omp_task *waiting_task = parallel_region->waiting_task;
		_starpu_spin_unlock(&parallel_region->lock);
		if (waiting_task)
		{
			weak_task_lock(waiting_task);
			_starpu_spin_lock(&parallel_region->lock);
			parallel_region->waiting_task = NULL;
			STARPU_ASSERT(waiting_task->wait_on & starpu_omp_task_wait_on_region_tasks);
			waiting_task->wait_on &= ~starpu_omp_task_wait_on_region_tasks;
			_starpu_spin_unlock(&parallel_region->lock);
			wake_up_and_unlock_task(waiting_task);
		}
	}
	else
	{
		_starpu_spin_unlock(&parallel_region->lock);
	}
	if (task->task_group)
	{
		struct starpu_omp_task *leader_task = task->task_group->leader_task;
		STARPU_ASSERT(leader_task != task);
		weak_task_lock(leader_task);
		if (STARPU_ATOMIC_ADD(&task->task_group->descendent_task_count, -1) == 0)
		{
			if (leader_task->wait_on & starpu_omp_task_wait_on_group)
			{
				leader_task->wait_on &= ~starpu_omp_task_wait_on_group;
				wake_up_and_unlock_task(leader_task);
			}
			else
			{
				weak_task_unlock(leader_task);
			}
		}
		else
		{
			weak_task_unlock(leader_task);
		}
	}
}
/*
 * wrap a task function to allow the task to be preempted
 */
static void starpu_omp_explicit_task_exec(void *buffers[], void *cl_arg)
{
	struct starpu_omp_task *task = starpu_task_get_current()->omp_task;
	STARPU_ASSERT(!task->is_implicit);
	STARPU_PTHREAD_SETSPECIFIC(omp_task_key, task);
	struct starpu_omp_thread *thread = get_local_thread();
	if (task->state != starpu_omp_task_state_preempted)
	{
		if (thread == NULL)
		{
			struct _starpu_worker *starpu_worker = _starpu_get_local_worker_key();
			if (starpu_worker->arch != STARPU_CPU_WORKER)
			{
				if (
#if STARPU_USE_CUDA
						(starpu_worker->arch != STARPU_CUDA_WORKER)
						&&
#endif
#if STARPU_USE_OPENCL
						(starpu_worker->arch != STARPU_OPENCL_WORKER)
						&&
#endif
						1
				   )
				{
					_STARPU_ERROR("invalid worker architecture");
				}
				struct starpu_omp_thread *new_thread;
				new_thread = create_omp_thread_struct(NULL);
				new_thread->worker = starpu_worker;
				register_thread_worker(new_thread);
				thread = get_local_thread();
				STARPU_ASSERT(thread == new_thread);
			}
			else
			{
				_STARPU_ERROR("orphaned CPU thread");
			}
		}
		STARPU_ASSERT(thread != NULL);
		if (!task->is_untied)
		{
			struct _starpu_worker *starpu_worker = _starpu_get_local_worker_key();
			task->starpu_task->workerid = starpu_worker->workerid;
			task->starpu_task->execute_on_a_specific_worker = 1;
		}
		task->starpu_buffers = buffers;
		task->starpu_cl_arg = cl_arg;
		STARPU_ASSERT(task->stack == NULL);
		STARPU_ASSERT(task->stacksize > 0);
		task->stack = malloc(task->stacksize);
		if (task->stack == NULL)
			_STARPU_ERROR("memory allocation failed");
		getcontext(&task->ctx);
		/*
		 * we do not use uc_link, starpu_omp_explicit_task_entry will handle
		 * the end of the task
		 */
		task->ctx.uc_link = NULL;
		task->ctx.uc_stack.ss_sp = task->stack;
		task->ctx.uc_stack.ss_size = task->stacksize;
		makecontext(&task->ctx, (void (*) ()) starpu_omp_explicit_task_entry, 1, task);
	}
	task->state = starpu_omp_task_state_clear;
	/*
	 * start the task execution, or restore a previously preempted task.
	 * about to run on the task stack...
	 */
	swapcontext(&thread->ctx, &task->ctx);
	/* now running on the worker stack again */
	STARPU_ASSERT(task->state == starpu_omp_task_state_preempted
			|| task->state == starpu_omp_task_state_terminated);
	STARPU_PTHREAD_SETSPECIFIC(omp_task_key, NULL);
	/* TODO: analyse the cause of the return and take appropriate steps */
	if (task->state == starpu_omp_task_state_terminated)
	{
		free(task->stack);
		task->stack = NULL;
		memset(&task->ctx, 0, sizeof(task->ctx));
		starpu_omp_task_completion_accounting(task);
	}
	else if (task->state != starpu_omp_task_state_preempted)
		_STARPU_ERROR("invalid omp task state");
}
static struct starpu_omp_task *create_omp_task_struct(struct starpu_omp_task *parent_task,
		struct starpu_omp_thread *owner_thread, struct starpu_omp_region *owner_region, int is_implicit)
{
	struct starpu_omp_task *task = starpu_omp_task_new();
	if (task == NULL)
		_STARPU_ERROR("memory allocation failed");
	memset(task, 0, sizeof(*task));
	task->parent_task = parent_task;
	task->owner_thread = owner_thread;
	task->owner_region = owner_region;
	task->is_implicit = is_implicit;
	_starpu_spin_init(&task->lock);
	/* TODO: initialize task->data_env_icvs with proper values */
	memset(&task->data_env_icvs, 0, sizeof(task->data_env_icvs));
	if (is_implicit)
	{
		/* TODO: initialize task->implicit_task_icvs with proper values */
		memset(&task->implicit_task_icvs, 0, sizeof(task->implicit_task_icvs));
	}
	if (owner_region->level > 0)
	{
		STARPU_ASSERT(owner_region->owner_device->icvs.stacksize_var > 0);
		task->stacksize = owner_region->owner_device->icvs.stacksize_var;
	}
	return task;
}

static void destroy_omp_task_struct(struct starpu_omp_task *task)
{
	STARPU_ASSERT(task->state == starpu_omp_task_state_terminated || (task->state == starpu_omp_task_state_zombie && task->child_task_count == 0) || task->state == starpu_omp_task_state_target);
	if (task->state == starpu_omp_task_state_target)
	{
		starpu_omp_task_completion_accounting(task);
	}
	STARPU_ASSERT(task->nested_region == NULL);
	STARPU_ASSERT(task->starpu_task == NULL);
	STARPU_ASSERT(task->stack == NULL);
	_starpu_spin_destroy(&task->lock);
	memset(task, 0, sizeof(*task));
	starpu_omp_task_delete(task);
}
/*
 * setup the main application thread to handle the possible preemption of the initial task
 */
static void omp_initial_thread_setup(void)
{
	struct starpu_omp_thread *initial_thread = _global_state.initial_thread;
	struct starpu_omp_task *initial_task = _global_state.initial_task;
	/* .current_task */
	initial_thread->current_task = initial_task;
	/* .owner_region already set in create_omp_thread_struct */
	/* .initial_thread_stack */
	initial_thread->initial_thread_stack = malloc(_STARPU_INITIAL_THREAD_STACKSIZE);
	if (initial_thread->initial_thread_stack == NULL)
		_STARPU_ERROR("memory allocation failed");
	/* .ctx */
	getcontext(&initial_thread->ctx);
	/*
	 * we do not use uc_link, the initial thread should always hand control
	 * back to the initial task
	 */
	initial_thread->ctx.uc_link = NULL;
	initial_thread->ctx.uc_stack.ss_sp = initial_thread->initial_thread_stack;
	initial_thread->ctx.uc_stack.ss_size = _STARPU_INITIAL_THREAD_STACKSIZE;
	initial_thread->initial_thread_stack_vg_id = VALGRIND_STACK_REGISTER(initial_thread->initial_thread_stack, initial_thread->initial_thread_stack+_STARPU_INITIAL_THREAD_STACKSIZE);
	makecontext(&initial_thread->ctx, omp_initial_thread_func, 0);
	/* .starpu_driver */
	/*
	 * we configure StarPU to not launch CPU worker 0,
	 * because we will use the main thread to play the role of worker 0
	 */
	int ret = starpu_conf_init(&omp_starpu_conf);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_conf_init");
	initial_thread->starpu_driver.type = STARPU_CPU_WORKER;
	initial_thread->starpu_driver.id.cpu_id = 0;
	omp_starpu_conf.not_launched_drivers = &initial_thread->starpu_driver;
	omp_starpu_conf.n_not_launched_drivers = 1;
	/* we are now ready to start StarPU */
	ret = starpu_init(&omp_starpu_conf);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_driver_init(&initial_thread->starpu_driver);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_driver_init");
	STARPU_PTHREAD_SETSPECIFIC(omp_task_key, initial_task);
	_global_state.nb_starpu_cpu_workers = starpu_worker_get_count_by_type(STARPU_CPU_WORKER);
	_global_state.starpu_cpu_worker_ids = malloc(_global_state.nb_starpu_cpu_workers * sizeof(int));
	if (_global_state.starpu_cpu_worker_ids == NULL)
		_STARPU_ERROR("memory allocation failed");
	ret = starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, _global_state.starpu_cpu_worker_ids, _global_state.nb_starpu_cpu_workers);
	STARPU_ASSERT(ret == _global_state.nb_starpu_cpu_workers);
	initial_thread->worker = _starpu_get_worker_struct(_global_state.starpu_cpu_worker_ids[0]);
	STARPU_ASSERT(initial_thread->worker);
	STARPU_ASSERT(initial_thread->worker->arch == STARPU_CPU_WORKER);
	STARPU_PTHREAD_SETSPECIFIC(omp_thread_key, initial_thread);
	register_thread_worker(initial_thread);
}
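
/*
 * Standalone sketch, not part of the runtime: driving a StarPU CPU
 * worker manually from the main thread, which is the mechanism used by
 * omp_initial_thread_setup() above. Compile it as a separate program;
 * it assumes at least one CPU worker is available.
 */
#if 0
#include <starpu.h>

int main(void)
{
	struct starpu_conf conf;
	struct starpu_driver driver;
	starpu_conf_init(&conf);
	/* tell StarPU not to launch CPU worker 0, we will drive it ourselves */
	driver.type = STARPU_CPU_WORKER;
	driver.id.cpu_id = 0;
	conf.not_launched_drivers = &driver;
	conf.n_not_launched_drivers = 1;
	if (starpu_init(&conf) != 0)
		return 1;
	starpu_driver_init(&driver);
	/* ... submit tasks, then progress the worker loop by hand ... */
	starpu_driver_run_once(&driver);
	starpu_driver_deinit(&driver);
	starpu_shutdown();
	return 0;
}
#endif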
static void omp_initial_thread_exit()
{
	struct starpu_omp_thread *initial_thread = _global_state.initial_thread;
	int ret = starpu_driver_deinit(&initial_thread->starpu_driver);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_driver_deinit");
	memset(&initial_thread->starpu_driver, 0, sizeof (initial_thread->starpu_driver));
	/* the driver for the main thread is now de-inited, we can shut down StarPU */
	starpu_shutdown();
	free(_global_state.starpu_cpu_worker_ids);
	_global_state.starpu_cpu_worker_ids = NULL;
	_global_state.nb_starpu_cpu_workers = 0;
	VALGRIND_STACK_DEREGISTER(initial_thread->initial_thread_stack_vg_id);
	free(initial_thread->initial_thread_stack);
	initial_thread->initial_thread_stack = NULL;
	memset(&initial_thread->ctx, 0, sizeof (initial_thread->ctx));
	initial_thread->current_task = NULL;
}
static void omp_initial_region_setup(void)
{
	omp_initial_thread_setup();
	const int max_active_levels = _starpu_omp_initial_icv_values->max_active_levels_var;
	const int max_threads = (int)starpu_cpu_worker_get_count();
	/* implementation specific initial ICV values override */
	if (_starpu_omp_initial_icv_values->nthreads_var[0] == 0)
	{
		_starpu_omp_initial_icv_values->nthreads_var[0] = max_threads;
		_starpu_omp_initial_icv_values->nthreads_var[1] = 0;
	}
	else
	{
		int i;
		for (i = 0; i < max_active_levels; i++)
		{
			if (_starpu_omp_initial_icv_values->nthreads_var[i] == 0)
				break;
			if (_starpu_omp_initial_icv_values->nthreads_var[i] > max_threads)
			{
				_starpu_omp_initial_icv_values->nthreads_var[i] = max_threads;
			}
		}
	}
	_starpu_omp_initial_icv_values->dyn_var = 0;
	_starpu_omp_initial_icv_values->nest_var = 0;
	_global_state.initial_device->icvs.max_active_levels_var = max_active_levels;
	_global_state.initial_device->icvs.def_sched_var = _starpu_omp_initial_icv_values->def_sched_var;
	_global_state.initial_device->icvs.def_sched_chunk_var = _starpu_omp_initial_icv_values->def_sched_chunk_var;
	_global_state.initial_device->icvs.stacksize_var = _starpu_omp_initial_icv_values->stacksize_var;
	_global_state.initial_device->icvs.wait_policy_var = _starpu_omp_initial_icv_values->wait_policy_var;
	_global_state.initial_region->master_thread = _global_state.initial_thread;
	_global_state.initial_region->nb_threads++;
	_global_state.initial_region->icvs.dyn_var = _starpu_omp_initial_icv_values->dyn_var;
	_global_state.initial_region->icvs.nest_var = _starpu_omp_initial_icv_values->nest_var;
	if (_starpu_omp_initial_icv_values->nthreads_var[1] != 0)
	{
		_global_state.initial_region->icvs.nthreads_var = malloc((1+max_active_levels-_global_state.initial_region->level) * sizeof(*_global_state.initial_region->icvs.nthreads_var));
		int i,j;
		for (i = _global_state.initial_region->level, j = 0; i < max_active_levels; i++, j++)
		{
			_global_state.initial_region->icvs.nthreads_var[j] = _starpu_omp_initial_icv_values->nthreads_var[j];
		}
		_global_state.initial_region->icvs.nthreads_var[j] = 0;
	}
	else
	{
		_global_state.initial_region->icvs.nthreads_var = malloc(2 * sizeof(*_global_state.initial_region->icvs.nthreads_var));
		_global_state.initial_region->icvs.nthreads_var[0] = _starpu_omp_initial_icv_values->nthreads_var[0];
		_global_state.initial_region->icvs.nthreads_var[1] = 0;
	}
	if (_starpu_omp_initial_icv_values->bind_var[1] != starpu_omp_proc_bind_undefined)
	{
		_global_state.initial_region->icvs.bind_var = malloc((1+max_active_levels-_global_state.initial_region->level) * sizeof(*_global_state.initial_region->icvs.bind_var));
		int i,j;
		for (i = _global_state.initial_region->level, j = 0; i < max_active_levels; i++, j++)
		{
			_global_state.initial_region->icvs.bind_var[j] = _starpu_omp_initial_icv_values->bind_var[j];
		}
		_global_state.initial_region->icvs.bind_var[j] = starpu_omp_proc_bind_undefined;
	}
	else
	{
		_global_state.initial_region->icvs.bind_var = malloc(2 * sizeof(*_global_state.initial_region->icvs.bind_var));
		_global_state.initial_region->icvs.bind_var[0] = _starpu_omp_initial_icv_values->bind_var[0];
		_global_state.initial_region->icvs.bind_var[1] = starpu_omp_proc_bind_undefined;
	}
	_global_state.initial_region->icvs.thread_limit_var = _starpu_omp_initial_icv_values->thread_limit_var;
	_global_state.initial_region->icvs.active_levels_var = 0;
	_global_state.initial_region->icvs.levels_var = 0;
	_global_state.initial_region->icvs.run_sched_var = _starpu_omp_initial_icv_values->run_sched_var;
	_global_state.initial_region->icvs.run_sched_chunk_var = _starpu_omp_initial_icv_values->run_sched_chunk_var;
	_global_state.initial_region->icvs.default_device_var = _starpu_omp_initial_icv_values->default_device_var;
	starpu_omp_task_list_push_back(&_global_state.initial_region->implicit_task_list,
			_global_state.initial_task);
}

static void omp_initial_region_exit(void)
{
	omp_initial_thread_exit();
	_global_state.initial_task->state = starpu_omp_task_state_terminated;
	starpu_omp_task_list_pop_front(&_global_state.initial_region->implicit_task_list);
	_global_state.initial_region->master_thread = NULL;
	free(_global_state.initial_region->icvs.nthreads_var);
	free(_global_state.initial_region->icvs.bind_var);
	_global_state.initial_region->nb_threads--;
}
/*
 * If StarPU was compiled with --enable-openmp, but the OpenMP runtime support
 * is not in use, starpu_init() may have been called directly instead of
 * through starpu_omp_init(). However, some starpu_omp functions may still be
 * called, such as _starpu_omp_get_task(). So let's set up a basic environment
 * for them.
 */
void _starpu_omp_dummy_init(void)
{
	if (_starpu_omp_global_state != &_global_state)
	{
		if (omp_dummy_init == 0)
		{
			STARPU_PTHREAD_KEY_CREATE(&omp_thread_key, NULL);
			STARPU_PTHREAD_KEY_CREATE(&omp_task_key, NULL);
		}
		omp_dummy_init++;
	}
}

/*
 * Free data structures allocated by _starpu_omp_dummy_init().
 */
void _starpu_omp_dummy_shutdown(void)
{
	if (omp_dummy_init > 0)
	{
		if (omp_dummy_init == 1)
		{
			STARPU_PTHREAD_KEY_DELETE(omp_thread_key);
			STARPU_PTHREAD_KEY_DELETE(omp_task_key);
		}
		omp_dummy_init--;
	}
}
/*
 * Entry point to be called by the OpenMP runtime constructor
 */
int starpu_omp_init(void)
{
	_starpu_omp_global_state = &_global_state;
	STARPU_PTHREAD_KEY_CREATE(&omp_thread_key, NULL);
	STARPU_PTHREAD_KEY_CREATE(&omp_task_key, NULL);
	_global_state.initial_device = create_omp_device_struct();
	_global_state.initial_region = create_omp_region_struct(NULL, _global_state.initial_device);
	_global_state.initial_thread = create_omp_thread_struct(_global_state.initial_region);
	_global_state.initial_task = create_omp_task_struct(NULL,
			_global_state.initial_thread, _global_state.initial_region, 1);
	_global_state.default_critical = create_omp_critical_struct();
	_global_state.named_criticals = NULL;
	_starpu_spin_init(&_global_state.named_criticals_lock);
	_global_state.hash_workers = NULL;
	_starpu_spin_init(&_global_state.hash_workers_lock);
	_starpu_omp_environment_init();
	_global_state.icvs.cancel_var = _starpu_omp_initial_icv_values->cancel_var;
	omp_initial_region_setup();
	/* init clock reference for starpu_omp_get_wtick */
	_starpu_omp_clock_ref = starpu_timing_now();
	return 0;
}

void starpu_omp_shutdown(void)
{
	omp_initial_region_exit();
	/* TODO: free ICV variables */
	/* TODO: free task/thread/region/device structures */
	destroy_omp_task_struct(_global_state.initial_task);
	_global_state.initial_task = NULL;
	_global_state.initial_thread = NULL;
	destroy_omp_region_struct(_global_state.initial_region);
	_global_state.initial_region = NULL;
	destroy_omp_device_struct(_global_state.initial_device);
	_global_state.initial_device = NULL;
	destroy_omp_critical_struct(_global_state.default_critical);
	_global_state.default_critical = NULL;
	_starpu_spin_lock(&_global_state.named_criticals_lock);
	{
		struct starpu_omp_critical *critical, *tmp;
		HASH_ITER(hh, _global_state.named_criticals, critical, tmp)
		{
			STARPU_ASSERT(critical != NULL);
			HASH_DEL(_global_state.named_criticals, critical);
			destroy_omp_critical_struct(critical);
		}
	}
	STARPU_ASSERT(_global_state.named_criticals == NULL);
	_starpu_spin_unlock(&_global_state.named_criticals_lock);
	_starpu_spin_destroy(&_global_state.named_criticals_lock);
	_starpu_spin_lock(&_global_state.hash_workers_lock);
	{
		struct starpu_omp_thread *thread, *tmp;
		HASH_ITER(hh, _global_state.hash_workers, thread, tmp)
		{
			STARPU_ASSERT(thread != NULL);
			HASH_DEL(_global_state.hash_workers, thread);
			destroy_omp_thread_struct(thread);
		}
	}
	STARPU_ASSERT(_global_state.hash_workers == NULL);
	_starpu_spin_unlock(&_global_state.hash_workers_lock);
	_starpu_spin_destroy(&_global_state.hash_workers_lock);
	_starpu_omp_environment_exit();
	STARPU_PTHREAD_KEY_DELETE(omp_task_key);
	STARPU_PTHREAD_KEY_DELETE(omp_thread_key);
}
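
/*
 * Illustrative sketch, not part of the runtime: how a compiler-generated
 * (or hand-written) program would use the entry points defined in this
 * file. In real use, starpu_omp_init() is invoked by the OpenMP runtime
 * constructor rather than called explicitly. The attr field assignments
 * follow their use in starpu_omp_parallel_region() below; treat the
 * exact attribute layout as an assumption of this sketch.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <starpu.h>

static void parallel_body(void *buffers[], void *cl_arg)
{
	(void) buffers;
	(void) cl_arg;
	printf("hello from an implicit task of the parallel region\n");
}

int main(void)
{
	starpu_omp_init();
	struct starpu_omp_parallel_region_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.cl.cpu_funcs[0] = parallel_body;
	attr.cl.where = STARPU_CPU;
	attr.if_clause = 1;   /* request an active parallel region */
	attr.num_threads = 4; /* ask for four implicit tasks */
	starpu_omp_parallel_region(&attr);
	starpu_omp_shutdown();
	return 0;
}
#endif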
  891. void starpu_omp_parallel_region(const struct starpu_omp_parallel_region_attr *attr)
  892. {
  893. struct starpu_omp_thread *master_thread = STARPU_PTHREAD_GETSPECIFIC(omp_thread_key);
  894. struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
  895. struct starpu_omp_region *generating_region = task->owner_region;
  896. const int max_active_levels = generating_region->owner_device->icvs.max_active_levels_var;
  897. struct starpu_omp_region *new_region =
  898. create_omp_region_struct(generating_region, _global_state.initial_device);
  899. int ret;
  900. int nb_threads = 1;
  901. /* TODO: for now, nested parallel sections are not supported, thus we
  902. * open an active parallel section only if the generating region is the
  903. * initial region */
  904. if (attr->if_clause != 0)
  905. {
  906. const int max_threads = (int)starpu_cpu_worker_get_count();
  907. if (attr->num_threads > 0)
  908. {
  909. nb_threads = attr->num_threads;
  910. }
  911. else
  912. {
  913. nb_threads = generating_region->icvs.nthreads_var[0];
  914. }
  915. if (nb_threads > max_threads)
  916. {
  917. nb_threads = max_threads;
  918. }
  919. if (nb_threads > 1 && generating_region->icvs.active_levels_var+1 > max_active_levels)
  920. {
  921. nb_threads = 1;
  922. }
  923. }
  924. STARPU_ASSERT(nb_threads > 0);
  925. new_region->icvs.dyn_var = generating_region->icvs.dyn_var;
  926. new_region->icvs.nest_var = generating_region->icvs.nest_var;
  927. /* the nthreads_var and bind_var arrays do not hold more than
  928. * max_active_levels entries at most, even if some in-between levels
  929. * are inactive */
  930. if (new_region->level < max_active_levels)
  931. {
  932. if (generating_region->icvs.nthreads_var[1] != 0)
  933. {
  934. new_region->icvs.nthreads_var = malloc((1+max_active_levels-new_region->level) * sizeof(*new_region->icvs.nthreads_var));
  935. int i,j;
  936. for (i = new_region->level, j = 0; i < max_active_levels; i++, j++)
  937. {
  938. new_region->icvs.nthreads_var[j] = generating_region->icvs.nthreads_var[j+1];
  939. }
  940. new_region->icvs.nthreads_var[j] = 0;
  941. }
  942. else
  943. {
  944. new_region->icvs.nthreads_var = malloc(2 * sizeof(*new_region->icvs.nthreads_var));
  945. new_region->icvs.nthreads_var[0] = generating_region->icvs.nthreads_var[0];
  946. new_region->icvs.nthreads_var[1] = 0;
  947. }
  948. if (generating_region->icvs.bind_var[1] != starpu_omp_proc_bind_undefined)
  949. {
  950. new_region->icvs.bind_var = malloc((1+max_active_levels-new_region->level) * sizeof(*new_region->icvs.bind_var));
  951. int i,j;
  952. for (i = new_region->level, j = 0; i < max_active_levels; i++, j++)
  953. {
  954. new_region->icvs.bind_var[j] = generating_region->icvs.bind_var[j+1];
  955. }
  956. new_region->icvs.bind_var[j] = starpu_omp_proc_bind_undefined;
  957. }
  958. else
  959. {
  960. new_region->icvs.bind_var = malloc(2 * sizeof(*new_region->icvs.bind_var));
  961. new_region->icvs.bind_var[0] = generating_region->icvs.bind_var[0];
  962. new_region->icvs.bind_var[1] = starpu_omp_proc_bind_undefined;
  963. }
  964. }
  965. else
  966. {
  967. new_region->icvs.nthreads_var = malloc(sizeof(*new_region->icvs.nthreads_var));
  968. new_region->icvs.nthreads_var[0] = generating_region->icvs.nthreads_var[0];
  969. new_region->icvs.bind_var = malloc(sizeof(*new_region->icvs.bind_var));
  970. new_region->icvs.bind_var[0] = generating_region->icvs.bind_var[0];
  971. }
  972. new_region->icvs.thread_limit_var = generating_region->icvs.thread_limit_var;
  973. new_region->icvs.active_levels_var = (nb_threads > 1)?generating_region->icvs.active_levels_var+1:generating_region->icvs.active_levels_var;
  974. new_region->icvs.levels_var = generating_region->icvs.levels_var+1;
  975. new_region->icvs.run_sched_var = generating_region->icvs.run_sched_var;
  976. new_region->icvs.run_sched_chunk_var = generating_region->icvs.run_sched_chunk_var;
  977. new_region->icvs.default_device_var = generating_region->icvs.default_device_var;
  978. int i;
  979. for (i = 0; i < nb_threads; i++)
  980. {
  981. struct starpu_omp_thread *new_thread;
  982. if (i == 0)
  983. {
  984. new_thread = master_thread;
  985. new_region->master_thread = master_thread;
  986. }
  987. else
  988. {
  989. /* TODO: specify actual starpu worker */
  990. /* TODO: use a less arbitrary thread/worker mapping scheme */
  991. if (generating_region->level == 0)
  992. {
  993. struct _starpu_worker *worker = _starpu_get_worker_struct(_global_state.starpu_cpu_worker_ids[i]);
  994. new_thread = get_worker_thread(worker);
  995. if (new_thread == NULL)
  996. {
  997. new_thread = create_omp_thread_struct(new_region);
  998. new_thread->worker = _starpu_get_worker_struct(_global_state.starpu_cpu_worker_ids[i]);
  999. register_thread_worker(new_thread);
  1000. }
  1001. }
  1002. else
  1003. {
  1004. new_thread = master_thread;
  1005. }
  1006. starpu_omp_thread_list_push_back(&new_region->thread_list, new_thread);
  1007. }
  1008. struct starpu_omp_task *new_task = create_omp_task_struct(task, new_thread, new_region, 1);
  1009. new_task->rank = new_region->nb_threads;
  1010. new_region->nb_threads++;
  1011. starpu_omp_task_list_push_back(&new_region->implicit_task_list, new_task);
  1012. }
  1013. STARPU_ASSERT(new_region->nb_threads == nb_threads);
  1014. /*
  1015. * if task == initial_task, create a starpu task as a continuation to all the implicit
  1016. * tasks of the new region, else prepare the task for preemption,
  1017. * to become itself a continuation to the implicit tasks of the new region
  1018. */
  1019. if (task == _global_state.initial_task)
  1020. {
  1021. new_region->continuation_starpu_task = starpu_task_create();
  1022. /* in that case, the continuation starpu task is only used for synchronisation */
  1023. new_region->continuation_starpu_task->cl = NULL;
  1024. new_region->continuation_starpu_task->workerid = master_thread->worker->workerid;
  1025. new_region->continuation_starpu_task->execute_on_a_specific_worker = 1;
		/* this sync task will be tested for completion in omp_initial_thread_func() */
		new_region->continuation_starpu_task->detach = 0;
	}
	else
	{
		/* upon preemption, the parent starpu task becomes the continuation task */
		_starpu_task_prepare_for_continuation();
		new_region->continuation_starpu_task = task->starpu_task;
	}
	task->nested_region = new_region;
	/*
	 * create the starpu tasks for the implicit omp tasks,
	 * create explicit dependencies between these starpu tasks and the continuation starpu task
	 */
	struct starpu_omp_task *implicit_task;
	for (implicit_task  = starpu_omp_task_list_begin(&new_region->implicit_task_list);
			implicit_task != starpu_omp_task_list_end(&new_region->implicit_task_list);
			implicit_task  = starpu_omp_task_list_next(implicit_task))
	{
		implicit_task->cl = attr->cl;
		/*
		 * save pointer to the region's user function from the parallel region codelet
		 *
		 * TODO: add support for multiple/heterogeneous implementations
		 */
		implicit_task->cpu_f = implicit_task->cl.cpu_funcs[0];
		/*
		 * plug the task wrapper into the parallel region codelet instead, to support task preemption
		 */
		implicit_task->cl.cpu_funcs[0] = starpu_omp_implicit_task_exec;
		implicit_task->starpu_task = starpu_task_create();
		implicit_task->starpu_task->cl = &implicit_task->cl;
		{
			int i;
			for (i = 0; i < implicit_task->cl.nbuffers; i++)
			{
				implicit_task->starpu_task->handles[i] = attr->handles[i];
			}
		}
		implicit_task->starpu_task->cl_arg = attr->cl_arg;
		implicit_task->starpu_task->cl_arg_size = attr->cl_arg_size;
		implicit_task->starpu_task->cl_arg_free = attr->cl_arg_free;
		implicit_task->starpu_task->omp_task = implicit_task;
		implicit_task->starpu_task->workerid = implicit_task->owner_thread->worker->workerid;
		implicit_task->starpu_task->execute_on_a_specific_worker = 1;
		starpu_task_declare_deps_array(new_region->continuation_starpu_task, 1, &implicit_task->starpu_task);
	}
	attr = NULL;
	/*
	 * submit all the region implicit starpu tasks
	 */
	for (implicit_task  = starpu_omp_task_list_begin(&new_region->implicit_task_list);
			implicit_task != starpu_omp_task_list_end(&new_region->implicit_task_list);
			implicit_task  = starpu_omp_task_list_next(implicit_task))
	{
		ret = starpu_task_submit(implicit_task->starpu_task);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}
	/*
	 * submit the region continuation starpu task if task == initial_task
	 */
	if (task == _global_state.initial_task)
	{
		ret = _starpu_task_submit_internally(new_region->continuation_starpu_task);
		STARPU_CHECK_RETURN_VALUE(ret, "_starpu_task_submit_internally");
	}
	/*
	 * preempt for completion of the region
	 */
	starpu_omp_task_preempt();
	if (task == _global_state.initial_task)
	{
		STARPU_ASSERT(new_region->continuation_starpu_task == NULL);
	}
	else
	{
		STARPU_ASSERT(new_region->continuation_starpu_task != NULL);
		new_region->continuation_starpu_task = NULL;
	}
	/*
	 * TODO: free region resources
	 */
	for (i = 0; i < nb_threads; i++)
	{
		if (i == 0)
		{
			new_region->master_thread = NULL;
		}
		else
		{
			starpu_omp_thread_list_pop_front(&new_region->thread_list);
			/* TODO: cleanup unused threads */
		}
		new_region->nb_threads--;
		struct starpu_omp_task *implicit_task = starpu_omp_task_list_pop_front(&new_region->implicit_task_list);
		destroy_omp_task_struct(implicit_task);
	}
	STARPU_ASSERT(new_region->nb_threads == 0);
	task->nested_region = NULL;
	free(new_region->icvs.bind_var);
	free(new_region->icvs.nthreads_var);
	destroy_omp_region_struct(new_region);
}
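
/*
 * Illustrative sketch (not part of the runtime, kept disabled): how client
 * code might set up and fire a parallel region through the public entry
 * point. The attr field names follow my reading of the public header
 * starpu_openmp.h and should be treated as assumptions; my_region_f is a
 * hypothetical user function.
 */
#if 0
static void my_region_f(void **starpu_buffers, void *starpu_cl_arg)
{
	(void) starpu_buffers;
	(void) starpu_cl_arg;
	/* body executed by every implicit task of the region */
	starpu_omp_barrier();
}

static void spawn_region(void)
{
	struct starpu_omp_parallel_region_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.cl.cpu_funcs[0] = my_region_f;	/* single CPU implementation */
	attr.cl.where        = STARPU_CPU;
	attr.if_clause       = 1;
	/* the runtime copies what it needs from attr; see 'attr = NULL' above */
	starpu_omp_parallel_region(&attr);
}
#endif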

static void wake_up_barrier(struct starpu_omp_region *parallel_region)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_task *implicit_task;
	for (implicit_task  = starpu_omp_task_list_begin(&parallel_region->implicit_task_list);
			implicit_task != starpu_omp_task_list_end(&parallel_region->implicit_task_list);
			implicit_task  = starpu_omp_task_list_next(implicit_task))
	{
		if (implicit_task == task)
			continue;
		weak_task_lock(implicit_task);
		STARPU_ASSERT(implicit_task->wait_on & starpu_omp_task_wait_on_barrier);
		implicit_task->wait_on &= ~starpu_omp_task_wait_on_barrier;
		wake_up_and_unlock_task(implicit_task);
	}
}

void starpu_omp_barrier(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	/* Assume barriers are performed by the implicit tasks of a parallel_region */
	STARPU_ASSERT(task->is_implicit);
	struct starpu_omp_region *parallel_region = task->owner_region;
	_starpu_spin_lock(&task->lock);
	int inc_barrier_count = STARPU_ATOMIC_ADD(&parallel_region->barrier_count, 1);
	if (inc_barrier_count == parallel_region->nb_threads)
	{
		/* last task reaching the barrier */
		_starpu_spin_lock(&parallel_region->lock);
		ANNOTATE_HAPPENS_AFTER(&parallel_region->barrier_count);
		ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&parallel_region->barrier_count);
		parallel_region->barrier_count = 0;
		ANNOTATE_HAPPENS_AFTER(&parallel_region->barrier_count);
		ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&parallel_region->barrier_count);
		if (parallel_region->bound_explicit_task_count > 0)
		{
			task->wait_on |= starpu_omp_task_wait_on_region_tasks;
			parallel_region->waiting_task = task;
			task->transaction_pending = 1;
			_starpu_spin_unlock(&parallel_region->lock);
			_starpu_spin_unlock(&task->lock);
			_starpu_task_prepare_for_continuation_ext(0, transaction_callback, task);
			starpu_omp_task_preempt();
		}
		else
		{
			_starpu_spin_unlock(&parallel_region->lock);
			_starpu_spin_unlock(&task->lock);
		}
		wake_up_barrier(parallel_region);
	}
	else
	{
		ANNOTATE_HAPPENS_BEFORE(&parallel_region->barrier_count);
		/* not the last task reaching the barrier
		 * . prepare for conditional continuation
		 * . sleep
		 */
		task->wait_on |= starpu_omp_task_wait_on_barrier;
		task->transaction_pending = 1;
		_starpu_spin_unlock(&task->lock);
		_starpu_task_prepare_for_continuation_ext(0, transaction_callback, task);
		starpu_omp_task_preempt();
		STARPU_ASSERT(task->child_task_count == 0);
	}
}
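
/*
 * Illustrative sketch (disabled, assumption): the barrier above is meant to
 * be called from a region's implicit tasks, e.g. to separate two phases of
 * work; my_phase1/my_phase2 are hypothetical.
 */
#if 0
static void my_region_f(void **buffers, void *arg)
{
	my_phase1(buffers, arg);
	/* every implicit task blocks here; the last arrival additionally waits
	 * for the region's pending explicit tasks before waking the others */
	starpu_omp_barrier();
	my_phase2(buffers, arg);
}
#endif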

void starpu_omp_master(void (*f)(void *arg), void *arg)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_thread *thread = STARPU_PTHREAD_GETSPECIFIC(omp_thread_key);
	/* Assume master is performed by the implicit tasks of a region */
	STARPU_ASSERT(task->is_implicit);
	struct starpu_omp_region *region = task->owner_region;
	if (thread == region->master_thread)
	{
		f(arg);
	}
}

/* variant of omp_master for inlined code
 * return !0 for the task that should perform the master section
 * return 0 for the tasks that should not perform the master section */
int starpu_omp_master_inline(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_thread *thread = STARPU_PTHREAD_GETSPECIFIC(omp_thread_key);
	/* Assume master is performed by the implicit tasks of a region */
	STARPU_ASSERT(task->is_implicit);
	struct starpu_omp_region *region = task->owner_region;
	return thread == region->master_thread;
}
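
/*
 * Illustrative sketch (disabled, assumption): typical use of the inline
 * master variant from generated or hand-written region code.
 */
#if 0
static void my_region_f(void **buffers, void *arg)
{
	(void) buffers;
	(void) arg;
	if (starpu_omp_master_inline())
	{
		/* only the region's master thread executes this */
	}
	/* note: no implied barrier at the end of a master section */
}
#endif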

void starpu_omp_single(void (*f)(void *arg), void *arg, int nowait)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	/* Assume singles are performed by the implicit tasks of a region */
	STARPU_ASSERT(task->is_implicit);
	struct starpu_omp_region *region = task->owner_region;
	int first = STARPU_BOOL_COMPARE_AND_SWAP(&region->single_id, task->single_id, task->single_id+1);
	task->single_id++;
	if (first)
	{
		f(arg);
	}
	if (!nowait)
	{
		starpu_omp_barrier();
	}
}

/* variant of omp_single for inlined code
 * return !0 for the task that should perform the single section
 * return 0 for the tasks that should not perform the single section
 * wait/nowait should be handled directly by the calling code using starpu_omp_barrier */
int starpu_omp_single_inline(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	/* Assume singles are performed by the implicit tasks of a region */
	STARPU_ASSERT(task->is_implicit);
	struct starpu_omp_region *region = task->owner_region;
	int first = STARPU_BOOL_COMPARE_AND_SWAP(&region->single_id, task->single_id, task->single_id+1);
	task->single_id++;
	return first;
}
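
/*
 * Illustrative sketch (disabled, assumption): inline single with the wait
 * handled by the caller, as the comment above prescribes.
 */
#if 0
static void my_region_f(void **buffers, void *arg)
{
	(void) buffers;
	(void) arg;
	if (starpu_omp_single_inline())
	{
		/* exactly one implicit task of the region executes this */
	}
	/* emulate the default (waiting) single construct */
	starpu_omp_barrier();
}
#endif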

void starpu_omp_single_copyprivate(void (*f)(void *arg, void *data, unsigned long long data_size), void *arg, void *data, unsigned long long data_size)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	/* Assume singles are performed by the implicit tasks of a region */
	STARPU_ASSERT(task->is_implicit);
	struct starpu_omp_region *region = task->owner_region;
	int first = STARPU_BOOL_COMPARE_AND_SWAP(&region->single_id, task->single_id, task->single_id+1);
	task->single_id++;
	if (first)
	{
		region->copy_private_data = data;
		f(arg, data, data_size);
	}
	starpu_omp_barrier();
	if (!first)
	{
		memcpy(data, region->copy_private_data, data_size);
	}
	starpu_omp_barrier();
}

void *starpu_omp_single_copyprivate_inline_begin(void *data)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	/* Assume singles are performed by the implicit tasks of a region */
	STARPU_ASSERT(task->is_implicit);
	struct starpu_omp_region *region = task->owner_region;
	int first = STARPU_BOOL_COMPARE_AND_SWAP(&region->single_id, task->single_id, task->single_id+1);
	task->single_id++;
	if (first)
	{
		task->single_first = 1;
		region->copy_private_data = data;
	}
	else
	{
		starpu_omp_barrier();
	}
	return first ? NULL : region->copy_private_data;
}

void starpu_omp_single_copyprivate_inline_end(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	/* Assume singles are performed by the implicit tasks of a region */
	STARPU_ASSERT(task->is_implicit);
	if (task->single_first)
	{
		task->single_first = 0;
		starpu_omp_barrier();
	}
	starpu_omp_barrier();
}
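
/*
 * Illustrative sketch (disabled, assumption): broadcasting a value computed
 * in a single section to every implicit task through the inline copyprivate
 * protocol above. The begin call returns NULL to the executing task and a
 * pointer to the executing task's data to the others; the barriers inside
 * begin/end order the write before the copies.
 */
#if 0
static void my_region_f(void **buffers, void *arg)
{
	int value;
	void *src;
	(void) buffers;
	(void) arg;
	src = starpu_omp_single_copyprivate_inline_begin(&value);
	if (src == NULL)
	{
		value = 42;	/* the executing task fills in the data */
	}
	else
	{
		memcpy(&value, src, sizeof(value));	/* the others copy it */
	}
	starpu_omp_single_copyprivate_inline_end();
}
#endif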

void starpu_omp_critical(void (*f)(void *arg), void *arg, const char *name)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_critical *critical = NULL;
	struct starpu_omp_task_link link;
	if (name)
	{
		_starpu_spin_lock(&_global_state.named_criticals_lock);
		HASH_FIND_STR(_global_state.named_criticals, name, critical);
		if (critical == NULL)
		{
			critical = create_omp_critical_struct();
			critical->name = name;
			HASH_ADD_STR(_global_state.named_criticals, name, critical);
		}
		_starpu_spin_unlock(&_global_state.named_criticals_lock);
	}
	else
	{
		critical = _global_state.default_critical;
	}
	_starpu_spin_lock(&critical->lock);
	while (critical->state != 0)
	{
		_starpu_spin_lock(&task->lock);
		task->wait_on |= starpu_omp_task_wait_on_critical;
		task->transaction_pending = 1;
		link.task = task;
		link.next = critical->contention_list_head;
		critical->contention_list_head = &link;
		_starpu_spin_unlock(&task->lock);
		_starpu_spin_unlock(&critical->lock);
		_starpu_task_prepare_for_continuation_ext(0, transaction_callback, task);
		starpu_omp_task_preempt();
		/* re-acquire the spin lock */
		_starpu_spin_lock(&critical->lock);
	}
	critical->state = 1;
	_starpu_spin_unlock(&critical->lock);
	f(arg);
	_starpu_spin_lock(&critical->lock);
	STARPU_ASSERT(critical->state == 1);
	critical->state = 0;
	if (critical->contention_list_head != NULL)
	{
		struct starpu_omp_task *next_task = critical->contention_list_head->task;
		weak_task_lock(next_task);
		critical->contention_list_head = critical->contention_list_head->next;
		STARPU_ASSERT(next_task->wait_on & starpu_omp_task_wait_on_critical);
		next_task->wait_on &= ~starpu_omp_task_wait_on_critical;
		wake_up_and_unlock_task(next_task);
	}
	_starpu_spin_unlock(&critical->lock);
}

void starpu_omp_critical_inline_begin(const char *name)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_critical *critical = NULL;
	struct starpu_omp_task_link link;
	if (name)
	{
		_starpu_spin_lock(&_global_state.named_criticals_lock);
		HASH_FIND_STR(_global_state.named_criticals, name, critical);
		if (critical == NULL)
		{
			critical = create_omp_critical_struct();
			critical->name = name;
			HASH_ADD_STR(_global_state.named_criticals, name, critical);
		}
		_starpu_spin_unlock(&_global_state.named_criticals_lock);
	}
	else
	{
		critical = _global_state.default_critical;
	}
	_starpu_spin_lock(&critical->lock);
	while (critical->state != 0)
	{
		_starpu_spin_lock(&task->lock);
		task->wait_on |= starpu_omp_task_wait_on_critical;
		task->transaction_pending = 1;
		link.task = task;
		link.next = critical->contention_list_head;
		critical->contention_list_head = &link;
		_starpu_spin_unlock(&task->lock);
		_starpu_spin_unlock(&critical->lock);
		_starpu_task_prepare_for_continuation_ext(0, transaction_callback, task);
		starpu_omp_task_preempt();
		/* re-acquire the spin lock */
		_starpu_spin_lock(&critical->lock);
	}
	critical->state = 1;
	_starpu_spin_unlock(&critical->lock);
}

void starpu_omp_critical_inline_end(const char *name)
{
	struct starpu_omp_critical *critical = NULL;
	if (name)
	{
		_starpu_spin_lock(&_global_state.named_criticals_lock);
		HASH_FIND_STR(_global_state.named_criticals, name, critical);
		STARPU_ASSERT(critical != NULL);
		_starpu_spin_unlock(&_global_state.named_criticals_lock);
	}
	else
	{
		critical = _global_state.default_critical;
	}
	_starpu_spin_lock(&critical->lock);
	STARPU_ASSERT(critical->state == 1);
	critical->state = 0;
	if (critical->contention_list_head != NULL)
	{
		struct starpu_omp_task *next_task = critical->contention_list_head->task;
		weak_task_lock(next_task);
		critical->contention_list_head = critical->contention_list_head->next;
		STARPU_ASSERT(next_task->wait_on & starpu_omp_task_wait_on_critical);
		next_task->wait_on &= ~starpu_omp_task_wait_on_critical;
		wake_up_and_unlock_task(next_task);
	}
	_starpu_spin_unlock(&critical->lock);
}
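
/*
 * Illustrative sketch (disabled, assumption): a named critical section using
 * the inline brackets above; tasks entering the same name are serialized,
 * while distinct names proceed independently.
 */
#if 0
static void my_update(void)
{
	starpu_omp_critical_inline_begin("my_name");
	/* ... mutually exclusive update ... */
	starpu_omp_critical_inline_end("my_name");
}
#endif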

static void explicit_task__destroy_callback(void *_task)
{
	struct starpu_omp_task *task = _task;
	STARPU_ASSERT(!task->is_implicit);
	task->starpu_task->omp_task = NULL;
	task->starpu_task = NULL;
	_starpu_spin_lock(&task->lock);
	if (task->state != starpu_omp_task_state_target)
	{
		STARPU_ASSERT(task->transaction_pending == 1);
		task->transaction_pending = 0;
		if (task->child_task_count != 0)
		{
			task->state = starpu_omp_task_state_zombie;
			_starpu_spin_unlock(&task->lock);
			return;
		}
	}
	_starpu_spin_unlock(&task->lock);
	destroy_omp_task_struct(task);
}

void starpu_omp_task_region(const struct starpu_omp_task_region_attr *attr)
{
	struct starpu_omp_task *generating_task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_region *parallel_region = generating_task->owner_region;
	int is_undeferred = 0;
	int is_final = 0;
	int is_included = 0;
	int is_merged = 0;
	int is_untied = 0;
	int ret;
	if (!attr->if_clause)
	{
		is_undeferred = 1;
	}
	if (generating_task->is_final)
	{
		is_final = 1;
		is_included = 1;
	}
	else if (attr->final_clause)
	{
		is_final = 1;
	}
	if (is_included)
	{
		is_undeferred = 1;
	}
	if ((is_undeferred || is_included) && attr->mergeable_clause)
	{
		is_merged = 1;
	}
	if (is_merged)
	{
		/* note: no need to backup/restore ICVs for merged tasks, merged tasks use the data environment of the caller */
		int i;
		for (i = 0; i < attr->cl.nbuffers; i++)
		{
			ret = starpu_data_acquire(attr->handles[i], attr->cl.modes[i]);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_data_acquire");
		}
		void (*f)(void **starpu_buffers, void *starpu_cl_arg) = attr->cl.cpu_funcs[0];
		f((void**)attr->handles, attr->cl_arg);
		for (i = 0; i < attr->cl.nbuffers; i++)
		{
			starpu_data_release(attr->handles[i]);
		}
		if (attr->cl_arg_free)
		{
			free(attr->cl_arg);
		}
	}
	else if (is_included)
	{
		/* TODO: backup current ICVs and setup new ICVs for the included task */
		int i;
		for (i = 0; i < attr->cl.nbuffers; i++)
		{
			ret = starpu_data_acquire(attr->handles[i], attr->cl.modes[i]);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_data_acquire");
		}
		void (*f)(void **starpu_buffers, void *starpu_cl_arg) = attr->cl.cpu_funcs[0];
		f((void**)attr->handles, attr->cl_arg);
		for (i = 0; i < attr->cl.nbuffers; i++)
		{
			starpu_data_release(attr->handles[i]);
		}
		if (attr->cl_arg_free)
		{
			free(attr->cl_arg);
		}
		/* TODO: restore the backed-up ICVs */
	}
	else
	{
		struct starpu_omp_task *generated_task =
			create_omp_task_struct(generating_task, NULL, parallel_region, 0);
		generated_task->cl = attr->cl;
		if (attr->untied_clause)
		{
			is_untied = 1;
		}
		generated_task->is_undeferred = is_undeferred;
		generated_task->is_final = is_final;
		generated_task->is_untied = is_untied;
		generated_task->task_group = generating_task->task_group;
		generated_task->rank = -1;
		/*
		 * save pointer to the region's user function from the task region codelet
		 *
		 * TODO: add support for multiple/heterogeneous implementations
		 */
		if (generated_task->cl.cpu_funcs[0])
		{
			generated_task->cpu_f = generated_task->cl.cpu_funcs[0];
			/*
			 * plug the task wrapper into the task region codelet instead, to support task preemption
			 */
			generated_task->cl.cpu_funcs[0] = starpu_omp_explicit_task_exec;
		}
#if STARPU_USE_CUDA
		if (generated_task->cl.cuda_funcs[0])
		{
			generated_task->cuda_f = generated_task->cl.cuda_funcs[0];
#if 1
			/* we assume for now that CUDA tasks won't block, thus we don't need
			 * to initialize the StarPU OpenMP Runtime Support context for enabling
			 * continuations on CUDA tasks */
			generated_task->state = starpu_omp_task_state_target;
#else
			generated_task->cl.cuda_funcs[0] = starpu_omp_explicit_task_exec;
#endif
		}
#endif
#if STARPU_USE_OPENCL
		if (generated_task->cl.opencl_funcs[0])
		{
			generated_task->opencl_f = generated_task->cl.opencl_funcs[0];
#if 1
			/* we assume for now that OpenCL tasks won't block, thus we don't need
			 * to initialize the StarPU OpenMP Runtime Support context for enabling
			 * continuations on OpenCL tasks */
			generated_task->state = starpu_omp_task_state_target;
#else
			generated_task->cl.opencl_funcs[0] = starpu_omp_explicit_task_exec;
#endif
		}
#endif
		/* TODO: add other accelerator support */
		generated_task->starpu_task = starpu_task_create();
		generated_task->starpu_task->cl = &generated_task->cl;
		generated_task->starpu_task->cl_arg = attr->cl_arg;
		generated_task->starpu_task->cl_arg_size = attr->cl_arg_size;
		generated_task->starpu_task->cl_arg_free = attr->cl_arg_free;
		{
			int i;
			for (i = 0; i < generated_task->cl.nbuffers; i++)
			{
				generated_task->starpu_task->handles[i] = attr->handles[i];
			}
		}
		generated_task->starpu_task->omp_task = generated_task;
		_starpu_task_set_omp_cleanup_callback(generated_task->starpu_task, explicit_task__destroy_callback, generated_task);
		/* if the task is tied, execute_on_a_specific_worker will be changed to 1
		 * upon the first preemption of the generated task, once we know
		 * which worker thread has been selected */
		generated_task->starpu_task->execute_on_a_specific_worker = 0;
		(void)STARPU_ATOMIC_ADD(&generating_task->child_task_count, 1);
		(void)STARPU_ATOMIC_ADD(&parallel_region->bound_explicit_task_count, 1);
		if (generated_task->task_group)
		{
			(void)STARPU_ATOMIC_ADD(&generated_task->task_group->descendent_task_count, 1);
		}
		/* do not use the attribute struct afterward as it may become out of scope */
		attr = NULL;
		if (is_undeferred)
		{
			_starpu_task_prepare_for_continuation();
			starpu_task_declare_deps_array(generating_task->starpu_task, 1,
					&generated_task->starpu_task);
		}
		ret = starpu_task_submit(generated_task->starpu_task);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		if (is_undeferred)
		{
			starpu_omp_task_preempt();
		}
	}
}
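
/*
 * Illustrative sketch (disabled, assumption): spawning a deferred explicit
 * task. The attr fields mirror those consumed above; my_task_f is a
 * hypothetical user function.
 */
#if 0
static void my_task_f(void **starpu_buffers, void *starpu_cl_arg)
{
	(void) starpu_buffers;
	(void) starpu_cl_arg;
}

static void spawn_task(void)
{
	struct starpu_omp_task_region_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.cl.cpu_funcs[0] = my_task_f;
	attr.cl.where        = STARPU_CPU;
	attr.cl.nbuffers     = 0;
	attr.if_clause       = 1;	/* 0 would force an undeferred task */
	starpu_omp_task_region(&attr);
	starpu_omp_taskwait();	/* wait for the generated child task */
}
#endif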

void starpu_omp_taskwait(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	_starpu_spin_lock(&task->lock);
	if (task->child_task_count > 0)
	{
		task->wait_on |= starpu_omp_task_wait_on_task_childs;
		task->transaction_pending = 1;
		_starpu_spin_unlock(&task->lock);
		_starpu_task_prepare_for_continuation_ext(0, transaction_callback, task);
		starpu_omp_task_preempt();
		STARPU_ASSERT(task->child_task_count == 0);
	}
	else
	{
		_starpu_spin_unlock(&task->lock);
	}
}

void starpu_omp_taskgroup(void (*f)(void *arg), void *arg)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_task_group task_group;
	task_group.p_previous_task_group = task->task_group;
	task_group.descendent_task_count = 0;
	task_group.leader_task = task;
	task->task_group = &task_group;
	f(arg);
	_starpu_spin_lock(&task->lock);
	if (task_group.descendent_task_count > 0)
	{
		task->wait_on |= starpu_omp_task_wait_on_group;
		task->transaction_pending = 1;
		_starpu_spin_unlock(&task->lock);
		_starpu_task_prepare_for_continuation_ext(0, transaction_callback, task);
		starpu_omp_task_preempt();
		STARPU_ASSERT(task_group.descendent_task_count == 0);
	}
	else
	{
		_starpu_spin_unlock(&task->lock);
	}
	task->task_group = task_group.p_previous_task_group;
}

void starpu_omp_taskgroup_inline_begin(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_task_group *p_task_group = malloc(sizeof(*p_task_group));
	if (p_task_group == NULL)
		_STARPU_ERROR("memory allocation failed\n");
	p_task_group->p_previous_task_group = task->task_group;
	p_task_group->descendent_task_count = 0;
	p_task_group->leader_task = task;
	task->task_group = p_task_group;
}

void starpu_omp_taskgroup_inline_end(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	_starpu_spin_lock(&task->lock);
	struct starpu_omp_task_group *p_task_group = task->task_group;
	if (p_task_group->descendent_task_count > 0)
	{
		task->wait_on |= starpu_omp_task_wait_on_group;
		task->transaction_pending = 1;
		_starpu_spin_unlock(&task->lock);
		_starpu_task_prepare_for_continuation_ext(0, transaction_callback, task);
		starpu_omp_task_preempt();
		STARPU_ASSERT(p_task_group->descendent_task_count == 0);
	}
	else
	{
		_starpu_spin_unlock(&task->lock);
	}
	task->task_group = p_task_group->p_previous_task_group;
	free(p_task_group);
}
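
/*
 * Illustrative sketch (disabled, assumption): the inline taskgroup brackets;
 * every descendant task generated in between is awaited at _inline_end().
 */
#if 0
static void my_generator(void)
{
	starpu_omp_taskgroup_inline_begin();
	/* ... generate explicit tasks with starpu_omp_task_region() ... */
	starpu_omp_taskgroup_inline_end();
}
#endif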

static inline void _starpu_omp_for_loop(struct starpu_omp_region *parallel_region, struct starpu_omp_task *task,
		struct starpu_omp_loop *loop, int first_call,
		unsigned long long nb_iterations, unsigned long long chunk, int schedule, int ordered, unsigned long long *_first_i, unsigned long long *_nb_i)
{
	*_nb_i = 0;
	if (schedule == starpu_omp_sched_undefined)
	{
		schedule = parallel_region->owner_device->icvs.def_sched_var;
		chunk = parallel_region->owner_device->icvs.def_sched_chunk_var;
	}
	else if (schedule == starpu_omp_sched_runtime)
	{
		schedule = parallel_region->icvs.run_sched_var;
		chunk = parallel_region->icvs.run_sched_chunk_var;
	}
	STARPU_ASSERT(   schedule == starpu_omp_sched_static
		      || schedule == starpu_omp_sched_dynamic
		      || schedule == starpu_omp_sched_guided
		      || schedule == starpu_omp_sched_auto);
	if (schedule == starpu_omp_sched_auto)
	{
		schedule = starpu_omp_sched_static;
		chunk = 0;
	}
	if (schedule == starpu_omp_sched_static)
	{
		if (chunk > 0)
		{
			if (first_call)
			{
				*_first_i = task->rank * chunk;
			}
			else
			{
				*_first_i += parallel_region->nb_threads * chunk;
			}
			if (*_first_i < nb_iterations)
			{
				if (*_first_i + chunk > nb_iterations)
				{
					*_nb_i = nb_iterations - *_first_i;
				}
				else
				{
					*_nb_i = chunk;
				}
			}
		}
		else
		{
			if (first_call)
			{
				*_nb_i = nb_iterations / parallel_region->nb_threads;
				*_first_i = (unsigned)task->rank * (*_nb_i);
				unsigned long long remainder = nb_iterations % parallel_region->nb_threads;
				if (remainder > 0)
				{
					if ((unsigned)task->rank < remainder)
					{
						(*_nb_i)++;
						*_first_i += (unsigned)task->rank;
					}
					else
					{
						*_first_i += remainder;
					}
				}
			}
		}
	}
	else if (schedule == starpu_omp_sched_dynamic)
	{
		if (chunk == 0)
		{
			chunk = 1;
		}
		if (first_call)
		{
			*_first_i = 0;
		}
		_starpu_spin_lock(&parallel_region->lock);
		if (loop->next_iteration < nb_iterations)
		{
			*_first_i = loop->next_iteration;
			if (*_first_i + chunk > nb_iterations)
			{
				*_nb_i = nb_iterations - *_first_i;
			}
			else
			{
				*_nb_i = chunk;
			}
			loop->next_iteration += *_nb_i;
		}
		_starpu_spin_unlock(&parallel_region->lock);
	}
	else if (schedule == starpu_omp_sched_guided)
	{
		if (chunk == 0)
		{
			chunk = 1;
		}
		if (first_call)
		{
			*_first_i = 0;
		}
		_starpu_spin_lock(&parallel_region->lock);
		if (loop->next_iteration < nb_iterations)
		{
			*_first_i = loop->next_iteration;
			*_nb_i = (nb_iterations - *_first_i) / parallel_region->nb_threads;
			if (*_nb_i < chunk)
			{
				if (*_first_i + chunk > nb_iterations)
				{
					*_nb_i = nb_iterations - *_first_i;
				}
				else
				{
					*_nb_i = chunk;
				}
			}
			loop->next_iteration += *_nb_i;
		}
		_starpu_spin_unlock(&parallel_region->lock);
	}
	if (ordered)
	{
		task->ordered_first_i = *_first_i;
		task->ordered_nb_i = *_nb_i;
	}
}
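
/*
 * Worked example (illustrative): static schedule, chunk == 0,
 * nb_iterations == 10, nb_threads == 3. Each task first gets
 * 10 / 3 == 3 iterations; the remainder 10 % 3 == 1 extra iteration goes to
 * rank 0, and the higher ranks shift their start by the remainder.
 * Resulting ranges: rank 0 -> [0, 4), rank 1 -> [4, 7), rank 2 -> [7, 10).
 */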

static inline struct starpu_omp_loop *_starpu_omp_for_get_loop(struct starpu_omp_region *parallel_region, struct starpu_omp_task *task)
{
	struct starpu_omp_loop *loop;
	loop = parallel_region->loop_list;
	while (loop && loop->id != task->loop_id)
	{
		loop = loop->next_loop;
	}
	return loop;
}

static inline struct starpu_omp_loop *_starpu_omp_for_loop_begin(struct starpu_omp_region *parallel_region, struct starpu_omp_task *task,
		int ordered)
{
	struct starpu_omp_loop *loop;
	_starpu_spin_lock(&parallel_region->lock);
	loop = _starpu_omp_for_get_loop(parallel_region, task);
	if (!loop)
	{
		loop = malloc(sizeof(*loop));
		if (loop == NULL)
			_STARPU_ERROR("memory allocation failed\n");
		loop->id = task->loop_id;
		loop->next_iteration = 0;
		loop->nb_completed_threads = 0;
		loop->next_loop = parallel_region->loop_list;
		parallel_region->loop_list = loop;
		if (ordered)
		{
			loop->ordered_iteration = 0;
			_starpu_spin_init(&loop->ordered_lock);
			condition_init(&loop->ordered_cond);
		}
	}
	_starpu_spin_unlock(&parallel_region->lock);
	return loop;
}

static inline void _starpu_omp_for_loop_end(struct starpu_omp_region *parallel_region, struct starpu_omp_task *task,
		struct starpu_omp_loop *loop, int ordered)
{
	_starpu_spin_lock(&parallel_region->lock);
	loop->nb_completed_threads++;
	if (loop->nb_completed_threads == parallel_region->nb_threads)
	{
		struct starpu_omp_loop **p_loop;
		if (ordered)
		{
			loop->ordered_iteration = 0;
			condition_exit(&loop->ordered_cond);
			_starpu_spin_destroy(&loop->ordered_lock);
		}
		STARPU_ASSERT(loop->next_loop == NULL);
		p_loop = &(parallel_region->loop_list);
		while (*p_loop != loop)
		{
			p_loop = &((*p_loop)->next_loop);
		}
		*p_loop = NULL;
		free(loop);
	}
	_starpu_spin_unlock(&parallel_region->lock);
	task->loop_id++;
}

int starpu_omp_for_inline_first(unsigned long long nb_iterations, unsigned long long chunk, int schedule, int ordered, unsigned long long *_first_i, unsigned long long *_nb_i)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_region *parallel_region = task->owner_region;
	struct starpu_omp_loop *loop = _starpu_omp_for_loop_begin(parallel_region, task, ordered);
	_starpu_omp_for_loop(parallel_region, task, loop, 1, nb_iterations, chunk, schedule, ordered, _first_i, _nb_i);
	if (*_nb_i == 0)
	{
		_starpu_omp_for_loop_end(parallel_region, task, loop, ordered);
	}
	return (*_nb_i != 0);
}

int starpu_omp_for_inline_next(unsigned long long nb_iterations, unsigned long long chunk, int schedule, int ordered, unsigned long long *_first_i, unsigned long long *_nb_i)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_region *parallel_region = task->owner_region;
	struct starpu_omp_loop *loop = _starpu_omp_for_loop_begin(parallel_region, task, ordered);
	_starpu_omp_for_loop(parallel_region, task, loop, 0, nb_iterations, chunk, schedule, ordered, _first_i, _nb_i);
	if (*_nb_i == 0)
	{
		_starpu_omp_for_loop_end(parallel_region, task, loop, ordered);
	}
	return (*_nb_i != 0);
}

int starpu_omp_for_inline_first_alt(unsigned long long nb_iterations, unsigned long long chunk, int schedule, int ordered, unsigned long long *_begin_i, unsigned long long *_end_i)
{
	unsigned long long nb_i;
	int end = starpu_omp_for_inline_first(nb_iterations, chunk, schedule, ordered, _begin_i, &nb_i);
	*_end_i = *_begin_i + nb_i;
	return end;
}

int starpu_omp_for_inline_next_alt(unsigned long long nb_iterations, unsigned long long chunk, int schedule, int ordered, unsigned long long *_begin_i, unsigned long long *_end_i)
{
	unsigned long long nb_i;
	int end = starpu_omp_for_inline_next(nb_iterations, chunk, schedule, ordered, _begin_i, &nb_i);
	*_end_i = *_begin_i + nb_i;
	return end;
}

void starpu_omp_for(void (*f)(unsigned long long _first_i, unsigned long long _nb_i, void *arg), void *arg, unsigned long long nb_iterations, unsigned long long chunk, int schedule, int ordered, int nowait)
{
	unsigned long long _first_i = 0;
	unsigned long long _nb_i = 0;
	if (starpu_omp_for_inline_first(nb_iterations, chunk, schedule, ordered, &_first_i, &_nb_i))
	{
		do
		{
			f(_first_i, _nb_i, arg);
		}
		while (starpu_omp_for_inline_next(nb_iterations, chunk, schedule, ordered, &_first_i, &_nb_i));
	}
	if (!nowait)
	{
		starpu_omp_barrier();
	}
}

void starpu_omp_for_alt(void (*f)(unsigned long long _begin_i, unsigned long long _end_i, void *arg), void *arg, unsigned long long nb_iterations, unsigned long long chunk, int schedule, int ordered, int nowait)
{
	unsigned long long _begin_i = 0;
	unsigned long long _end_i = 0;
	if (starpu_omp_for_inline_first_alt(nb_iterations, chunk, schedule, ordered, &_begin_i, &_end_i))
	{
		do
		{
			f(_begin_i, _end_i, arg);
		}
		while (starpu_omp_for_inline_next_alt(nb_iterations, chunk, schedule, ordered, &_begin_i, &_end_i));
	}
	if (!nowait)
	{
		starpu_omp_barrier();
	}
}
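
/*
 * Illustrative sketch (disabled, assumption): a parallel loop over 1000
 * iterations with a dynamic schedule and chunks of 8; my_iter_f is a
 * hypothetical chunk function.
 */
#if 0
static void my_iter_f(unsigned long long first_i, unsigned long long nb_i, void *arg)
{
	unsigned long long i;
	for (i = first_i; i < first_i + nb_i; i++)
	{
		/* ... body of iteration i ... */
	}
	(void) arg;
}

static void my_region_f(void **buffers, void *arg)
{
	(void) buffers;
	starpu_omp_for(my_iter_f, arg, 1000, 8, starpu_omp_sched_dynamic, 0, 0);
}
#endif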

void starpu_omp_ordered(void (*f)(void *arg), void *arg)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_region *parallel_region = task->owner_region;
	struct starpu_omp_loop *loop = _starpu_omp_for_get_loop(parallel_region, task);
	unsigned long long i;
	STARPU_ASSERT(task->ordered_nb_i > 0);
	i = task->ordered_first_i;
	task->ordered_first_i++;
	task->ordered_nb_i--;
	_starpu_spin_lock(&loop->ordered_lock);
	while (i != loop->ordered_iteration)
	{
		STARPU_ASSERT(i > loop->ordered_iteration);
		condition_wait(&loop->ordered_cond, &loop->ordered_lock);
	}
	f(arg);
	loop->ordered_iteration++;
	condition_broadcast(&loop->ordered_cond);
	_starpu_spin_unlock(&loop->ordered_lock);
}

void starpu_omp_ordered_inline_begin(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_region *parallel_region = task->owner_region;
	struct starpu_omp_loop *loop = _starpu_omp_for_get_loop(parallel_region, task);
	unsigned long long i;
	STARPU_ASSERT(task->ordered_nb_i > 0);
	i = task->ordered_first_i;
	task->ordered_first_i++;
	task->ordered_nb_i--;
	_starpu_spin_lock(&loop->ordered_lock);
	while (i != loop->ordered_iteration)
	{
		STARPU_ASSERT(i > loop->ordered_iteration);
		condition_wait(&loop->ordered_cond, &loop->ordered_lock);
	}
}

void starpu_omp_ordered_inline_end(void)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_region *parallel_region = task->owner_region;
	struct starpu_omp_loop *loop = _starpu_omp_for_get_loop(parallel_region, task);
	loop->ordered_iteration++;
	condition_broadcast(&loop->ordered_cond);
	_starpu_spin_unlock(&loop->ordered_lock);
}
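
/*
 * Illustrative sketch (disabled, assumption): an ordered loop body; the
 * enclosing starpu_omp_for must be called with ordered != 0 so that the
 * per-loop ordered bookkeeping above is initialized.
 */
#if 0
static void my_iter_f(unsigned long long first_i, unsigned long long nb_i, void *arg)
{
	unsigned long long i;
	for (i = first_i; i < first_i + nb_i; i++)
	{
		/* ... unordered part of iteration i ... */
		starpu_omp_ordered_inline_begin();
		/* ... part executed in global iteration order ... */
		starpu_omp_ordered_inline_end();
	}
	(void) arg;
}
#endif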

static inline struct starpu_omp_sections *_starpu_omp_get_sections(struct starpu_omp_region *parallel_region, struct starpu_omp_task *task)
{
	struct starpu_omp_sections *sections;
	sections = parallel_region->sections_list;
	while (sections && sections->id != task->sections_id)
	{
		sections = sections->next_sections;
	}
	return sections;
}

static inline struct starpu_omp_sections *_starpu_omp_sections_begin(struct starpu_omp_region *parallel_region, struct starpu_omp_task *task)
{
	struct starpu_omp_sections *sections;
	_starpu_spin_lock(&parallel_region->lock);
	sections = _starpu_omp_get_sections(parallel_region, task);
	if (!sections)
	{
		sections = malloc(sizeof(*sections));
		if (sections == NULL)
			_STARPU_ERROR("memory allocation failed\n");
		sections->id = task->sections_id;
		sections->next_section_num = 0;
		sections->nb_completed_threads = 0;
		sections->next_sections = parallel_region->sections_list;
		parallel_region->sections_list = sections;
	}
	_starpu_spin_unlock(&parallel_region->lock);
	return sections;
}

static inline void _starpu_omp_sections_end(struct starpu_omp_region *parallel_region, struct starpu_omp_task *task,
		struct starpu_omp_sections *sections)
{
	_starpu_spin_lock(&parallel_region->lock);
	sections->nb_completed_threads++;
	if (sections->nb_completed_threads == parallel_region->nb_threads)
	{
		struct starpu_omp_sections **p_sections;
		STARPU_ASSERT(sections->next_sections == NULL);
		p_sections = &(parallel_region->sections_list);
		while (*p_sections != sections)
		{
			p_sections = &((*p_sections)->next_sections);
		}
		*p_sections = NULL;
		free(sections);
	}
	_starpu_spin_unlock(&parallel_region->lock);
	task->sections_id++;
}

void starpu_omp_sections(unsigned long long nb_sections, void (**section_f)(void *arg), void **section_arg, int nowait)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_region *parallel_region = task->owner_region;
	struct starpu_omp_sections *sections = _starpu_omp_sections_begin(parallel_region, task);
	for (;;)
	{
		void (*f)(void *arg) = NULL;
		void *arg = NULL;
		_starpu_spin_lock(&parallel_region->lock);
		if (sections->next_section_num < nb_sections)
		{
			f = section_f[sections->next_section_num];
			arg = section_arg[sections->next_section_num];
			sections->next_section_num++;
		}
		_starpu_spin_unlock(&parallel_region->lock);
		if (f == NULL)
			break;
		f(arg);
	}
	_starpu_omp_sections_end(parallel_region, task, sections);
	if (!nowait)
	{
		starpu_omp_barrier();
	}
}

void starpu_omp_sections_combined(unsigned long long nb_sections, void (*section_f)(unsigned long long section_num, void *arg), void *section_arg, int nowait)
{
	struct starpu_omp_task *task = STARPU_PTHREAD_GETSPECIFIC(omp_task_key);
	struct starpu_omp_region *parallel_region = task->owner_region;
	struct starpu_omp_sections *sections = _starpu_omp_sections_begin(parallel_region, task);
	for (;;)
	{
		unsigned long long section_num;
		void *arg = NULL;
		_starpu_spin_lock(&parallel_region->lock);
		if (sections->next_section_num < nb_sections)
		{
			section_num = sections->next_section_num;
			arg = section_arg;
			sections->next_section_num++;
		}
		else
		{
			_starpu_spin_unlock(&parallel_region->lock);
			break;
		}
		_starpu_spin_unlock(&parallel_region->lock);
		section_f(section_num, arg);
	}
	_starpu_omp_sections_end(parallel_region, task, sections);
	if (!nowait)
	{
		starpu_omp_barrier();
	}
}
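
/*
 * Illustrative sketch (disabled, assumption): two sections distributed among
 * the implicit tasks of the region; section_a/section_b are hypothetical.
 */
#if 0
static void section_a(void *arg) { (void) arg; /* ... */ }
static void section_b(void *arg) { (void) arg; /* ... */ }

static void my_region_f(void **buffers, void *arg)
{
	void (*section_f[2])(void *) = { section_a, section_b };
	void *section_arg[2] = { NULL, NULL };
	(void) buffers;
	(void) arg;
	starpu_omp_sections(2, section_f, section_arg, 0);
}
#endif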

static void _starpu_omp_lock_init(void **_internal)
{
	struct _starpu_omp_lock_internal *_lock;
	_lock = malloc(sizeof(*_lock));
	STARPU_ASSERT(_lock != NULL);
	memset(_lock, 0, sizeof(*_lock));
	_starpu_spin_init(&_lock->lock);
	condition_init(&_lock->cond);
	*_internal = _lock;
}

static void _starpu_omp_lock_destroy(void **_internal)
{
	struct _starpu_omp_lock_internal * const _lock = *_internal;
	STARPU_ASSERT(_lock->state == 0);
	condition_exit(&_lock->cond);
	_starpu_spin_destroy(&_lock->lock);
	memset(_lock, 0, sizeof(*_lock));
	free(_lock);
	*_internal = NULL;
}

static void _starpu_omp_lock_set(void **_internal)
{
	struct _starpu_omp_lock_internal * const _lock = *_internal;
	_starpu_spin_lock(&_lock->lock);
	while (_lock->state != 0)
	{
		condition_wait(&_lock->cond, &_lock->lock);
	}
	_lock->state = 1;
	_starpu_spin_unlock(&_lock->lock);
}

static void _starpu_omp_lock_unset(void **_internal)
{
	struct _starpu_omp_lock_internal * const _lock = *_internal;
	_starpu_spin_lock(&_lock->lock);
	STARPU_ASSERT(_lock->state == 1);
	_lock->state = 0;
	condition_broadcast(&_lock->cond);
	_starpu_spin_unlock(&_lock->lock);
}

static int _starpu_omp_lock_test(void **_internal)
{
	struct _starpu_omp_lock_internal * const _lock = *_internal;
	int ret = 0;
	_starpu_spin_lock(&_lock->lock);
	if (_lock->state == 0)
	{
		_lock->state = 1;
		ret = 1;
	}
	_starpu_spin_unlock(&_lock->lock);
	return ret;
}

static void _starpu_omp_nest_lock_init(void **_internal)
{
	struct _starpu_omp_nest_lock_internal *_nest_lock;
	_nest_lock = malloc(sizeof(*_nest_lock));
	STARPU_ASSERT(_nest_lock != NULL);
	memset(_nest_lock, 0, sizeof(*_nest_lock));
	_starpu_spin_init(&_nest_lock->lock);
	condition_init(&_nest_lock->cond);
	*_internal = _nest_lock;
}

static void _starpu_omp_nest_lock_destroy(void **_internal)
{
	struct _starpu_omp_nest_lock_internal * const _nest_lock = *_internal;
	STARPU_ASSERT(_nest_lock->state == 0);
	STARPU_ASSERT(_nest_lock->nesting == 0);
	STARPU_ASSERT(_nest_lock->owner_task == NULL);
	condition_exit(&_nest_lock->cond);
	_starpu_spin_destroy(&_nest_lock->lock);
	memset(_nest_lock, 0, sizeof(*_nest_lock));
	free(_nest_lock);
	*_internal = NULL;
}

static void _starpu_omp_nest_lock_set(void **_internal)
{
	struct _starpu_omp_nest_lock_internal * const _nest_lock = *_internal;
	struct starpu_omp_task * const task = _starpu_omp_get_task();
	_starpu_spin_lock(&_nest_lock->lock);
	if (_nest_lock->owner_task == task)
	{
		STARPU_ASSERT(_nest_lock->state == 1);
		STARPU_ASSERT(_nest_lock->nesting > 0);
		_nest_lock->nesting++;
	}
	else
	{
		while (_nest_lock->state != 0)
		{
			condition_wait(&_nest_lock->cond, &_nest_lock->lock);
		}
		STARPU_ASSERT(_nest_lock->nesting == 0);
		STARPU_ASSERT(_nest_lock->owner_task == NULL);
		_nest_lock->state = 1;
		_nest_lock->owner_task = task;
		_nest_lock->nesting = 1;
	}
	_starpu_spin_unlock(&_nest_lock->lock);
}

static void _starpu_omp_nest_lock_unset(void **_internal)
{
	struct _starpu_omp_nest_lock_internal * const _nest_lock = *_internal;
	struct starpu_omp_task * const task = _starpu_omp_get_task();
	_starpu_spin_lock(&_nest_lock->lock);
	STARPU_ASSERT(_nest_lock->owner_task == task);
	STARPU_ASSERT(_nest_lock->state == 1);
	STARPU_ASSERT(_nest_lock->nesting > 0);
	_nest_lock->nesting--;
	if (_nest_lock->nesting == 0)
	{
		_nest_lock->state = 0;
		_nest_lock->owner_task = NULL;
		condition_broadcast(&_nest_lock->cond);
	}
	_starpu_spin_unlock(&_nest_lock->lock);
}

static int _starpu_omp_nest_lock_test(void **_internal)
{
	struct _starpu_omp_nest_lock_internal * const _nest_lock = *_internal;
	struct starpu_omp_task * const task = _starpu_omp_get_task();
	int ret = 0;
	_starpu_spin_lock(&_nest_lock->lock);
	if (_nest_lock->state == 0)
	{
		STARPU_ASSERT(_nest_lock->nesting == 0);
		STARPU_ASSERT(_nest_lock->owner_task == NULL);
		_nest_lock->state = 1;
		_nest_lock->owner_task = task;
		_nest_lock->nesting = 1;
		ret = _nest_lock->nesting;
	}
	else if (_nest_lock->owner_task == task)
	{
		STARPU_ASSERT(_nest_lock->state == 1);
		STARPU_ASSERT(_nest_lock->nesting > 0);
		_nest_lock->nesting++;
		ret = _nest_lock->nesting;
	}
	_starpu_spin_unlock(&_nest_lock->lock);
	return ret;
}

void starpu_omp_init_lock(starpu_omp_lock_t *lock)
{
	_starpu_omp_lock_init(&lock->internal);
}

void starpu_omp_destroy_lock(starpu_omp_lock_t *lock)
{
	_starpu_omp_lock_destroy(&lock->internal);
}

void starpu_omp_set_lock(starpu_omp_lock_t *lock)
{
	_starpu_omp_lock_set(&lock->internal);
}

void starpu_omp_unset_lock(starpu_omp_lock_t *lock)
{
	_starpu_omp_lock_unset(&lock->internal);
}

int starpu_omp_test_lock(starpu_omp_lock_t *lock)
{
	return _starpu_omp_lock_test(&lock->internal);
}

void starpu_omp_init_nest_lock(starpu_omp_nest_lock_t *nest_lock)
{
	_starpu_omp_nest_lock_init(&nest_lock->internal);
}

void starpu_omp_destroy_nest_lock(starpu_omp_nest_lock_t *nest_lock)
{
	_starpu_omp_nest_lock_destroy(&nest_lock->internal);
}

void starpu_omp_set_nest_lock(starpu_omp_nest_lock_t *nest_lock)
{
	_starpu_omp_nest_lock_set(&nest_lock->internal);
}

void starpu_omp_unset_nest_lock(starpu_omp_nest_lock_t *nest_lock)
{
	_starpu_omp_nest_lock_unset(&nest_lock->internal);
}

int starpu_omp_test_nest_lock(starpu_omp_nest_lock_t *nest_lock)
{
	return _starpu_omp_nest_lock_test(&nest_lock->internal);
}
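
/*
 * Illustrative sketch (disabled): protecting a shared update with the
 * OpenMP-style lock API defined above; my_lock/my_counter are hypothetical.
 */
#if 0
static starpu_omp_lock_t my_lock;	/* initialize once with starpu_omp_init_lock() */
static int my_counter;

static void my_update(void)
{
	starpu_omp_set_lock(&my_lock);
	my_counter++;	/* serialized between tasks */
	starpu_omp_unset_lock(&my_lock);
}
#endif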

void starpu_omp_atomic_fallback_inline_begin(void)
{
	struct starpu_omp_device *device = get_caller_device();
	_starpu_spin_lock(&device->atomic_lock);
}

void starpu_omp_atomic_fallback_inline_end(void)
{
	struct starpu_omp_device *device = get_caller_device();
	_starpu_spin_unlock(&device->atomic_lock);
}
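
/*
 * Illustrative sketch (disabled, assumption): the atomic fallback brackets an
 * update that could not be mapped to a native atomic instruction; my_sum is
 * hypothetical.
 */
#if 0
static double my_sum;

static void my_accumulate(double v)
{
	starpu_omp_atomic_fallback_inline_begin();
	my_sum += v;	/* serialized through the caller device's atomic lock */
	starpu_omp_atomic_fallback_inline_end();
}
#endif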

void starpu_omp_vector_annotate(starpu_data_handle_t handle, uint32_t slice_base)
{
	struct starpu_vector_interface *vector_interface = (struct starpu_vector_interface *)
		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
	assert(vector_interface->id == STARPU_VECTOR_INTERFACE_ID);
	vector_interface->slice_base = slice_base;
}

/*
 * restore deprecated diagnostics (-Wdeprecated-declarations)
 */
#pragma GCC diagnostic pop
#endif /* STARPU_OPENMP */