/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2011 Télécom-SudParis
 * Copyright (C) 2013 Thibaut Lambert
 * Copyright (C) 2016 Uppsala University
 * Copyright (C) 2017 Erwan Leria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <starpu_profiling.h>
#include <core/workers.h>
#include <core/sched_ctx.h>
#include <core/jobs.h>
#include <core/task.h>
#include <core/task_bundle.h>
#include <core/dependencies/data_concurrency.h>
#include <common/config.h>
#include <common/utils.h>
#include <common/fxt.h>
#include <common/knobs.h>
#include <datawizard/memory_nodes.h>
#include <profiling/profiling.h>
#include <profiling/bound.h>
#include <math.h>
#include <string.h>
#include <core/debug.h>
#include <time.h>
#include <signal.h>
#include <core/simgrid.h>
#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif
/* global counters */
static int __g_total_submitted;
static int __g_peak_submitted;
static int __g_peak_ready;

/* global counter variables */
int64_t _starpu_task__g_total_submitted__value;
int64_t _starpu_task__g_peak_submitted__value;
int64_t _starpu_task__g_current_submitted__value;
int64_t _starpu_task__g_peak_ready__value;
int64_t _starpu_task__g_current_ready__value;

/* per-worker counters */
static int __w_total_executed;
static int __w_cumul_execution_time;

/* per-codelet counters */
static int __c_total_submitted;
static int __c_peak_submitted;
static int __c_peak_ready;
static int __c_total_executed;
static int __c_cumul_execution_time;

/* - */

/* per-scheduler knobs */
static int __s_max_priority_cap_knob;
static int __s_min_priority_cap_knob;

/* knob variables */
static int __s_max_priority_cap__value;
static int __s_min_priority_cap__value;

static struct starpu_perf_knob_group * __kg_starpu_task__per_scheduler;

/* - */
static void global_sample_updater(struct starpu_perf_counter_sample *sample, void *context)
{
	STARPU_ASSERT(context == NULL); /* no context for the global updater */
	(void)context;

	_starpu_perf_counter_sample_set_int64_value(sample, __g_total_submitted, _starpu_task__g_total_submitted__value);
	_starpu_perf_counter_sample_set_int64_value(sample, __g_peak_submitted, _starpu_task__g_peak_submitted__value);
	_starpu_perf_counter_sample_set_int64_value(sample, __g_peak_ready, _starpu_task__g_peak_ready__value);
}

static void per_worker_sample_updater(struct starpu_perf_counter_sample *sample, void *context)
{
	STARPU_ASSERT(context != NULL);
	struct _starpu_worker *worker = context;

	_starpu_perf_counter_sample_set_int64_value(sample, __w_total_executed, worker->__w_total_executed__value);
	_starpu_perf_counter_sample_set_double_value(sample, __w_cumul_execution_time, worker->__w_cumul_execution_time__value);
}

static void per_codelet_sample_updater(struct starpu_perf_counter_sample *sample, void *context)
{
	STARPU_ASSERT(sample->listener != NULL && sample->listener->set != NULL);
	struct starpu_perf_counter_set *set = sample->listener->set;
	STARPU_ASSERT(set->scope == starpu_perf_counter_scope_per_codelet);
	STARPU_ASSERT(context != NULL);
	struct starpu_codelet *cl = context;

	_starpu_perf_counter_sample_set_int64_value(sample, __c_total_submitted, cl->perf_counter_values->task.total_submitted);
	_starpu_perf_counter_sample_set_int64_value(sample, __c_peak_submitted, cl->perf_counter_values->task.peak_submitted);
	_starpu_perf_counter_sample_set_int64_value(sample, __c_peak_ready, cl->perf_counter_values->task.peak_ready);
	_starpu_perf_counter_sample_set_int64_value(sample, __c_total_executed, cl->perf_counter_values->task.total_executed);
	_starpu_perf_counter_sample_set_double_value(sample, __c_cumul_execution_time, cl->perf_counter_values->task.cumul_execution_time);
}
void _starpu__task_c__register_counters(void)
{
	{
		const enum starpu_perf_counter_scope scope = starpu_perf_counter_scope_global;
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, g_total_submitted, int64, "number of tasks submitted globally (since StarPU initialization)");
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, g_peak_submitted, int64, "maximum simultaneous number of tasks submitted and not yet ready, globally (since StarPU initialization)");
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, g_peak_ready, int64, "maximum simultaneous number of tasks ready and not yet executing, globally (since StarPU initialization)");

		_starpu_perf_counter_register_updater(scope, global_sample_updater);
	}

	{
		const enum starpu_perf_counter_scope scope = starpu_perf_counter_scope_per_worker;
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, w_total_executed, int64, "number of tasks executed on this worker (since StarPU initialization)");
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, w_cumul_execution_time, double, "cumulated execution time of tasks executed on this worker (microseconds, since StarPU initialization)");

		_starpu_perf_counter_register_updater(scope, per_worker_sample_updater);
	}

	{
		const enum starpu_perf_counter_scope scope = starpu_perf_counter_scope_per_codelet;
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, c_total_submitted, int64, "number of task instances submitted using this codelet (since enabled)");
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, c_peak_submitted, int64, "maximum simultaneous number of this codelet's task instances submitted and not yet ready (since enabled)");
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, c_peak_ready, int64, "maximum simultaneous number of this codelet's task instances ready and not yet executing (since enabled)");
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, c_total_executed, int64, "number of task instances executed using this codelet (since enabled)");
		__STARPU_PERF_COUNTER_REG("starpu.task", scope, c_cumul_execution_time, double, "cumulated execution time of this codelet's task instances (since enabled)");

		_starpu_perf_counter_register_updater(scope, per_codelet_sample_updater);
	}
}
/* - */

static void sched_knobs__set(const struct starpu_perf_knob * const knob, void *context, const struct starpu_perf_knob_value * const value)
{
	const char * const sched_policy_name = *(const char **)context;
	(void) sched_policy_name;

	if (knob->id == __s_max_priority_cap_knob)
	{
		STARPU_ASSERT(value->val_int32_t <= STARPU_MAX_PRIO);
		STARPU_ASSERT(value->val_int32_t >= STARPU_MIN_PRIO);
		STARPU_ASSERT(value->val_int32_t >= __s_min_priority_cap__value);
		__s_max_priority_cap__value = value->val_int32_t;
	}
	else if (knob->id == __s_min_priority_cap_knob)
	{
		STARPU_ASSERT(value->val_int32_t <= STARPU_MAX_PRIO);
		STARPU_ASSERT(value->val_int32_t >= STARPU_MIN_PRIO);
		STARPU_ASSERT(value->val_int32_t <= __s_max_priority_cap__value);
		__s_min_priority_cap__value = value->val_int32_t;
	}
	else
	{
		STARPU_ASSERT(0);
		abort();
	}
}

static void sched_knobs__get(const struct starpu_perf_knob * const knob, void *context, struct starpu_perf_knob_value * const value)
{
	const char * const sched_policy_name = *(const char **)context;
	(void) sched_policy_name;

	if (knob->id == __s_max_priority_cap_knob)
	{
		value->val_int32_t = __s_max_priority_cap__value;
	}
	else if (knob->id == __s_min_priority_cap_knob)
	{
		value->val_int32_t = __s_min_priority_cap__value;
	}
	else
	{
		STARPU_ASSERT(0);
		abort();
	}
}
void _starpu__task_c__register_knobs(void)
{
#if 0
	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_global;
		__kg_starpu_global = _starpu_perf_knob_group_register(scope, global_knobs__set, global_knobs__get);
	}
#endif

#if 0
	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_per_worker;
		__kg_starpu_worker__per_worker = _starpu_perf_knob_group_register(scope, worker_knobs__set, worker_knobs__get);
	}
#endif

	{
		const enum starpu_perf_knob_scope scope = starpu_perf_knob_scope_per_scheduler;
		__kg_starpu_task__per_scheduler = _starpu_perf_knob_group_register(scope, sched_knobs__set, sched_knobs__get);

		/* TODO: priority capping knobs actually work globally for now, the sched policy name is ignored */
		__STARPU_PERF_KNOB_REG("starpu.task", __kg_starpu_task__per_scheduler, s_max_priority_cap_knob, int32, "force task priority to this value or below (priority value)");
		__s_max_priority_cap__value = STARPU_MAX_PRIO;
		__STARPU_PERF_KNOB_REG("starpu.task", __kg_starpu_task__per_scheduler, s_min_priority_cap_knob, int32, "force task priority to this value or above (priority value)");
		__s_min_priority_cap__value = STARPU_MIN_PRIO;
	}
}
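
/* Illustrative sketch (not built here): how the priority capping knobs
 * registered above are meant to behave. The actual clamping happens in
 * _starpu_task_submit() below; this mirrors its logic on a plain int so the
 * semantics are easy to see in isolation. */
#if 0
static int clamp_priority(int prio, int min_cap, int max_cap)
{
	/* same order as _starpu_task_submit(): cap from above, then from below */
	if (prio > max_cap)
		prio = max_cap;
	if (prio < min_cap)
		prio = min_cap;
	return prio;
}
/* e.g. with caps [STARPU_MIN_PRIO, 2], a task submitted with priority 5
 * is recorded with priority 2 */
#endif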
void _starpu__task_c__unregister_knobs(void)
{
	_starpu_perf_knob_group_unregister(__kg_starpu_task__per_scheduler);
	__kg_starpu_task__per_scheduler = NULL;
}

/* - */

/* XXX this should be reinitialized when StarPU is shut down (or we should
 * make sure that no task remains!) */
/* TODO: we could make this hierarchical to avoid contention? */
//static starpu_pthread_cond_t submitted_cond = STARPU_PTHREAD_COND_INITIALIZER;

/* This key stores the task currently handled by the thread; note that we
 * cannot use the worker structure to store that information because it is
 * possible that we have a task with a NULL codelet, which means its callback
 * could be executed by a user thread as well. */
static starpu_pthread_key_t current_task_key;

static int limit_min_submitted_tasks;
static int limit_max_submitted_tasks;
static int watchdog_crash;
static int watchdog_delay;

/*
 * Function to call when the watchdog detects that no task has finished for
 * longer than the STARPU_WATCHDOG_TIMEOUT delay (expressed in microseconds)
 */
static void (*watchdog_hook)(void *) = NULL;
static void * watchdog_hook_arg = NULL;

#define _STARPU_TASK_MAGIC 42

/* Called once at starpu_init */
void _starpu_task_init(void)
{
	STARPU_PTHREAD_KEY_CREATE(&current_task_key, NULL);
	limit_min_submitted_tasks = starpu_get_env_number("STARPU_LIMIT_MIN_SUBMITTED_TASKS");
	limit_max_submitted_tasks = starpu_get_env_number("STARPU_LIMIT_MAX_SUBMITTED_TASKS");
	watchdog_crash = starpu_get_env_number("STARPU_WATCHDOG_CRASH");
	watchdog_delay = starpu_get_env_number_default("STARPU_WATCHDOG_DELAY", 0);
}
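
/* Illustrative sketch (not built here) of the environment variables read
 * above, assuming a POSIX setenv(); the values are examples, not defaults.
 * STARPU_WATCHDOG_TIMEOUT is in microseconds (see watchdog_func() at the end
 * of this file). */
#if 0
#include <stdlib.h>
#include <starpu.h>

int main(void)
{
	/* Throttle submission: block submitters once more than 10000 tasks
	 * are in flight, and resume once fewer than 9000 remain. */
	setenv("STARPU_LIMIT_MAX_SUBMITTED_TASKS", "10000", 1);
	setenv("STARPU_LIMIT_MIN_SUBMITTED_TASKS", "9000", 1);
	/* Complain (and here, crash) if no task completes within 10 seconds. */
	setenv("STARPU_WATCHDOG_TIMEOUT", "10000000", 1);
	setenv("STARPU_WATCHDOG_CRASH", "1", 1);

	if (starpu_init(NULL) != 0)
		return 1;
	/* ... submit tasks ... */
	starpu_shutdown();
	return 0;
}
#endif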
void _starpu_task_deinit(void)
{
	STARPU_PTHREAD_KEY_DELETE(current_task_key);
}

void starpu_task_init(struct starpu_task *task)
{
	/* TODO: memcpy from a template instead? benchmark it */
	STARPU_ASSERT(task);

	/* As most of the fields must be initialised to NULL, let's put 0
	 * everywhere */
	memset(task, 0, sizeof(struct starpu_task));
	task->sequential_consistency = 1;
	task->where = -1;

	/* Now we can initialise the fields which require a custom value */
	/* Note: remember to update STARPU_TASK_INITIALIZER as well */
#if STARPU_DEFAULT_PRIO != 0
	task->priority = STARPU_DEFAULT_PRIO;
#endif
	task->detach = 1;
#if STARPU_TASK_INIT != 0
	task->status = STARPU_TASK_INIT;
#endif
	task->predicted = NAN;
	task->predicted_transfer = NAN;
	task->predicted_start = NAN;
	task->magic = _STARPU_TASK_MAGIC;
	task->sched_ctx = STARPU_NMAX_SCHED_CTXS;
	task->flops = 0.0;
}
/* Free all the resources allocated for a task, without deallocating the task
 * structure itself (this is required for statically allocated tasks).
 * All values previously set by the user, like codelet and handles, remain
 * unchanged */
void starpu_task_clean(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	task->magic = 0;

	/* If a buffer was allocated to store the profiling info, we free it. */
	if (task->profiling_info)
	{
		free(task->profiling_info);
		task->profiling_info = NULL;
	}

	/* In case the task is (still) part of a bundle */
	starpu_task_bundle_t bundle = task->bundle;
	if (bundle)
		starpu_task_bundle_remove(bundle, task);

	if (task->dyn_handles)
	{
		free(task->dyn_handles);
		task->dyn_handles = NULL;
		free(task->dyn_interfaces);
		task->dyn_interfaces = NULL;
	}

	if (task->dyn_modes)
	{
		free(task->dyn_modes);
		task->dyn_modes = NULL;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
	if (j)
	{
		_starpu_job_destroy(j);
		task->starpu_private = NULL;
	}
}
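
/* Illustrative sketch (not built here): typical lifecycle of a statically
 * allocated task, using a hypothetical codelet `my_cl`. The task is set up
 * with starpu_task_init() and, once terminated, released with
 * starpu_task_clean(), since the structure itself is not heap-allocated. */
#if 0
struct starpu_task task;
starpu_task_init(&task);
task.cl = &my_cl;		/* hypothetical codelet */
task.synchronous = 1;		/* wait for completion inside starpu_task_submit() */
int ret = starpu_task_submit(&task);
STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
starpu_task_clean(&task);	/* frees profiling info, job, etc., not the task */
#endif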
struct starpu_task * STARPU_ATTRIBUTE_MALLOC starpu_task_create(void)
{
	struct starpu_task *task;
	_STARPU_MALLOC(task, sizeof(struct starpu_task));
	starpu_task_init(task);

	/* Dynamically allocated tasks are destroyed by default */
	task->destroy = 1;

	return task;
}
/* Free the resources allocated during starpu_task_create. This function can be
 * called automatically after the execution of a task by setting the "destroy"
 * flag of the starpu_task structure (default behaviour). Calling this function
 * on a statically allocated task results in undefined behaviour. */
void _starpu_task_destroy(struct starpu_task *task)
{
	/* If starpu_task_destroy is called in a callback, we just set the destroy
	   flag. The task will be destroyed after the callback returns */
	if (task == starpu_task_get_current()
	    && _starpu_get_local_worker_status() == STATUS_CALLBACK)
	{
		task->destroy = 1;
	}
	else
	{
		starpu_task_clean(task);

		/* TODO: handle the case of a task with detach = 1 and destroy = 1 */
		/* TODO: handle the case of non-terminated tasks -> assertion failure, it's too dangerous to be doing something like this */

		/* Does the user want StarPU to release cl_arg? */
		if (task->cl_arg_free)
			free(task->cl_arg);

		/* Does the user want StarPU to release cl_ret? */
		if (task->cl_ret_free)
			free(task->cl_ret);

		/* Does the user want StarPU to release callback_arg? */
		if (task->callback_arg_free)
			free(task->callback_arg);

		/* Does the user want StarPU to release epilogue_callback_arg? */
		if (task->epilogue_callback_arg_free)
			free(task->epilogue_callback_arg);

		/* Does the user want StarPU to release prologue_callback_arg? */
		if (task->prologue_callback_arg_free)
			free(task->prologue_callback_arg);

		/* Does the user want StarPU to release prologue_callback_pop_arg? */
		if (task->prologue_callback_pop_arg_free)
			free(task->prologue_callback_pop_arg);

		free(task);
	}
}
void starpu_task_destroy(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->destroy || !task->detach, "starpu_task_destroy must not be called for a task with destroy = 1 and detach = 1");
	_starpu_task_destroy(task);
}

int starpu_task_finished(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->detach, "starpu_task_finished can only be called on tasks with detach = 0");
	return _starpu_job_finished(_starpu_get_job_associated_to_task(task));
}
int starpu_task_wait(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");

	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or synchronous. Waiting returns immediately\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait must not be called from a task or callback");

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_STARPU_TRACE_TASK_WAIT_START(j);

	starpu_do_schedule();
	_starpu_wait_job(j);

	/* as this task is waited on synchronously, the freeing of the job
	   structure was deferred */
	if (task->destroy)
		_starpu_task_destroy(task);

	_starpu_perf_counter_update_global_sample();

	_STARPU_TRACE_TASK_WAIT_END();
	_STARPU_LOG_OUT();
	return 0;
}
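
/* Illustrative sketch (not built here): waiting on a dynamically allocated
 * task. detach must be cleared before submission so that the task can be
 * waited on; `my_cl` is a hypothetical codelet. */
#if 0
struct starpu_task *task = starpu_task_create();
task->cl = &my_cl;	/* hypothetical codelet */
task->detach = 0;	/* required by starpu_task_wait() */
int ret = starpu_task_submit(task);
STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
ret = starpu_task_wait(task);	/* also destroys it, since destroy == 1 */
STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait");
#endif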
int starpu_task_wait_array(struct starpu_task **tasks, unsigned nb_tasks)
{
	unsigned i;

	for (i = 0; i < nb_tasks; i++)
	{
		int ret = starpu_task_wait(tasks[i]);
		if (ret)
			return ret;
	}
	return 0;
}
#ifdef STARPU_OPENMP
int _starpu_task_test_termination(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->detach, "_starpu_task_test_termination can only be called on tasks with detach = 0");

	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or synchronous\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	int ret = _starpu_test_job_termination(j);

	if (ret)
	{
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	return ret;
}
#endif
/* NB: in case we have a regenerable task, it is possible that the job was
 * already counted. */
int _starpu_submit_job(struct _starpu_job *j, int nodeps)
{
	struct starpu_task *task = j->task;
	int ret;
#ifdef STARPU_OPENMP
	const unsigned continuation = j->continuation;
#else
	const unsigned continuation = 0;
#endif

	_STARPU_LOG_IN();
	/* notify bound computation of a new task */
	_starpu_bound_record(j);

	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_starpu_sched_task_submit(task);

#ifdef STARPU_USE_SC_HYPERVISOR
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);
	if (sched_ctx != NULL && j->task->sched_ctx != _starpu_get_initial_sched_ctx()->id && j->task->sched_ctx != STARPU_NMAX_SCHED_CTXS
	    && sched_ctx->perf_counters != NULL)
	{
		struct starpu_perfmodel_arch arch;
		_STARPU_MALLOC(arch.devices, sizeof(struct starpu_perfmodel_device));
		arch.ndevices = 1;
		arch.devices[0].type = STARPU_CPU_WORKER;
		arch.devices[0].devid = 0;
		arch.devices[0].ncores = 1;
		_starpu_compute_buffers_footprint(j->task->cl->model, &arch, 0, j);
		free(arch.devices);

		size_t data_size = 0;
		if (j->task->cl)
		{
			unsigned i, nbuffers = STARPU_TASK_GET_NBUFFERS(j->task);
			for (i = 0; i < nbuffers; i++)
			{
				starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
				if (handle != NULL)
					data_size += _starpu_data_get_size(handle);
			}
		}

		_STARPU_TRACE_HYPERVISOR_BEGIN();
		sched_ctx->perf_counters->notify_submitted_job(j->task, j->footprint, data_size);
		_STARPU_TRACE_HYPERVISOR_END();
	}
#endif /* STARPU_USE_SC_HYPERVISOR */

	/* We retain handle reference count */
	if (task->cl && !continuation)
	{
		unsigned i;
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
		for (i = 0; i < nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
			_starpu_spin_lock(&handle->header_lock);
			handle->busy_count++;
			_starpu_spin_unlock(&handle->header_lock);
		}
	}

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	_starpu_handle_job_submission(j);

#ifdef STARPU_OPENMP
	if (continuation)
	{
		j->discontinuous = 1;
		j->continuation = 0;
	}
#endif

	if (nodeps)
	{
		ret = _starpu_take_deps_and_schedule(j);
	}
	else
	{
#ifdef STARPU_OPENMP
		if (continuation)
		{
			ret = _starpu_reenforce_task_deps_and_schedule(j);
		}
		else
#endif
		{
			ret = _starpu_enforce_deps_and_schedule(j);
		}
	}

	_STARPU_LOG_OUT();
	return ret;
}
/* Note: this is racy, so valgrind would complain. But since we'll always put
 * the same values, this is not a problem. */
void _starpu_codelet_check_deprecated_fields(struct starpu_codelet *cl)
{
	if (!cl)
		return;

	if (cl->checked)
	{
		STARPU_RMB();
		return;
	}

	uint32_t where = cl->where;
	int is_where_unset = where == 0;
	unsigned i, some_impl;

	/* Check deprecated and unset fields (where, <device>_func,
	 * <device>_funcs) */

	/* CPU */
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS && cl->cpu_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cpu_func and cpu_funcs are set. Ignoring cpu_func.\n");
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS)
	{
		cl->cpu_funcs[0] = cl->cpu_func;
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cpu_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->cpu_func == 0)
	{
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_CPU;
	}

	/* CUDA */
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS && cl->cuda_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cuda_func and cuda_funcs are set. Ignoring cuda_func.\n");
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS)
	{
		cl->cuda_funcs[0] = cl->cuda_func;
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cuda_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->cuda_func == 0)
	{
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_CUDA;
	}

	/* OpenCL */
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS && cl->opencl_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both opencl_func and opencl_funcs are set. Ignoring opencl_func.\n");
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS)
	{
		cl->opencl_funcs[0] = cl->opencl_func;
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->opencl_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->opencl_func == 0)
	{
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_OPENCL;
	}

	/* MPI master-slave */
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->mpi_ms_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_MPI_MS;
	}

	/* CPU implementations referenced by name (cpu_funcs_name) can be run
	 * by MPI master-slave workers as well */
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cpu_funcs_name[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_MPI_MS;
	}

	cl->where = where;

	STARPU_WMB();
	cl->checked = 1;
}

void _starpu_task_check_deprecated_fields(struct starpu_task *task STARPU_ATTRIBUTE_UNUSED)
{
	/* None any more */
}
static int _starpu_task_submit_head(struct starpu_task *task)
{
	unsigned is_sync = task->synchronous;
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	if (task->status == STARPU_TASK_STOPPED || task->status == STARPU_TASK_FINISHED)
		task->status = STARPU_TASK_INIT;
	else
		STARPU_ASSERT(task->status == STARPU_TASK_INIT);

	if (j->internal)
	{
		/* Internal tasks are submitted to the initial context */
		task->sched_ctx = _starpu_get_initial_sched_ctx()->id;
	}
	else if (task->sched_ctx == STARPU_NMAX_SCHED_CTXS)
	{
		/* If the task has not specified a context, we set the current context */
		task->sched_ctx = _starpu_sched_ctx_get_current_context();
	}

	if (is_sync)
	{
		/* It may not be possible to submit a synchronous
		 * (blocking) task from here */
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "submitting a synchronous task must not be done from a task or a callback");
		task->detach = 0;
	}

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);
	if (task->where == -1 && task->cl)
		task->where = task->cl->where;

	if (task->cl)
	{
		unsigned i;
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
		_STARPU_TRACE_UPDATE_TASK_CNT(0);

		/* Check buffers */
		if (task->dyn_handles == NULL)
			STARPU_ASSERT_MSG(STARPU_TASK_GET_NBUFFERS(task) <= STARPU_NMAXBUFS,
					  "Codelet %p has too many buffers (%d vs max %d). Either use the --enable-maxbuffers configure option to increase the max, or use dyn_handles instead of handles.",
					  task->cl, STARPU_TASK_GET_NBUFFERS(task), STARPU_NMAXBUFS);

		if (STARPU_UNLIKELY(task->dyn_handles))
		{
			_STARPU_MALLOC(task->dyn_interfaces, nbuffers * sizeof(void *));
		}

		for (i = 0; i < nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
			enum starpu_data_access_mode mode = STARPU_TASK_GET_MODE(task, i);
			int node = task->cl->specific_nodes ? STARPU_CODELET_GET_NODE(task->cl, i) : -1;

			/* Make sure handles are valid */
			STARPU_ASSERT_MSG(handle->magic == _STARPU_TASK_MAGIC, "data %p is invalid (was it already unregistered?)", handle);
			/* Make sure handles are not partitioned */
			STARPU_ASSERT_MSG(handle->nchildren == 0, "only unpartitioned data (or the pieces of a partitioned data) can be used in a task");
			/* Make sure the specified node exists */
			STARPU_ASSERT_MSG(node == STARPU_SPECIFIC_NODE_LOCAL || node == STARPU_SPECIFIC_NODE_CPU || node == STARPU_SPECIFIC_NODE_SLOW || node == STARPU_SPECIFIC_NODE_LOCAL_OR_CPU || (node >= 0 && node < (int) starpu_memory_nodes_get_count()), "The codelet-specified memory node does not exist");

			/* Provide the home interface for now if any,
			 * for can_execute hooks */
			if (handle->home_node != -1)
				_STARPU_TASK_SET_INTERFACE(task, starpu_data_get_interface_on_node(handle, handle->home_node), i);

			if (!(task->cl->flags & STARPU_CODELET_NOPLANS) &&
			    ((handle->nplans && !handle->nchildren) || handle->siblings)
			    && handle->partition_automatic_disabled == 0)
				/* This handle is involved with asynchronous
				 * partitioning as a parent or a child; make
				 * sure the right plan is active, and submit
				 * the appropriate partitioning / unpartitioning
				 * if not */
				_starpu_data_partition_access_submit(handle, (mode & STARPU_W) != 0);
		}

		/* Check that the type of worker(s) required by the task exists */
		if (STARPU_UNLIKELY(!_starpu_worker_exists(task)))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		/* In case we require that a task should be explicitly
		 * executed on a specific worker, we make sure that the worker
		 * is able to execute this task. */
		if (STARPU_UNLIKELY(task->execute_on_a_specific_worker && !starpu_combined_worker_can_execute_task(task->workerid, task, 0)))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		if (task->cl->model)
			_starpu_init_and_load_perfmodel(task->cl->model);

		if (task->cl->energy_model)
			_starpu_init_and_load_perfmodel(task->cl->energy_model);
	}

	return 0;
}
/* application should submit new tasks to StarPU through this function */
int _starpu_task_submit(struct starpu_task *task, int nodeps)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(task->magic == _STARPU_TASK_MAGIC, "Tasks must be created with starpu_task_create, or initialized with starpu_task_init.");
	STARPU_ASSERT_MSG(starpu_is_initialized(), "starpu_init must be called (and return no error) before submitting tasks.");

	int ret;

	{
		/* task knobs */
		if (task->priority > __s_max_priority_cap__value)
			task->priority = __s_max_priority_cap__value;
		if (task->priority < __s_min_priority_cap__value)
			task->priority = __s_min_priority_cap__value;
	}

	unsigned is_sync = task->synchronous;
	starpu_task_bundle_t bundle = task->bundle;
	STARPU_ASSERT_MSG(!(nodeps && bundle), "not supported\n");

	/* Internally, StarPU manipulates a struct _starpu_job *, which is a
	 * wrapper around the task structure; it is possible that this job
	 * structure was already allocated. */
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	const unsigned continuation =
#ifdef STARPU_OPENMP
		j->continuation
#else
		0
#endif
		;

	if (!_starpu_perf_counter_paused() && !j->internal && !continuation)
	{
		(void) STARPU_ATOMIC_ADD64(&_starpu_task__g_total_submitted__value, 1);
		int64_t value = STARPU_ATOMIC_ADD64(&_starpu_task__g_current_submitted__value, 1);
		_starpu_perf_counter_update_max_int64(&_starpu_task__g_peak_submitted__value, value);
		_starpu_perf_counter_update_global_sample();

		if (task->cl && task->cl->perf_counter_values)
		{
			struct starpu_perf_counter_sample_cl_values * const pcv = task->cl->perf_counter_values;

			(void) STARPU_ATOMIC_ADD64(&pcv->task.total_submitted, 1);
			value = STARPU_ATOMIC_ADD64(&pcv->task.current_submitted, 1);
			_starpu_perf_counter_update_max_int64(&pcv->task.peak_submitted, value);
			_starpu_perf_counter_update_per_codelet_sample(task->cl);
		}
	}

	STARPU_ASSERT_MSG(!(nodeps && continuation), "not supported\n");

	if (!j->internal)
	{
		int nsubmitted_tasks = starpu_task_nsubmitted();
		if (limit_max_submitted_tasks >= 0 && limit_max_submitted_tasks < nsubmitted_tasks
		    && limit_min_submitted_tasks >= 0 && limit_min_submitted_tasks < nsubmitted_tasks)
		{
			starpu_do_schedule();
			_STARPU_TRACE_TASK_THROTTLE_START();
			starpu_task_wait_for_n_submitted(limit_min_submitted_tasks);
			_STARPU_TRACE_TASK_THROTTLE_END();
		}
	}

	_STARPU_TRACE_TASK_SUBMIT_START();

	ret = _starpu_task_submit_head(task);
	if (ret)
	{
		_STARPU_TRACE_TASK_SUBMIT_END();
		return ret;
	}

	if (!continuation)
	{
		STARPU_ASSERT_MSG(!j->submitted || j->terminated >= 1, "Tasks cannot be submitted a second time before being terminated. Please use different task structures, or use the regenerate flag to let the task resubmit itself automatically.");
		_STARPU_TRACE_TASK_SUBMIT(j,
					  _starpu_get_sched_ctx_struct(task->sched_ctx)->iterations[0],
					  _starpu_get_sched_ctx_struct(task->sched_ctx)->iterations[1]);
		_STARPU_TRACE_TASK_NAME(j);
		_STARPU_TRACE_TASK_LINE(j);
	}

	/* If this is a continuation, we don't modify the implicit data dependencies detected earlier. */
	if (task->cl && !continuation)
	{
		_starpu_job_set_ordered_buffers(j);

		if (!nodeps)
			_starpu_detect_implicit_data_deps(task);
	}

	if (STARPU_UNLIKELY(bundle))
	{
		/* We need to make sure that models for other tasks of the
		 * bundle are also loaded, so the scheduler can estimate the
		 * duration of the whole bundle */
		STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);

		struct _starpu_task_bundle_entry *entry;
		entry = bundle->list;

		while (entry)
		{
			if (entry->task->cl->model)
				_starpu_init_and_load_perfmodel(entry->task->cl->model);

			if (entry->task->cl->energy_model)
				_starpu_init_and_load_perfmodel(entry->task->cl->energy_model);

			entry = entry->next;
		}

		STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
	}

	/* If profiling is activated, we allocate a structure to store the
	 * appropriate info. */
	struct starpu_profiling_task_info *info = task->profiling_info;
	int profiling = starpu_profiling_status_get();
	if (!info)
	{
		info = _starpu_allocate_profiling_info_if_needed(task);
		task->profiling_info = info;
	}

	/* The task is considered blocked until we are sure that no dependency
	 * remains. */
	task->status = STARPU_TASK_BLOCKED;

	if (STARPU_UNLIKELY(profiling))
		_starpu_clock_gettime(&info->submit_time);

	ret = _starpu_submit_job(j, nodeps);
#ifdef STARPU_SIMGRID
	if (_starpu_simgrid_task_submit_cost())
		starpu_sleep(0.000001);
#endif

	if (is_sync)
	{
		_starpu_sched_do_schedule(task->sched_ctx);
		_starpu_wait_job(j);
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	_STARPU_TRACE_TASK_SUBMIT_END();
	_STARPU_LOG_OUT();
	return ret;
}
#undef starpu_task_submit
int starpu_task_submit(struct starpu_task *task)
{
	return _starpu_task_submit(task, 0);
}
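
/* Illustrative sketch (not built here): checking the submission return code.
 * starpu_task_submit() returns -ENODEV when no worker can run the codelet,
 * which callers may want to handle separately from hard failures. */
#if 0
int ret = starpu_task_submit(task);
if (ret == -ENODEV)
{
	/* no worker supports this codelet: fall back, e.g. run a host version */
}
else
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
#endif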
int _starpu_task_submit_internally(struct starpu_task *task)
{
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	j->internal = 1;
	return starpu_task_submit(task);
}

/* application should submit new tasks to StarPU through this function */
int starpu_task_submit_to_ctx(struct starpu_task *task, unsigned sched_ctx_id)
{
	task->sched_ctx = sched_ctx_id;
	return starpu_task_submit(task);
}

/* The StarPU core can submit tasks directly to the scheduler or a worker,
 * skipping dependencies completely (when it knows what it is doing). */
int starpu_task_submit_nodeps(struct starpu_task *task)
{
	return _starpu_task_submit(task, 1);
}
/*
 * worker->sched_mutex must be locked when calling this function.
 */
int _starpu_task_submit_conversion_task(struct starpu_task *task,
					unsigned int workerid)
{
	int ret;
	STARPU_ASSERT(task->cl);
	STARPU_ASSERT(task->execute_on_a_specific_worker);

	ret = _starpu_task_submit_head(task);
	STARPU_ASSERT(ret == 0);

	/* We retain the handle reference count that would have been acquired by data dependencies. */
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i = 0; i < nbuffers; i++)
	{
		starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
		_starpu_spin_lock(&handle->header_lock);
		handle->busy_count++;
		_starpu_spin_unlock(&handle->header_lock);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_starpu_sched_task_submit(task);

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	_starpu_handle_job_submission(j);
	_starpu_increment_nready_tasks_of_sched_ctx(j->task->sched_ctx, j->task->flops, j->task);
	_starpu_job_set_ordered_buffers(j);

	STARPU_ASSERT(task->status == STARPU_TASK_INIT);
	task->status = STARPU_TASK_READY;
	_starpu_profiling_set_task_push_start_time(task);

	unsigned node = starpu_worker_get_memory_node(workerid);
	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, node);

	struct _starpu_worker *worker;
	worker = _starpu_get_worker_struct(workerid);
	starpu_task_prio_list_push_back(&worker->local_tasks, task);
	starpu_wake_worker_locked(worker->workerid);

	_starpu_profiling_set_task_push_end_time(task);

	STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
	return 0;
}
void starpu_codelet_init(struct starpu_codelet *cl)
{
	memset(cl, 0, sizeof(struct starpu_codelet));
}
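
/* Illustrative sketch (not built here): defining a minimal codelet after
 * zero-initializing it; `scal_cpu_func` is a hypothetical CPU implementation
 * scaling a vector in place. */
#if 0
static void scal_cpu_func(void *buffers[], void *cl_arg)
{
	struct starpu_vector_interface *v = buffers[0];
	float *data = (float *)STARPU_VECTOR_GET_PTR(v);
	unsigned n = STARPU_VECTOR_GET_NX(v);
	float factor = *(float *)cl_arg;
	unsigned i;
	for (i = 0; i < n; i++)
		data[i] *= factor;
}

static struct starpu_codelet scal_cl;

static void init_scal_cl(void)
{
	starpu_codelet_init(&scal_cl);
	scal_cl.cpu_funcs[0] = scal_cpu_func;
	scal_cl.nbuffers = 1;
	scal_cl.modes[0] = STARPU_RW;
	scal_cl.name = "scal";
}
#endif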
#define _STARPU_CODELET_WORKER_NAME_LEN 32

void starpu_codelet_display_stats(struct starpu_codelet *cl)
{
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();

	if (cl->name)
		fprintf(stderr, "Statistics for codelet %s\n", cl->name);
	else if (cl->model && cl->model->symbol)
		fprintf(stderr, "Statistics for codelet %s\n", cl->model->symbol);

	unsigned long total = 0;
	for (worker = 0; worker < nworkers; worker++)
		total += cl->per_worker_stats[worker];

	for (worker = 0; worker < nworkers; worker++)
	{
		char name[_STARPU_CODELET_WORKER_NAME_LEN];
		starpu_worker_get_name(worker, name, _STARPU_CODELET_WORKER_NAME_LEN);

		fprintf(stderr, "\t%s -> %lu / %lu (%2.2f %%)\n", name, cl->per_worker_stats[worker], total, (100.0f*cl->per_worker_stats[worker])/total);
	}
}
/*
 * We wait for all the tasks that have already been submitted. Note that a
 * regenerable task is not considered finished until it has been explicitly
 * set as non-regenerable again (e.g. from a callback).
 */
int _starpu_task_wait_for_all_and_return_nb_waited_tasks(void)
{
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned sched_ctx_id = nsched_ctxs == 1 ? 0 : starpu_sched_ctx_get_context();

	/* if there is no indication about which context to wait for,
	   we wait for all tasks submitted to starpu */
	if (sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		_STARPU_DEBUG("Waiting for all tasks\n");
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_all must not be called from a task or callback");
		STARPU_AYU_BARRIER();
		struct _starpu_machine_config *config = _starpu_get_machine_config();
		if (config->topology.nsched_ctxs == 1)
		{
			_starpu_sched_do_schedule(0);
			return _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(0);
		}
		else
		{
			int s;
			for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if (config->sched_ctxs[s].do_schedule == 1)
				{
					_starpu_sched_do_schedule(config->sched_ctxs[s].id);
				}
			}
			for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if (config->sched_ctxs[s].do_schedule == 1)
				{
					starpu_task_wait_for_all_in_ctx(config->sched_ctxs[s].id);
				}
			}
			return 0;
		}
	}
	else
	{
		_starpu_sched_do_schedule(sched_ctx_id);
		_STARPU_DEBUG("Waiting for tasks submitted to context %u\n", sched_ctx_id);
		return _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(sched_ctx_id);
	}
}

int starpu_task_wait_for_all(void)
{
	_starpu_task_wait_for_all_and_return_nb_waited_tasks();
	if (!_starpu_perf_counter_paused())
		_starpu_perf_counter_update_global_sample();
	return 0;
}
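
/* Illustrative sketch (not built here): the usual pattern of submitting a
 * batch of asynchronous tasks and then waiting for the whole batch; `ntasks`
 * and `my_cl` are hypothetical. */
#if 0
unsigned i;
for (i = 0; i < ntasks; i++)
{
	struct starpu_task *task = starpu_task_create();
	task->cl = &my_cl;	/* hypothetical codelet */
	int ret = starpu_task_submit(task);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
}
/* blocks until every submitted task (in all contexts) has terminated */
starpu_task_wait_for_all();
#endif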
int _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(unsigned sched_ctx)
{
	_STARPU_TRACE_TASK_WAIT_FOR_ALL_START();
	int ret = _starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx);
	_STARPU_TRACE_TASK_WAIT_FOR_ALL_END();
	/* TODO: improve Temanejo into knowing about contexts ... */
	STARPU_AYU_BARRIER();
	return ret;
}

int starpu_task_wait_for_all_in_ctx(unsigned sched_ctx)
{
	_starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(sched_ctx);
	if (!_starpu_perf_counter_paused())
		_starpu_perf_counter_update_global_sample();
	return 0;
}
/*
 * We wait until only a certain number of the already submitted tasks are left.
 * Note that a regenerable task is not considered finished until it has been
 * explicitly set as non-regenerable again (e.g. from a callback).
 */
int starpu_task_wait_for_n_submitted(unsigned n)
{
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned sched_ctx_id = nsched_ctxs == 1 ? 0 : starpu_sched_ctx_get_context();

	/* if there is no indication about which context to wait for,
	   we wait for all tasks submitted to starpu */
	if (sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		_STARPU_DEBUG("Waiting for all tasks\n");
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_n_submitted must not be called from a task or callback");

		struct _starpu_machine_config *config = _starpu_get_machine_config();
		if (config->topology.nsched_ctxs == 1)
			_starpu_wait_for_n_submitted_tasks_of_sched_ctx(0, n);
		else
		{
			int s;
			for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if (config->sched_ctxs[s].do_schedule == 1)
				{
					_starpu_wait_for_n_submitted_tasks_of_sched_ctx(config->sched_ctxs[s].id, n);
				}
			}
		}
	}
	else
	{
		_STARPU_DEBUG("Waiting for tasks submitted to context %u\n", sched_ctx_id);
		_starpu_wait_for_n_submitted_tasks_of_sched_ctx(sched_ctx_id, n);
	}
	if (!_starpu_perf_counter_paused())
		_starpu_perf_counter_update_global_sample();
	return 0;
}

int starpu_task_wait_for_n_submitted_in_ctx(unsigned sched_ctx, unsigned n)
{
	_starpu_wait_for_n_submitted_tasks_of_sched_ctx(sched_ctx, n);
	if (!_starpu_perf_counter_paused())
		_starpu_perf_counter_update_global_sample();
	return 0;
}
/*
 * We wait until there is no ready task any more (i.e. StarPU will not be able
 * to progress any more).
 */
int starpu_task_wait_for_no_ready(void)
{
	STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_no_ready must not be called from a task or callback");

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if (config->topology.nsched_ctxs == 1)
	{
		_starpu_sched_do_schedule(0);
		_starpu_wait_for_no_ready_of_sched_ctx(0);
	}
	else
	{
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].do_schedule == 1)
			{
				_starpu_sched_do_schedule(config->sched_ctxs[s].id);
			}
		}
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].do_schedule == 1)
			{
				_starpu_wait_for_no_ready_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}
	if (!_starpu_perf_counter_paused())
		_starpu_perf_counter_update_global_sample();
	return 0;
}
void starpu_iteration_push(unsigned long iteration)
{
	struct _starpu_sched_ctx *ctx = _starpu_get_sched_ctx_struct(_starpu_sched_ctx_get_current_context());
	unsigned level = ctx->iteration_level++;
	if (level < sizeof(ctx->iterations)/sizeof(ctx->iterations[0]))
		ctx->iterations[level] = iteration;
}

void starpu_iteration_pop(void)
{
	struct _starpu_sched_ctx *ctx = _starpu_get_sched_ctx_struct(_starpu_sched_ctx_get_current_context());
	STARPU_ASSERT_MSG(ctx->iteration_level > 0, "calls to starpu_iteration_pop must match starpu_iteration_push calls");
	/* pre-decrement, so that we clear the slot that the matching push filled */
	unsigned level = --ctx->iteration_level;
	if (level < sizeof(ctx->iterations)/sizeof(ctx->iterations[0]))
		ctx->iterations[level] = -1;
}
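
/* Illustrative sketch (not built here): bracketing the tasks of each outer
 * iteration so that traces can group them; `niter` and
 * `submit_tasks_for_iteration` are hypothetical. */
#if 0
unsigned long iter;
for (iter = 0; iter < niter; iter++)
{
	starpu_iteration_push(iter);
	submit_tasks_for_iteration(iter);	/* hypothetical helper */
	starpu_iteration_pop();
}
starpu_task_wait_for_all();
#endif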
void starpu_do_schedule(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if (config->topology.nsched_ctxs == 1)
		_starpu_sched_do_schedule(0);
	else
	{
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].do_schedule == 1)
			{
				_starpu_sched_do_schedule(config->sched_ctxs[s].id);
			}
		}
	}
}

void
starpu_drivers_request_termination(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
	int nsubmitted = starpu_task_nsubmitted();
	config->submitting = 0;
	if (nsubmitted == 0)
	{
		ANNOTATE_HAPPENS_AFTER(&config->running);
		config->running = 0;
		ANNOTATE_HAPPENS_BEFORE(&config->running);
		STARPU_WMB();
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].do_schedule == 1)
			{
				_starpu_check_nsubmitted_tasks_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);
}
int starpu_task_nsubmitted(void)
{
	int nsubmitted = 0;
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if (config->topology.nsched_ctxs == 1)
		nsubmitted = _starpu_get_nsubmitted_tasks_of_sched_ctx(0);
	else
	{
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].do_schedule == 1)
			{
				nsubmitted += _starpu_get_nsubmitted_tasks_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}
	return nsubmitted;
}

int starpu_task_nready(void)
{
	int nready = 0;
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if (config->topology.nsched_ctxs == 1)
		nready = starpu_sched_ctx_get_nready_tasks(0);
	else
	{
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].do_schedule == 1)
			{
				nready += starpu_sched_ctx_get_nready_tasks(config->sched_ctxs[s].id);
			}
		}
	}
	return nready;
}
/* Return the task currently executed by the worker, or NULL if this is called
 * either from a thread that is not executing a task, or simply because there
 * is no task being executed at the moment. */
struct starpu_task *starpu_task_get_current(void)
{
	return (struct starpu_task *) STARPU_PTHREAD_GETSPECIFIC(current_task_key);
}
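
/* Illustrative sketch (not built here): a codelet implementation querying the
 * task it belongs to, e.g. to read scheduling information. */
#if 0
static void my_cpu_func(void *buffers[], void *cl_arg)
{
	(void)buffers;
	(void)cl_arg;
	struct starpu_task *self = starpu_task_get_current();
	STARPU_ASSERT(self != NULL);	/* we are running inside a task here */
	fprintf(stderr, "running task %p (priority %d)\n", (void *)self, self->priority);
}
#endif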
void _starpu_set_current_task(struct starpu_task *task)
{
	STARPU_PTHREAD_SETSPECIFIC(current_task_key, task);
}

int starpu_task_get_current_data_node(unsigned i)
{
	struct starpu_task *task = starpu_task_get_current();
	if (!task)
		return -1;

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	struct _starpu_data_descr *descrs = _STARPU_JOB_GET_ORDERED_BUFFERS(j);
	unsigned orderedindex = descrs[i].orderedindex;
	return descrs[orderedindex].node;
}
#ifdef STARPU_OPENMP
/* Prepare the fields of the current task for accepting a new set of
 * dependencies in anticipation of becoming a continuation.
 *
 * When the task becomes 'continued', it will only be queued again when the new
 * set of dependencies is fulfilled. */
void _starpu_task_prepare_for_continuation(void)
{
	_starpu_job_prepare_for_continuation(_starpu_get_job_associated_to_task(starpu_task_get_current()));
}

void _starpu_task_prepare_for_continuation_ext(unsigned continuation_resubmit,
					       void (*continuation_callback_on_sleep)(void *arg), void *continuation_callback_on_sleep_arg)
{
	_starpu_job_prepare_for_continuation_ext(_starpu_get_job_associated_to_task(starpu_task_get_current()),
						 continuation_resubmit, continuation_callback_on_sleep, continuation_callback_on_sleep_arg);
}

void _starpu_task_set_omp_cleanup_callback(struct starpu_task *task, void (*omp_cleanup_callback)(void *arg), void *omp_cleanup_callback_arg)
{
	_starpu_job_set_omp_cleanup_callback(_starpu_get_job_associated_to_task(task),
					     omp_cleanup_callback, omp_cleanup_callback_arg);
}
#endif
/*
 * Returns 0 if the task does not use any multiformat handle, 1 otherwise.
 */
int
_starpu_task_uses_multiformat_handles(struct starpu_task *task)
{
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i = 0; i < nbuffers; i++)
	{
		if (_starpu_data_is_multiformat_handle(STARPU_TASK_GET_HANDLE(task, i)))
			return 1;
	}
	return 0;
}
/*
 * Checks whether the given handle needs to be converted in order to be used on
 * the node given as the second argument.
 */
int
_starpu_handle_needs_conversion_task(starpu_data_handle_t handle,
				     unsigned int node)
{
	return _starpu_handle_needs_conversion_task_for_arch(handle, starpu_node_get_kind(node));
}

int
_starpu_handle_needs_conversion_task_for_arch(starpu_data_handle_t handle,
					      enum starpu_node_kind node_kind)
{
	/*
	 * Here, we assume that CUDA devices and OpenCL devices use the
	 * same data structure. A conversion is only needed when moving
	 * data from a CPU to a GPU, or the other way around.
	 */
	switch (node_kind)
	{
		case STARPU_CPU_RAM:
		case STARPU_MPI_MS_RAM:
			switch (starpu_node_get_kind(handle->mf_node))
			{
				case STARPU_CPU_RAM:
				case STARPU_MPI_MS_RAM:
					return 0;
				default:
					return 1;
			}
			break;
		default:
			switch (starpu_node_get_kind(handle->mf_node))
			{
				case STARPU_CPU_RAM:
				case STARPU_MPI_MS_RAM:
					return 1;
				default:
					return 0;
			}
			break;
	}

	/* This statement should never be reached */
	return -EINVAL;
}
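/*
 * Illustrative sketch (internal API, hypothetical helper name): a caller in
 * the data-management layer would use the predicate above to decide whether
 * a conversion task must be inserted before the data can be accessed on the
 * target node.
 *
 *	static void maybe_convert(starpu_data_handle_t handle, unsigned node)
 *	{
 *		if (_starpu_handle_needs_conversion_task(handle, node))
 *		{
 *			// A conversion task would be created and submitted
 *			// here, using the handle's multiformat interface.
 *		}
 *	}
 */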
void starpu_task_set_implementation(struct starpu_task *task, unsigned impl)
{
	_starpu_get_job_associated_to_task(task)->nimpl = impl;
}

unsigned starpu_task_get_implementation(struct starpu_task *task)
{
	return _starpu_get_job_associated_to_task(task)->nimpl;
}

unsigned long starpu_task_get_job_id(struct starpu_task *task)
{
	return _starpu_get_job_associated_to_task(task)->job_id;
}
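/*
 * Usage sketch (illustrative only): a scheduler typically records which
 * codelet implementation it selected before handing the task to a worker.
 * pick_first_impl() is hypothetical; valid implementation indices address
 * the codelet's cpu_funcs/cuda_funcs/... arrays.
 *
 *	static void pick_first_impl(struct starpu_task *task)
 *	{
 *		starpu_task_set_implementation(task, 0);
 *		STARPU_ASSERT(starpu_task_get_implementation(task) == 0);
 *	}
 */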
static starpu_pthread_t watchdog_thread;

static int sleep_some(float timeout)
{
	/* If we did a single sleep(timeout), we might have to wait too long
	 * at the end of the computation.  To avoid that, do a series of 1 s
	 * sleeps, checking after each one whether StarPU is still running. */
	float t;
	for (t = timeout ; t > 1.; t--)
	{
		starpu_sleep(1.);
		if (!_starpu_machine_is_running())
			/* Application finished, don't bother finishing the sleep */
			return 0;
	}
	/* ... and one final sleep (of less than 1 s) for the remainder, if needed */
	if (t > 0.)
		starpu_sleep(t);
	return 1;
}
/* Check from time to time that StarPU does finish some tasks */
static void *watchdog_func(void *arg)
{
	char *timeout_env = arg;
	float timeout, delay;

#ifdef _MSC_VER
	timeout = ((float) _atoi64(timeout_env)) / 1000000;
#else
	timeout = ((float) atoll(timeout_env)) / 1000000;
#endif
	delay = ((float) watchdog_delay) / 1000000;

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	starpu_pthread_setname("watchdog");

	if (!sleep_some(delay))
		return NULL;

	STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
	while (_starpu_machine_is_running())
	{
		int last_nsubmitted = starpu_task_nsubmitted();
		config->watchdog_ok = 0;
		STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);

		if (!sleep_some(timeout))
			return NULL;

		STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
		if (!config->watchdog_ok && last_nsubmitted
				&& last_nsubmitted == starpu_task_nsubmitted())
		{
			if (watchdog_hook == NULL)
				_STARPU_MSG("The StarPU watchdog detected that no task finished for %fs (can be configured through STARPU_WATCHDOG_TIMEOUT)\n",
					    timeout);
			else
				watchdog_hook(watchdog_hook_arg);

			if (watchdog_crash)
			{
				_STARPU_MSG("Crashing the process\n");
				raise(SIGABRT);
			}
			else if (watchdog_hook == NULL)
				_STARPU_MSG("Set the STARPU_WATCHDOG_CRASH environment variable if you want to abort the process in such a case\n");
		}
		/* Only shout again after another period */
		config->watchdog_ok = 1;
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);
	return NULL;
}
void starpu_task_watchdog_set_hook(void (*hook)(void *), void *hook_arg)
{
	watchdog_hook = hook;
	watchdog_hook_arg = hook_arg;
}
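/*
 * Usage sketch (illustrative only): with STARPU_WATCHDOG_TIMEOUT set in the
 * environment (in microseconds), an application can install a custom hook
 * that is called instead of the default message when no task completes
 * within the timeout.  my_watchdog_hook() is hypothetical.
 *
 *	#include <stdio.h>
 *	#include <starpu.h>
 *
 *	static void my_watchdog_hook(void *arg)
 *	{
 *		fprintf(stderr, "no task completed lately (state: %s)\n",
 *			(const char *) arg);
 *	}
 *
 *	// After starpu_init():
 *	//	starpu_task_watchdog_set_hook(my_watchdog_hook, "phase 2");
 */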
void _starpu_watchdog_init(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	char *timeout_env = starpu_getenv("STARPU_WATCHDOG_TIMEOUT");

	STARPU_PTHREAD_MUTEX_INIT(&config->submitted_mutex, NULL);

	if (!timeout_env)
		return;

	STARPU_PTHREAD_CREATE(&watchdog_thread, NULL, watchdog_func, timeout_env);
}

void _starpu_watchdog_shutdown(void)
{
	char *timeout_env = starpu_getenv("STARPU_WATCHDOG_TIMEOUT");
	if (!timeout_env)
		return;

	STARPU_PTHREAD_JOIN(watchdog_thread, NULL);
}
static void _starpu_ft_check_support(const struct starpu_task *task)
{
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	unsigned i;
	for (i = 0; i < nbuffers; i++)
	{
		enum starpu_data_access_mode mode = STARPU_TASK_GET_MODE(task, i);
		STARPU_ASSERT_MSG(mode == STARPU_R || mode == STARPU_W,
				  "starpu_task_ft_failed is only supported for tasks with access modes STARPU_R and STARPU_W");
	}
}
struct starpu_task *starpu_task_ft_create_retry
(const struct starpu_task *meta_task, const struct starpu_task *template_task, void (*check_ft)(void *))
{
	/* Create a new task to actually perform the computation */
	struct starpu_task *new_task = starpu_task_create();
	*new_task = *template_task;
	new_task->prologue_callback_func = NULL;
	/* XXX: cl_arg needs to be duplicated */
	STARPU_ASSERT_MSG(!meta_task->cl_arg_free || !meta_task->cl_arg, "not supported yet");
	STARPU_ASSERT_MSG(!meta_task->callback_func, "not supported");
	new_task->callback_func = check_ft;
	new_task->callback_arg = (void*) meta_task;
	new_task->callback_arg_free = 0;
	new_task->prologue_callback_arg_free = 0;
	STARPU_ASSERT_MSG(!new_task->prologue_callback_pop_arg_free, "not supported");
	new_task->use_tag = 0;
	new_task->synchronous = 0;
	new_task->destroy = 1;
	new_task->regenerate = 0;
	new_task->no_submitorder = 1;
	new_task->failed = 0;
	new_task->scheduled = 0;
	new_task->prefetched = 0;
	new_task->status = STARPU_TASK_INIT;
	new_task->profiling_info = NULL;
	new_task->prev = NULL;
	new_task->next = NULL;
	new_task->starpu_private = NULL;
	new_task->omp_task = NULL;

	return new_task;
}
static void _starpu_default_check_ft(void *arg)
{
	struct starpu_task *meta_task = arg;
	struct starpu_task *current_task = starpu_task_get_current();
	struct starpu_task *new_task;
	int ret;

	if (!current_task->failed)
	{
		starpu_task_ft_success(meta_task);
		return;
	}

	new_task = starpu_task_ft_create_retry
		(meta_task, current_task, _starpu_default_check_ft);

	ret = starpu_task_submit_nodeps(new_task);
	STARPU_ASSERT(!ret);
}
void starpu_task_ft_prologue(void *arg)
{
	struct starpu_task *meta_task = starpu_task_get_current();
	struct starpu_task *new_task;
	void (*check_ft)(void*) = arg;
	int ret;

	if (!check_ft)
		check_ft = _starpu_default_check_ft;

	/* Create a task which will do the actual computation */
	new_task = starpu_task_ft_create_retry
		(meta_task, meta_task, check_ft);

	ret = starpu_task_submit_nodeps(new_task);
	STARPU_ASSERT(!ret);

	/* Make the parent task wait until the result is correct */
	starpu_task_end_dep_add(meta_task, 1);
	meta_task->where = STARPU_NOWHERE;
}
void starpu_task_ft_failed(struct starpu_task *task)
{
	_starpu_ft_check_support(task);
	task->failed = 1;
}

void starpu_task_ft_success(struct starpu_task *meta_task)
{
	starpu_task_end_dep_release(meta_task);
}
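/*
 * Usage sketch (illustrative only): to opt a task into this retry-on-failure
 * scheme, the application sets starpu_task_ft_prologue() as the task's
 * prologue and has the kernel report failures with starpu_task_ft_failed().
 * unreliable_kernel() and compute_succeeded() below are hypothetical.
 *
 *	static void unreliable_kernel(void *buffers[], void *cl_arg)
 *	{
 *		(void) buffers; (void) cl_arg;
 *		if (!compute_succeeded())	// hypothetical failure check
 *			starpu_task_ft_failed(starpu_task_get_current());
 *	}
 *
 *	// At submission time:
 *	//	task->prologue_callback_func = starpu_task_ft_prologue;
 *	//	task->prologue_callback_arg = NULL;  // NULL selects the default check_ft
 *	// The meta task then only terminates once a retry has succeeded.
 */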