/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2017  Inria
 * Copyright (C) 2017       Erwan Leria
 * Copyright (C) 2009-2017  Université de Bordeaux
 * Copyright (C) 2010-2017  CNRS
 * Copyright (C) 2011       Télécom-SudParis
 * Copyright (C) 2016       Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <starpu_profiling.h>
#include <core/workers.h>
#include <core/sched_ctx.h>
#include <core/jobs.h>
#include <core/task.h>
#include <core/task_bundle.h>
#include <core/dependencies/data_concurrency.h>
#include <common/config.h>
#include <common/utils.h>
#include <common/fxt.h>
#include <profiling/profiling.h>
#include <profiling/bound.h>
#include <math.h>
#include <string.h>
#include <core/debug.h>
#include <time.h>
#include <signal.h>
#include <core/simgrid.h>
#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif
/* XXX this should be reinitialized when StarPU is shutdown (or we should make
 * sure that no task remains!) */
/* TODO: we could make this hierarchical to avoid contention? */
//static starpu_pthread_cond_t submitted_cond = STARPU_PTHREAD_COND_INITIALIZER;

/* This key stores the task currently handled by the thread; note that we
 * cannot use the worker structure to store that information because it is
 * possible that we have a task with a NULL codelet, which means its callback
 * could be executed by a user thread as well. */
static starpu_pthread_key_t current_task_key;

static int limit_min_submitted_tasks;
static int limit_max_submitted_tasks;
static int watchdog_crash;
static int watchdog_delay;

#define _STARPU_TASK_MAGIC 42

/* Called once at starpu_init */
void _starpu_task_init(void)
{
	STARPU_PTHREAD_KEY_CREATE(&current_task_key, NULL);
	limit_min_submitted_tasks = starpu_get_env_number("STARPU_LIMIT_MIN_SUBMITTED_TASKS");
	limit_max_submitted_tasks = starpu_get_env_number("STARPU_LIMIT_MAX_SUBMITTED_TASKS");
	watchdog_crash = starpu_get_env_number("STARPU_WATCHDOG_CRASH");
	watchdog_delay = starpu_get_env_number_default("STARPU_WATCHDOG_DELAY", 0);
}
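
/* Illustrative note (not from this file): with the two limit variables read
 * above, task submission can be throttled from the shell, e.g.
 *
 *   STARPU_LIMIT_MAX_SUBMITTED_TASKS=10000 \
 *   STARPU_LIMIT_MIN_SUBMITTED_TASKS=9000 ./my_app
 *
 * starpu_task_submit() then blocks once more than 10000 tasks are pending and
 * resumes submission when the count falls back to 9000; see the throttling
 * test in starpu_task_submit() below. The application name is of course
 * hypothetical. */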

void _starpu_task_deinit(void)
{
	STARPU_PTHREAD_KEY_DELETE(current_task_key);
}

void starpu_task_init(struct starpu_task *task)
{
	/* TODO: memcpy from a template instead? benchmark it */
	STARPU_ASSERT(task);

	/* As most of the fields must be initialised to NULL, let's put 0
	 * everywhere */
	memset(task, 0, sizeof(struct starpu_task));

	task->sequential_consistency = 1;
	task->where = -1;

	/* Now we can initialise the fields which require a custom value */
	/* Note: remember to update STARPU_TASK_INITIALIZER as well */
#if STARPU_DEFAULT_PRIO != 0
	task->priority = STARPU_DEFAULT_PRIO;
#endif
	task->detach = 1;
#if STARPU_TASK_INVALID != 0
	task->status = STARPU_TASK_INVALID;
#endif
	task->predicted = NAN;
	task->predicted_transfer = NAN;
	task->predicted_start = NAN;
	task->magic = _STARPU_TASK_MAGIC;
	task->sched_ctx = STARPU_NMAX_SCHED_CTXS;
	task->flops = 0.0;
}
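
/* Illustrative sketch (not part of this file): initializing a task that
 * lives on the stack rather than on the heap. "my_codelet" is a hypothetical
 * codelet.
 *
 *   struct starpu_task task;
 *   starpu_task_init(&task);
 *   task.cl = &my_codelet;
 *   task.detach = 0;               // so that we may wait for it
 *   starpu_task_submit(&task);
 *   starpu_task_wait(&task);
 *   starpu_task_clean(&task);      // never free() a static task
 */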

/* Free all the resources allocated for a task, without deallocating the task
 * structure itself (this is required for statically allocated tasks).
 * All values previously set by the user, like codelet and handles, remain
 * unchanged. */
void starpu_task_clean(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	task->magic = 0;

	/* If a buffer was allocated to store the profiling info, we free it. */
	if (task->profiling_info)
	{
		free(task->profiling_info);
		task->profiling_info = NULL;
	}

	/* In case the task is (still) part of a bundle */
	starpu_task_bundle_t bundle = task->bundle;
	if (bundle)
		starpu_task_bundle_remove(bundle, task);

	if (task->dyn_handles)
	{
		free(task->dyn_handles);
		task->dyn_handles = NULL;
		free(task->dyn_interfaces);
		task->dyn_interfaces = NULL;
	}

	if (task->dyn_modes)
	{
		free(task->dyn_modes);
		task->dyn_modes = NULL;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
	if (j)
	{
		_starpu_job_destroy(j);
		task->starpu_private = NULL;
	}
}

struct starpu_task * STARPU_ATTRIBUTE_MALLOC starpu_task_create(void)
{
	struct starpu_task *task;

	_STARPU_MALLOC(task, sizeof(struct starpu_task));
	starpu_task_init(task);

	/* Dynamically allocated tasks are destroyed by default */
	task->destroy = 1;

	return task;
}
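
/* Illustrative sketch (not part of this file): the usual dynamic-allocation
 * counterpart of the static example above. Since destroy defaults to 1, the
 * task is freed automatically once it terminates; "my_codelet" and
 * "my_handle" are hypothetical.
 *
 *   struct starpu_task *task = starpu_task_create();
 *   task->cl = &my_codelet;
 *   task->handles[0] = my_handle;
 *   int ret = starpu_task_submit(task);
 *   if (ret == -ENODEV)
 *       fprintf(stderr, "no worker can execute this codelet\n");
 */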

/* Free the resources allocated during starpu_task_create. This function can be
 * called automatically after the execution of a task by setting the "destroy"
 * flag of the starpu_task structure (default behaviour). Calling this function
 * on a statically allocated task results in undefined behaviour. */
void _starpu_task_destroy(struct starpu_task *task)
{
	/* If starpu_task_destroy is called in a callback, we just set the destroy
	   flag. The task will be destroyed after the callback returns. */
	if (task == starpu_task_get_current()
	    && _starpu_get_local_worker_status() == STATUS_CALLBACK)
	{
		task->destroy = 1;
	}
	else
	{
		starpu_task_clean(task);
		/* TODO: handle the case of a task with detach = 1 and destroy = 1 */
		/* TODO: handle the case of non-terminated tasks -> assertion failure, it's too dangerous to be doing something like this */

		/* Does the user want StarPU to release cl_arg? */
		if (task->cl_arg_free)
			free(task->cl_arg);

		/* Does the user want StarPU to release callback_arg? */
		if (task->callback_arg_free)
			free(task->callback_arg);

		/* Does the user want StarPU to release prologue_callback_arg? */
		if (task->prologue_callback_arg_free)
			free(task->prologue_callback_arg);

		/* Does the user want StarPU to release prologue_pop_arg? */
		if (task->prologue_callback_pop_arg_free)
			free(task->prologue_callback_pop_arg);

		free(task);
	}
}

void starpu_task_destroy(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->destroy || !task->detach, "starpu_task_destroy must not be called for a task with destroy = 1 and detach = 1");
	_starpu_task_destroy(task);
}

int starpu_task_finished(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->detach, "starpu_task_finished can only be called on tasks with detach = 0");
	return _starpu_job_finished(_starpu_get_job_associated_to_task(task));
}

int starpu_task_wait(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);

	STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");

	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or synchronous. Waiting returns immediately\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait must not be called from a task or callback");

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	_STARPU_TRACE_TASK_WAIT_START(j);

	starpu_do_schedule();
	_starpu_wait_job(j);

	/* as this is a synchronous task, the freeing of the job
	   structure was deferred */
	if (task->destroy)
		_starpu_task_destroy(task);

	_STARPU_TRACE_TASK_WAIT_END();
	_STARPU_LOG_OUT();
	return 0;
}
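
/* Illustrative sketch (not part of this file): starpu_task_wait() blocks,
 * while starpu_task_finished() above allows polling. Both require
 * detach = 0. A hypothetical progress loop:
 *
 *   task->detach = 0;
 *   starpu_task_submit(task);
 *   while (!starpu_task_finished(task))
 *       do_something_else();      // hypothetical application work
 *   starpu_task_wait(task);       // reclaims the deferred job structure
 */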

int starpu_task_wait_array(struct starpu_task **tasks, unsigned nb_tasks)
{
	unsigned i;

	for (i = 0; i < nb_tasks; i++)
	{
		int ret = starpu_task_wait(tasks[i]);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef STARPU_OPENMP
int _starpu_task_test_termination(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->detach, "_starpu_task_test_termination can only be called on tasks with detach = 0");

	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or synchronous\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	int ret = _starpu_test_job_termination(j);

	if (ret)
	{
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	return ret;
}
#endif

/* NB: in case we have a regenerable task, it is possible that the job was
 * already counted. */
int _starpu_submit_job(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	int ret;
#ifdef STARPU_OPENMP
	const unsigned continuation = j->continuation;
#else
	const unsigned continuation = 0;
#endif

	_STARPU_LOG_IN();
	/* notify bound computation of a new task */
	_starpu_bound_record(j);

	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_starpu_sched_task_submit(task);

#ifdef STARPU_USE_SC_HYPERVISOR
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);
	if (sched_ctx != NULL && j->task->sched_ctx != _starpu_get_initial_sched_ctx()->id && j->task->sched_ctx != STARPU_NMAX_SCHED_CTXS
	    && sched_ctx->perf_counters != NULL)
	{
		struct starpu_perfmodel_arch arch;
		_STARPU_MALLOC(arch.devices, sizeof(struct starpu_perfmodel_device));
		arch.ndevices = 1;
		arch.devices[0].type = STARPU_CPU_WORKER;
		arch.devices[0].devid = 0;
		arch.devices[0].ncores = 1;
		_starpu_compute_buffers_footprint(j->task->cl->model, &arch, 0, j);
		free(arch.devices);

		size_t data_size = 0;
		if (j->task->cl)
		{
			unsigned i, nbuffers = STARPU_TASK_GET_NBUFFERS(j->task);
			for (i = 0; i < nbuffers; i++)
			{
				starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
				if (handle != NULL)
					data_size += _starpu_data_get_size(handle);
			}
		}

		_STARPU_TRACE_HYPERVISOR_BEGIN();
		sched_ctx->perf_counters->notify_submitted_job(j->task, j->footprint, data_size);
		_STARPU_TRACE_HYPERVISOR_END();
	}
#endif /* STARPU_USE_SC_HYPERVISOR */

	/* We retain handle reference count */
	if (task->cl && !continuation)
	{
		unsigned i;
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
		for (i = 0; i < nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
			_starpu_spin_lock(&handle->header_lock);
			handle->busy_count++;
			_starpu_spin_unlock(&handle->header_lock);
		}
	}

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	_starpu_handle_job_submission(j);

#ifdef STARPU_OPENMP
	if (continuation)
	{
		j->discontinuous = 1;
		j->continuation = 0;
	}
#endif

#ifdef STARPU_OPENMP
	if (continuation)
	{
		ret = _starpu_reenforce_task_deps_and_schedule(j);
	}
	else
#endif
	{
		ret = _starpu_enforce_deps_and_schedule(j);
	}

	_STARPU_LOG_OUT();
	return ret;
}

/* Note: this is racy, so valgrind would complain. But since we'll always put
 * the same values, this is not a problem. */
void _starpu_codelet_check_deprecated_fields(struct starpu_codelet *cl)
{
	if (!cl)
		return;

	uint32_t where = cl->where;
	int is_where_unset = where == 0;
	unsigned i, some_impl;

	/* Check deprecated and unset fields (where, <device>_func,
	 * <device>_funcs) */

	/* CPU */
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS && cl->cpu_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cpu_func and cpu_funcs are set. Ignoring cpu_func.\n");
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS)
	{
		cl->cpu_funcs[0] = cl->cpu_func;
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cpu_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->cpu_func == 0)
	{
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_CPU;
	}

	/* CUDA */
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS && cl->cuda_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cuda_func and cuda_funcs are set. Ignoring cuda_func.\n");
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS)
	{
		cl->cuda_funcs[0] = cl->cuda_func;
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cuda_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->cuda_func == 0)
	{
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_CUDA;
	}

	/* OpenCL */
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS && cl->opencl_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both opencl_func and opencl_funcs are set. Ignoring opencl_func.\n");
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS)
	{
		cl->opencl_funcs[0] = cl->opencl_func;
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->opencl_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->opencl_func == 0)
	{
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_OPENCL;
	}

	/* MIC */
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->mic_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_MIC;
	}

	/* MPI master-slave */
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->mpi_ms_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_MPI_MS;
	}

	/* SCC */
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->scc_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_SCC;
	}

	/* CPU implementations referenced by name */
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cpu_funcs_name[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_MIC|STARPU_SCC|STARPU_MPI_MS;
	}

	cl->where = where;
}
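
/* Illustrative sketch (not part of this file): the deprecated single-function
 * fields handled above come from old StarPU releases; newer code fills the
 * per-implementation arrays directly and may leave .where unset so that it is
 * inferred as done above. "my_cpu_kernel" and "my_cuda_kernel" are
 * hypothetical:
 *
 *   struct starpu_codelet cl =
 *   {
 *       .cpu_funcs  = { my_cpu_kernel },
 *       .cuda_funcs = { my_cuda_kernel },
 *       .nbuffers   = 1,
 *       .modes      = { STARPU_RW },
 *   };
 */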

void _starpu_task_check_deprecated_fields(struct starpu_task *task STARPU_ATTRIBUTE_UNUSED)
{
	/* None any more */
}

static int _starpu_task_submit_head(struct starpu_task *task)
{
	unsigned is_sync = task->synchronous;
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	if (j->internal)
	{
		// Internal tasks are submitted to the initial context
		task->sched_ctx = _starpu_get_initial_sched_ctx()->id;
	}
	else if (task->sched_ctx == STARPU_NMAX_SCHED_CTXS)
	{
		// If the task has not specified a context, we set the current context
		task->sched_ctx = _starpu_sched_ctx_get_current_context();
	}

	if (is_sync)
	{
		/* It may not be possible to submit a synchronous
		 * (blocking) task from here */
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "submitting a synchronous task must not be done from a task or a callback");
		task->detach = 0;
	}

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);
	if (task->where == -1 && task->cl)
		task->where = task->cl->where;

	if (task->cl)
	{
		unsigned i;
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
		_STARPU_TRACE_UPDATE_TASK_CNT(0);

		/* Check buffers */
		if (task->dyn_handles == NULL)
			STARPU_ASSERT_MSG(STARPU_TASK_GET_NBUFFERS(task) <= STARPU_NMAXBUFS,
					  "Codelet %p has too many buffers (%d vs max %d). Either use the --enable-maxbuffers configure option to increase the max, or use dyn_handles instead of handles.",
					  task->cl, STARPU_TASK_GET_NBUFFERS(task), STARPU_NMAXBUFS);

		if (task->dyn_handles)
		{
			_STARPU_MALLOC(task->dyn_interfaces, nbuffers * sizeof(void *));
		}

		for (i = 0; i < nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
			enum starpu_data_access_mode mode = STARPU_TASK_GET_MODE(task, i);
			/* Make sure handles are valid */
			STARPU_ASSERT_MSG(handle->magic == _STARPU_TASK_MAGIC, "data %p is invalid (was it already unregistered?)", handle);
			/* Make sure handles are not partitioned */
			STARPU_ASSERT_MSG(handle->nchildren == 0, "only unpartitioned data (or the pieces of a partitioned data) can be used in a task");
			/* Provide the home interface for now if any,
			 * for can_execute hooks */
			if (handle->home_node != -1)
				_STARPU_TASK_SET_INTERFACE(task, starpu_data_get_interface_on_node(handle, handle->home_node), i);
			if (!(task->cl->flags & STARPU_CODELET_NOPLANS) &&
			    ((handle->nplans && !handle->nchildren) || handle->siblings))
				/* This handle is involved in asynchronous
				 * partitioning as a parent or a child; make
				 * sure the right plan is active, and submit
				 * the appropriate partitioning / unpartitioning
				 * if not */
				_starpu_data_partition_access_submit(handle, (mode & STARPU_W) != 0);
		}

		/* Check that the types of worker(s) required by the task exist */
		if (!_starpu_worker_exists(task))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		/* In case we require that a task be explicitly
		 * executed on a specific worker, we make sure that the worker
		 * is able to execute this task. */
		if (task->execute_on_a_specific_worker && !starpu_combined_worker_can_execute_task(task->workerid, task, 0))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		if (task->cl->model)
			_starpu_init_and_load_perfmodel(task->cl->model);

		if (task->cl->energy_model)
			_starpu_init_and_load_perfmodel(task->cl->energy_model);
	}

	return 0;
}

/* applications should submit new tasks to StarPU through this function */
int starpu_task_submit(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(task->magic == _STARPU_TASK_MAGIC, "Tasks must be created with starpu_task_create, or initialized with starpu_task_init.");

	int ret;
	unsigned is_sync = task->synchronous;
	starpu_task_bundle_t bundle = task->bundle;

	/* internally, StarPU manipulates a struct _starpu_job * which is a wrapper around a
	 * task structure; it is possible that this job structure was already
	 * allocated. */
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	const unsigned continuation =
#ifdef STARPU_OPENMP
		j->continuation
#else
		0
#endif
		;

	if (!j->internal)
	{
		int nsubmitted_tasks = starpu_task_nsubmitted();
		if (limit_max_submitted_tasks >= 0 && limit_max_submitted_tasks < nsubmitted_tasks
		    && limit_min_submitted_tasks >= 0 && limit_min_submitted_tasks < nsubmitted_tasks)
		{
			starpu_do_schedule();
			_STARPU_TRACE_TASK_THROTTLE_START();
			starpu_task_wait_for_n_submitted(limit_min_submitted_tasks);
			_STARPU_TRACE_TASK_THROTTLE_END();
		}
	}

	_STARPU_TRACE_TASK_SUBMIT_START();

	ret = _starpu_task_submit_head(task);
	if (ret)
	{
		_STARPU_TRACE_TASK_SUBMIT_END();
		return ret;
	}

	if (!continuation)
	{
		_STARPU_TRACE_TASK_SUBMIT(j,
			_starpu_get_sched_ctx_struct(task->sched_ctx)->iterations[0],
			_starpu_get_sched_ctx_struct(task->sched_ctx)->iterations[1]);
	}

	/* If this is a continuation, we don't modify the implicit data dependencies detected earlier. */
	if (task->cl && !continuation)
		_starpu_detect_implicit_data_deps(task);

	if (bundle)
	{
		/* We need to make sure that models for other tasks of the
		 * bundle are also loaded, so the scheduler can estimate the
		 * duration of the whole bundle */
		STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);

		struct _starpu_task_bundle_entry *entry;
		entry = bundle->list;

		while (entry)
		{
			if (entry->task->cl->model)
				_starpu_init_and_load_perfmodel(entry->task->cl->model);

			if (entry->task->cl->energy_model)
				_starpu_init_and_load_perfmodel(entry->task->cl->energy_model);

			entry = entry->next;
		}

		STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
	}

	/* If profiling is activated, we allocate a structure to store the
	 * appropriate info. */
	struct starpu_profiling_task_info *info;
	int profiling = starpu_profiling_status_get();
	info = _starpu_allocate_profiling_info_if_needed(task);
	task->profiling_info = info;

	/* The task is considered blocked until we are sure that no
	 * dependency remains. */
	task->status = STARPU_TASK_BLOCKED;

	if (profiling)
		_starpu_clock_gettime(&info->submit_time);

	ret = _starpu_submit_job(j);
#ifdef STARPU_SIMGRID
	if (_starpu_simgrid_task_submit_cost())
		MSG_process_sleep(0.000001);
#endif

	if (is_sync)
	{
		_starpu_sched_do_schedule(task->sched_ctx);
		_starpu_wait_job(j);
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	_STARPU_TRACE_TASK_SUBMIT_END();
	_STARPU_LOG_OUT();
	return ret;
}
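
/* Illustrative sketch (not part of this file): setting synchronous = 1 makes
 * starpu_task_submit() behave like a submit-and-wait pair, as implemented
 * above ("my_codelet" is hypothetical):
 *
 *   struct starpu_task *task = starpu_task_create();
 *   task->cl = &my_codelet;
 *   task->synchronous = 1;
 *   int ret = starpu_task_submit(task);   // returns only after completion
 *   STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
 */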

int _starpu_task_submit_internally(struct starpu_task *task)
{
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	j->internal = 1;
	return starpu_task_submit(task);
}

/* applications should submit new tasks to StarPU through this function */
int starpu_task_submit_to_ctx(struct starpu_task *task, unsigned sched_ctx_id)
{
	task->sched_ctx = sched_ctx_id;
	return starpu_task_submit(task);
}

/* The StarPU core can submit tasks directly to the scheduler or a worker,
 * skipping dependencies completely (when it knows what it is doing). */
int _starpu_task_submit_nodeps(struct starpu_task *task)
{
	int ret = _starpu_task_submit_head(task);
	STARPU_ASSERT(ret == 0);

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_starpu_sched_task_submit(task);

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	_starpu_handle_job_submission(j);
	_starpu_increment_nready_tasks_of_sched_ctx(j->task->sched_ctx, j->task->flops, j->task);
	if (task->cl)
		/* This would be done by data dependencies checking */
		_starpu_job_set_ordered_buffers(j);
	task->status = STARPU_TASK_READY;
	STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

	return _starpu_push_task(j);
}

/*
 * worker->sched_mutex must be locked when calling this function.
 */
int _starpu_task_submit_conversion_task(struct starpu_task *task,
					unsigned int workerid)
{
	int ret;
	STARPU_ASSERT(task->cl);
	STARPU_ASSERT(task->execute_on_a_specific_worker);

	ret = _starpu_task_submit_head(task);
	STARPU_ASSERT(ret == 0);

	/* We retain handle reference count that would have been acquired by data dependencies. */
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i = 0; i < nbuffers; i++)
	{
		starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
		_starpu_spin_lock(&handle->header_lock);
		handle->busy_count++;
		_starpu_spin_unlock(&handle->header_lock);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_starpu_sched_task_submit(task);

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	_starpu_handle_job_submission(j);
	_starpu_increment_nready_tasks_of_sched_ctx(j->task->sched_ctx, j->task->flops, j->task);
	_starpu_job_set_ordered_buffers(j);

	task->status = STARPU_TASK_READY;
	_starpu_profiling_set_task_push_start_time(task);

	unsigned node = starpu_worker_get_memory_node(workerid);
	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, node);

	struct _starpu_worker *worker;
	worker = _starpu_get_worker_struct(workerid);
	starpu_task_list_push_back(&worker->local_tasks, task);
	starpu_wake_worker_locked(worker->workerid);

	_starpu_profiling_set_task_push_end_time(task);

	STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
	return 0;
}

void starpu_codelet_init(struct starpu_codelet *cl)
{
	memset(cl, 0, sizeof(struct starpu_codelet));
}
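
/* Illustrative sketch (not part of this file): starpu_codelet_init() is the
 * runtime-initialization counterpart of the designated-initializer style
 * shown earlier, useful when the codelet is built up dynamically
 * ("my_cpu_kernel" is hypothetical):
 *
 *   struct starpu_codelet cl;
 *   starpu_codelet_init(&cl);
 *   cl.cpu_funcs[0] = my_cpu_kernel;
 *   cl.nbuffers = 2;
 *   cl.modes[0] = STARPU_R;
 *   cl.modes[1] = STARPU_W;
 */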

#define _STARPU_CODELET_WORKER_NAME_LEN 32

void starpu_codelet_display_stats(struct starpu_codelet *cl)
{
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();

	if (cl->name)
		fprintf(stderr, "Statistics for codelet %s\n", cl->name);
	else if (cl->model && cl->model->symbol)
		fprintf(stderr, "Statistics for codelet %s\n", cl->model->symbol);

	unsigned long total = 0;

	for (worker = 0; worker < nworkers; worker++)
		total += cl->per_worker_stats[worker];

	for (worker = 0; worker < nworkers; worker++)
	{
		char name[_STARPU_CODELET_WORKER_NAME_LEN];
		starpu_worker_get_name(worker, name, _STARPU_CODELET_WORKER_NAME_LEN);

		fprintf(stderr, "\t%s -> %lu / %lu (%2.2f %%)\n", name, cl->per_worker_stats[worker], total, (100.0f*cl->per_worker_stats[worker])/total);
	}
}

/*
 * We wait for all the tasks that have already been submitted. Note that a
 * regenerable task is not considered finished until it has been explicitly
 * made non-regenerable again (e.g. from a callback).
 */
int _starpu_task_wait_for_all_and_return_nb_waited_tasks(void)
{
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned sched_ctx_id = nsched_ctxs == 1 ? 0 : starpu_sched_ctx_get_context();

	/* if there is no indication about which context to wait for,
	   we wait for all tasks submitted to StarPU */
	if (sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		_STARPU_DEBUG("Waiting for all tasks\n");
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_all must not be called from a task or callback");
		STARPU_AYU_BARRIER();
		struct _starpu_machine_config *config = _starpu_get_machine_config();
		if (config->topology.nsched_ctxs == 1)
		{
			_starpu_sched_do_schedule(0);
			return _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(0);
		}
		else
		{
			int s;
			for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
				{
					_starpu_sched_do_schedule(config->sched_ctxs[s].id);
				}
			}
			for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
				{
					starpu_task_wait_for_all_in_ctx(config->sched_ctxs[s].id);
				}
			}
			return 0;
		}
	}
	else
	{
		_starpu_sched_do_schedule(sched_ctx_id);
		_STARPU_DEBUG("Waiting for tasks submitted to context %u\n", sched_ctx_id);
		return _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(sched_ctx_id);
	}
}

int starpu_task_wait_for_all(void)
{
	_starpu_task_wait_for_all_and_return_nb_waited_tasks();
	return 0;
}
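
/* Illustrative sketch (not part of this file): the canonical end-of-program
 * barrier, after which data can be read back and StarPU shut down:
 *
 *   for (i = 0; i < N; i++)
 *       starpu_task_submit(tasks[i]);
 *   starpu_task_wait_for_all();       // waits in every scheduling context
 *   starpu_data_unregister(handle);   // hypothetical handle
 *   starpu_shutdown();
 */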

int _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(unsigned sched_ctx)
{
	_STARPU_TRACE_TASK_WAIT_FOR_ALL_START();
	int ret = _starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx);
	_STARPU_TRACE_TASK_WAIT_FOR_ALL_END();
	/* TODO: improve Temanejo into knowing about contexts ... */
	STARPU_AYU_BARRIER();
	return ret;
}

int starpu_task_wait_for_all_in_ctx(unsigned sched_ctx)
{
	_starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(sched_ctx);
	return 0;
}

/*
 * We wait until a certain number of the already-submitted tasks are left.
 * Note that a regenerable task is not considered finished until it has been
 * explicitly made non-regenerable again (e.g. from a callback).
 */
int starpu_task_wait_for_n_submitted(unsigned n)
{
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned sched_ctx_id = nsched_ctxs == 1 ? 0 : starpu_sched_ctx_get_context();

	/* if there is no indication about which context to wait for,
	   we wait for all tasks submitted to StarPU */
	if (sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		_STARPU_DEBUG("Waiting for all tasks\n");
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_n_submitted must not be called from a task or callback");

		struct _starpu_machine_config *config = _starpu_get_machine_config();
		if (config->topology.nsched_ctxs == 1)
			_starpu_wait_for_n_submitted_tasks_of_sched_ctx(0, n);
		else
		{
			int s;
			for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
				{
					_starpu_wait_for_n_submitted_tasks_of_sched_ctx(config->sched_ctxs[s].id, n);
				}
			}
		}

		return 0;
	}
	else
	{
		_STARPU_DEBUG("Waiting for tasks submitted to context %u\n", sched_ctx_id);
		_starpu_wait_for_n_submitted_tasks_of_sched_ctx(sched_ctx_id, n);
	}
	return 0;
}

int starpu_task_wait_for_n_submitted_in_ctx(unsigned sched_ctx, unsigned n)
{
	_starpu_wait_for_n_submitted_tasks_of_sched_ctx(sched_ctx, n);
	return 0;
}

/*
 * We wait until there is no ready task any more (i.e. StarPU will not be able
 * to make progress any more).
 */
int starpu_task_wait_for_no_ready(void)
{
	STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_no_ready must not be called from a task or callback");

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if (config->topology.nsched_ctxs == 1)
	{
		_starpu_sched_do_schedule(0);
		_starpu_wait_for_no_ready_of_sched_ctx(0);
	}
	else
	{
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				_starpu_sched_do_schedule(config->sched_ctxs[s].id);
			}
		}
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				_starpu_wait_for_no_ready_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}

	return 0;
}

void starpu_iteration_push(unsigned long iteration)
{
	struct _starpu_sched_ctx *ctx = _starpu_get_sched_ctx_struct(_starpu_sched_ctx_get_current_context());
	unsigned level = ctx->iteration_level++;
	if (level < sizeof(ctx->iterations)/sizeof(ctx->iterations[0]))
		ctx->iterations[level] = iteration;
}

void starpu_iteration_pop(void)
{
	struct _starpu_sched_ctx *ctx = _starpu_get_sched_ctx_struct(_starpu_sched_ctx_get_current_context());
	STARPU_ASSERT_MSG(ctx->iteration_level > 0, "calls to starpu_iteration_pop must match starpu_iteration_push calls");
	/* decrement first, so that we clear the same slot that the matching
	 * post-increment push wrote to */
	unsigned level = --ctx->iteration_level;
	if (level < sizeof(ctx->iterations)/sizeof(ctx->iterations[0]))
		ctx->iterations[level] = -1;
}
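
/* Illustrative sketch (not part of this file): tagging tasks with the
 * iteration they belong to, e.g. for offline trace analysis. Push before
 * submitting an iteration's tasks, pop afterwards:
 *
 *   for (iter = 0; iter < niter; iter++)
 *   {
 *       starpu_iteration_push(iter);
 *       submit_tasks_for_iteration(iter);   // hypothetical helper
 *       starpu_iteration_pop();
 *   }
 */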

void starpu_do_schedule(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if (config->topology.nsched_ctxs == 1)
		_starpu_sched_do_schedule(0);
	else
	{
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				_starpu_sched_do_schedule(config->sched_ctxs[s].id);
			}
		}
	}
}

void
starpu_drivers_request_termination(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
	int nsubmitted = starpu_task_nsubmitted();
	config->submitting = 0;
	if (nsubmitted == 0)
	{
		ANNOTATE_HAPPENS_AFTER(&config->running);
		config->running = 0;
		ANNOTATE_HAPPENS_BEFORE(&config->running);
		STARPU_WMB();
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				_starpu_check_nsubmitted_tasks_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}

	STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);
}

int starpu_task_nsubmitted(void)
{
	int nsubmitted = 0;
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if (config->topology.nsched_ctxs == 1)
		nsubmitted = _starpu_get_nsubmitted_tasks_of_sched_ctx(0);
	else
	{
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				nsubmitted += _starpu_get_nsubmitted_tasks_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}
	return nsubmitted;
}

int starpu_task_nready(void)
{
	int nready = 0;
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if (config->topology.nsched_ctxs == 1)
		nready = starpu_sched_ctx_get_nready_tasks(0);
	else
	{
		int s;
		for (s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if (config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				nready += starpu_sched_ctx_get_nready_tasks(config->sched_ctxs[s].id);
			}
		}
	}
	return nready;
}

/* Return the task currently executed by the worker, or NULL if this is called
 * either from a thread that is not executing a task, or simply because there
 * is no task being executed at the moment. */
struct starpu_task *starpu_task_get_current(void)
{
	return (struct starpu_task *) STARPU_PTHREAD_GETSPECIFIC(current_task_key);
}

void _starpu_set_current_task(struct starpu_task *task)
{
	STARPU_PTHREAD_SETSPECIFIC(current_task_key, task);
}
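
/* Illustrative sketch (not part of this file): starpu_task_get_current() is
 * typically called from within a kernel implementation, e.g. to inspect the
 * scheduling decision ("my_cpu_kernel" is hypothetical):
 *
 *   void my_cpu_kernel(void *buffers[], void *cl_arg)
 *   {
 *       struct starpu_task *task = starpu_task_get_current();
 *       unsigned impl = starpu_task_get_implementation(task);
 *       (void) impl;   // e.g. select a variant-specific code path
 *   }
 */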

#ifdef STARPU_OPENMP
/* Prepare the fields of the current task for accepting a new set of
 * dependencies in anticipation of becoming a continuation.
 *
 * When the task becomes 'continued', it will only be queued again when the new
 * set of dependencies is fulfilled. */
void _starpu_task_prepare_for_continuation(void)
{
	_starpu_job_prepare_for_continuation(_starpu_get_job_associated_to_task(starpu_task_get_current()));
}

void _starpu_task_prepare_for_continuation_ext(unsigned continuation_resubmit,
		void (*continuation_callback_on_sleep)(void *arg), void *continuation_callback_on_sleep_arg)
{
	_starpu_job_prepare_for_continuation_ext(_starpu_get_job_associated_to_task(starpu_task_get_current()),
		continuation_resubmit, continuation_callback_on_sleep, continuation_callback_on_sleep_arg);
}

void _starpu_task_set_omp_cleanup_callback(struct starpu_task *task, void (*omp_cleanup_callback)(void *arg), void *omp_cleanup_callback_arg)
{
	_starpu_job_set_omp_cleanup_callback(_starpu_get_job_associated_to_task(task),
		omp_cleanup_callback, omp_cleanup_callback_arg);
}
#endif

/*
 * Returns 0 if the task does not use any multiformat handle, 1 otherwise.
 */
int
_starpu_task_uses_multiformat_handles(struct starpu_task *task)
{
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i = 0; i < nbuffers; i++)
	{
		if (_starpu_data_is_multiformat_handle(STARPU_TASK_GET_HANDLE(task, i)))
			return 1;
	}

	return 0;
}

/*
 * Checks whether the given handle needs to be converted in order to be used on
 * the node given as the second argument.
 */
int
_starpu_handle_needs_conversion_task(starpu_data_handle_t handle,
				     unsigned int node)
{
	return _starpu_handle_needs_conversion_task_for_arch(handle, starpu_node_get_kind(node));
}

int
_starpu_handle_needs_conversion_task_for_arch(starpu_data_handle_t handle,
					      enum starpu_node_kind node_kind)
{
	/*
	 * Here, we assume that CUDA devices and OpenCL devices use the
	 * same data structure. A conversion is only needed when moving
	 * data from a CPU to a GPU, or the other way around.
	 */
	switch (node_kind)
	{
		case STARPU_CPU_RAM:
			switch (starpu_node_get_kind(handle->mf_node))
			{
				case STARPU_CPU_RAM:
					return 0;
				case STARPU_CUDA_RAM:      /* Fall through */
				case STARPU_OPENCL_RAM:
				case STARPU_MIC_RAM:
				case STARPU_MPI_MS_RAM:
				case STARPU_SCC_RAM:
					return 1;
				default:
					STARPU_ABORT();
			}
			break;
		case STARPU_CUDA_RAM:      /* Fall through */
		case STARPU_OPENCL_RAM:
		case STARPU_MIC_RAM:
		case STARPU_MPI_MS_RAM:
		case STARPU_SCC_RAM:
			switch (starpu_node_get_kind(handle->mf_node))
			{
				case STARPU_CPU_RAM:
					return 1;
				case STARPU_CUDA_RAM:
				case STARPU_OPENCL_RAM:
				case STARPU_MIC_RAM:
				case STARPU_MPI_MS_RAM:
				case STARPU_SCC_RAM:
					return 0;
				default:
					STARPU_ABORT();
			}
			break;
		default:
			STARPU_ABORT();
	}
	/* this statement should never be reached */
	return -EINVAL;
}

void starpu_task_set_implementation(struct starpu_task *task, unsigned impl)
{
	_starpu_get_job_associated_to_task(task)->nimpl = impl;
}

unsigned starpu_task_get_implementation(struct starpu_task *task)
{
	return _starpu_get_job_associated_to_task(task)->nimpl;
}

unsigned long starpu_task_get_job_id(struct starpu_task *task)
{
	return _starpu_get_job_associated_to_task(task)->job_id;
}

static starpu_pthread_t watchdog_thread;

static int sleep_some(float timeout)
{
	/* If we did a single sleep(timeout), we might have to wait too long at the end of the computation. */
	/* To avoid that, we do several sleep()s of 1 s (and check after each one whether StarPU is still running) */
	float t;
	for (t = timeout; t > 1.; t--)
	{
		starpu_sleep(1.);
		if (!_starpu_machine_is_running())
			/* Application finished, don't bother finishing the sleep */
			return 0;
	}
	/* and one final sleep (of less than 1 s) for the rest (if needed) */
	if (t > 0.)
		starpu_sleep(t);
	return 1;
}

/* Check from time to time that StarPU does finish some tasks */
static void *watchdog_func(void *arg)
{
	char *timeout_env = arg;
	float timeout, delay;

#ifdef _MSC_VER
	timeout = ((float) _atoi64(timeout_env)) / 1000000;
#else
	timeout = ((float) atoll(timeout_env)) / 1000000;
#endif
	delay = ((float) watchdog_delay) / 1000000;
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	starpu_pthread_setname("watchdog");

	if (!sleep_some(delay))
		return NULL;

	STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
	while (_starpu_machine_is_running())
	{
		int last_nsubmitted = starpu_task_nsubmitted();
		config->watchdog_ok = 0;
		STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);

		if (!sleep_some(timeout))
			return NULL;

		STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
		if (!config->watchdog_ok && last_nsubmitted
		    && last_nsubmitted == starpu_task_nsubmitted())
		{
			_STARPU_MSG("The StarPU watchdog detected that no task finished for %fs (can be configured through STARPU_WATCHDOG_TIMEOUT)\n",
				    timeout);
			if (watchdog_crash)
			{
				_STARPU_MSG("Crashing the process\n");
				raise(SIGABRT);
			}
			else
				_STARPU_MSG("Set the STARPU_WATCHDOG_CRASH environment variable if you want to abort the process in such a case\n");
		}
		/* Only shout again after another period */
		config->watchdog_ok = 1;
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);
	return NULL;
}

void _starpu_watchdog_init(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	char *timeout_env = starpu_getenv("STARPU_WATCHDOG_TIMEOUT");

	STARPU_PTHREAD_MUTEX_INIT(&config->submitted_mutex, NULL);

	if (!timeout_env)
		return;

	STARPU_PTHREAD_CREATE(&watchdog_thread, NULL, watchdog_func, timeout_env);
}
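
/* Illustrative note (not from this file): the watchdog is armed entirely
 * through environment variables, all in microseconds as the divisions by
 * 1000000 above show, e.g.
 *
 *   STARPU_WATCHDOG_TIMEOUT=10000000 \
 *   STARPU_WATCHDOG_DELAY=2000000 \
 *   STARPU_WATCHDOG_CRASH=1 ./my_app
 *
 * which aborts the (hypothetical) application if no task completes during
 * any 10-second window, after an initial 2-second grace period. */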

void _starpu_watchdog_shutdown(void)
{
	char *timeout_env = starpu_getenv("STARPU_WATCHDOG_TIMEOUT");
	if (!timeout_env)
		return;

	STARPU_PTHREAD_JOIN(watchdog_thread, NULL);
}