task.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2017 Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 CNRS
 * Copyright (C) 2011 Télécom-SudParis
 * Copyright (C) 2011, 2014, 2016 INRIA
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_profiling.h>
#include <core/workers.h>
#include <core/sched_ctx.h>
#include <core/jobs.h>
#include <core/task.h>
#include <core/task_bundle.h>
#include <core/dependencies/data_concurrency.h>
#include <common/config.h>
#include <common/utils.h>
#include <common/fxt.h>
#include <profiling/profiling.h>
#include <profiling/bound.h>
#include <math.h>
#include <string.h>
#include <core/debug.h>
#include <time.h>
#include <signal.h>
#include <core/simgrid.h>

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif
/* XXX this should be reinitialized when StarPU is shut down (or we should make
 * sure that no task remains!) */
/* TODO we could make this hierarchical to avoid contention? */
//static starpu_pthread_cond_t submitted_cond = STARPU_PTHREAD_COND_INITIALIZER;

/* This key stores the task currently handled by the thread; note that we
 * cannot use the worker structure to store that information because it is
 * possible that we have a task with a NULL codelet, which means its callback
 * could be executed by a user thread as well. */
static starpu_pthread_key_t current_task_key;

static int limit_min_submitted_tasks;
static int limit_max_submitted_tasks;
static int watchdog_crash;

/* Called once at starpu_init */
void _starpu_task_init(void)
{
	STARPU_PTHREAD_KEY_CREATE(&current_task_key, NULL);
	limit_min_submitted_tasks = starpu_get_env_number("STARPU_LIMIT_MIN_SUBMITTED_TASKS");
	limit_max_submitted_tasks = starpu_get_env_number("STARPU_LIMIT_MAX_SUBMITTED_TASKS");
	watchdog_crash = starpu_get_env_number("STARPU_WATCHDOG_CRASH");
}

void _starpu_task_deinit(void)
{
	STARPU_PTHREAD_KEY_DELETE(current_task_key);
}
void starpu_task_init(struct starpu_task *task)
{
	/* TODO: memcpy from a template instead? benchmark it */
	STARPU_ASSERT(task);

	/* As most of the fields must be initialised to NULL, let's put 0
	 * everywhere */
	memset(task, 0, sizeof(struct starpu_task));

	task->sequential_consistency = 1;

	/* Now we can initialise fields which require a custom value */
#if STARPU_DEFAULT_PRIO != 0
	task->priority = STARPU_DEFAULT_PRIO;
#endif

	task->detach = 1;

#if STARPU_TASK_INVALID != 0
	task->status = STARPU_TASK_INVALID;
#endif

	task->predicted = NAN;
	task->predicted_transfer = NAN;
	task->predicted_start = NAN;

	task->magic = 42;
	task->sched_ctx = STARPU_NMAX_SCHED_CTXS;

	task->flops = 0.0;
}
/* Free all the resources allocated for a task, without deallocating the task
 * structure itself (this is required for statically allocated tasks).
 * All values previously set by the user, like codelet and handles, remain
 * unchanged. */
void starpu_task_clean(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	task->magic = 0;

	/* If a buffer was allocated to store the profiling info, we free it. */
	if (task->profiling_info)
	{
		free(task->profiling_info);
		task->profiling_info = NULL;
	}

	/* In case the task is (still) part of a bundle */
	starpu_task_bundle_t bundle = task->bundle;
	if (bundle)
		starpu_task_bundle_remove(bundle, task);

	if (task->dyn_handles)
	{
		free(task->dyn_handles);
		task->dyn_handles = NULL;
		free(task->dyn_interfaces);
		task->dyn_interfaces = NULL;
	}

	if (task->dyn_modes)
	{
		free(task->dyn_modes);
		task->dyn_modes = NULL;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
	if (j)
	{
		_starpu_job_destroy(j);
		task->starpu_private = NULL;
	}
}
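
/* Example: typical lifecycle of a statically allocated task. This is a
 * minimal sketch (not part of this file's code), assuming a codelet `my_cl`
 * and a data handle `handle` registered elsewhere by the application:
 *
 *	struct starpu_task task;
 *	starpu_task_init(&task);
 *	task.cl = &my_cl;
 *	task.handles[0] = handle;
 *	task.detach = 0;            // so that starpu_task_wait() is allowed
 *	if (starpu_task_submit(&task) == 0)
 *		starpu_task_wait(&task);
 *	starpu_task_clean(&task);   // free internal resources, keep the struct
 */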
struct starpu_task * STARPU_ATTRIBUTE_MALLOC starpu_task_create(void)
{
	struct starpu_task *task;

	_STARPU_MALLOC(task, sizeof(struct starpu_task));
	starpu_task_init(task);

	/* Dynamically allocated tasks are destroyed by default */
	task->destroy = 1;

	return task;
}
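
/* Example: the dynamic counterpart. A minimal sketch, assuming a codelet
 * `my_cl` defined by the application; since destroy == 1 and detach == 1 by
 * default, the task is freed automatically once it has completed:
 *
 *	struct starpu_task *task = starpu_task_create();
 *	task->cl = &my_cl;
 *	int ret = starpu_task_submit(task);
 *	// no starpu_task_destroy() needed: destroy == 1 and detach == 1
 */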
/* Free the resources allocated during starpu_task_create. This function can be
 * called automatically after the execution of a task by setting the "destroy"
 * flag of the starpu_task structure (default behaviour). Calling this function
 * on a statically allocated task results in undefined behaviour. */
void _starpu_task_destroy(struct starpu_task *task)
{
	/* If starpu_task_destroy is called in a callback, we just set the destroy
	   flag. The task will be destroyed after the callback returns */
	if (task == starpu_task_get_current()
	    && _starpu_get_local_worker_status() == STATUS_CALLBACK)
	{
		task->destroy = 1;
	}
	else
	{
		starpu_task_clean(task);
		/* TODO handle the case of task with detach = 1 and destroy = 1 */
		/* TODO handle the case of non terminated tasks -> return -EINVAL */

		/* Does the user want StarPU to release cl_arg? */
		if (task->cl_arg_free)
			free(task->cl_arg);

		/* Does the user want StarPU to release callback_arg? */
		if (task->callback_arg_free)
			free(task->callback_arg);

		/* Does the user want StarPU to release prologue_callback_arg? */
		if (task->prologue_callback_arg_free)
			free(task->prologue_callback_arg);

		/* Does the user want StarPU to release prologue_pop_arg? */
		if (task->prologue_callback_pop_arg_free)
			free(task->prologue_callback_pop_arg);

		free(task);
	}
}
void starpu_task_destroy(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->destroy || !task->detach, "starpu_task_destroy must not be called for task with destroy = 1 and detach = 1");
	_starpu_task_destroy(task);
}

int starpu_task_finished(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->detach, "starpu_task_finished can only be called on tasks with detach = 0");
	return _starpu_job_finished(_starpu_get_job_associated_to_task(task));
}
int starpu_task_wait(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);

	STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");

	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or synchronous. Waiting returns immediately\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait must not be called from a task or callback");

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	_STARPU_TRACE_TASK_WAIT_START(j);

	starpu_do_schedule();
	_starpu_wait_job(j);

	/* as this is a synchronous task, the release of the job
	   structure was deferred */
	if (task->destroy)
		_starpu_task_destroy(task);

	_STARPU_TRACE_TASK_WAIT_END();
	_STARPU_LOG_OUT();
	return 0;
}
int starpu_task_wait_array(struct starpu_task **tasks, unsigned nb_tasks)
{
	unsigned i;

	for (i = 0; i < nb_tasks; i++)
	{
		int ret = starpu_task_wait(tasks[i]);
		if (ret)
			return ret;
	}
	return 0;
}
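
/* Example: waiting on a batch of non-detached tasks at once; the first error
 * code encountered is returned. A minimal sketch, assuming `tasks[]` was
 * filled with N tasks submitted with detach = 0 (N is hypothetical):
 *
 *	int ret = starpu_task_wait_array(tasks, N);
 *	if (ret)
 *		fprintf(stderr, "waiting on the batch failed: %d\n", ret);
 */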
#ifdef STARPU_OPENMP
int _starpu_task_test_termination(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");

	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or synchronous\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	int ret = _starpu_test_job_termination(j);

	if (ret)
	{
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	return ret;
}
#endif
/* NB in case we have a regenerable task, it is possible that the job was
 * already counted. */
int _starpu_submit_job(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	int ret;
#ifdef STARPU_OPENMP
	const unsigned continuation = j->continuation;
#else
	const unsigned continuation = 0;
#endif

	_STARPU_LOG_IN();
	/* notify bound computation of a new task */
	_starpu_bound_record(j);

	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_starpu_sched_task_submit(task);

#ifdef STARPU_USE_SC_HYPERVISOR
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);
	if(sched_ctx != NULL && j->task->sched_ctx != _starpu_get_initial_sched_ctx()->id && j->task->sched_ctx != STARPU_NMAX_SCHED_CTXS
	   && sched_ctx->perf_counters != NULL)
	{
		struct starpu_perfmodel_arch arch;
		_STARPU_MALLOC(arch.devices, sizeof(struct starpu_perfmodel_device));
		arch.ndevices = 1;
		arch.devices[0].type = STARPU_CPU_WORKER;
		arch.devices[0].devid = 0;
		arch.devices[0].ncores = 1;
		_starpu_compute_buffers_footprint(j->task->cl->model, &arch, 0, j);
		free(arch.devices);
		size_t data_size = 0;
		if (j->task->cl)
		{
			unsigned i, nbuffers = STARPU_TASK_GET_NBUFFERS(j->task);
			for(i = 0; i < nbuffers; i++)
			{
				starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
				if (handle != NULL)
					data_size += _starpu_data_get_size(handle);
			}
		}
		_STARPU_TRACE_HYPERVISOR_BEGIN();
		sched_ctx->perf_counters->notify_submitted_job(j->task, j->footprint, data_size);
		_STARPU_TRACE_HYPERVISOR_END();
	}
#endif //STARPU_USE_SC_HYPERVISOR

	/* We retain handle reference count */
	if (task->cl && !continuation)
	{
		unsigned i;
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
		for (i=0; i<nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
			_starpu_spin_lock(&handle->header_lock);
			handle->busy_count++;
			_starpu_spin_unlock(&handle->header_lock);
		}
	}

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	_starpu_handle_job_submission(j);

#ifdef STARPU_OPENMP
	if (continuation)
	{
		j->discontinuous = 1;
		j->continuation = 0;
	}
#endif

#ifdef STARPU_OPENMP
	if (continuation)
	{
		ret = _starpu_reenforce_task_deps_and_schedule(j);
	}
	else
#endif
	{
		ret = _starpu_enforce_deps_and_schedule(j);
	}

	_STARPU_LOG_OUT();
	return ret;
}
/* Note: this is racy, so valgrind would complain. But since we'll always put
 * the same values, this is not a problem. */
void _starpu_codelet_check_deprecated_fields(struct starpu_codelet *cl)
{
	if (!cl)
		return;

	uint32_t where = cl->where;
	int is_where_unset = where == 0;
	unsigned i, some_impl;

	/* Check deprecated and unset fields (where, <device>_func,
	 * <device>_funcs) */

	/* CPU */
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS && cl->cpu_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cpu_func and cpu_funcs are set. Ignoring cpu_func.\n");
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS)
	{
		cl->cpu_funcs[0] = cl->cpu_func;
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cpu_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->cpu_func == 0)
	{
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_CPU;
	}

	/* CUDA */
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS && cl->cuda_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cuda_func and cuda_funcs are set. Ignoring cuda_func.\n");
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS)
	{
		cl->cuda_funcs[0] = cl->cuda_func;
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cuda_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->cuda_func == 0)
	{
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_CUDA;
	}

	/* OpenCL */
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS && cl->opencl_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both opencl_func and opencl_funcs are set. Ignoring opencl_func.\n");
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS)
	{
		cl->opencl_funcs[0] = cl->opencl_func;
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->opencl_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && cl->opencl_func == 0)
	{
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_OPENCL;
	}

	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->mic_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_MIC;
	}

	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->mpi_ms_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_MPI_MS;
	}

	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->scc_funcs[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_SCC;
	}

	some_impl = 0;
	for (i = 0; i < STARPU_MAXIMPLEMENTATIONS; i++)
		if (cl->cpu_funcs_name[i])
		{
			some_impl = 1;
			break;
		}
	if (some_impl && is_where_unset)
	{
		where |= STARPU_MIC|STARPU_SCC|STARPU_MPI_MS;
	}

	cl->where = where;
}
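
/* Example: the two codelet styles that this normalisation reconciles. A
 * minimal sketch, assuming an application kernel `scal_cpu` with the standard
 * void (*)(void *buffers[], void *cl_arg) signature:
 *
 *	// deprecated style: single function pointer, explicit where mask
 *	struct starpu_codelet old_cl =
 *	{
 *		.where = STARPU_CPU,
 *		.cpu_func = scal_cpu,
 *		.nbuffers = 1,
 *	};
 *
 *	// current style: function array; where is inferred by the code above
 *	struct starpu_codelet new_cl =
 *	{
 *		.cpu_funcs = { scal_cpu },
 *		.nbuffers = 1,
 *		.modes = { STARPU_RW },
 *	};
 */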
void _starpu_task_check_deprecated_fields(struct starpu_task *task STARPU_ATTRIBUTE_UNUSED)
{
	/* None any more */
}

static int _starpu_task_submit_head(struct starpu_task *task)
{
	unsigned is_sync = task->synchronous;
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	if (j->internal)
	{
		// Internal tasks are submitted to initial context
		task->sched_ctx = _starpu_get_initial_sched_ctx()->id;
	}
	else if (task->sched_ctx == STARPU_NMAX_SCHED_CTXS)
	{
		// If the task has not specified a context, we set the current context
		task->sched_ctx = _starpu_sched_ctx_get_current_context();
	}

	if (is_sync)
	{
		/* It may not be possible to submit a synchronous
		 * (blocking) task from this context */
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "submitting a synchronous task must not be done from a task or a callback");
		task->detach = 0;
	}

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		unsigned i;
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
		_STARPU_TRACE_UPDATE_TASK_CNT(0);

		/* Check buffers */
		if (task->dyn_handles == NULL)
			STARPU_ASSERT_MSG(STARPU_TASK_GET_NBUFFERS(task) <= STARPU_NMAXBUFS, "Codelet %p has too many buffers (%d vs max %d). Either use --enable-maxbuffers configure option to increase the max, or use dyn_handles instead of handles.", task->cl, STARPU_TASK_GET_NBUFFERS(task), STARPU_NMAXBUFS);

		if (task->dyn_handles)
		{
			_STARPU_MALLOC(task->dyn_interfaces, nbuffers * sizeof(void *));
		}

		for (i = 0; i < nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);

			/* Make sure handles are valid */
			STARPU_ASSERT_MSG(handle->magic == 42, "data %p is invalid (was it already unregistered?)", handle);
			/* Make sure handles are not partitioned */
			STARPU_ASSERT_MSG(handle->nchildren == 0, "only unpartitioned data (or the pieces of a partitioned data) can be used in a task");
			/* Provide the home interface for now if any,
			 * for can_execute hooks */
			if (handle->home_node != -1)
				_STARPU_TASK_SET_INTERFACE(task, starpu_data_get_interface_on_node(handle, handle->home_node), i);
		}

		/* Check that the type of worker(s) required by the task exists */
		if (!_starpu_worker_exists(task))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		/* In case we require that a task should be explicitly
		 * executed on a specific worker, we make sure that the worker
		 * is able to execute this task. */
		if (task->execute_on_a_specific_worker && !starpu_combined_worker_can_execute_task(task->workerid, task, 0))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		if (task->cl->model)
			_starpu_init_and_load_perfmodel(task->cl->model);

		if (task->cl->energy_model)
			_starpu_init_and_load_perfmodel(task->cl->energy_model);
	}

	return 0;
}
/* application should submit new tasks to StarPU through this function */
int starpu_task_submit(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(task->magic == 42, "Tasks must be created with starpu_task_create, or initialized with starpu_task_init.");

	int ret;
	unsigned is_sync = task->synchronous;
	starpu_task_bundle_t bundle = task->bundle;

	/* Internally, StarPU manipulates a struct _starpu_job *, which is a
	 * wrapper around the task structure; it is possible that this job
	 * structure was already allocated. */
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	const unsigned continuation =
#ifdef STARPU_OPENMP
		j->continuation
#else
		0
#endif
		;

	if (!j->internal)
	{
		int nsubmitted_tasks = starpu_task_nsubmitted();
		if (limit_max_submitted_tasks >= 0 && limit_max_submitted_tasks < nsubmitted_tasks
		    && limit_min_submitted_tasks >= 0 && limit_min_submitted_tasks < nsubmitted_tasks)
		{
			starpu_do_schedule();
			_STARPU_TRACE_TASK_THROTTLE_START();
			starpu_task_wait_for_n_submitted(limit_min_submitted_tasks);
			_STARPU_TRACE_TASK_THROTTLE_END();
		}
	}

	_STARPU_TRACE_TASK_SUBMIT_START();

	ret = _starpu_task_submit_head(task);
	if (ret)
	{
		_STARPU_TRACE_TASK_SUBMIT_END();
		return ret;
	}

	if (!j->internal && !continuation)
		_STARPU_TRACE_TASK_SUBMIT(j,
			_starpu_get_sched_ctx_struct(task->sched_ctx)->iterations[0],
			_starpu_get_sched_ctx_struct(task->sched_ctx)->iterations[1]);

	/* If this is a continuation, we don't modify the implicit data dependencies detected earlier. */
	if (task->cl && !continuation)
		_starpu_detect_implicit_data_deps(task);

	if (bundle)
	{
		/* We need to make sure that models for other tasks of the
		 * bundle are also loaded, so the scheduler can estimate the
		 * duration of the whole bundle */
		STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);

		struct _starpu_task_bundle_entry *entry;
		entry = bundle->list;

		while (entry)
		{
			if (entry->task->cl->model)
				_starpu_init_and_load_perfmodel(entry->task->cl->model);

			if (entry->task->cl->energy_model)
				_starpu_init_and_load_perfmodel(entry->task->cl->energy_model);

			entry = entry->next;
		}

		STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
	}

	/* If profiling is activated, we allocate a structure to store the
	 * appropriate info. */
	struct starpu_profiling_task_info *info;
	int profiling = starpu_profiling_status_get();
	info = _starpu_allocate_profiling_info_if_needed(task);
	task->profiling_info = info;

	/* The task is considered blocked until we are sure that no
	 * dependency remains. */
	task->status = STARPU_TASK_BLOCKED;

	if (profiling)
		_starpu_clock_gettime(&info->submit_time);

	ret = _starpu_submit_job(j);
#ifdef STARPU_SIMGRID
	if (_starpu_simgrid_task_submit_cost())
		MSG_process_sleep(0.000001);
#endif

	if (is_sync)
	{
		_starpu_sched_do_schedule(task->sched_ctx);
		_starpu_wait_job(j);
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	_STARPU_TRACE_TASK_SUBMIT_END();
	_STARPU_LOG_OUT();
	return ret;
}
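
/* Example: checking the submission return code. A minimal sketch, assuming a
 * task built as above; -ENODEV means no worker can execute the codelet (e.g. a
 * CUDA-only codelet on a CPU-only machine):
 *
 *	int ret = starpu_task_submit(task);
 *	if (ret == -ENODEV)
 *	{
 *		fprintf(stderr, "no worker can execute this codelet\n");
 *		task->destroy = 0;            // the task was never submitted,
 *		starpu_task_destroy(task);    // so release it by hand
 *	}
 */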
int _starpu_task_submit_internally(struct starpu_task *task)
{
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	j->internal = 1;
	return starpu_task_submit(task);
}

/* application should submit new tasks to StarPU through this function */
int starpu_task_submit_to_ctx(struct starpu_task *task, unsigned sched_ctx_id)
{
	task->sched_ctx = sched_ctx_id;
	return starpu_task_submit(task);
}

/* The StarPU core can submit tasks directly to the scheduler or a worker,
 * skipping dependencies completely (when it knows what it is doing). */
int _starpu_task_submit_nodeps(struct starpu_task *task)
{
	int ret = _starpu_task_submit_head(task);
	STARPU_ASSERT(ret == 0);

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_starpu_sched_task_submit(task);

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	_starpu_handle_job_submission(j);
	_starpu_increment_nready_tasks_of_sched_ctx(j->task->sched_ctx, j->task->flops, j->task);
	if (task->cl)
		/* This would be done by data dependencies checking */
		_starpu_job_set_ordered_buffers(j);
	task->status = STARPU_TASK_READY;
	STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

	return _starpu_push_task(j);
}
/*
 * worker->sched_mutex must be locked when calling this function.
 */
int _starpu_task_submit_conversion_task(struct starpu_task *task,
					unsigned int workerid)
{
	int ret;
	STARPU_ASSERT(task->cl);
	STARPU_ASSERT(task->execute_on_a_specific_worker);

	ret = _starpu_task_submit_head(task);
	STARPU_ASSERT(ret == 0);

	/* We retain handle reference count that would have been acquired by data dependencies. */
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i=0; i<nbuffers; i++)
	{
		starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
		_starpu_spin_lock(&handle->header_lock);
		handle->busy_count++;
		_starpu_spin_unlock(&handle->header_lock);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_starpu_sched_task_submit(task);

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	_starpu_handle_job_submission(j);
	_starpu_increment_nready_tasks_of_sched_ctx(j->task->sched_ctx, j->task->flops, j->task);
	_starpu_job_set_ordered_buffers(j);

	task->status = STARPU_TASK_READY;
	_starpu_profiling_set_task_push_start_time(task);

	unsigned node = starpu_worker_get_memory_node(workerid);
	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, node);

	struct _starpu_worker *worker;
	worker = _starpu_get_worker_struct(workerid);
	starpu_task_list_push_back(&worker->local_tasks, task);
	starpu_wake_worker_locked(worker->workerid);

	_starpu_profiling_set_task_push_end_time(task);

	STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

	return 0;
}
void starpu_codelet_init(struct starpu_codelet *cl)
{
	memset(cl, 0, sizeof(struct starpu_codelet));
}
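
/* Example: initialising a codelet at run time rather than with a static
 * initialiser. A minimal sketch, assuming an application kernel `scal_cpu`
 * with the standard codelet function signature:
 *
 *	struct starpu_codelet cl;
 *	starpu_codelet_init(&cl);
 *	cl.cpu_funcs[0] = scal_cpu;
 *	cl.nbuffers = 1;
 *	cl.modes[0] = STARPU_RW;
 *	cl.name = "scal";
 */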
void starpu_codelet_display_stats(struct starpu_codelet *cl)
{
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();

	if (cl->name)
		fprintf(stderr, "Statistics for codelet %s\n", cl->name);
	else if (cl->model && cl->model->symbol)
		fprintf(stderr, "Statistics for codelet %s\n", cl->model->symbol);

	unsigned long total = 0;
	for (worker = 0; worker < nworkers; worker++)
		total += cl->per_worker_stats[worker];

	for (worker = 0; worker < nworkers; worker++)
	{
		char name[32];
		starpu_worker_get_name(worker, name, 32);
		fprintf(stderr, "\t%s -> %lu / %lu (%2.2f %%)\n", name, cl->per_worker_stats[worker], total, (100.0f*cl->per_worker_stats[worker])/total);
	}
}
/*
 * We wait for all the tasks that have already been submitted. Note that a
 * regenerable task is not considered finished until it has been explicitly
 * set as non-regenerable again (e.g. from a callback).
 */
int _starpu_task_wait_for_all_and_return_nb_waited_tasks(void)
{
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned sched_ctx_id = nsched_ctxs == 1 ? 0 : starpu_sched_ctx_get_context();

	/* if there is no indication about which context to wait for,
	   we wait for all tasks submitted to StarPU */
	if (sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		_STARPU_DEBUG("Waiting for all tasks\n");
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_all must not be called from a task or callback");
		STARPU_AYU_BARRIER();
		struct _starpu_machine_config *config = _starpu_get_machine_config();
		if(config->topology.nsched_ctxs == 1)
		{
			_starpu_sched_do_schedule(0);
			return _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(0);
		}
		else
		{
			int s;
			for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
				{
					_starpu_sched_do_schedule(config->sched_ctxs[s].id);
				}
			}
			for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
				{
					starpu_task_wait_for_all_in_ctx(config->sched_ctxs[s].id);
				}
			}
			return 0;
		}
	}
	else
	{
		_starpu_sched_do_schedule(sched_ctx_id);
		_STARPU_DEBUG("Waiting for tasks submitted to context %u\n", sched_ctx_id);
		return _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(sched_ctx_id);
	}
}
int starpu_task_wait_for_all(void)
{
	_starpu_task_wait_for_all_and_return_nb_waited_tasks();
	return 0;
}
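
/* Example: the usual end-of-application barrier. A minimal sketch: submit
 * detached tasks, then wait for all of them before shutting StarPU down
 * (`my_cl` and `ntasks` are hypothetical):
 *
 *	unsigned i;
 *	for (i = 0; i < ntasks; i++)
 *	{
 *		struct starpu_task *task = starpu_task_create();
 *		task->cl = &my_cl;
 *		starpu_task_submit(task);   // detached: no individual wait
 *	}
 *	starpu_task_wait_for_all();
 *	starpu_shutdown();
 */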
int _starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(unsigned sched_ctx)
{
	_STARPU_TRACE_TASK_WAIT_FOR_ALL_START();
	int ret = _starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx);
	_STARPU_TRACE_TASK_WAIT_FOR_ALL_END();
	/* TODO: improve Temanejo into knowing about contexts ... */
	STARPU_AYU_BARRIER();
	return ret;
}

int starpu_task_wait_for_all_in_ctx(unsigned sched_ctx)
{
	_starpu_task_wait_for_all_in_ctx_and_return_nb_waited_tasks(sched_ctx);
	return 0;
}
/*
 * We wait until only a certain number of the tasks that have already been
 * submitted are left. Note that a regenerable task is not considered finished
 * until it has been explicitly set as non-regenerable again (e.g. from a
 * callback).
 */
int starpu_task_wait_for_n_submitted(unsigned n)
{
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned sched_ctx_id = nsched_ctxs == 1 ? 0 : starpu_sched_ctx_get_context();

	/* if there is no indication about which context to wait for,
	   we wait for all tasks submitted to StarPU */
	if (sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		_STARPU_DEBUG("Waiting for all tasks\n");
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_n_submitted must not be called from a task or callback");

		struct _starpu_machine_config *config = _starpu_get_machine_config();
		if(config->topology.nsched_ctxs == 1)
			_starpu_wait_for_n_submitted_tasks_of_sched_ctx(0, n);
		else
		{
			int s;
			for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
			{
				if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
				{
					_starpu_wait_for_n_submitted_tasks_of_sched_ctx(config->sched_ctxs[s].id, n);
				}
			}
		}

		return 0;
	}
	else
	{
		_STARPU_DEBUG("Waiting for tasks submitted to context %u\n", sched_ctx_id);
		_starpu_wait_for_n_submitted_tasks_of_sched_ctx(sched_ctx_id, n);
	}
	return 0;
}
int starpu_task_wait_for_n_submitted_in_ctx(unsigned sched_ctx, unsigned n)
{
	_starpu_wait_for_n_submitted_tasks_of_sched_ctx(sched_ctx, n);
	return 0;
}
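
/* Example: manual submission throttling. A minimal sketch of what the
 * STARPU_LIMIT_MAX_SUBMITTED_TASKS / STARPU_LIMIT_MIN_SUBMITTED_TASKS
 * environment variables automate in starpu_task_submit() above (the
 * thresholds are hypothetical):
 *
 *	if (starpu_task_nsubmitted() > 10000)
 *		// block until at most 1000 submitted tasks remain
 *		starpu_task_wait_for_n_submitted(1000);
 */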
/*
 * We wait until there is no ready task any more (i.e. StarPU will not be able
 * to progress any more).
 */
int starpu_task_wait_for_no_ready(void)
{
	STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_task_wait_for_no_ready must not be called from a task or callback");

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if(config->topology.nsched_ctxs == 1)
	{
		_starpu_sched_do_schedule(0);
		_starpu_wait_for_no_ready_of_sched_ctx(0);
	}
	else
	{
		int s;
		for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				_starpu_sched_do_schedule(config->sched_ctxs[s].id);
			}
		}
		for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				_starpu_wait_for_no_ready_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}

	return 0;
}
void starpu_iteration_push(unsigned long iteration)
{
	struct _starpu_sched_ctx *ctx = _starpu_get_sched_ctx_struct(_starpu_sched_ctx_get_current_context());
	unsigned level = ctx->iteration_level++;
	if (level < sizeof(ctx->iterations)/sizeof(ctx->iterations[0]))
		ctx->iterations[level] = iteration;
}

void starpu_iteration_pop(void)
{
	struct _starpu_sched_ctx *ctx = _starpu_get_sched_ctx_struct(_starpu_sched_ctx_get_current_context());
	STARPU_ASSERT_MSG(ctx->iteration_level > 0, "calls to starpu_iteration_pop must match starpu_iteration_push calls");
	/* pre-decrement, so that we clear the same level that the matching push filled */
	unsigned level = --ctx->iteration_level;
	if (level < sizeof(ctx->iterations)/sizeof(ctx->iterations[0]))
		ctx->iterations[level] = -1;
}
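
/* Example: tagging submitted tasks with the iteration they belong to, so that
 * tracing tools can group them. A minimal sketch, assuming a hypothetical
 * per-iteration submission function submit_tasks_for():
 *
 *	unsigned long iter;
 *	for (iter = 0; iter < niter; iter++)
 *	{
 *		starpu_iteration_push(iter);
 *		submit_tasks_for(iter);
 *		starpu_iteration_pop();
 *	}
 *	starpu_task_wait_for_all();
 */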
void starpu_do_schedule(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if(config->topology.nsched_ctxs == 1)
		_starpu_sched_do_schedule(0);
	else
	{
		int s;
		for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				_starpu_sched_do_schedule(config->sched_ctxs[s].id);
			}
		}
	}
}

void
starpu_drivers_request_termination(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
	int nsubmitted = starpu_task_nsubmitted();
	config->submitting = 0;
	if (nsubmitted == 0)
	{
		ANNOTATE_HAPPENS_AFTER(&config->running);
		config->running = 0;
		ANNOTATE_HAPPENS_BEFORE(&config->running);
		STARPU_WMB();
		int s;
		for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				_starpu_check_nsubmitted_tasks_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}

	STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);
}
int starpu_task_nsubmitted(void)
{
	int nsubmitted = 0;
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if(config->topology.nsched_ctxs == 1)
		nsubmitted = _starpu_get_nsubmitted_tasks_of_sched_ctx(0);
	else
	{
		int s;
		for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				nsubmitted += _starpu_get_nsubmitted_tasks_of_sched_ctx(config->sched_ctxs[s].id);
			}
		}
	}
	return nsubmitted;
}

int starpu_task_nready(void)
{
	int nready = 0;
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	if(config->topology.nsched_ctxs == 1)
		nready = starpu_sched_ctx_get_nready_tasks(0);
	else
	{
		int s;
		for(s = 0; s < STARPU_NMAX_SCHED_CTXS; s++)
		{
			if(config->sched_ctxs[s].id != STARPU_NMAX_SCHED_CTXS)
			{
				nready += starpu_sched_ctx_get_nready_tasks(config->sched_ctxs[s].id);
			}
		}
	}
	return nready;
}
/* Return the task currently executed by the worker, or NULL if it is called
 * either from a thread that is not executing a task, or simply because there
 * is no task being executed at the moment. */
struct starpu_task *starpu_task_get_current(void)
{
	return (struct starpu_task *) STARPU_PTHREAD_GETSPECIFIC(current_task_key);
}

void _starpu_set_current_task(struct starpu_task *task)
{
	STARPU_PTHREAD_SETSPECIFIC(current_task_key, task);
}
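
/* Example: querying the current task from inside a codelet function. A
 * minimal sketch; `buffers` and `cl_arg` are the standard codelet parameters:
 *
 *	void my_cpu_func(void *buffers[], void *cl_arg)
 *	{
 *		struct starpu_task *self = starpu_task_get_current();
 *		// e.g. inspect which implementation was chosen
 *		unsigned impl = starpu_task_get_implementation(self);
 *		(void) impl;
 *	}
 */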
#ifdef STARPU_OPENMP
/* Prepare the fields of the current task for accepting a new set of
 * dependencies in anticipation of becoming a continuation.
 *
 * When the task becomes 'continued', it will only be queued again when the new
 * set of dependencies is fulfilled. */
void _starpu_task_prepare_for_continuation(void)
{
	_starpu_job_prepare_for_continuation(_starpu_get_job_associated_to_task(starpu_task_get_current()));
}

void _starpu_task_prepare_for_continuation_ext(unsigned continuation_resubmit,
		void (*continuation_callback_on_sleep)(void *arg), void *continuation_callback_on_sleep_arg)
{
	_starpu_job_prepare_for_continuation_ext(_starpu_get_job_associated_to_task(starpu_task_get_current()),
		continuation_resubmit, continuation_callback_on_sleep, continuation_callback_on_sleep_arg);
}

void _starpu_task_set_omp_cleanup_callback(struct starpu_task *task, void (*omp_cleanup_callback)(void *arg), void *omp_cleanup_callback_arg)
{
	_starpu_job_set_omp_cleanup_callback(_starpu_get_job_associated_to_task(task),
		omp_cleanup_callback, omp_cleanup_callback_arg);
}
#endif
/*
 * Returns 0 if the task does not use any multiformat handle, 1 otherwise.
 */
int
_starpu_task_uses_multiformat_handles(struct starpu_task *task)
{
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(task);
	for (i = 0; i < nbuffers; i++)
	{
		if (_starpu_data_is_multiformat_handle(STARPU_TASK_GET_HANDLE(task, i)))
			return 1;
	}

	return 0;
}
/*
 * Checks whether the given handle needs to be converted in order to be used on
 * the node given as the second argument.
 */
int
_starpu_handle_needs_conversion_task(starpu_data_handle_t handle,
				     unsigned int node)
{
	return _starpu_handle_needs_conversion_task_for_arch(handle, starpu_node_get_kind(node));
}

int
_starpu_handle_needs_conversion_task_for_arch(starpu_data_handle_t handle,
					      enum starpu_node_kind node_kind)
{
	/*
	 * Here, we assume that CUDA devices and OpenCL devices use the
	 * same data structure. A conversion is only needed when moving
	 * data from a CPU to a GPU, or the other way around.
	 */
	switch (node_kind)
	{
		case STARPU_CPU_RAM:
			switch(starpu_node_get_kind(handle->mf_node))
			{
				case STARPU_CPU_RAM:
					return 0;
				case STARPU_CUDA_RAM:      /* Fall through */
				case STARPU_OPENCL_RAM:
				case STARPU_MIC_RAM:
				case STARPU_MPI_MS_RAM:
				case STARPU_SCC_RAM:
					return 1;
				default:
					STARPU_ABORT();
			}
			break;
		case STARPU_CUDA_RAM:    /* Fall through */
		case STARPU_OPENCL_RAM:
		case STARPU_MIC_RAM:
		case STARPU_MPI_MS_RAM:
		case STARPU_SCC_RAM:
			switch(starpu_node_get_kind(handle->mf_node))
			{
				case STARPU_CPU_RAM:
					return 1;
				case STARPU_CUDA_RAM:
				case STARPU_OPENCL_RAM:
				case STARPU_MIC_RAM:
				case STARPU_MPI_MS_RAM:
				case STARPU_SCC_RAM:
					return 0;
				default:
					STARPU_ABORT();
			}
			break;
		default:
			STARPU_ABORT();
	}
	/* that instruction should never be reached */
	return -EINVAL;
}
void starpu_task_set_implementation(struct starpu_task *task, unsigned impl)
{
	_starpu_get_job_associated_to_task(task)->nimpl = impl;
}

unsigned starpu_task_get_implementation(struct starpu_task *task)
{
	return _starpu_get_job_associated_to_task(task)->nimpl;
}

unsigned long starpu_task_get_job_id(struct starpu_task *task)
{
	return _starpu_get_job_associated_to_task(task)->job_id;
}
static starpu_pthread_t watchdog_thread;

/* Check from time to time that StarPU does finish some tasks */
static void *watchdog_func(void *arg)
{
	char *timeout_env = arg;
	float timeout;

#ifdef _MSC_VER
	timeout = ((float) _atoi64(timeout_env)) / 1000000;
#else
	timeout = ((float) atoll(timeout_env)) / 1000000;
#endif
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	starpu_pthread_setname("watchdog");

	STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
	while (_starpu_machine_is_running())
	{
		int last_nsubmitted = starpu_task_nsubmitted();
		config->watchdog_ok = 0;
		STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);

		/* If we did a single sleep(timeout), we might have to wait too long at the end of the computation. */
		/* To avoid that, we do several sleep() of 1s (and check after each whether StarPU is still running) */
		float t;
		for (t = timeout ; t > 1.; t--)
		{
			starpu_sleep(1.);
			if (!_starpu_machine_is_running())
				/* Application finished, don't bother finishing the sleep */
				return NULL;
		}
		/* and one final sleep (of less than 1 s) with the rest (if needed) */
		if (t > 0.)
			starpu_sleep(t);

		STARPU_PTHREAD_MUTEX_LOCK(&config->submitted_mutex);
		if (!config->watchdog_ok && last_nsubmitted
		    && last_nsubmitted == starpu_task_nsubmitted())
		{
			_STARPU_MSG("The StarPU watchdog detected that no task finished for %fs (can be configured through STARPU_WATCHDOG_TIMEOUT)\n", timeout);
			if (watchdog_crash)
			{
				_STARPU_MSG("Crashing the process\n");
				raise(SIGABRT);
			}
			else
				_STARPU_MSG("Set the STARPU_WATCHDOG_CRASH environment variable if you want to abort the process in such a case\n");
		}
		/* Only shout again after another period */
		config->watchdog_ok = 1;
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&config->submitted_mutex);
	return NULL;
}
void _starpu_watchdog_init(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	char *timeout_env = starpu_getenv("STARPU_WATCHDOG_TIMEOUT");

	STARPU_PTHREAD_MUTEX_INIT(&config->submitted_mutex, NULL);

	if (!timeout_env)
		return;

	STARPU_PTHREAD_CREATE(&watchdog_thread, NULL, watchdog_func, timeout_env);
}
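
/* Example: enabling the watchdog from the shell. The timeout is given in
 * microseconds; the values below (10 s, plus aborting on a stall) are only
 * illustrative:
 *
 *	$ STARPU_WATCHDOG_TIMEOUT=10000000 STARPU_WATCHDOG_CRASH=1 ./my_app
 */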
void _starpu_watchdog_shutdown(void)
{
	char *timeout_env = starpu_getenv("STARPU_WATCHDOG_TIMEOUT");

	if (!timeout_env)
		return;

	starpu_pthread_join(watchdog_thread, NULL);
}