/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2013 Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
 * Copyright (C) 2011 Télécom-SudParis
 * Copyright (C) 2011 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <starpu_profiling.h>
#include <core/workers.h>
#include <core/sched_ctx.h>
#include <core/jobs.h>
#include <core/task.h>
#include <core/task_bundle.h>
#include <common/config.h>
#include <common/utils.h>
#include <profiling/profiling.h>
#include <profiling/bound.h>
#include <math.h>
#include <string.h>
#include <core/debug.h>
/* XXX this should be reinitialized when StarPU is shut down (or we should
 * make sure that no task remains!) */
/* TODO: we could make this hierarchical to avoid contention? */
static _starpu_pthread_cond_t submitted_cond = _STARPU_PTHREAD_COND_INITIALIZER;
static _starpu_pthread_mutex_t submitted_mutex = _STARPU_PTHREAD_MUTEX_INITIALIZER;
static long int nsubmitted = 0, nready = 0;

static void _starpu_increment_nsubmitted_tasks(void);

/* This key stores the task currently handled by the thread. Note that we
 * cannot use the worker structure to store that information, because it is
 * possible that we have a task with a NULL codelet, which means its callback
 * could be executed by a user thread as well. */
static _starpu_pthread_key_t current_task_key;
void starpu_task_init(struct starpu_task *task)
{
	/* TODO: memcpy from a template instead? benchmark it */
	STARPU_ASSERT(task);

	/* As most of the fields must be initialised to NULL or zero, let's
	 * put 0 everywhere */
	memset(task, 0, sizeof(struct starpu_task));

	/* Now we can initialise the fields which require a custom value */
#if STARPU_DEFAULT_PRIO != 0
	task->priority = STARPU_DEFAULT_PRIO;
#endif
	task->detach = 1;
#if STARPU_TASK_INVALID != 0
	task->status = STARPU_TASK_INVALID;
#endif
	task->predicted = NAN;
	task->predicted_transfer = NAN;
	task->magic = 42;
	task->sched_ctx = _starpu_get_initial_sched_ctx()->id;
	task->flops = 0.0;
}
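
/* Example usage (editor's sketch, not part of the original file): the typical
 * lifetime of a statically allocated task. `my_cl` and `my_handle` are
 * hypothetical.
 *
 *	struct starpu_task task;
 *	starpu_task_init(&task);
 *	task.cl = &my_cl;
 *	task.handles[0] = my_handle;
 *	task.detach = 0;             // required for starpu_task_wait()
 *	starpu_task_submit(&task);
 *	starpu_task_wait(&task);
 *	starpu_task_clean(&task);    // free resources, keep the structure
 */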
/* Free all the resources allocated for a task, without deallocating the task
 * structure itself (this is required for statically allocated tasks).
 * All values previously set by the user, like codelet and handles, remain
 * unchanged. */
void starpu_task_clean(struct starpu_task *task)
{
	STARPU_ASSERT(task);

	/* If a buffer was allocated to store the profiling info, we free it. */
	if (task->profiling_info)
	{
		free(task->profiling_info);
		task->profiling_info = NULL;
	}

	/* In case the task is (still) part of a bundle */
	starpu_task_bundle_t bundle = task->bundle;
	if (bundle)
		starpu_task_bundle_remove(bundle, task);

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
	if (j)
	{
		_starpu_job_destroy(j);
		task->starpu_private = NULL;
	}
}
struct starpu_task * __attribute__((malloc)) starpu_task_create(void)
{
	struct starpu_task *task;

	task = (struct starpu_task *) malloc(sizeof(struct starpu_task));
	STARPU_ASSERT(task);

	starpu_task_init(task);

	/* Dynamically allocated tasks are destroyed by default */
	task->destroy = 1;

	return task;
}
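
/* Example usage (editor's sketch): the dynamic counterpart of the static
 * initialisation above. With the default destroy = 1 and detach = 1, the
 * task is freed automatically once it terminates. `my_cl` and `my_handle`
 * are hypothetical.
 *
 *	struct starpu_task *task = starpu_task_create();
 *	task->cl = &my_cl;
 *	task->handles[0] = my_handle;
 *	starpu_task_submit(task);    // no explicit free needed afterwards
 */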
/* Free the resources allocated during starpu_task_create. This function can
 * be called automatically after the execution of a task by setting the
 * "destroy" flag of the starpu_task structure (default behaviour). Calling
 * this function on a statically allocated task results in undefined
 * behaviour. */
void _starpu_task_destroy(struct starpu_task *task)
{
	/* If starpu_task_destroy is called in a callback, we just set the
	 * destroy flag. The task will be destroyed after the callback
	 * returns. */
	if (task == starpu_task_get_current()
	    && _starpu_get_local_worker_status() == STATUS_CALLBACK)
	{
		task->destroy = 1;
	}
	else
	{
		starpu_task_clean(task);
		/* TODO: handle the case of a task with detach = 1 and destroy = 1 */
		/* TODO: handle the case of non-terminated tasks -> return -EINVAL */
		free(task);
	}
}

void starpu_task_destroy(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->destroy || !task->detach, "starpu_task_destroy must not be called for a task with destroy = 1 and detach = 1");
	_starpu_task_destroy(task);
}
int starpu_task_wait(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);

	STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");

	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or synchronous. Waiting returns immediately\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
	{
		_STARPU_LOG_OUT_TAG("edeadlk");
		return -EDEADLK;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	_starpu_wait_job(j);

	/* as the task was waited for synchronously, the freeing of the job
	 * structure was deferred */
	if (task->destroy)
		_starpu_task_destroy(task);

	_STARPU_LOG_OUT();
	return 0;
}
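
/* Example usage (editor's sketch): waiting for a non-detached task and
 * checking the error codes returned above.
 *
 *	task->detach = 0;
 *	int ret = starpu_task_submit(task);
 *	if (ret == 0)
 *	{
 *		ret = starpu_task_wait(task);
 *		if (ret == -EDEADLK)
 *			; // called from a context that must not block
 *	}
 */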
struct _starpu_job *_starpu_get_job_associated_to_task(struct starpu_task *task)
{
	STARPU_ASSERT(task);

	if (!task->starpu_private)
	{
		struct _starpu_job *j = _starpu_job_create(task);
		task->starpu_private = j;
	}

	return (struct _starpu_job *)task->starpu_private;
}
/* NB: in case we have a regenerable task, it is possible that the job was
 * already counted. */
int _starpu_submit_job(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;

	_STARPU_LOG_IN();

	/* notify bound computation of a new task */
	_starpu_bound_record(j);

	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);

#ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);
	if(sched_ctx != NULL && j->task->sched_ctx != 0 && j->task->sched_ctx != STARPU_NMAX_SCHED_CTXS
	   && sched_ctx->perf_counters != NULL)
	{
		_starpu_compute_buffers_footprint(j->task->cl->model, STARPU_CPU_DEFAULT, 0, j);
		sched_ctx->perf_counters->notify_submitted_job(j->task, j->footprint);
	}
#endif

	/* We retain handle reference count */
	if (task->cl)
	{
		unsigned i;
		for (i=0; i<task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = task->handles[i];
			_starpu_spin_lock(&handle->header_lock);
			handle->busy_count++;
			_starpu_spin_unlock(&handle->header_lock);
		}
	}

	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	/* Need to atomically set submitted to 1 and check dependencies, since
	 * this is concurrent with _starpu_notify_cg */
	j->terminated = 0;
	if (!j->submitted)
		j->submitted = 1;
	else
		j->submitted = 2;

	int ret = _starpu_enforce_deps_and_schedule(j);

	_STARPU_LOG_OUT();
	return ret;
}
void _starpu_codelet_check_deprecated_fields(struct starpu_codelet *cl)
{
	if (!cl)
		return;

	int is_where_unset = cl->where == 0;

	/* Check deprecated and unset fields (where, <device>_func,
	 * <device>_funcs) */

	/* CPU */
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS && cl->cpu_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cpu_func and cpu_funcs are set. Ignoring cpu_func.\n");
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS)
	{
		cl->cpu_funcs[0] = cl->cpu_func;
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_funcs[0] && cl->cpu_func == 0)
	{
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_funcs[0] && is_where_unset)
	{
		cl->where |= STARPU_CPU;
	}

	/* CUDA */
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS && cl->cuda_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cuda_func and cuda_funcs are set. Ignoring cuda_func.\n");
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS)
	{
		cl->cuda_funcs[0] = cl->cuda_func;
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_funcs[0] && cl->cuda_func == 0)
	{
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_funcs[0] && is_where_unset)
	{
		cl->where |= STARPU_CUDA;
	}

	/* OpenCL */
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS && cl->opencl_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both opencl_func and opencl_funcs are set. Ignoring opencl_func.\n");
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS)
	{
		cl->opencl_funcs[0] = cl->opencl_func;
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_funcs[0] && cl->opencl_func == 0)
	{
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_funcs[0] && is_where_unset)
	{
		cl->where |= STARPU_OPENCL;
	}
}
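
/* Example (editor's sketch): the two codelet styles this function reconciles.
 * The deprecated form sets <device>_func; the current form fills the
 * <device>_funcs array and lets `where` be deduced. `my_kernel` is
 * hypothetical.
 *
 *	struct starpu_codelet deprecated_cl =
 *	{
 *		.where = STARPU_CPU,
 *		.cpu_func = my_kernel,             // old single-implementation field
 *		.nbuffers = 1,
 *	};
 *	struct starpu_codelet current_cl =
 *	{
 *		.cpu_funcs = { my_kernel, NULL },  // new multi-implementation array
 *		.nbuffers = 1,                     // where is deduced as STARPU_CPU
 *	};
 */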
void _starpu_task_check_deprecated_fields(struct starpu_task *task)
{
	if (task->cl)
	{
		unsigned i;
		for (i = 0; i < task->cl->nbuffers; i++)
		{
			if (task->buffers[i].handle && task->handles[i])
			{
				_STARPU_DISP("[warning][struct starpu_task] task->buffers[%u] and task->handles[%u] both set. Ignoring task->buffers[%u]?\n", i, i, i);
				STARPU_ASSERT(task->buffers[i].mode == task->cl->modes[i]);
				STARPU_ABORT();
			}
			if (task->buffers[i].handle)
			{
				task->handles[i] = task->buffers[i].handle;
				task->cl->modes[i] = task->buffers[i].mode;
			}
		}
	}
}
/* application should submit new tasks to StarPU through this function */
int starpu_task_submit(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(task->magic == 42, "Tasks must be created with starpu_task_create, or initialized with starpu_task_init.");

	int ret;
	unsigned is_sync = task->synchronous;
	starpu_task_bundle_t bundle = task->bundle;
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned set_sched_ctx = STARPU_NMAX_SCHED_CTXS;

	/* Internally, StarPU manipulates a struct _starpu_job *, which is a
	 * wrapper around the task structure; it is possible that this job
	 * structure was already allocated. */
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	if (task->sched_ctx == 0 && nsched_ctxs != 1 && !j->exclude_from_dag)
	{
		set_sched_ctx = starpu_task_get_context();
		if (set_sched_ctx != STARPU_NMAX_SCHED_CTXS)
			task->sched_ctx = set_sched_ctx;
	}

	if (is_sync)
	{
		/* It may not be possible to submit a synchronous
		 * (blocking) task from this context */
		if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		{
			_STARPU_LOG_OUT_TAG("EDEADLK");
			return -EDEADLK;
		}

		task->detach = 0;
	}

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		unsigned i;

		/* Check buffers */
		STARPU_ASSERT_MSG(task->cl->nbuffers <= STARPU_NMAXBUFS, "Codelet %p has too many buffers (%d vs max %d)", task->cl, task->cl->nbuffers, STARPU_NMAXBUFS);
		for (i = 0; i < task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = task->handles[i];
			/* Make sure handles are not partitioned */
			STARPU_ASSERT_MSG(handle->nchildren == 0, "only unpartitioned data can be used in a task");
			/* For now, provide the home interface, if any, for
			 * can_execute hooks */
			if (handle->home_node != -1)
				task->interfaces[i] = starpu_data_get_interface_on_node(task->handles[i], handle->home_node);
		}

		/* Check that the type of worker(s) required by the task exists */
		if (!_starpu_worker_exists(task))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		/* In case we require that the task be explicitly executed on
		 * a specific worker, we make sure that this worker is able to
		 * execute the task. */
		if (task->execute_on_a_specific_worker && !starpu_combined_worker_can_execute_task(task->workerid, task, 0))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		_starpu_detect_implicit_data_deps(task);

		if (task->cl->model && task->cl->model->symbol)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model && task->cl->power_model->symbol)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	if (bundle)
	{
		/* We need to make sure that the models for the other tasks of
		 * the bundle are also loaded, so that the scheduler can
		 * estimate the duration of the whole bundle */
		_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);

		struct _starpu_task_bundle_entry *entry;
		entry = bundle->list;

		while (entry)
		{
			if (entry->task->cl->model && entry->task->cl->model->symbol)
				_starpu_load_perfmodel(entry->task->cl->model);

			if (entry->task->cl->power_model && entry->task->cl->power_model->symbol)
				_starpu_load_perfmodel(entry->task->cl->power_model);

			entry = entry->next;
		}

		_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
	}

	/* If profiling is activated, we allocate a structure to store the
	 * appropriate info. */
	struct starpu_task_profiling_info *info;
	int profiling = starpu_profiling_status_get();
	info = _starpu_allocate_profiling_info_if_needed(task);
	task->profiling_info = info;

	/* The task is considered blocked until we are sure that no dependency
	 * remains. */
	task->status = STARPU_TASK_BLOCKED;

	if (profiling)
		_starpu_clock_gettime(&info->submit_time);

	ret = _starpu_submit_job(j);

	if (is_sync)
	{
		_starpu_wait_job(j);
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	_STARPU_LOG_OUT();
	return ret;
}
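
/* Example usage (editor's sketch): synchronous submission with the error
 * returns implemented above.
 *
 *	task->synchronous = 1;
 *	switch (starpu_task_submit(task))
 *	{
 *		case 0:        // task has executed (and was freed if destroy = 1)
 *			break;
 *		case -ENODEV:  // no worker can run this codelet
 *			break;
 *		case -EDEADLK: // blocking submission from a worker thread
 *			break;
 *	}
 */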
int _starpu_task_submit_internally(struct starpu_task *task)
{
	_starpu_exclude_task_from_dag(task);
	return starpu_task_submit(task);
}

/* application should submit new tasks to a specific context through this function */
int starpu_task_submit_to_ctx(struct starpu_task *task, unsigned sched_ctx_id)
{
	task->sched_ctx = sched_ctx_id;
	return starpu_task_submit(task);
}
/* The StarPU core can submit tasks directly to the scheduler or a worker,
 * skipping dependencies completely (when it knows what it is doing). */
int _starpu_task_submit_nodeps(struct starpu_task *task)
{
	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		if (task->cl->model)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);

	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	j->submitted = 1;

	if (task->cl)
	{
		/* This would otherwise be done by data dependency checking */
		unsigned i;
		for (i=0 ; i<task->cl->nbuffers ; i++)
		{
			j->ordered_buffers[i].handle = j->task->handles[i];
			j->ordered_buffers[i].mode = j->task->cl->modes[i];
		}
	}
	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

	return _starpu_push_task(j);
}
/*
 * worker->sched_mutex must be locked when calling this function.
 */
int _starpu_task_submit_conversion_task(struct starpu_task *task,
					unsigned int workerid)
{
	STARPU_ASSERT(task->cl);
	STARPU_ASSERT(task->execute_on_a_specific_worker);

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	/* We should factorize that */
	if (task->cl->model)
		_starpu_load_perfmodel(task->cl->model);

	if (task->cl->power_model)
		_starpu_load_perfmodel(task->cl->power_model);

	/* We retain handle reference count */
	unsigned i;
	for (i=0; i<task->cl->nbuffers; i++)
	{
		starpu_data_handle_t handle = task->handles[i];
		_starpu_spin_lock(&handle->header_lock);
		handle->busy_count++;
		_starpu_spin_unlock(&handle->header_lock);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);

	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	j->submitted = 1;
	_starpu_increment_nready_tasks();

	for (i=0 ; i<task->cl->nbuffers ; i++)
	{
		j->ordered_buffers[i].handle = j->task->handles[i];
		j->ordered_buffers[i].mode = j->task->cl->modes[i];
	}

	_STARPU_LOG_IN();

	task->status = STARPU_TASK_READY;
	_starpu_profiling_set_task_push_start_time(task);

	unsigned node = starpu_worker_get_memory_node(workerid);
	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, node);

	struct _starpu_worker *worker;
	worker = _starpu_get_worker_struct(workerid);
	starpu_task_list_push_front(&worker->local_tasks, task);

	_starpu_profiling_set_task_push_end_time(task);

	_STARPU_LOG_OUT();

	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

	return 0;
}
void starpu_codelet_init(struct starpu_codelet *cl)
{
	memset(cl, 0, sizeof(struct starpu_codelet));
}
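
/* Example usage (editor's sketch): zero-initialising a codelet before filling
 * only the relevant fields, which also leaves the deprecated fields unset.
 * `my_kernel` is a hypothetical kernel function.
 *
 *	struct starpu_codelet cl;
 *	starpu_codelet_init(&cl);
 *	cl.cpu_funcs[0] = my_kernel;
 *	cl.nbuffers = 1;
 *	cl.modes[0] = STARPU_RW;
 */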
void starpu_display_codelet_stats(struct starpu_codelet *cl)
{
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();

	if (cl->name)
		fprintf(stderr, "Statistics for codelet %s\n", cl->name);
	else if (cl->model && cl->model->symbol)
		fprintf(stderr, "Statistics for codelet %s\n", cl->model->symbol);

	unsigned long total = 0;

	for (worker = 0; worker < nworkers; worker++)
		total += cl->per_worker_stats[worker];

	for (worker = 0; worker < nworkers; worker++)
	{
		char name[32];
		starpu_worker_get_name(worker, name, 32);

		/* guard against a division by zero when nothing was executed yet */
		fprintf(stderr, "\t%s -> %lu / %lu (%2.2f %%)\n", name, cl->per_worker_stats[worker], total, total ? (100.0f*cl->per_worker_stats[worker])/total : 0.0f);
	}
}
/*
 * We wait for all the tasks that have already been submitted. Note that a
 * regenerable task is not considered finished until it has explicitly been
 * made non-regenerable again (e.g. from a callback).
 */
int starpu_task_wait_for_all(void)
{
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned sched_ctx_id = nsched_ctxs == 1 ? 0 : starpu_task_get_context();

	/* if there is no indication about which context to wait for,
	   we wait for all the tasks submitted to StarPU */
	if (sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		_STARPU_DEBUG("Waiting for all tasks\n");

		if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
			return -EDEADLK;

		_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

		_STARPU_TRACE_TASK_WAIT_FOR_ALL;

		while (nsubmitted > 0)
			_STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);

		_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);

#ifdef HAVE_AYUDAME_H
		if (AYU_event) AYU_event(AYU_BARRIER, 0, NULL);
#endif
	}
	else
	{
		_STARPU_DEBUG("Waiting for tasks submitted to context %u\n", sched_ctx_id);
		_starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx_id);
#ifdef HAVE_AYUDAME_H
		/* TODO: improve Temanejo into knowing about contexts ... */
		if (AYU_event) AYU_event(AYU_BARRIER, 0, NULL);
#endif
	}
	return 0;
}
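
/* Example usage (editor's sketch): the usual shape of a driver loop built on
 * the barrier above, with `ntasks` and `tasks[]` hypothetical.
 *
 *	for (i = 0; i < ntasks; i++)
 *		starpu_task_submit(tasks[i]);  // asynchronous, detached tasks
 *	starpu_task_wait_for_all();            // block until all have finished
 */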
int starpu_task_wait_for_all_in_ctx(unsigned sched_ctx)
{
	_starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx);
#ifdef HAVE_AYUDAME_H
	if (AYU_event) AYU_event(AYU_BARRIER, 0, NULL);
#endif
	return 0;
}
/*
 * We wait until there are no more ready tasks (i.e. StarPU will not be able
 * to make any further progress).
 */
int starpu_task_wait_for_no_ready(void)
{
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		return -EDEADLK;

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	_STARPU_TRACE_TASK_WAIT_FOR_ALL;

	while (nready > 0)
		_STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);

	return 0;
}
void _starpu_decrement_nsubmitted_tasks(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	if (--nsubmitted == 0)
	{
		if (!config->submitting)
			config->running = 0;

		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
	}

	_STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}

void
starpu_drivers_request_termination(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	config->submitting = 0;
	if (nsubmitted == 0)
	{
		config->running = 0;
		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
	}

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
static void _starpu_increment_nsubmitted_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	nsubmitted++;

	_STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}

void _starpu_increment_nready_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	nready++;

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}

void _starpu_decrement_nready_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	if (--nready == 0)
		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
void _starpu_initialize_current_task_key(void)
{
	_STARPU_PTHREAD_KEY_CREATE(&current_task_key, NULL);
}

/* Return the task currently executed by the worker, or NULL if this is
 * called either from a thread that is not executing a task, or simply
 * because there is no task being executed at the moment. */
struct starpu_task *starpu_task_get_current(void)
{
	return (struct starpu_task *) _STARPU_PTHREAD_GETSPECIFIC(current_task_key);
}

void _starpu_set_current_task(struct starpu_task *task)
{
	_STARPU_PTHREAD_SETSPECIFIC(current_task_key, task);
}
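
/* Example usage (editor's sketch): starpu_task_get_current() is typically
 * called from a callback, where the task pointer is not passed explicitly.
 *
 *	static void my_callback(void *arg)
 *	{
 *		struct starpu_task *task = starpu_task_get_current();
 *		// inspect task->profiling_info, task->handles, etc.
 *	}
 */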
/*
 * Returns 0 if the task does not use any multiformat handle, 1 otherwise.
 */
int
_starpu_task_uses_multiformat_handles(struct starpu_task *task)
{
	unsigned i;
	for (i = 0; i < task->cl->nbuffers; i++)
	{
		if (_starpu_data_is_multiformat_handle(task->handles[i]))
			return 1;
	}

	return 0;
}
/*
 * Checks whether the given handle needs to be converted in order to be used
 * on the node given as the second argument.
 */
int
_starpu_handle_needs_conversion_task(starpu_data_handle_t handle,
				     unsigned int node)
{
	return _starpu_handle_needs_conversion_task_for_arch(handle, starpu_node_get_kind(node));
}

int
_starpu_handle_needs_conversion_task_for_arch(starpu_data_handle_t handle,
					      enum starpu_node_kind node_kind)
{
	/*
	 * Here, we assume that CUDA devices and OpenCL devices use the
	 * same data structure. A conversion is only needed when moving
	 * data from a CPU to a GPU, or the other way around.
	 */
	switch (node_kind)
	{
		case STARPU_CPU_RAM:
			switch(starpu_node_get_kind(handle->mf_node))
			{
				case STARPU_CPU_RAM:
					return 0;
				case STARPU_CUDA_RAM:      /* Fall through */
				case STARPU_OPENCL_RAM:
					return 1;
				default:
					STARPU_ABORT();
			}
			break;
		case STARPU_CUDA_RAM:    /* Fall through */
		case STARPU_OPENCL_RAM:
			switch(starpu_node_get_kind(handle->mf_node))
			{
				case STARPU_CPU_RAM:
					return 1;
				case STARPU_CUDA_RAM:
				case STARPU_OPENCL_RAM:
					return 0;
				default:
					STARPU_ABORT();
			}
			break;
		default:
			STARPU_ABORT();
	}

	/* this statement should never be reached */
	return -EINVAL;
}
starpu_cpu_func_t _starpu_task_get_cpu_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->cpu_funcs[nimpl];
}

starpu_cuda_func_t _starpu_task_get_cuda_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->cuda_funcs[nimpl];
}

starpu_opencl_func_t _starpu_task_get_opencl_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->opencl_funcs[nimpl];
}
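
/* Example (editor's sketch): the per-implementation arrays these getters read
 * are filled by the user, e.g. to provide a vectorised variant next to a
 * plain C one. `kernel_c` and `kernel_sse` are hypothetical.
 *
 *	struct starpu_codelet cl =
 *	{
 *		.cpu_funcs = { kernel_c, kernel_sse, NULL },
 *		.nbuffers = 1,
 *	};
 */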