task.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. * Copyright (C) 2011 INRIA
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #include <starpu.h>
  20. #include <starpu_profiling.h>
  21. #include <core/workers.h>
  22. #include <core/sched_ctx.h>
  23. #include <core/jobs.h>
  24. #include <core/task.h>
  25. #include <core/task_bundle.h>
  26. #include <common/config.h>
  27. #include <common/utils.h>
  28. #include <profiling/profiling.h>
  29. #include <profiling/bound.h>
  30. #include <math.h>
  31. #include <string.h>
/* XXX this should be reinitialized when StarPU is shutdown (or we should make
 * sure that no task remains !) */
/* TODO we could make this hierarchical to avoid contention ? */
/* Mutex/condition pair protecting the two global task counters below; the
 * condition is broadcast whenever one of them drops to zero. */
static pthread_cond_t submitted_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t submitted_mutex = PTHREAD_MUTEX_INITIALIZER;
/* nsubmitted: tasks submitted and not yet terminated.
 * nready: tasks whose dependencies are fulfilled (ready to execute). */
static long int nsubmitted = 0, nready = 0;
/* Forward declaration: defined near the other counter helpers below. */
static void _starpu_increment_nsubmitted_tasks(void);
/* This key stores the task currently handled by the thread, note that we
 * cannot use the worker structure to store that information because it is
 * possible that we have a task with a NULL codelet, which means its callback
 * could be executed by a user thread as well. */
static pthread_key_t current_task_key;
  44. void starpu_task_init(struct starpu_task *task)
  45. {
  46. STARPU_ASSERT(task);
  47. /* As most of the fields must be initialised at NULL, let's put 0
  48. * everywhere */
  49. memset(task, 0, sizeof(struct starpu_task));
  50. /* Now we can initialise fields which recquire custom value */
  51. task->priority = STARPU_DEFAULT_PRIO;
  52. task->detach = 1;
  53. task->status = STARPU_TASK_INVALID;
  54. task->predicted = NAN;
  55. task->predicted_transfer = NAN;
  56. task->magic = 42;
  57. task->sched_ctx = _starpu_get_initial_sched_ctx()->id;
  58. task->control_task = 0;
  59. task->hypervisor_tag = 0;
  60. task->flops = 0.0;
  61. task->already_pushed = 0;
  62. }
  63. /* Free all the ressources allocated for a task, without deallocating the task
  64. * structure itself (this is required for statically allocated tasks).
  65. * All values previously set by the user, like codelet and handles, remain
  66. * unchanged */
  67. void starpu_task_clean(struct starpu_task *task)
  68. {
  69. STARPU_ASSERT(task);
  70. /* If a buffer was allocated to store the profiling info, we free it. */
  71. if (task->profiling_info)
  72. {
  73. free(task->profiling_info);
  74. task->profiling_info = NULL;
  75. }
  76. /* If case the task is (still) part of a bundle */
  77. starpu_task_bundle_t bundle = task->bundle;
  78. if (bundle)
  79. starpu_task_bundle_remove(bundle, task);
  80. struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
  81. if (j) {
  82. _starpu_job_destroy(j);
  83. task->starpu_private = NULL;
  84. }
  85. }
  86. struct starpu_task * __attribute__((malloc)) starpu_task_create(void)
  87. {
  88. struct starpu_task *task;
  89. task = (struct starpu_task *) malloc(sizeof(struct starpu_task));
  90. STARPU_ASSERT(task);
  91. starpu_task_init(task);
  92. /* Dynamically allocated tasks are destroyed by default */
  93. task->destroy = 1;
  94. return task;
  95. }
  96. /* Free the ressource allocated during starpu_task_create. This function can be
  97. * called automatically after the execution of a task by setting the "destroy"
  98. * flag of the starpu_task structure (default behaviour). Calling this function
  99. * on a statically allocated task results in an undefined behaviour. */
  100. void _starpu_task_destroy(struct starpu_task *task)
  101. {
  102. /* If starpu_task_destroy is called in a callback, we just set the destroy
  103. flag. The task will be destroyed after the callback returns */
  104. if (task == starpu_task_get_current()
  105. && _starpu_get_local_worker_status() == STATUS_CALLBACK)
  106. {
  107. task->destroy = 1;
  108. }
  109. else
  110. {
  111. starpu_task_clean(task);
  112. /* TODO handle the case of task with detach = 1 and destroy = 1 */
  113. /* TODO handle the case of non terminated tasks -> return -EINVAL */
  114. free(task);
  115. }
  116. }
/* Public destruction entry point. The destroy = 1 && detach = 1 combination
 * is rejected because such tasks are destroyed automatically by the runtime
 * when they terminate. */
void starpu_task_destroy(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->destroy || !task->detach, "starpu_task_destroy must not be called for task with destroy = 1 and detach = 1");
	_starpu_task_destroy(task);
}
/* Block until the given (non-detached) task has terminated.
 * Returns 0 on success, -EINVAL for detached/synchronous tasks, and
 * -EDEADLK when the calling context may not block. */
int starpu_task_wait(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);

	STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");

	/* Detached tasks cannot be waited on; synchronous tasks were already
	 * waited for inside starpu_task_submit. */
	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or asynchronous. Waiting returns immediately\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	/* Blocking here from a worker or a callback would deadlock. */
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
	{
		_STARPU_LOG_OUT_TAG("edeadlk");
		return -EDEADLK;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	_starpu_wait_job(j);

	/* as this is a synchronous wait, the release of the job structure was
	 * deferred until now */
	if (task->destroy)
		_starpu_task_destroy(task);

	_STARPU_LOG_OUT();
	return 0;
}
  148. struct _starpu_job *_starpu_get_job_associated_to_task(struct starpu_task *task)
  149. {
  150. STARPU_ASSERT(task);
  151. if (!task->starpu_private)
  152. {
  153. struct _starpu_job *j = _starpu_job_create(task);
  154. task->starpu_private = j;
  155. }
  156. return (struct _starpu_job *)task->starpu_private;
  157. }
/* NB in case we have a regenerable task, it is possible that the job was
 * already counted. */
/* Account for job j, retain a reference on each data handle it accesses,
 * and hand it over to dependency enforcement and scheduling.
 * Returns the value of _starpu_enforce_deps_and_schedule(). */
int _starpu_submit_job(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;

	_STARPU_LOG_IN();

	/* notify bound computation of a new task */
	_starpu_bound_record(j);

	/* Bump both the global and the per-context submitted counters. */
	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);

#ifdef STARPU_USE_SCHED_CTX_HYPERVISOR
	/* Notify the hypervisor of the submission, skipping the initial
	 * context (id 0) and invalid context ids. */
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);
	if(sched_ctx != NULL && j->task->sched_ctx != 0 && j->task->sched_ctx != STARPU_NMAX_SCHED_CTXS
	   && sched_ctx->perf_counters != NULL)
	{
		_starpu_compute_buffers_footprint(j->task->cl->model, STARPU_CPU_DEFAULT, 0, j);
		sched_ctx->perf_counters->notify_submitted_job(j->task, j->footprint);
	}
#endif

	/* We retain handle reference count */
	if (task->cl)
	{
		unsigned i;
		for (i=0; i<task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = task->handles[i];
			_starpu_spin_lock(&handle->header_lock);
			handle->busy_count++;
			_starpu_spin_unlock(&handle->header_lock);
		}
	}

	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	/* Need to atomically set submitted to 1 and check dependencies, since
	 * this is concurrent with _starpu_notify_cg */
	j->terminated = 0;
	/* submitted == 2 records the re-submission of a regenerable job. */
	if (!j->submitted)
		j->submitted = 1;
	else
		j->submitted = 2;

	/* NOTE(review): j->sync_mutex is presumably released inside
	 * _starpu_enforce_deps_and_schedule — confirm in its definition. */
	int ret = _starpu_enforce_deps_and_schedule(j);

	_STARPU_LOG_OUT();
	return ret;
}
  199. void _starpu_codelet_check_deprecated_fields(struct starpu_codelet *cl)
  200. {
  201. if (!cl)
  202. return;
  203. int is_where_unset = cl->where == 0;
  204. /* Check deprecated and unset fields (where, <device>_func,
  205. * <device>_funcs) */
  206. /* CPU */
  207. if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS && cl->cpu_funcs[0])
  208. {
  209. fprintf(stderr, "[warning] [struct starpu_codelet] both cpu_func and cpu_funcs are set. Ignoring cpu_func.\n");
  210. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  211. }
  212. if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS)
  213. {
  214. cl->cpu_funcs[0] = cl->cpu_func;
  215. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  216. }
  217. if (cl->cpu_funcs[0] && cl->cpu_func == 0)
  218. {
  219. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  220. }
  221. if (cl->cpu_funcs[0] && is_where_unset)
  222. {
  223. cl->where |= STARPU_CPU;
  224. }
  225. /* CUDA */
  226. if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS && cl->cuda_funcs[0])
  227. {
  228. fprintf(stderr, "[warning] [struct starpu_codelet] both cuda_func and cuda_funcs are set. Ignoring cuda_func.\n");
  229. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  230. }
  231. if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS)
  232. {
  233. cl->cuda_funcs[0] = cl->cuda_func;
  234. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  235. }
  236. if (cl->cuda_funcs[0] && cl->cuda_func == 0)
  237. {
  238. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  239. }
  240. if (cl->cuda_funcs[0] && is_where_unset)
  241. {
  242. cl->where |= STARPU_CUDA;
  243. }
  244. /* OpenCL */
  245. if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS && cl->opencl_funcs[0])
  246. {
  247. fprintf(stderr, "[warning] [struct starpu_codelet] both opencl_func and opencl_funcs are set. Ignoring opencl_func.\n");
  248. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  249. }
  250. if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS)
  251. {
  252. cl->opencl_funcs[0] = cl->opencl_func;
  253. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  254. }
  255. if (cl->opencl_funcs[0] && cl->opencl_func == 0)
  256. {
  257. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  258. }
  259. if (cl->opencl_funcs[0] && is_where_unset)
  260. {
  261. cl->where |= STARPU_OPENCL;
  262. }
  263. /* Gordon */
  264. if (cl->gordon_func && cl->gordon_func != STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS)
  265. {
  266. cl->gordon_funcs[0] = cl->gordon_func;
  267. cl->gordon_func = STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS;
  268. }
  269. if (cl->gordon_funcs[0] && cl->gordon_func == 0)
  270. {
  271. cl->gordon_func = STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS;
  272. }
  273. if (cl->gordon_funcs[0] && is_where_unset)
  274. {
  275. cl->where = STARPU_GORDON;
  276. }
  277. }
  278. void _starpu_task_check_deprecated_fields(struct starpu_task *task)
  279. {
  280. if (task->cl)
  281. {
  282. unsigned i;
  283. for(i=0; i<task->cl->nbuffers ; i++)
  284. {
  285. if (task->buffers[i].handle && task->handles[i])
  286. {
  287. fprintf(stderr, "[warning][struct starpu_task] task->buffers[%u] and task->handles[%u] both set. Ignoring task->buffers[%u] ?\n", i, i, i);
  288. STARPU_ASSERT(task->buffers[i].mode == task->cl->modes[i]);
  289. STARPU_ABORT();
  290. }
  291. if (task->buffers[i].handle)
  292. {
  293. task->handles[i] = task->buffers[i].handle;
  294. task->cl->modes[i] = task->buffers[i].mode;
  295. }
  296. }
  297. }
  298. }
/* application should submit new tasks to StarPU through this function */
/* Returns 0 on success, -EDEADLK when a synchronous (blocking) submission is
 * attempted from a context that may not block, -ENODEV when no worker can
 * execute the codelet. */
int starpu_task_submit(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	/* magic == 42 is set by starpu_task_init: catches tasks that were
	 * never initialised. */
	STARPU_ASSERT(task->magic == 42);

	/* When several scheduling contexts exist, route a user task that was
	 * submitted with the default context to the caller's context. */
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	if(task->sched_ctx == 0 && nsched_ctxs != 1 && !task->control_task)
		task->sched_ctx = starpu_get_sched_ctx();

	int ret;
	unsigned is_sync = task->synchronous;
	_STARPU_LOG_IN();

	if (is_sync)
	{
		/* Perhaps it is not possible to submit a synchronous
		 * (blocking) task */
		if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		{
			_STARPU_LOG_OUT_TAG("EDEADLK");
			return -EDEADLK;
		}

		/* A synchronous task must not be detached: we wait on its job
		 * below. */
		task->detach = 0;
	}

	/* Migrate the deprecated API fields before looking at the codelet. */
	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		unsigned i;

		/* Check the type of worker(s) required by the task exist */
		if (!_starpu_worker_exists(task))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		/* Check buffers */
		STARPU_ASSERT(task->cl->nbuffers <= STARPU_NMAXBUFS);
		for (i = 0; i < task->cl->nbuffers; i++)
		{
			/* Make sure handles are not partitioned */
			STARPU_ASSERT(task->handles[i]->nchildren == 0);
		}

		/* In case we require that a task should be explicitely
		 * executed on a specific worker, we make sure that the worker
		 * is able to execute this task. */
		if (task->execute_on_a_specific_worker && !starpu_combined_worker_can_execute_task(task->workerid, task, 0))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		_starpu_detect_implicit_data_deps(task);

		/* Pre-load the performance models used by the schedulers. */
		if (task->cl->model && task->cl->model->symbol)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model && task->cl->power_model->symbol)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	/* If profiling is activated, we allocate a structure to store the
	 * appropriate info. */
	struct starpu_task_profiling_info *info;
	int profiling = starpu_profiling_status_get();
	info = _starpu_allocate_profiling_info_if_needed(task);
	task->profiling_info = info;

	/* The task is considered as blocked until we are sure there remains
	 * no dependency. */
	task->status = STARPU_TASK_BLOCKED;

	if (profiling)
		_starpu_clock_gettime(&info->submit_time);

	/* internally, StarPU manipulates a struct _starpu_job * which is a wrapper around a
	 * task structure, it is possible that this job structure was already
	 * allocated, for instance to enforce task dependencies. */
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	ret = _starpu_submit_job(j);

	if (is_sync)
	{
		/* Synchronous submission: block here until the task has
		 * completed; its destruction was deferred to this point. */
		_starpu_wait_job(j);
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	_STARPU_LOG_OUT();
	return ret;
}
/* Internal submission entry point: flag the task as a control task so that
 * starpu_task_submit does not re-route it to the caller's scheduling
 * context. */
int _starpu_task_submit_internally(struct starpu_task *task)
{
	task->control_task = 1;
	return starpu_task_submit(task);
}
  385. /* application should submit new tasks to StarPU through this function */
  386. int starpu_task_submit_to_ctx(struct starpu_task *task, unsigned sched_ctx_id)
  387. {
  388. task->sched_ctx = sched_ctx_id;
  389. starpu_task_submit(task);
  390. }
/* The StarPU core can submit tasks directly to the scheduler or a worker,
 * skipping dependencies completely (when it knows what it is doing). */
int _starpu_task_submit_nodeps(struct starpu_task *task)
{
	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		/* Pre-load the performance models, as starpu_task_submit
		 * would. */
		if (task->cl->model)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);

	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	j->submitted = 1;

	if (task->cl)
	{
		/* This would be done by data dependencies checking */
		unsigned i;
		for (i=0 ; i<task->cl->nbuffers ; i++)
		{
			j->ordered_buffers[i].handle = j->task->handles[i];
			j->ordered_buffers[i].mode = j->task->cl->modes[i];
		}
	}

	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

	/* Bypass dependency enforcement and push straight to the scheduler. */
	return _starpu_push_task(j);
}
/*
 * Submit a data-conversion task straight onto a specific worker's local
 * queue, bypassing the scheduler and dependency checking entirely.
 * worker->sched_mutex must be locked when calling this function.
 */
int _starpu_task_submit_conversion_task(struct starpu_task *task,
					unsigned int workerid)
{
	STARPU_ASSERT(task->cl);
	STARPU_ASSERT(task->execute_on_a_specific_worker);

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	/* We should factorize that */
	if (task->cl->model)
		_starpu_load_perfmodel(task->cl->model);

	if (task->cl->power_model)
		_starpu_load_perfmodel(task->cl->power_model);

	/* We retain handle reference count */
	unsigned i;
	for (i=0; i<task->cl->nbuffers; i++)
	{
		starpu_data_handle_t handle = task->handles[i];
		_starpu_spin_lock(&handle->header_lock);
		handle->busy_count++;
		_starpu_spin_unlock(&handle->header_lock);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);
	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	/* The conversion task skips dependency checking, so it is marked
	 * submitted and ready right away, with its buffers ordered here. */
	j->submitted = 1;
	_starpu_increment_nready_tasks();
	for (i=0 ; i<task->cl->nbuffers ; i++)
	{
		j->ordered_buffers[i].handle = j->task->handles[i];
		j->ordered_buffers[i].mode = j->task->cl->modes[i];
	}

	_STARPU_LOG_IN();

	task->status = STARPU_TASK_READY;
	_starpu_profiling_set_task_push_start_time(task);

	/* Prefetch the input data on the target worker's memory node, then
	 * queue the task at the head of that worker's local queue. */
	unsigned node = starpu_worker_get_memory_node(workerid);
	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, node);

	struct _starpu_worker *worker;
	worker = _starpu_get_worker_struct(workerid);
	starpu_task_list_push_front(&worker->local_tasks, task);

	_starpu_profiling_set_task_push_end_time(task);

	_STARPU_LOG_OUT();
	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
	return 0;
}
  470. void starpu_codelet_init(struct starpu_codelet *cl)
  471. {
  472. memset(cl, 0, sizeof(struct starpu_codelet));
  473. }
  474. void starpu_display_codelet_stats(struct starpu_codelet *cl)
  475. {
  476. unsigned worker;
  477. unsigned nworkers = starpu_worker_get_count();
  478. if (cl->name)
  479. fprintf(stderr, "Statistics for codelet %s\n", cl->name);
  480. else if (cl->model && cl->model->symbol)
  481. fprintf(stderr, "Statistics for codelet %s\n", cl->model->symbol);
  482. unsigned long total = 0;
  483. for (worker = 0; worker < nworkers; worker++)
  484. total += cl->per_worker_stats[worker];
  485. for (worker = 0; worker < nworkers; worker++)
  486. {
  487. char name[32];
  488. starpu_worker_get_name(worker, name, 32);
  489. fprintf(stderr, "\t%s -> %lu / %lu (%2.2f %%)\n", name, cl->per_worker_stats[worker], total, (100.0f*cl->per_worker_stats[worker])/total);
  490. }
  491. }
  492. /*
  493. * We wait for all the tasks that have already been submitted. Note that a
  494. * regenerable is not considered finished until it was explicitely set as
  495. * non-regenerale anymore (eg. from a callback).
  496. */
  497. int starpu_task_wait_for_all(void)
  498. {
  499. unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
  500. unsigned sched_ctx = nsched_ctxs == 1 ? 0 : starpu_get_sched_ctx();
  501. _starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx);
  502. return 0;
  503. }
/* Wait for every task that was submitted to the given scheduling context. */
int starpu_task_wait_for_all_in_ctx(unsigned sched_ctx)
{
	_starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx);
	return 0;
}
/*
 * We wait until there is no ready task any more (i.e. StarPU will not be able
 * to progress any more).
 * Returns 0, or -EDEADLK when the calling context may not block.
 */
int starpu_task_wait_for_no_ready(void)
{
	/* Blocking here from a worker or a callback would deadlock. */
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		return -EDEADLK;

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	_STARPU_TRACE_TASK_WAIT_FOR_ALL;

	/* _starpu_decrement_nready_tasks broadcasts submitted_cond when
	 * nready reaches zero. */
	while (nready > 0)
		_STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);

	return 0;
}
/* One task terminated: decrement the global counter, and wake waiters (and
 * possibly stop the drivers) when it reaches zero. */
void _starpu_decrement_nsubmitted_tasks(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	if (--nsubmitted == 0) {
		/* If termination was already requested (submitting == 0),
		 * the last finished task stops the drivers. */
		if (!config->submitting)
			config->running = 0;
		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
	}

	_STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Announce that no further tasks will be submitted; if nothing is in flight
 * any more, stop the drivers right away and wake every waiter. */
void
starpu_drivers_request_termination(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	config->submitting = 0;
	if (nsubmitted == 0) {
		config->running = 0;
		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
	}

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* One more task submitted: bump the global counter under the shared lock. */
static void _starpu_increment_nsubmitted_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	nsubmitted++;

	_STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* One more task became ready (all its dependencies are fulfilled). */
void _starpu_increment_nready_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	nready++;

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* One ready task left the ready state; wake starpu_task_wait_for_no_ready
 * waiters when the count drops to zero. */
void _starpu_decrement_nready_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	if (--nready == 0)
		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Create the TLS key used to track the task currently run by each thread.
 * NOTE(review): the pthread_key_create return value is not checked —
 * presumably acceptable at init time, but worth confirming. */
void _starpu_initialize_current_task_key(void)
{
	pthread_key_create(&current_task_key, NULL);
}
/* Return the task currently executed by the worker, or NULL if this is called
 * either from a thread that is not a task or simply because there is no task
 * being executed at the moment. */
struct starpu_task *starpu_task_get_current(void)
{
	return (struct starpu_task *) pthread_getspecific(current_task_key);
}
/* Record (in TLS) the task the calling thread is about to execute; pass NULL
 * to clear it once the task is done. */
void _starpu_set_current_task(struct starpu_task *task)
{
	pthread_setspecific(current_task_key, task);
}
  583. /*
  584. * Returns 0 if tasks does not use any multiformat handle, 1 otherwise.
  585. */
  586. int
  587. _starpu_task_uses_multiformat_handles(struct starpu_task *task)
  588. {
  589. unsigned i;
  590. for (i = 0; i < task->cl->nbuffers; i++)
  591. {
  592. if (_starpu_data_is_multiformat_handle(task->handles[i]))
  593. return 1;
  594. }
  595. return 0;
  596. }
  597. /*
  598. * Checks whether the given handle needs to be converted in order to be used on
  599. * the node given as the second argument.
  600. */
  601. int
  602. _starpu_handle_needs_conversion_task(starpu_data_handle_t handle,
  603. unsigned int node)
  604. {
  605. enum starpu_node_kind node_kind;
  606. node_kind = starpu_node_get_kind(node);
  607. /*
  608. * Here, we assume that CUDA devices and OpenCL devices use the
  609. * same data structure. A conversion is only needed when moving
  610. * data from a CPU to a GPU, or the other way around.
  611. */
  612. switch (node_kind)
  613. {
  614. case STARPU_CPU_RAM:
  615. switch(starpu_node_get_kind(handle->mf_node))
  616. {
  617. case STARPU_CPU_RAM:
  618. return 0;
  619. case STARPU_CUDA_RAM: /* Fall through */
  620. case STARPU_OPENCL_RAM:
  621. return 1;
  622. case STARPU_SPU_LS: /* Not supported */
  623. default:
  624. STARPU_ASSERT(0);
  625. }
  626. break;
  627. case STARPU_CUDA_RAM: /* Fall through */
  628. case STARPU_OPENCL_RAM:
  629. switch(starpu_node_get_kind(handle->mf_node))
  630. {
  631. case STARPU_CPU_RAM:
  632. return 1;
  633. case STARPU_CUDA_RAM:
  634. case STARPU_OPENCL_RAM:
  635. return 0;
  636. case STARPU_SPU_LS: /* Not supported */
  637. default:
  638. STARPU_ASSERT(0);
  639. }
  640. break;
  641. case STARPU_SPU_LS: /* Not supported */
  642. default:
  643. STARPU_ASSERT(0);
  644. }
  645. /* that instruction should never be reached */
  646. return -EINVAL;
  647. }
/* Return the nimpl-th CPU implementation of the codelet; the deprecated
 * cpu_func field must already have been converted by
 * _starpu_codelet_check_deprecated_fields. */
starpu_cpu_func_t _starpu_task_get_cpu_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	STARPU_ASSERT(cl->cpu_func == STARPU_MULTIPLE_CPU_IMPLEMENTATIONS);
	return cl->cpu_funcs[nimpl];
}
/* Return the nimpl-th CUDA implementation of the codelet; the deprecated
 * cuda_func field must already have been converted by
 * _starpu_codelet_check_deprecated_fields. */
starpu_cuda_func_t _starpu_task_get_cuda_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	STARPU_ASSERT(cl->cuda_func == STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS);
	return cl->cuda_funcs[nimpl];
}
/* Return the nimpl-th OpenCL implementation of the codelet; the deprecated
 * opencl_func field must already have been converted by
 * _starpu_codelet_check_deprecated_fields. */
starpu_opencl_func_t _starpu_task_get_opencl_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	STARPU_ASSERT(cl->opencl_func == STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS);
	return cl->opencl_funcs[nimpl];
}
/* Return the nimpl-th Gordon implementation of the codelet; the deprecated
 * gordon_func field must already have been converted by
 * _starpu_codelet_check_deprecated_fields. */
starpu_gordon_func_t _starpu_task_get_gordon_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	STARPU_ASSERT(cl->gordon_func == STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS);
	return cl->gordon_funcs[nimpl];
}