task.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <starpu.h>
  19. #include <starpu_profiling.h>
  20. #include <core/workers.h>
  21. #include <core/jobs.h>
  22. #include <core/task.h>
  23. #include <core/task_bundle.h>
  24. #include <common/config.h>
  25. #include <common/utils.h>
  26. #include <profiling/profiling.h>
  27. #include <profiling/bound.h>
  28. #include <math.h>
  29. #include <string.h>
  30. /* XXX this should be reinitialized when StarPU is shutdown (or we should make
  31. * sure that no task remains !) */
  32. /* TODO we could make this hierarchical to avoid contention ? */
  33. static pthread_cond_t submitted_cond = PTHREAD_COND_INITIALIZER;
  34. static pthread_mutex_t submitted_mutex = PTHREAD_MUTEX_INITIALIZER;
  35. static long int nsubmitted = 0, nready = 0;
  36. static void _starpu_increment_nsubmitted_tasks(void);
  37. /* This key stores the task currently handled by the thread, note that we
  38. * cannot use the worker structure to store that information because it is
  39. * possible that we have a task with a NULL codelet, which means its callback
  40. * could be executed by a user thread as well. */
  41. static pthread_key_t current_task_key;
/* Initialise a (possibly statically allocated) task structure to its default
 * values.  Must be called before any field of the task is used; tasks
 * obtained from starpu_task_create() are already initialised. */
void starpu_task_init(struct starpu_task *task)
{
	/* TODO: memcpy from a template instead? benchmark it */
	STARPU_ASSERT(task);

	/* As most of the fields must be initialised at NULL, let's put 0
	 * everywhere */
	memset(task, 0, sizeof(struct starpu_task));

	/* Now we can initialise fields which require a custom value */
#if STARPU_DEFAULT_PRIO != 0
	task->priority = STARPU_DEFAULT_PRIO;
#endif

	/* By default nobody waits for the task: it is detached. */
	task->detach = 1;

#if STARPU_TASK_INVALID != 0
	task->status = STARPU_TASK_INVALID;
#endif

	/* NaN means "no performance prediction available yet". */
	task->predicted = NAN;
	task->predicted_transfer = NAN;

	/* Sanity marker checked by starpu_task_submit() to catch tasks that
	 * were never initialised. */
	task->magic = 42;
}
/* Free all the resources allocated for a task, without deallocating the task
 * structure itself (this is required for statically allocated tasks).
 * All values previously set by the user, like codelet and handles, remain
 * unchanged. */
void starpu_task_clean(struct starpu_task *task)
{
	STARPU_ASSERT(task);

	/* If a buffer was allocated to store the profiling info, we free it. */
	if (task->profiling_info)
	{
		free(task->profiling_info);
		task->profiling_info = NULL;
	}

	/* In case the task is (still) part of a bundle, detach it first so the
	 * bundle no longer references a cleaned task. */
	starpu_task_bundle_t bundle = task->bundle;
	if (bundle)
		starpu_task_bundle_remove(bundle, task);

	/* Destroy the internal job wrapper, if one was ever created for this
	 * task (see _starpu_get_job_associated_to_task()). */
	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
	if (j)
	{
		_starpu_job_destroy(j);
		task->starpu_private = NULL;
	}
}
  84. struct starpu_task * __attribute__((malloc)) starpu_task_create(void)
  85. {
  86. struct starpu_task *task;
  87. task = (struct starpu_task *) malloc(sizeof(struct starpu_task));
  88. STARPU_ASSERT(task);
  89. starpu_task_init(task);
  90. /* Dynamically allocated tasks are destroyed by default */
  91. task->destroy = 1;
  92. return task;
  93. }
/* Free the resources allocated during starpu_task_create. This function can be
 * called automatically after the execution of a task by setting the "destroy"
 * flag of the starpu_task structure (default behaviour). Calling this function
 * on a statically allocated task results in an undefined behaviour. */
void _starpu_task_destroy(struct starpu_task *task)
{
	/* If starpu_task_destroy is called in a callback, we just set the destroy
	 * flag. The task will be destroyed after the callback returns: freeing
	 * it here would leave the worker's callback code with a dangling task. */
	if (task == starpu_task_get_current()
	    && _starpu_get_local_worker_status() == STATUS_CALLBACK)
	{
		task->destroy = 1;
	}
	else
	{
		starpu_task_clean(task);
		/* TODO handle the case of task with detach = 1 and destroy = 1 */
		/* TODO handle the case of non terminated tasks -> return -EINVAL */
		free(task);
	}
}
/* Public entry point for destroying a dynamically allocated task.  Tasks with
 * both destroy = 1 and detach = 1 are freed automatically by StarPU, so a
 * manual destroy would be a double free — hence the assertion. */
void starpu_task_destroy(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->destroy || !task->detach, "starpu_task_destroy must not be called for task with destroy = 1 and detach = 1");
	_starpu_task_destroy(task);
}
/* Block until the given task has terminated.
 * Returns 0 on success, -EINVAL for detached or synchronous tasks (there is
 * nothing to wait for), -EDEADLK when called from a context that may not
 * block (e.g. a worker thread). */
int starpu_task_wait(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);

	STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");

	if (task->detach || task->synchronous)
	{
		/* NOTE(review): message says "asynchronous" but this branch is
		 * taken for synchronous tasks (already waited at submit) —
		 * looks like a wording slip; confirm before changing it. */
		_STARPU_DEBUG("Task is detached or asynchronous. Waiting returns immediately\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	/* Refuse to block from a context where blocking could deadlock. */
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
	{
		_STARPU_LOG_OUT_TAG("edeadlk");
		return -EDEADLK;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	_starpu_wait_job(j);

	/* as this is a synchronous wait, the liberation of the job
	 * structure was deferred */
	if (task->destroy)
		_starpu_task_destroy(task);

	_STARPU_LOG_OUT();
	return 0;
}
  146. struct _starpu_job *_starpu_get_job_associated_to_task(struct starpu_task *task)
  147. {
  148. STARPU_ASSERT(task);
  149. if (!task->starpu_private)
  150. {
  151. struct _starpu_job *j = _starpu_job_create(task);
  152. task->starpu_private = j;
  153. }
  154. return (struct _starpu_job *)task->starpu_private;
  155. }
/* Hand a job over to the dependency-enforcement / scheduling machinery.
 * NB in case we have a regenerable task, it is possible that the job was
 * already counted. */
int _starpu_submit_job(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;

	_STARPU_LOG_IN();

	/* notify bound computation of a new task */
	_starpu_bound_record(j);

	_starpu_increment_nsubmitted_tasks();

	/* We retain handle reference count: each handle used by the task gets
	 * its busy_count bumped under its header lock so it cannot be freed
	 * while the task is in flight. */
	if (task->cl)
	{
		unsigned i;
		for (i=0; i<task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = task->handles[i];
			_starpu_spin_lock(&handle->header_lock);
			handle->busy_count++;
			_starpu_spin_unlock(&handle->header_lock);
		}
	}

	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	/* Need to atomically set submitted to 1 and check dependencies, since
	 * this is concurrent with _starpu_notify_cg.  submitted == 2 marks a
	 * re-submission (the flag was already set). */
	j->terminated = 0;
	if (!j->submitted)
		j->submitted = 1;
	else
		j->submitted = 2;

	int ret = _starpu_enforce_deps_and_schedule(j);

	_STARPU_LOG_OUT();
	return ret;
}
  187. void _starpu_codelet_check_deprecated_fields(struct starpu_codelet *cl)
  188. {
  189. if (!cl)
  190. return;
  191. int is_where_unset = cl->where == 0;
  192. /* Check deprecated and unset fields (where, <device>_func,
  193. * <device>_funcs) */
  194. /* CPU */
  195. if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS && cl->cpu_funcs[0])
  196. {
  197. _STARPU_DISP("[warning] [struct starpu_codelet] both cpu_func and cpu_funcs are set. Ignoring cpu_func.\n");
  198. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  199. }
  200. if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS)
  201. {
  202. cl->cpu_funcs[0] = cl->cpu_func;
  203. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  204. }
  205. if (cl->cpu_funcs[0] && cl->cpu_func == 0)
  206. {
  207. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  208. }
  209. if (cl->cpu_funcs[0] && is_where_unset)
  210. {
  211. cl->where |= STARPU_CPU;
  212. }
  213. /* CUDA */
  214. if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS && cl->cuda_funcs[0])
  215. {
  216. _STARPU_DISP("[warning] [struct starpu_codelet] both cuda_func and cuda_funcs are set. Ignoring cuda_func.\n");
  217. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  218. }
  219. if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS)
  220. {
  221. cl->cuda_funcs[0] = cl->cuda_func;
  222. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  223. }
  224. if (cl->cuda_funcs[0] && cl->cuda_func == 0)
  225. {
  226. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  227. }
  228. if (cl->cuda_funcs[0] && is_where_unset)
  229. {
  230. cl->where |= STARPU_CUDA;
  231. }
  232. /* OpenCL */
  233. if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS && cl->opencl_funcs[0])
  234. {
  235. _STARPU_DISP("[warning] [struct starpu_codelet] both opencl_func and opencl_funcs are set. Ignoring opencl_func.\n");
  236. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  237. }
  238. if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS)
  239. {
  240. cl->opencl_funcs[0] = cl->opencl_func;
  241. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  242. }
  243. if (cl->opencl_funcs[0] && cl->opencl_func == 0)
  244. {
  245. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  246. }
  247. if (cl->opencl_funcs[0] && is_where_unset)
  248. {
  249. cl->where |= STARPU_OPENCL;
  250. }
  251. /* Gordon */
  252. if (cl->gordon_func && cl->gordon_func != STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS)
  253. {
  254. cl->gordon_funcs[0] = cl->gordon_func;
  255. cl->gordon_func = STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS;
  256. }
  257. if (cl->gordon_funcs[0] && cl->gordon_func == 0)
  258. {
  259. cl->gordon_func = STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS;
  260. }
  261. if (cl->gordon_funcs[0] && is_where_unset)
  262. {
  263. cl->where = STARPU_GORDON;
  264. }
  265. }
/* Migrate the deprecated task->buffers[] array to the current
 * task->handles[] / cl->modes[] form.  Setting both forms at once is an
 * error: a warning is printed and execution is aborted. */
void _starpu_task_check_deprecated_fields(struct starpu_task *task)
{
	if (task->cl)
	{
		unsigned i;
		for(i=0; i<task->cl->nbuffers ; i++)
		{
			if (task->buffers[i].handle && task->handles[i])
			{
				/* NOTE(review): the mode assertion below is
				 * dead once STARPU_ABORT() follows — the abort
				 * fires regardless; presumably intentional to
				 * show the mismatch first, but verify. */
				_STARPU_DISP("[warning][struct starpu_task] task->buffers[%u] and task->handles[%u] both set. Ignoring task->buffers[%u] ?\n", i, i, i);
				STARPU_ASSERT(task->buffers[i].mode == task->cl->modes[i]);
				STARPU_ABORT();
			}
			if (task->buffers[i].handle)
			{
				/* Copy the legacy fields into the new ones. */
				task->handles[i] = task->buffers[i].handle;
				task->cl->modes[i] = task->buffers[i].mode;
			}
		}
	}
}
/* application should submit new tasks to StarPU through this function.
 * Returns 0 on success, -EDEADLK when a synchronous submit is attempted from
 * a non-blocking context, -ENODEV when no worker can run the task, or the
 * result of _starpu_submit_job(). */
int starpu_task_submit(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	/* Catch tasks that were never passed through starpu_task_init(). */
	STARPU_ASSERT(task->magic == 42);

	int ret;
	unsigned is_sync = task->synchronous;
	starpu_task_bundle_t bundle = task->bundle;
	_STARPU_LOG_IN();

	if (is_sync)
	{
		/* Perhaps it is not possible to submit a synchronous
		 * (blocking) task */
		if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		{
			_STARPU_LOG_OUT_TAG("EDEADLK");
			return -EDEADLK;
		}

		/* A synchronous task is waited for below, so it must not be
		 * detached. */
		task->detach = 0;
	}

	/* Normalise deprecated task/codelet fields before inspecting them. */
	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		unsigned i;

		/* Check buffers */
		STARPU_ASSERT(task->cl->nbuffers <= STARPU_NMAXBUFS);
		for (i = 0; i < task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = task->handles[i];

			/* Make sure handles are not partitioned */
			STARPU_ASSERT_MSG(handle->nchildren == 0, "only unpartitioned data can be used in a task");

			/* Provide the home interface for now if any,
			 * for can_execute hooks */
			if (handle->home_node != -1)
				task->interfaces[i] = starpu_data_get_interface_on_node(task->handles[i], handle->home_node);
		}

		/* Check the type of worker(s) required by the task exist */
		if (!_starpu_worker_exists(task))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		/* In case we require that a task should be explicitly
		 * executed on a specific worker, we make sure that the worker
		 * is able to execute this task. */
		if (task->execute_on_a_specific_worker && !starpu_combined_worker_can_execute_task(task->workerid, task, 0))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		_starpu_detect_implicit_data_deps(task);

		/* Preload performance models so the scheduler can use them. */
		if (task->cl->model && task->cl->model->symbol)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model && task->cl->power_model->symbol)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	if (bundle)
	{
		/* We need to make sure that models for other tasks of the
		 * bundle are also loaded, so the scheduler can estimate the
		 * duration of the whole bundle */
		_STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);

		struct _starpu_task_bundle_entry *entry;
		entry = bundle->list;

		while (entry)
		{
			if (entry->task->cl->model && entry->task->cl->model->symbol)
				_starpu_load_perfmodel(entry->task->cl->model);

			if (entry->task->cl->power_model && entry->task->cl->power_model->symbol)
				_starpu_load_perfmodel(entry->task->cl->power_model);

			entry = entry->next;
		}

		_STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
	}

	/* If profiling is activated, we allocate a structure to store the
	 * appropriate info. */
	struct starpu_task_profiling_info *info;
	int profiling = starpu_profiling_status_get();
	info = _starpu_allocate_profiling_info_if_needed(task);
	task->profiling_info = info;

	/* The task is considered blocked until we are sure no dependency
	 * remains. */
	task->status = STARPU_TASK_BLOCKED;

	if (profiling)
		_starpu_clock_gettime(&info->submit_time);

	/* internally, StarPU manipulates a struct _starpu_job * which is a wrapper around a
	 * task structure, it is possible that this job structure was already
	 * allocated, for instance to enforce task dependencies. */
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	ret = _starpu_submit_job(j);

	if (is_sync)
	{
		/* Synchronous submission: wait for completion right here, and
		 * honour destroy = 1 since nobody else will. */
		_starpu_wait_job(j);
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	_STARPU_LOG_OUT();
	return ret;
}
/* The StarPU core can submit tasks directly to the scheduler or a worker,
 * skipping dependencies completely (when it knows what it is doing). */
int _starpu_task_submit_nodeps(struct starpu_task *task)
{
	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		/* Unlike starpu_task_submit(), models are loaded without
		 * checking for a symbol. */
		if (task->cl->model)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_starpu_increment_nsubmitted_tasks();

	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	j->submitted = 1;

	if (task->cl)
	{
		/* This would be done by data dependencies checking */
		unsigned i;
		for (i=0 ; i<task->cl->nbuffers ; i++)
		{
			j->ordered_buffers[i].handle = j->task->handles[i];
			j->ordered_buffers[i].mode = j->task->cl->modes[i];
		}
	}

	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

	return _starpu_push_task(j);
}
/*
 * Submit a data-conversion task straight onto a specific worker's local
 * queue, bypassing the scheduler and dependency checking.
 * worker->sched_mutex must be locked when calling this function.
 */
int _starpu_task_submit_conversion_task(struct starpu_task *task,
					unsigned int workerid)
{
	STARPU_ASSERT(task->cl);
	STARPU_ASSERT(task->execute_on_a_specific_worker);

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	/* We should factorize that */
	if (task->cl->model)
		_starpu_load_perfmodel(task->cl->model);

	if (task->cl->power_model)
		_starpu_load_perfmodel(task->cl->power_model);

	/* We retain handle reference count: bump each handle's busy_count
	 * under its header lock so it survives while the task is queued. */
	unsigned i;
	for (i=0; i<task->cl->nbuffers; i++)
	{
		starpu_data_handle_t handle = task->handles[i];
		_starpu_spin_lock(&handle->header_lock);
		handle->busy_count++;
		_starpu_spin_unlock(&handle->header_lock);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_starpu_increment_nsubmitted_tasks();

	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	j->submitted = 1;
	_starpu_increment_nready_tasks();

	/* Fill the ordered buffer list by hand, since dependency checking
	 * (which normally does it) is skipped here. */
	for (i=0 ; i<task->cl->nbuffers ; i++)
	{
		j->ordered_buffers[i].handle = j->task->handles[i];
		j->ordered_buffers[i].mode = j->task->cl->modes[i];
	}

	_STARPU_LOG_IN();

	task->status = STARPU_TASK_READY;
	_starpu_profiling_set_task_push_start_time(task);

	/* Optionally prefetch the task's input data onto the target worker's
	 * memory node. */
	unsigned node = starpu_worker_get_memory_node(workerid);
	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, node);

	struct _starpu_worker *worker;
	worker = _starpu_get_worker_struct(workerid);
	starpu_task_list_push_front(&worker->local_tasks, task);

	_starpu_profiling_set_task_push_end_time(task);

	_STARPU_LOG_OUT();

	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
	return 0;
}
  464. void starpu_codelet_init(struct starpu_codelet *cl)
  465. {
  466. memset(cl, 0, sizeof(struct starpu_codelet));
  467. }
  468. void starpu_display_codelet_stats(struct starpu_codelet *cl)
  469. {
  470. unsigned worker;
  471. unsigned nworkers = starpu_worker_get_count();
  472. if (cl->name)
  473. fprintf(stderr, "Statistics for codelet %s\n", cl->name);
  474. else if (cl->model && cl->model->symbol)
  475. fprintf(stderr, "Statistics for codelet %s\n", cl->model->symbol);
  476. unsigned long total = 0;
  477. for (worker = 0; worker < nworkers; worker++)
  478. total += cl->per_worker_stats[worker];
  479. for (worker = 0; worker < nworkers; worker++)
  480. {
  481. char name[32];
  482. starpu_worker_get_name(worker, name, 32);
  483. fprintf(stderr, "\t%s -> %lu / %lu (%2.2f %%)\n", name, cl->per_worker_stats[worker], total, (100.0f*cl->per_worker_stats[worker])/total);
  484. }
  485. }
/*
 * We wait for all the tasks that have already been submitted. Note that a
 * regenerable task is not considered finished until it was explicitly set as
 * non-regenerable anymore (eg. from a callback).
 * Returns 0, or -EDEADLK when called from a context that may not block.
 */
int starpu_task_wait_for_all(void)
{
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		return -EDEADLK;

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	_STARPU_TRACE_TASK_WAIT_FOR_ALL;

	/* submitted_cond is broadcast by _starpu_decrement_nsubmitted_tasks()
	 * when the counter reaches zero. */
	while (nsubmitted > 0)
		_STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);

	return 0;
}
/*
 * We wait until there is no ready task any more (i.e. StarPU will not be able
 * to progress any more).
 * Returns 0, or -EDEADLK when called from a context that may not block.
 */
int starpu_task_wait_for_no_ready(void)
{
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		return -EDEADLK;

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	/* NOTE(review): this reuses the WAIT_FOR_ALL trace event; there may be
	 * no dedicated event for wait_for_no_ready — confirm. */
	_STARPU_TRACE_TASK_WAIT_FOR_ALL;

	/* submitted_cond is broadcast by _starpu_decrement_nready_tasks()
	 * when nready reaches zero. */
	while (nready > 0)
		_STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);

	return 0;
}
/* Account for one task leaving the system.  When the last submitted task
 * terminates, wake all waiters and, if submissions were closed via
 * starpu_drivers_request_termination(), stop the runtime. */
void _starpu_decrement_nsubmitted_tasks(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	if (--nsubmitted == 0)
	{
		if (!config->submitting)
			config->running = 0;

		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
	}

	_STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Declare that no more tasks will be submitted: if nothing is in flight, stop
 * the runtime immediately, otherwise _starpu_decrement_nsubmitted_tasks()
 * will stop it once the last task terminates. */
void
starpu_drivers_request_termination(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	config->submitting = 0;
	if (nsubmitted == 0)
	{
		config->running = 0;
		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
	}

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Account for one more task entering the system (under submitted_mutex). */
static void _starpu_increment_nsubmitted_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	nsubmitted++;

	_STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* One more task became ready (all its dependencies are resolved). */
void _starpu_increment_nready_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	nready++;

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* One ready task was consumed; wake starpu_task_wait_for_no_ready() waiters
 * when the ready count drops to zero. */
void _starpu_decrement_nready_tasks(void)
{
	_STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	if (--nready == 0)
		_STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);

	_STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Create the thread-local key used to track the task a thread is executing.
 * NOTE(review): pthread_key_create's return value is ignored; failure here
 * would silently break starpu_task_get_current() — consider checking it. */
void _starpu_initialize_current_task_key(void)
{
	pthread_key_create(&current_task_key, NULL);
}
  565. /* Return the task currently executed by the worker, or NULL if this is called
  566. * either from a thread that is not a task or simply because there is no task
  567. * being executed at the moment. */
  568. struct starpu_task *starpu_task_get_current(void)
  569. {
  570. return (struct starpu_task *) pthread_getspecific(current_task_key);
  571. }
  572. void _starpu_set_current_task(struct starpu_task *task)
  573. {
  574. pthread_setspecific(current_task_key, task);
  575. }
  576. /*
  577. * Returns 0 if tasks does not use any multiformat handle, 1 otherwise.
  578. */
  579. int
  580. _starpu_task_uses_multiformat_handles(struct starpu_task *task)
  581. {
  582. unsigned i;
  583. for (i = 0; i < task->cl->nbuffers; i++)
  584. {
  585. if (_starpu_data_is_multiformat_handle(task->handles[i]))
  586. return 1;
  587. }
  588. return 0;
  589. }
  590. /*
  591. * Checks whether the given handle needs to be converted in order to be used on
  592. * the node given as the second argument.
  593. */
  594. int
  595. _starpu_handle_needs_conversion_task(starpu_data_handle_t handle,
  596. unsigned int node)
  597. {
  598. enum starpu_node_kind node_kind;
  599. node_kind = starpu_node_get_kind(node);
  600. /*
  601. * Here, we assume that CUDA devices and OpenCL devices use the
  602. * same data structure. A conversion is only needed when moving
  603. * data from a CPU to a GPU, or the other way around.
  604. */
  605. switch (node_kind)
  606. {
  607. case STARPU_CPU_RAM:
  608. switch(starpu_node_get_kind(handle->mf_node))
  609. {
  610. case STARPU_CPU_RAM:
  611. return 0;
  612. case STARPU_CUDA_RAM: /* Fall through */
  613. case STARPU_OPENCL_RAM:
  614. return 1;
  615. case STARPU_SPU_LS: /* Not supported */
  616. default:
  617. STARPU_ABORT();
  618. }
  619. break;
  620. case STARPU_CUDA_RAM: /* Fall through */
  621. case STARPU_OPENCL_RAM:
  622. switch(starpu_node_get_kind(handle->mf_node))
  623. {
  624. case STARPU_CPU_RAM:
  625. return 1;
  626. case STARPU_CUDA_RAM:
  627. case STARPU_OPENCL_RAM:
  628. return 0;
  629. case STARPU_SPU_LS: /* Not supported */
  630. default:
  631. STARPU_ABORT();
  632. }
  633. break;
  634. case STARPU_SPU_LS: /* Not supported */
  635. default:
  636. STARPU_ABORT();
  637. }
  638. /* that instruction should never be reached */
  639. return -EINVAL;
  640. }
  641. starpu_cpu_func_t _starpu_task_get_cpu_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
  642. {
  643. STARPU_ASSERT(cl->cpu_func == STARPU_MULTIPLE_CPU_IMPLEMENTATIONS);
  644. return cl->cpu_funcs[nimpl];
  645. }
  646. starpu_cuda_func_t _starpu_task_get_cuda_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
  647. {
  648. STARPU_ASSERT(cl->cuda_func == STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS);
  649. return cl->cuda_funcs[nimpl];
  650. }
  651. starpu_opencl_func_t _starpu_task_get_opencl_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
  652. {
  653. STARPU_ASSERT(cl->opencl_func == STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS);
  654. return cl->opencl_funcs[nimpl];
  655. }
  656. starpu_gordon_func_t _starpu_task_get_gordon_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
  657. {
  658. STARPU_ASSERT(cl->gordon_func == STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS);
  659. return cl->gordon_funcs[nimpl];
  660. }