task.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <starpu.h>
  19. #include <starpu_profiling.h>
  20. #include <core/workers.h>
  21. #include <core/jobs.h>
  22. #include <core/task.h>
  23. #include <core/task_bundle.h>
  24. #include <common/config.h>
  25. #include <common/utils.h>
  26. #include <profiling/profiling.h>
  27. #include <profiling/bound.h>
  28. #include <math.h>
  29. #include <string.h>
/* XXX this should be reinitialized when StarPU is shutdown (or we should make
 * sure that no task remains !) */
/* TODO we could make this hierarchical to avoid contention ? */
/* Condition/mutex pair protecting the global task counters below; the
 * condition is broadcast whenever one of the counters drops back to zero. */
static pthread_cond_t submitted_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t submitted_mutex = PTHREAD_MUTEX_INITIALIZER;
/* nsubmitted: tasks submitted and not yet terminated.
 * nready: tasks accounted as ready (see _starpu_increment_nready_tasks). */
static long int nsubmitted = 0, nready = 0;
static void _starpu_increment_nsubmitted_tasks(void);
/* This key stores the task currently handled by the thread, note that we
 * cannot use the worker structure to store that information because it is
 * possible that we have a task with a NULL codelet, which means its callback
 * could be executed by a user thread as well. */
static pthread_key_t current_task_key;
  42. void starpu_task_init(struct starpu_task *task)
  43. {
  44. STARPU_ASSERT(task);
  45. task->cl = NULL;
  46. task->cl_arg = NULL;
  47. task->cl_arg_size = 0;
  48. task->callback_func = NULL;
  49. task->callback_arg = NULL;
  50. task->priority = STARPU_DEFAULT_PRIO;
  51. task->use_tag = 0;
  52. task->synchronous = 0;
  53. task->execute_on_a_specific_worker = 0;
  54. task->bundle = NULL;
  55. task->detach = 1;
  56. /* by default, we do not let StarPU free the task structure since
  57. * starpu_task_init is likely to be used only for statically allocated
  58. * tasks */
  59. task->destroy = 0;
  60. task->regenerate = 0;
  61. task->status = STARPU_TASK_INVALID;
  62. task->profiling_info = NULL;
  63. task->predicted = NAN;
  64. task->predicted_transfer = NAN;
  65. task->starpu_private = NULL;
  66. task->magic = 42;
  67. }
  68. /* Free all the ressources allocated for a task, without deallocating the task
  69. * structure itself (this is required for statically allocated tasks). */
  70. void starpu_task_deinit(struct starpu_task *task)
  71. {
  72. STARPU_ASSERT(task);
  73. /* If a buffer was allocated to store the profiling info, we free it. */
  74. if (task->profiling_info)
  75. {
  76. free(task->profiling_info);
  77. task->profiling_info = NULL;
  78. }
  79. /* If case the task is (still) part of a bundle */
  80. starpu_task_bundle_t bundle = task->bundle;
  81. if (bundle)
  82. starpu_task_bundle_remove(bundle, task);
  83. struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
  84. if (j)
  85. _starpu_job_destroy(j);
  86. }
  87. struct starpu_task * __attribute__((malloc)) starpu_task_create(void)
  88. {
  89. struct starpu_task *task;
  90. task = (struct starpu_task *) calloc(1, sizeof(struct starpu_task));
  91. STARPU_ASSERT(task);
  92. starpu_task_init(task);
  93. /* Dynamically allocated tasks are destroyed by default */
  94. task->destroy = 1;
  95. return task;
  96. }
  97. /* Free the ressource allocated during starpu_task_create. This function can be
  98. * called automatically after the execution of a task by setting the "destroy"
  99. * flag of the starpu_task structure (default behaviour). Calling this function
  100. * on a statically allocated task results in an undefined behaviour. */
  101. void _starpu_task_destroy(struct starpu_task *task)
  102. {
  103. /* If starpu_task_destroy is called in a callback, we just set the destroy
  104. flag. The task will be destroyed after the callback returns */
  105. if (task == starpu_task_get_current()
  106. && _starpu_get_local_worker_status() == STATUS_CALLBACK)
  107. {
  108. task->destroy = 1;
  109. }
  110. else
  111. {
  112. starpu_task_deinit(task);
  113. /* TODO handle the case of task with detach = 1 and destroy = 1 */
  114. /* TODO handle the case of non terminated tasks -> return -EINVAL */
  115. free(task);
  116. }
  117. }
  118. void starpu_task_destroy(struct starpu_task *task)
  119. {
  120. STARPU_ASSERT(task);
  121. STARPU_ASSERT_MSG(!task->destroy || !task->detach, "starpu_task_destroy must not be called for task with destroy = 1 and detach = 1");
  122. _starpu_task_destroy(task);
  123. }
  124. int starpu_task_wait(struct starpu_task *task)
  125. {
  126. _STARPU_LOG_IN();
  127. STARPU_ASSERT(task);
  128. STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");
  129. if (task->detach || task->synchronous)
  130. {
  131. _STARPU_DEBUG("Task is detached or asynchronous. Waiting returns immediately\n");
  132. _STARPU_LOG_OUT_TAG("einval");
  133. return -EINVAL;
  134. }
  135. if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
  136. {
  137. _STARPU_LOG_OUT_TAG("edeadlk");
  138. return -EDEADLK;
  139. }
  140. struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
  141. _starpu_wait_job(j);
  142. /* as this is a synchronous task, the liberation of the job
  143. structure was deferred */
  144. if (task->destroy)
  145. _starpu_task_destroy(task);
  146. _STARPU_LOG_OUT();
  147. return 0;
  148. }
  149. struct _starpu_job *_starpu_get_job_associated_to_task(struct starpu_task *task)
  150. {
  151. STARPU_ASSERT(task);
  152. if (!task->starpu_private)
  153. {
  154. struct _starpu_job *j = _starpu_job_create(task);
  155. task->starpu_private = j;
  156. }
  157. return (struct _starpu_job *)task->starpu_private;
  158. }
/* NB in case we have a regenerable task, it is possible that the job was
 * already counted. */
/* Account @j as submitted and hand it to the dependency-enforcement /
 * scheduling path; returns that path's result.
 * NOTE(review): j->sync_mutex is locked here but never unlocked in this
 * function — presumably _starpu_enforce_deps_and_schedule releases it;
 * confirm against its implementation. */
int _starpu_submit_job(struct _starpu_job *j)
{
	_STARPU_LOG_IN();
	/* notify bound computation of a new task */
	_starpu_bound_record(j);
	_starpu_increment_nsubmitted_tasks();
	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	/* Need to atomically set submitted to 1 and check dependencies, since
	 * this is concurrent with _starpu_notify_cg */
	j->terminated = 0;
	j->submitted = 1;
	int ret = _starpu_enforce_deps_and_schedule(j);
	_STARPU_LOG_OUT();
	return ret;
}
  176. void _starpu_codelet_check_deprecated_fields(struct starpu_codelet *cl)
  177. {
  178. if (!cl)
  179. return;
  180. int is_where_unset = cl->where == 0;
  181. /* Check deprecated and unset fields (where, <device>_func,
  182. * <device>_funcs) */
  183. /* CPU */
  184. if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS && cl->cpu_funcs[0])
  185. {
  186. fprintf(stderr, "[warning] [struct starpu_codelet] both cpu_func and cpu_funcs are set. Ignoring cpu_func.\n");
  187. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  188. }
  189. if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS)
  190. {
  191. cl->cpu_funcs[0] = cl->cpu_func;
  192. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  193. }
  194. if (cl->cpu_funcs[0] && cl->cpu_func == 0)
  195. {
  196. cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
  197. }
  198. if (cl->cpu_funcs[0] && is_where_unset)
  199. {
  200. cl->where |= STARPU_CPU;
  201. }
  202. /* CUDA */
  203. if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS && cl->cuda_funcs[0])
  204. {
  205. fprintf(stderr, "[warning] [struct starpu_codelet] both cuda_func and cuda_funcs are set. Ignoring cuda_func.\n");
  206. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  207. }
  208. if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS)
  209. {
  210. cl->cuda_funcs[0] = cl->cuda_func;
  211. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  212. }
  213. if (cl->cuda_funcs[0] && cl->cuda_func == 0)
  214. {
  215. cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
  216. }
  217. if (cl->cuda_funcs[0] && is_where_unset)
  218. {
  219. cl->where |= STARPU_CUDA;
  220. }
  221. /* OpenCL */
  222. if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS && cl->opencl_funcs[0])
  223. {
  224. fprintf(stderr, "[warning] [struct starpu_codelet] both opencl_func and opencl_funcs are set. Ignoring opencl_func.\n");
  225. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  226. }
  227. if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS)
  228. {
  229. cl->opencl_funcs[0] = cl->opencl_func;
  230. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  231. }
  232. if (cl->opencl_funcs[0] && cl->opencl_func == 0)
  233. {
  234. cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
  235. }
  236. if (cl->opencl_funcs[0] && is_where_unset)
  237. {
  238. cl->where |= STARPU_OPENCL;
  239. }
  240. /* Gordon */
  241. if (cl->gordon_func && cl->gordon_func != STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS)
  242. {
  243. cl->gordon_funcs[0] = cl->gordon_func;
  244. cl->gordon_func = STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS;
  245. }
  246. if (cl->gordon_funcs[0] && cl->gordon_func == 0)
  247. {
  248. cl->gordon_func = STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS;
  249. }
  250. if (cl->gordon_funcs[0] && is_where_unset)
  251. {
  252. cl->where = STARPU_GORDON;
  253. }
  254. }
  255. void _starpu_task_check_deprecated_fields(struct starpu_task *task)
  256. {
  257. if (task->cl)
  258. {
  259. unsigned i;
  260. for(i=0; i<task->cl->nbuffers ; i++)
  261. {
  262. if (task->buffers[i].handle && task->handles[i])
  263. {
  264. fprintf(stderr, "[warning][struct starpu_task] task->buffers[%u] and task->handles[%u] both set. Ignoring task->buffers[%u] ?\n", i, i, i);
  265. STARPU_ASSERT(task->buffers[i].mode == task->cl->modes[i]);
  266. STARPU_ABORT();
  267. }
  268. if (task->buffers[i].handle)
  269. {
  270. task->handles[i] = task->buffers[i].handle;
  271. task->cl->modes[i] = task->buffers[i].mode;
  272. }
  273. task->buffers[i].handle = NULL;
  274. task->buffers[i].mode = STARPU_NONE;
  275. }
  276. }
  277. }
/* application should submit new tasks to StarPU through this function */
/* Submit @task for (possibly synchronous) execution.
 * Returns 0 on success, -EDEADLK when a synchronous submission is attempted
 * from a context that may not block, -ENODEV when no worker can execute the
 * codelet, or the result of _starpu_submit_job otherwise. */
int starpu_task_submit(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	/* Magic value set by starpu_task_init: catches uninitialized tasks. */
	STARPU_ASSERT(task->magic == 42);

	int ret;
	unsigned is_sync = task->synchronous;
	_STARPU_LOG_IN();

	if (is_sync)
	{
		/* Perhaps it is not possible to submit a synchronous
		 * (blocking) task */
		if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		{
			_STARPU_LOG_OUT_TAG("EDEADLK");
			return -EDEADLK;
		}

		/* A synchronous task is waited upon below, so it must not be
		 * detached. */
		task->detach = 0;
	}

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		unsigned i;

		/* Check the type of worker(s) required by the task exist */
		if (!_starpu_worker_exists(task))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		/* Check buffers */
		STARPU_ASSERT(task->cl->nbuffers <= STARPU_NMAXBUFS);
		for (i = 0; i < task->cl->nbuffers; i++)
		{
			/* Make sure handles are not partitioned */
			STARPU_ASSERT(task->handles[i]->nchildren == 0);
		}

		/* In case we require that a task should be explicitely
		 * executed on a specific worker, we make sure that the worker
		 * is able to execute this task. */
		if (task->execute_on_a_specific_worker && !starpu_combined_worker_can_execute_task(task->workerid, task, 0))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		_starpu_detect_implicit_data_deps(task);

		/* Make sure the performance models are loaded before scheduling. */
		if (task->cl->model)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	/* If profiling is activated, we allocate a structure to store the
	 * appropriate info. */
	struct starpu_task_profiling_info *info;
	int profiling = starpu_profiling_status_get();
	info = _starpu_allocate_profiling_info_if_needed(task);
	task->profiling_info = info;

	/* The task is considered blocked until we are sure there remains no
	 * dependency. */
	task->status = STARPU_TASK_BLOCKED;

	if (profiling)
		_starpu_clock_gettime(&info->submit_time);

	/* internally, StarPU manipulates a struct _starpu_job * which is a wrapper around a
	 * task structure, it is possible that this job structure was already
	 * allocated, for instance to enforce task dependencies. */
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	ret = _starpu_submit_job(j);

	if (is_sync)
	{
		/* Synchronous submission: wait right here; destruction was
		 * deferred until after the wait. */
		_starpu_wait_job(j);
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	_STARPU_LOG_OUT();
	return ret;
}
  354. /* The StarPU core can submit tasks directly to the scheduler or a worker,
  355. * skipping dependencies completely (when it knows what it is doing). */
  356. int _starpu_task_submit_nodeps(struct starpu_task *task)
  357. {
  358. _starpu_task_check_deprecated_fields(task);
  359. _starpu_codelet_check_deprecated_fields(task->cl);
  360. if (task->cl)
  361. {
  362. if (task->cl->model)
  363. _starpu_load_perfmodel(task->cl->model);
  364. if (task->cl->power_model)
  365. _starpu_load_perfmodel(task->cl->power_model);
  366. }
  367. struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
  368. _starpu_increment_nsubmitted_tasks();
  369. _STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
  370. j->submitted = 1;
  371. if (task->cl)
  372. {
  373. /* This would be done by data dependencies checking */
  374. unsigned i;
  375. for (i=0 ; i<task->cl->nbuffers ; i++)
  376. {
  377. j->ordered_buffers[i].handle = j->task->handles[i];
  378. j->ordered_buffers[i].mode = j->task->cl->modes[i];
  379. }
  380. }
  381. _STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
  382. return _starpu_push_task(j);
  383. }
/*
 * worker->sched_mutex must be locked when calling this function.
 */
/* Submit a data-conversion task directly onto the local queue of worker
 * @workerid, bypassing the scheduler and dependency checking. The task must
 * have a codelet and be pinned to a specific worker. Always returns 0. */
int _starpu_task_submit_conversion_task(struct starpu_task *task,
					unsigned int workerid)
{
	STARPU_ASSERT(task->cl);
	STARPU_ASSERT(task->execute_on_a_specific_worker);

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	/* We should factorize that */
	if (task->cl->model)
		_starpu_load_perfmodel(task->cl->model);

	if (task->cl->power_model)
		_starpu_load_perfmodel(task->cl->power_model);

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	_starpu_increment_nsubmitted_tasks();
	_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	j->submitted = 1;
	/* No dependency can block this task: account it as ready right away. */
	_starpu_increment_nready_tasks();

	/* Fill in what data-dependency checking would normally provide. */
	unsigned i;
	for (i=0 ; i<task->cl->nbuffers ; i++)
	{
		j->ordered_buffers[i].handle = j->task->handles[i];
		j->ordered_buffers[i].mode = j->task->cl->modes[i];
	}

	_STARPU_LOG_IN();

	task->status = STARPU_TASK_READY;
	_starpu_profiling_set_task_push_start_time(task);

	/* Possibly start fetching the task's input on the worker's memory
	 * node ahead of execution. */
	unsigned node = starpu_worker_get_memory_node(workerid);
	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, node);

	struct _starpu_worker *worker;
	worker = _starpu_get_worker_struct(workerid);
	starpu_task_list_push_front(&worker->local_tasks, task);

	_starpu_profiling_set_task_push_end_time(task);

	_STARPU_LOG_OUT();
	_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
	return 0;
}
  424. void starpu_codelet_init(struct starpu_codelet *cl)
  425. {
  426. memset(cl, 0, sizeof(struct starpu_codelet));
  427. }
  428. void starpu_display_codelet_stats(struct starpu_codelet *cl)
  429. {
  430. unsigned worker;
  431. unsigned nworkers = starpu_worker_get_count();
  432. if (cl->name)
  433. fprintf(stderr, "Statistics for codelet %s\n", cl->name);
  434. else if (cl->model && cl->model->symbol)
  435. fprintf(stderr, "Statistics for codelet %s\n", cl->model->symbol);
  436. unsigned long total = 0;
  437. for (worker = 0; worker < nworkers; worker++)
  438. total += cl->per_worker_stats[worker];
  439. for (worker = 0; worker < nworkers; worker++)
  440. {
  441. char name[32];
  442. starpu_worker_get_name(worker, name, 32);
  443. fprintf(stderr, "\t%s -> %lu / %lu (%2.2f %%)\n", name, cl->per_worker_stats[worker], total, (100.0f*cl->per_worker_stats[worker])/total);
  444. }
  445. }
  446. /*
  447. * We wait for all the tasks that have already been submitted. Note that a
  448. * regenerable is not considered finished until it was explicitely set as
  449. * non-regenerale anymore (eg. from a callback).
  450. */
  451. int starpu_task_wait_for_all(void)
  452. {
  453. if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
  454. return -EDEADLK;
  455. _STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
  456. _STARPU_TRACE_TASK_WAIT_FOR_ALL;
  457. while (nsubmitted > 0)
  458. _STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);
  459. _STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
  460. return 0;
  461. }
  462. /*
  463. * We wait until there is no ready task any more (i.e. StarPU will not be able
  464. * to progress any more).
  465. */
  466. int starpu_task_wait_for_no_ready(void)
  467. {
  468. if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
  469. return -EDEADLK;
  470. _STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
  471. _STARPU_TRACE_TASK_WAIT_FOR_ALL;
  472. while (nready > 0)
  473. _STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);
  474. _STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
  475. return 0;
  476. }
  477. void _starpu_decrement_nsubmitted_tasks(void)
  478. {
  479. _STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
  480. if (--nsubmitted == 0)
  481. _STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
  482. _STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);
  483. _STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
  484. }
  485. static void _starpu_increment_nsubmitted_tasks(void)
  486. {
  487. _STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
  488. nsubmitted++;
  489. _STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);
  490. _STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
  491. }
  492. void _starpu_increment_nready_tasks(void)
  493. {
  494. _STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
  495. nready++;
  496. _STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
  497. }
  498. void _starpu_decrement_nready_tasks(void)
  499. {
  500. _STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
  501. if (--nready == 0)
  502. _STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
  503. _STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
  504. }
/* Create the thread-local-storage key used to track the task each thread is
 * currently handling. Must run before any _starpu_set_current_task call. */
void _starpu_initialize_current_task_key(void)
{
	pthread_key_create(&current_task_key, NULL);
}
/* Return the task currently executed by the worker, or NULL if this is called
 * either from a thread that is not a task or simply because there is no task
 * being executed at the moment. */
struct starpu_task *starpu_task_get_current(void)
{
	return (struct starpu_task *) pthread_getspecific(current_task_key);
}
/* Record @task as the task currently handled by the calling thread (may be
 * NULL to clear it); read back through starpu_task_get_current. */
void _starpu_set_current_task(struct starpu_task *task)
{
	pthread_setspecific(current_task_key, task);
}
  520. /*
  521. * Returns 0 if tasks does not use any multiformat handle, 1 otherwise.
  522. */
  523. int
  524. _starpu_task_uses_multiformat_handles(struct starpu_task *task)
  525. {
  526. unsigned i;
  527. for (i = 0; i < task->cl->nbuffers; i++)
  528. {
  529. if (_starpu_data_is_multiformat_handle(task->handles[i]))
  530. return 1;
  531. }
  532. return 0;
  533. }
  534. /*
  535. * Checks whether the given handle needs to be converted in order to be used on
  536. * the node given as the second argument.
  537. */
  538. int
  539. _starpu_handle_needs_conversion_task(starpu_data_handle_t handle,
  540. unsigned int node)
  541. {
  542. enum starpu_node_kind node_kind;
  543. node_kind = starpu_node_get_kind(node);
  544. /*
  545. * Here, we assume that CUDA devices and OpenCL devices use the
  546. * same data structure. A conversion is only needed when moving
  547. * data from a CPU to a GPU, or the other way around.
  548. */
  549. switch (node_kind)
  550. {
  551. case STARPU_CPU_RAM:
  552. switch(starpu_node_get_kind(handle->mf_node))
  553. {
  554. case STARPU_CPU_RAM:
  555. return 0;
  556. case STARPU_CUDA_RAM: /* Fall through */
  557. case STARPU_OPENCL_RAM:
  558. return 1;
  559. case STARPU_SPU_LS: /* Not supported */
  560. default:
  561. STARPU_ASSERT(0);
  562. }
  563. break;
  564. case STARPU_CUDA_RAM: /* Fall through */
  565. case STARPU_OPENCL_RAM:
  566. switch(starpu_node_get_kind(handle->mf_node))
  567. {
  568. case STARPU_CPU_RAM:
  569. return 1;
  570. case STARPU_CUDA_RAM:
  571. case STARPU_OPENCL_RAM:
  572. return 0;
  573. case STARPU_SPU_LS: /* Not supported */
  574. default:
  575. STARPU_ASSERT(0);
  576. }
  577. break;
  578. case STARPU_SPU_LS: /* Not supported */
  579. default:
  580. STARPU_ASSERT(0);
  581. }
  582. }
/* Return the nimpl-th CPU implementation of @cl. Only valid once the
 * deprecated cpu_func field has been folded into cpu_funcs (see
 * _starpu_codelet_check_deprecated_fields). */
starpu_cpu_func_t _starpu_task_get_cpu_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	STARPU_ASSERT(cl->cpu_func == STARPU_MULTIPLE_CPU_IMPLEMENTATIONS);
	return cl->cpu_funcs[nimpl];
}
/* Return the nimpl-th CUDA implementation of @cl. Only valid once the
 * deprecated cuda_func field has been folded into cuda_funcs. */
starpu_cuda_func_t _starpu_task_get_cuda_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	STARPU_ASSERT(cl->cuda_func == STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS);
	return cl->cuda_funcs[nimpl];
}
/* Return the nimpl-th OpenCL implementation of @cl. Only valid once the
 * deprecated opencl_func field has been folded into opencl_funcs. */
starpu_opencl_func_t _starpu_task_get_opencl_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	STARPU_ASSERT(cl->opencl_func == STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS);
	return cl->opencl_funcs[nimpl];
}
/* Return the nimpl-th Gordon implementation of @cl. Only valid once the
 * deprecated gordon_func field has been folded into gordon_funcs. */
starpu_gordon_func_t _starpu_task_get_gordon_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	STARPU_ASSERT(cl->gordon_func == STARPU_MULTIPLE_GORDON_IMPLEMENTATIONS);
	return cl->gordon_funcs[nimpl];
}