task.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2013 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
  5. * Copyright (C) 2011 Télécom-SudParis
  6. * Copyright (C) 2011 INRIA
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #include <starpu.h>
  20. #include <starpu_profiling.h>
  21. #include <core/workers.h>
  22. #include <core/sched_ctx.h>
  23. #include <core/jobs.h>
  24. #include <core/task.h>
  25. #include <core/task_bundle.h>
  26. #include <common/config.h>
  27. #include <common/utils.h>
  28. #include <profiling/profiling.h>
  29. #include <profiling/bound.h>
  30. #include <math.h>
  31. #include <string.h>
  32. #include <core/debug.h>
  33. /* XXX this should be reinitialized when StarPU is shutdown (or we should make
  34. * sure that no task remains !) */
  35. /* TODO we could make this hierarchical to avoid contention ? */
  36. static starpu_pthread_cond_t submitted_cond = STARPU_PTHREAD_COND_INITIALIZER;
  37. static starpu_pthread_mutex_t submitted_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
  38. static long int nsubmitted = 0, nready = 0;
  39. static void _starpu_increment_nsubmitted_tasks(void);
  40. /* This key stores the task currently handled by the thread, note that we
  41. * cannot use the worker structure to store that information because it is
  42. * possible that we have a task with a NULL codelet, which means its callback
  43. * could be executed by a user thread as well. */
  44. static starpu_pthread_key_t current_task_key;
  45. void starpu_task_init(struct starpu_task *task)
  46. {
  47. /* TODO: memcpy from a template instead? benchmark it */
  48. STARPU_ASSERT(task);
  49. /* As most of the fields must be initialised at NULL, let's put 0
  50. * everywhere */
  51. memset(task, 0, sizeof(struct starpu_task));
  52. task->sequential_consistency = 1;
  53. /* Now we can initialise fields which recquire custom value */
  54. #if STARPU_DEFAULT_PRIO != 0
  55. task->priority = STARPU_DEFAULT_PRIO;
  56. #endif
  57. task->detach = 1;
  58. #if STARPU_TASK_INVALID != 0
  59. task->status = STARPU_TASK_INVALID;
  60. #endif
  61. task->predicted = NAN;
  62. task->predicted_transfer = NAN;
  63. task->magic = 42;
  64. task->sched_ctx = _starpu_get_initial_sched_ctx()->id;
  65. task->flops = 0.0;
  66. task->scheduled = 0;
  67. task->dyn_handles = NULL;
  68. task->dyn_interfaces = NULL;
  69. }
  70. /* Free all the ressources allocated for a task, without deallocating the task
  71. * structure itself (this is required for statically allocated tasks).
  72. * All values previously set by the user, like codelet and handles, remain
  73. * unchanged */
  74. void starpu_task_clean(struct starpu_task *task)
  75. {
  76. STARPU_ASSERT(task);
  77. /* If a buffer was allocated to store the profiling info, we free it. */
  78. if (task->profiling_info)
  79. {
  80. free(task->profiling_info);
  81. task->profiling_info = NULL;
  82. }
  83. /* If case the task is (still) part of a bundle */
  84. starpu_task_bundle_t bundle = task->bundle;
  85. if (bundle)
  86. starpu_task_bundle_remove(bundle, task);
  87. if (task->dyn_handles)
  88. {
  89. free(task->dyn_handles);
  90. task->dyn_handles = NULL;
  91. free(task->dyn_interfaces);
  92. task->dyn_interfaces = NULL;
  93. }
  94. struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;
  95. if (j)
  96. {
  97. _starpu_job_destroy(j);
  98. task->starpu_private = NULL;
  99. }
  100. }
  101. struct starpu_task * __attribute__((malloc)) starpu_task_create(void)
  102. {
  103. struct starpu_task *task;
  104. task = (struct starpu_task *) malloc(sizeof(struct starpu_task));
  105. STARPU_ASSERT(task);
  106. starpu_task_init(task);
  107. /* Dynamically allocated tasks are destroyed by default */
  108. task->destroy = 1;
  109. return task;
  110. }
  111. /* Free the ressource allocated during starpu_task_create. This function can be
  112. * called automatically after the execution of a task by setting the "destroy"
  113. * flag of the starpu_task structure (default behaviour). Calling this function
  114. * on a statically allocated task results in an undefined behaviour. */
  115. void _starpu_task_destroy(struct starpu_task *task)
  116. {
  117. /* If starpu_task_destroy is called in a callback, we just set the destroy
  118. flag. The task will be destroyed after the callback returns */
  119. if (task == starpu_task_get_current()
  120. && _starpu_get_local_worker_status() == STATUS_CALLBACK)
  121. {
  122. task->destroy = 1;
  123. }
  124. else
  125. {
  126. starpu_task_clean(task);
  127. /* TODO handle the case of task with detach = 1 and destroy = 1 */
  128. /* TODO handle the case of non terminated tasks -> return -EINVAL */
  129. /* Does user want StarPU release cl_arg ? */
  130. if (task->cl_arg_free)
  131. free(task->cl_arg);
  132. free(task);
  133. }
  134. }
/* Public entry point: destroy a dynamically allocated task.  Tasks with both
 * destroy = 1 and detach = 1 are freed automatically by StarPU, so calling
 * this function on them would lead to a double free — hence the assert. */
void starpu_task_destroy(struct starpu_task *task)
{
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(!task->destroy || !task->detach, "starpu_task_destroy must not be called for task with destroy = 1 and detach = 1");
	_starpu_task_destroy(task);
}
/* Block until the given (non-detached, non-synchronous) task terminates.
 * Returns 0 on success, -EINVAL for detached or synchronous tasks, and
 * -EDEADLK when the calling context may not perform blocking calls. */
int starpu_task_wait(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);

	STARPU_ASSERT_MSG(!task->detach, "starpu_task_wait can only be called on tasks with detach = 0");

	/* Detached tasks cannot be waited for; synchronous tasks were
	 * already waited for at submission time. */
	if (task->detach || task->synchronous)
	{
		_STARPU_DEBUG("Task is detached or asynchronous. Waiting returns immediately\n");
		_STARPU_LOG_OUT_TAG("einval");
		return -EINVAL;
	}

	/* e.g. blocking from within a worker would deadlock StarPU */
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
	{
		_STARPU_LOG_OUT_TAG("edeadlk");
		return -EDEADLK;
	}

	struct _starpu_job *j = (struct _starpu_job *)task->starpu_private;

	_starpu_wait_job(j);

	/* as this is a synchronous wait, the liberation of the job
	   structure was deferred */
	if (task->destroy)
		_starpu_task_destroy(task);

	_STARPU_LOG_OUT();
	return 0;
}
  166. struct _starpu_job *_starpu_get_job_associated_to_task(struct starpu_task *task)
  167. {
  168. STARPU_ASSERT(task);
  169. if (!task->starpu_private)
  170. {
  171. struct _starpu_job *j = _starpu_job_create(task);
  172. task->starpu_private = j;
  173. }
  174. return (struct _starpu_job *)task->starpu_private;
  175. }
/* Account for a newly submitted job and hand it over to dependency
 * enforcement and scheduling.
 * NB in case we have a regenerable task, it is possible that the job was
 * already counted.
 * NOTE(review): j->sync_mutex is locked here with no matching unlock in
 * this function; presumably released inside
 * _starpu_enforce_deps_and_schedule — confirm. */
int _starpu_submit_job(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	_STARPU_LOG_IN();
	/* notify bound computation of a new task */
	_starpu_bound_record(j);

	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);

#ifdef STARPU_USE_SC_HYPERVISOR
	/* Let the hypervisor know about the submission, except for tasks of
	 * the initial or invalid contexts. */
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);
	if(sched_ctx != NULL && j->task->sched_ctx != _starpu_get_initial_sched_ctx()->id && j->task->sched_ctx != STARPU_NMAX_SCHED_CTXS
	   && sched_ctx->perf_counters != NULL)
	{
		_starpu_compute_buffers_footprint(j->task->cl->model, STARPU_CPU_DEFAULT, 0, j);
		sched_ctx->perf_counters->notify_submitted_job(j->task, j->footprint);
	}
#endif

	/* We retain handle reference count */
	if (task->cl)
	{
		unsigned i;
		for (i=0; i<task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
			_starpu_spin_lock(&handle->header_lock);
			handle->busy_count++;
			_starpu_spin_unlock(&handle->header_lock);
		}
	}

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);

	/* Need to atomically set submitted to 1 and check dependencies, since
	 * this is concurrent with _starpu_notify_cg */
	j->terminated = 0;

	/* submitted == 2 marks a job that had already been submitted before
	 * (see the NB above about regenerable tasks). */
	if (!j->submitted)
		j->submitted = 1;
	else
		j->submitted = 2;

	int ret = _starpu_enforce_deps_and_schedule(j);

	_STARPU_LOG_OUT();
	return ret;
}
/* Note: this is racy, so valgrind would complain. But since we'll always put
 * the same values, this is not a problem. */
/* Migrate the deprecated single-function fields (cpu_func, cuda_func,
 * opencl_func) into the corresponding *_funcs[] arrays, and fill in a
 * default "where" mask from whichever implementations are present when the
 * user left it unset.  Safe to call repeatedly on the same codelet. */
void _starpu_codelet_check_deprecated_fields(struct starpu_codelet *cl)
{
	if (!cl)
		return;

	/* Remember whether "where" was user-provided before we touch it,
	 * so that the device masks below are only ORed in when it wasn't. */
	int is_where_unset = cl->where == 0;

	/* Check deprecated and unset fields (where, <device>_func,
	 * <device>_funcs) */

	/* CPU */
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS && cl->cpu_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cpu_func and cpu_funcs are set. Ignoring cpu_func.\n");
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_func && cl->cpu_func != STARPU_MULTIPLE_CPU_IMPLEMENTATIONS)
	{
		/* Only the deprecated field is set: move it to funcs[0]. */
		cl->cpu_funcs[0] = cl->cpu_func;
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_funcs[0] && cl->cpu_func == 0)
	{
		cl->cpu_func = STARPU_MULTIPLE_CPU_IMPLEMENTATIONS;
	}
	if (cl->cpu_funcs[0] && is_where_unset)
	{
		cl->where |= STARPU_CPU;
	}

	/* CUDA */
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS && cl->cuda_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both cuda_func and cuda_funcs are set. Ignoring cuda_func.\n");
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_func && cl->cuda_func != STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS)
	{
		cl->cuda_funcs[0] = cl->cuda_func;
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_funcs[0] && cl->cuda_func == 0)
	{
		cl->cuda_func = STARPU_MULTIPLE_CUDA_IMPLEMENTATIONS;
	}
	if (cl->cuda_funcs[0] && is_where_unset)
	{
		cl->where |= STARPU_CUDA;
	}

	/* OpenCL */
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS && cl->opencl_funcs[0])
	{
		_STARPU_DISP("[warning] [struct starpu_codelet] both opencl_func and opencl_funcs are set. Ignoring opencl_func.\n");
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_func && cl->opencl_func != STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS)
	{
		cl->opencl_funcs[0] = cl->opencl_func;
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_funcs[0] && cl->opencl_func == 0)
	{
		cl->opencl_func = STARPU_MULTIPLE_OPENCL_IMPLEMENTATIONS;
	}
	if (cl->opencl_funcs[0] && is_where_unset)
	{
		cl->where |= STARPU_OPENCL;
	}

	/* MIC and SCC never had deprecated single-function fields; only the
	 * "where" mask may need filling in. */
	if (cl->mic_funcs[0] && is_where_unset)
	{
		cl->where |= STARPU_MIC;
	}

	if (cl->scc_funcs[0] && is_where_unset)
	{
		cl->where |= STARPU_SCC;
	}

	/* A named CPU function can be run on MIC and SCC devices. */
	if (cl->cpu_funcs_name[0] && is_where_unset)
	{
		cl->where |= STARPU_MIC|STARPU_SCC;
	}
}
/* Migrate the deprecated task->buffers[] array into task->handles[] and the
 * codelet's modes[].  Having both buffers[i] and handles[i] set is fatal. */
void _starpu_task_check_deprecated_fields(struct starpu_task *task)
{
	if (task->cl)
	{
		unsigned i;
		for(i=0; i<task->cl->nbuffers ; i++)
		{
			if (task->buffers[i].handle && task->handles[i])
			{
				/* NOTE(review): despite the "Ignoring" wording of
				 * the warning, this path always aborts after the
				 * assert — confirm this is intended. */
				_STARPU_DISP("[warning][struct starpu_task] task->buffers[%u] and task->handles[%u] both set. Ignoring task->buffers[%u] ?\n", i, i, i);
				STARPU_ASSERT(task->buffers[i].mode == task->cl->modes[i]);
				STARPU_ABORT();
			}
			if (task->buffers[i].handle)
			{
				/* Copy the deprecated field into the new ones. */
				task->handles[i] = task->buffers[i].handle;
				task->cl->modes[i] = task->buffers[i].mode;
			}
		}
	}
}
/* application should submit new tasks to StarPU through this function */
/* Returns 0 on success, -EDEADLK when a synchronous submission is attempted
 * from a context that may not block, -ENODEV when no worker can execute the
 * codelet. */
int starpu_task_submit(struct starpu_task *task)
{
	_STARPU_LOG_IN();
	STARPU_ASSERT(task);
	STARPU_ASSERT_MSG(task->magic == 42, "Tasks must be created with starpu_task_create, or initialized with starpu_task_init.");

	int ret;
	unsigned is_sync = task->synchronous;
	starpu_task_bundle_t bundle = task->bundle;
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned set_sched_ctx = STARPU_NMAX_SCHED_CTXS;

	/* internally, StarPU manipulates a struct _starpu_job * which is a wrapper around a
	 * task structure, it is possible that this job structure was already
	 * allocated. */
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	/* When several contexts exist, route tasks still bound to the initial
	 * context to the context associated with the calling thread, if any. */
	if (task->sched_ctx == _starpu_get_initial_sched_ctx()->id && nsched_ctxs != 1 && !j->internal)
	{
		set_sched_ctx = starpu_sched_ctx_get_context();
		if (set_sched_ctx != STARPU_NMAX_SCHED_CTXS)
			task->sched_ctx = set_sched_ctx;
	}

	if (is_sync)
	{
		/* Perhaps it is not possible to submit a synchronous
		 * (blocking) task */
		if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		{
			_STARPU_LOG_OUT_TAG("EDEADLK");
			return -EDEADLK;
		}

		task->detach = 0;
	}

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		unsigned i;

		/* Check buffers */
		if (task->dyn_handles == NULL)
			STARPU_ASSERT_MSG(task->cl->nbuffers <= STARPU_NMAXBUFS, "Codelet %p has too many buffers (%d vs max %d)", task->cl, task->cl->nbuffers, STARPU_NMAXBUFS);

		if (task->dyn_handles)
		{
			/* NOTE(review): this malloc result is unchecked, and the
			 * allocation is leaked on the -ENODEV paths below —
			 * confirm and fix upstream. */
			task->dyn_interfaces = malloc(task->cl->nbuffers * sizeof(void *));
		}

		for (i = 0; i < task->cl->nbuffers; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);

			/* Make sure handles are not partitioned */
			STARPU_ASSERT_MSG(handle->nchildren == 0, "only unpartitioned data can be used in a task");

			/* Provide the home interface for now if any,
			 * for can_execute hooks */
			if (handle->home_node != -1)
				_STARPU_TASK_SET_INTERFACE(task, starpu_data_get_interface_on_node(handle, handle->home_node), i);
		}

		/* Check the type of worker(s) required by the task exist */
		if (!_starpu_worker_exists(task))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		/* In case we require that a task should be explicitely
		 * executed on a specific worker, we make sure that the worker
		 * is able to execute this task. */
		if (task->execute_on_a_specific_worker && !starpu_combined_worker_can_execute_task(task->workerid, task, 0))
		{
			_STARPU_LOG_OUT_TAG("ENODEV");
			return -ENODEV;
		}

		_starpu_detect_implicit_data_deps(task);

		if (task->cl->model && task->cl->model->symbol)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model && task->cl->power_model->symbol)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	if (bundle)
	{
		/* We need to make sure that models for other tasks of the
		 * bundle are also loaded, so the scheduler can estimate the
		 * duration of the whole bundle */
		STARPU_PTHREAD_MUTEX_LOCK(&bundle->mutex);

		struct _starpu_task_bundle_entry *entry;
		entry = bundle->list;

		while (entry)
		{
			if (entry->task->cl->model && entry->task->cl->model->symbol)
				_starpu_load_perfmodel(entry->task->cl->model);

			if (entry->task->cl->power_model && entry->task->cl->power_model->symbol)
				_starpu_load_perfmodel(entry->task->cl->power_model);

			entry = entry->next;
		}

		STARPU_PTHREAD_MUTEX_UNLOCK(&bundle->mutex);
	}

	/* If profiling is activated, we allocate a structure to store the
	 * appropriate info. */
	struct starpu_profiling_task_info *info;
	int profiling = starpu_profiling_status_get();
	info = _starpu_allocate_profiling_info_if_needed(task);
	task->profiling_info = info;

	/* The task is considered as block until we are sure there remains not
	 * dependency. */
	task->status = STARPU_TASK_BLOCKED;

	if (profiling)
		_starpu_clock_gettime(&info->submit_time);

	ret = _starpu_submit_job(j);

	if (is_sync)
	{
		/* A synchronous submission only returns once the task has
		 * completed; free it here if it asked for it. */
		_starpu_wait_job(j);
		if (task->destroy)
			_starpu_task_destroy(task);
	}

	_STARPU_LOG_OUT();
	return ret;
}
/* Submit a StarPU-internal task: marked internal so that it is not rerouted
 * to the caller's scheduling context by starpu_task_submit(). */
int _starpu_task_submit_internally(struct starpu_task *task)
{
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	j->internal = 1;
	return starpu_task_submit(task);
}
/* application should submit new tasks to StarPU through this function */
/* Same as starpu_task_submit(), but forces the target scheduling context. */
int starpu_task_submit_to_ctx(struct starpu_task *task, unsigned sched_ctx_id)
{
	task->sched_ctx = sched_ctx_id;
	return starpu_task_submit(task);
}
/* The StarPU core can submit tasks directly to the scheduler or a worker,
 * skipping dependencies completely (when it knows what it is doing). */
/* Returns the result of pushing the job to the scheduler. */
int _starpu_task_submit_nodeps(struct starpu_task *task)
{
	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	if (task->cl)
	{
		if (task->cl->model)
			_starpu_load_perfmodel(task->cl->model);

		if (task->cl->power_model)
			_starpu_load_perfmodel(task->cl->power_model);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	j->submitted = 1;

	if (task->cl)
	{
		/* This would be done by data dependencies checking */
		unsigned i;
		for (i=0 ; i<task->cl->nbuffers ; i++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(j->task, i);
			_STARPU_JOB_SET_ORDERED_BUFFER_HANDLE(j, handle, i);
			enum starpu_data_access_mode mode = STARPU_CODELET_GET_MODE(j->task->cl, i);
			_STARPU_JOB_SET_ORDERED_BUFFER_MODE(j, mode, i);
		}
	}

	STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

	return _starpu_push_task(j);
}
/*
 * worker->sched_mutex must be locked when calling this function.
 */
/* Submit a data-conversion task straight onto the local queue of the given
 * worker, bypassing the scheduler and dependency tracking entirely. */
int _starpu_task_submit_conversion_task(struct starpu_task *task,
					unsigned int workerid)
{
	/* Conversion tasks always have a codelet and a fixed target worker. */
	STARPU_ASSERT(task->cl);
	STARPU_ASSERT(task->execute_on_a_specific_worker);

	_starpu_task_check_deprecated_fields(task);
	_starpu_codelet_check_deprecated_fields(task->cl);

	/* We should factorize that */
	if (task->cl->model)
		_starpu_load_perfmodel(task->cl->model);

	if (task->cl->power_model)
		_starpu_load_perfmodel(task->cl->power_model);

	/* We retain handle reference count */
	unsigned i;
	for (i=0; i<task->cl->nbuffers; i++)
	{
		starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(task, i);
		_starpu_spin_lock(&handle->header_lock);
		handle->busy_count++;
		_starpu_spin_unlock(&handle->header_lock);
	}

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	_starpu_increment_nsubmitted_tasks();
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(j->task->sched_ctx);

	STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
	j->submitted = 1;
	_starpu_increment_nready_tasks();

	/* Fill in ordered buffers directly, since dependency checking (which
	 * normally does this) is skipped for conversion tasks. */
	for (i=0 ; i<task->cl->nbuffers ; i++)
	{
		starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(j->task, i);
		_STARPU_JOB_SET_ORDERED_BUFFER_HANDLE(j, handle, i);
		enum starpu_data_access_mode mode = STARPU_CODELET_GET_MODE(j->task->cl, i);
		_STARPU_JOB_SET_ORDERED_BUFFER_MODE(j, mode, i);
	}

	_STARPU_LOG_IN();

	task->status = STARPU_TASK_READY;
	_starpu_profiling_set_task_push_start_time(task);

	/* Optionally prefetch the task's input data onto the worker's node. */
	unsigned node = starpu_worker_get_memory_node(workerid);
	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, node);

	struct _starpu_worker *worker;
	worker = _starpu_get_worker_struct(workerid);
	starpu_task_list_push_back(&worker->local_tasks, task);

	_starpu_profiling_set_task_push_end_time(task);

	_STARPU_LOG_OUT();
	STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);
	return 0;
}
  528. void starpu_codelet_init(struct starpu_codelet *cl)
  529. {
  530. memset(cl, 0, sizeof(struct starpu_codelet));
  531. }
  532. void starpu_codelet_display_stats(struct starpu_codelet *cl)
  533. {
  534. unsigned worker;
  535. unsigned nworkers = starpu_worker_get_count();
  536. if (cl->name)
  537. fprintf(stderr, "Statistics for codelet %s\n", cl->name);
  538. else if (cl->model && cl->model->symbol)
  539. fprintf(stderr, "Statistics for codelet %s\n", cl->model->symbol);
  540. unsigned long total = 0;
  541. for (worker = 0; worker < nworkers; worker++)
  542. total += cl->per_worker_stats[worker];
  543. for (worker = 0; worker < nworkers; worker++)
  544. {
  545. char name[32];
  546. starpu_worker_get_name(worker, name, 32);
  547. fprintf(stderr, "\t%s -> %lu / %lu (%2.2f %%)\n", name, cl->per_worker_stats[worker], total, (100.0f*cl->per_worker_stats[worker])/total);
  548. }
  549. }
/*
 * We wait for all the tasks that have already been submitted. Note that a
 * regenerable task is not considered finished until it was explicitely set as
 * non-regenerable anymore (eg. from a callback).
 */
/* Returns 0 on success, -EDEADLK when called from a context that may not
 * perform blocking calls. */
int starpu_task_wait_for_all(void)
{
	unsigned nsched_ctxs = _starpu_get_nsched_ctxs();
	unsigned sched_ctx_id = nsched_ctxs == 1 ? 0 : starpu_sched_ctx_get_context();

	/* if there is no indication about which context to wait,
	   we wait for all tasks submitted to starpu */
	if (sched_ctx_id == STARPU_NMAX_SCHED_CTXS)
	{
		_STARPU_DEBUG("Waiting for all tasks\n");
		if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
			return -EDEADLK;

		/* Block on the global submitted-task counter, woken by
		 * _starpu_decrement_nsubmitted_tasks() when it hits zero. */
		STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

		_STARPU_TRACE_TASK_WAIT_FOR_ALL;

		while (nsubmitted > 0)
			STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);

		STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);

#ifdef HAVE_AYUDAME_H
		if (AYU_event) AYU_event(AYU_BARRIER, 0, NULL);
#endif
	}
	else
	{
		_STARPU_DEBUG("Waiting for tasks submitted to context %u\n", sched_ctx_id);
		_starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx_id);
#ifdef HAVE_AYUDAME_H
		/* TODO: improve Temanejo into knowing about contexts ... */
		if (AYU_event) AYU_event(AYU_BARRIER, 0, NULL);
#endif
	}
	return 0;
}
/* Wait until every task submitted to the given scheduling context has
 * terminated. */
int starpu_task_wait_for_all_in_ctx(unsigned sched_ctx)
{
	_starpu_wait_for_all_tasks_of_sched_ctx(sched_ctx);
#ifdef HAVE_AYUDAME_H
	if (AYU_event) AYU_event(AYU_BARRIER, 0, NULL);
#endif
	return 0;
}
/*
 * We wait until there is no ready task any more (i.e. StarPU will not be able
 * to progress any more).
 */
/* Returns 0 on success, -EDEADLK when blocking is not allowed here. */
int starpu_task_wait_for_no_ready(void)
{
	if (STARPU_UNLIKELY(!_starpu_worker_may_perform_blocking_calls()))
		return -EDEADLK;

	/* Woken by _starpu_decrement_nready_tasks() when nready reaches 0. */
	STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	_STARPU_TRACE_TASK_WAIT_FOR_ALL;

	while (nready > 0)
		STARPU_PTHREAD_COND_WAIT(&submitted_cond, &submitted_mutex);

	STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);

	return 0;
}
/* Decrement the global submitted-task counter; when it reaches zero, wake
 * waiters of starpu_task_wait_for_all() and, if no more submissions are
 * expected, clear the machine's running flag so drivers can terminate. */
void _starpu_decrement_nsubmitted_tasks(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	if (--nsubmitted == 0)
	{
		if (!config->submitting)
		{
			/* Helgrind/DRD annotations for the racy running flag */
			ANNOTATE_HAPPENS_AFTER(&config->running);
			config->running = 0;
			ANNOTATE_HAPPENS_BEFORE(&config->running);
		}
		STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
	}

	_STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);

	STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Announce that no more tasks will be submitted; if none are pending, stop
 * the drivers immediately, otherwise the last task completion will (via
 * _starpu_decrement_nsubmitted_tasks). */
void
starpu_drivers_request_termination(void)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
	config->submitting = 0;
	if (nsubmitted == 0)
	{
		/* Helgrind/DRD annotations for the racy running flag */
		ANNOTATE_HAPPENS_AFTER(&config->running);
		config->running = 0;
		ANNOTATE_HAPPENS_BEFORE(&config->running);
		STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Increment the global submitted-task counter under the shared mutex. */
static void _starpu_increment_nsubmitted_tasks(void)
{
	STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	nsubmitted++;

	_STARPU_TRACE_UPDATE_TASK_CNT(nsubmitted);

	STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Snapshot of the submitted-task counter.  Read without taking
 * submitted_mutex, so the value may be stale by the time it is returned. */
int starpu_task_nsubmitted(void)
{
	return nsubmitted;
}
/* Increment the ready-task counter under the shared mutex. */
void _starpu_increment_nready_tasks(void)
{
	STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);
	nready++;
	STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Decrement the ready-task counter; wake starpu_task_wait_for_no_ready()
 * waiters when it reaches zero. */
void _starpu_decrement_nready_tasks(void)
{
	STARPU_PTHREAD_MUTEX_LOCK(&submitted_mutex);

	if (--nready == 0)
		STARPU_PTHREAD_COND_BROADCAST(&submitted_cond);

	STARPU_PTHREAD_MUTEX_UNLOCK(&submitted_mutex);
}
/* Snapshot of the ready-task counter.  Read without taking submitted_mutex,
 * so the value may be stale by the time it is returned. */
int starpu_task_nready(void)
{
	return nready;
}
/* Create the thread-local key holding the task currently being executed
 * (no destructor: tasks are not owned by the key). */
void _starpu_initialize_current_task_key(void)
{
	STARPU_PTHREAD_KEY_CREATE(&current_task_key, NULL);
}
/* Return the task currently executed by the worker, or NULL if this is called
 * either from a thread that is not a task or simply because there is no task
 * being executed at the moment. */
struct starpu_task *starpu_task_get_current(void)
{
	return (struct starpu_task *) STARPU_PTHREAD_GETSPECIFIC(current_task_key);
}
/* Record the task the calling thread is about to execute (NULL to clear);
 * read back by starpu_task_get_current(). */
void _starpu_set_current_task(struct starpu_task *task)
{
	STARPU_PTHREAD_SETSPECIFIC(current_task_key, task);
}
  684. /*
  685. * Returns 0 if tasks does not use any multiformat handle, 1 otherwise.
  686. */
  687. int
  688. _starpu_task_uses_multiformat_handles(struct starpu_task *task)
  689. {
  690. unsigned i;
  691. for (i = 0; i < task->cl->nbuffers; i++)
  692. {
  693. if (_starpu_data_is_multiformat_handle(STARPU_TASK_GET_HANDLE(task, i)))
  694. return 1;
  695. }
  696. return 0;
  697. }
/*
 * Checks whether the given handle needs to be converted in order to be used on
 * the node given as the second argument.
 */
/* Thin wrapper: translate the node id into its kind and delegate. */
int
_starpu_handle_needs_conversion_task(starpu_data_handle_t handle,
				     unsigned int node)
{
	return _starpu_handle_needs_conversion_task_for_arch(handle, starpu_node_get_kind(node));
}
  708. int
  709. _starpu_handle_needs_conversion_task_for_arch(starpu_data_handle_t handle,
  710. enum starpu_node_kind node_kind)
  711. {
  712. /*
  713. * Here, we assume that CUDA devices and OpenCL devices use the
  714. * same data structure. A conversion is only needed when moving
  715. * data from a CPU to a GPU, or the other way around.
  716. */
  717. switch (node_kind)
  718. {
  719. case STARPU_CPU_RAM:
  720. switch(starpu_node_get_kind(handle->mf_node))
  721. {
  722. case STARPU_CPU_RAM:
  723. return 0;
  724. case STARPU_CUDA_RAM: /* Fall through */
  725. case STARPU_OPENCL_RAM:
  726. case STARPU_MIC_RAM:
  727. case STARPU_SCC_RAM:
  728. return 1;
  729. default:
  730. STARPU_ABORT();
  731. }
  732. break;
  733. case STARPU_CUDA_RAM: /* Fall through */
  734. case STARPU_OPENCL_RAM:
  735. case STARPU_MIC_RAM:
  736. case STARPU_SCC_RAM:
  737. switch(starpu_node_get_kind(handle->mf_node))
  738. {
  739. case STARPU_CPU_RAM:
  740. return 1;
  741. case STARPU_CUDA_RAM:
  742. case STARPU_OPENCL_RAM:
  743. case STARPU_MIC_RAM:
  744. case STARPU_SCC_RAM:
  745. return 0;
  746. default:
  747. STARPU_ABORT();
  748. }
  749. break;
  750. default:
  751. STARPU_ABORT();
  752. }
  753. /* that instruction should never be reached */
  754. return -EINVAL;
  755. }
/* Return the nimpl-th CPU implementation of the codelet. */
starpu_cpu_func_t _starpu_task_get_cpu_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->cpu_funcs[nimpl];
}
/* Return the nimpl-th CUDA implementation of the codelet. */
starpu_cuda_func_t _starpu_task_get_cuda_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->cuda_funcs[nimpl];
}
/* Return the nimpl-th OpenCL implementation of the codelet. */
starpu_opencl_func_t _starpu_task_get_opencl_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->opencl_funcs[nimpl];
}
/* Select which codelet implementation the task should run (stored on the
 * task's internal job). */
void starpu_task_set_implementation(struct starpu_task *task, unsigned impl)
{
	_starpu_get_job_associated_to_task(task)->nimpl = impl;
}
/* Return the codelet implementation index selected for this task. */
unsigned starpu_task_get_implementation(struct starpu_task *task)
{
	return _starpu_get_job_associated_to_task(task)->nimpl;
}
/* Return the nimpl-th MIC implementation of the codelet. */
starpu_mic_func_t _starpu_task_get_mic_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->mic_funcs[nimpl];
}
/* Return the nimpl-th SCC implementation of the codelet. */
starpu_scc_func_t _starpu_task_get_scc_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->scc_funcs[nimpl];
}
/* Return the name of the nimpl-th CPU implementation of the codelet. */
char *_starpu_task_get_cpu_name_nth_implementation(struct starpu_codelet *cl, unsigned nimpl)
{
	return cl->cpu_funcs_name[nimpl];
}