/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2008-2018 Université de Bordeaux
 * Copyright (C) 2011,2014 Inria
 * Copyright (C) 2010,2011,2013-2015,2017,2018 CNRS
 * Copyright (C) 2013 Thibaut Lambert
 * Copyright (C) 2011 Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#ifndef __JOBS_H__
#define __JOBS_H__

#include <starpu.h>

#include <semaphore.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <common/config.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <common/barrier.h>
#include <common/fxt.h>
#include <common/list.h>
#include <common/timing.h>
#include <common/utils.h>
#include <core/dependencies/tags.h>
#include <core/errorcheck.h>
#include <core/perfmodel/perfmodel.h>
#include <datawizard/datawizard.h>

#ifdef STARPU_USE_CUDA
#include <cuda.h>
#endif
  46. struct _starpu_worker;
  47. /* codelet function */
  48. typedef void (*_starpu_cl_func_t)(void **, void *);
  49. #define _STARPU_CPU_MAY_PERFORM(j) ((j)->task->where & STARPU_CPU)
  50. #define _STARPU_CUDA_MAY_PERFORM(j) ((j)->task->where & STARPU_CUDA)
  51. #define _STARPU_OPENCL_MAY_PERFORM(j) ((j)->task->where & STARPU_OPENCL)
  52. #define _STARPU_MIC_MAY_PERFORM(j) ((j)->task->where & STARPU_MIC)
  53. #define _STARPU_SCC_MAY_PERFORM(j) ((j)->task->where & STARPU_SCC)
  54. struct _starpu_data_descr
  55. {
  56. starpu_data_handle_t handle;
  57. enum starpu_data_access_mode mode;
  58. int node; /* This is the value actually chosen, only set by
  59. _starpu_fetch_task_input for coherency with
  60. __starpu_push_task_output */
  61. int index;
  62. int orderedindex; /* For this field the array is actually indexed by
  63. parameter order, and this provides the ordered
  64. index */
  65. };
  66. #ifdef STARPU_DEBUG
  67. MULTILIST_CREATE_TYPE(_starpu_job, all_submitted)
  68. #endif
  69. /* A job is the internal representation of a task. */
  70. struct _starpu_job
  71. {
  72. /* Each job is attributed a unique id. */
  73. unsigned long job_id;
  74. /* The task associated to that job */
  75. struct starpu_task *task;
  76. /* A task that this will unlock quickly, e.g. we are the pre_sync part
  77. * of a data acquisition, and the caller promised that data release will
  78. * happen immediately, so that the post_sync task will be started
  79. * immediately after. */
  80. struct _starpu_job *quick_next;
  81. /* These synchronization structures are used to wait for the job to be
  82. * available or terminated for instance. */
  83. starpu_pthread_mutex_t sync_mutex;
  84. starpu_pthread_cond_t sync_cond;
  85. /* To avoid deadlocks, we reorder the different buffers accessed to by
  86. * the task so that we always grab the rw-lock associated to the
  87. * handles in the same order. */
  88. struct _starpu_data_descr ordered_buffers[STARPU_NMAXBUFS];
  89. struct _starpu_task_wrapper_dlist dep_slots[STARPU_NMAXBUFS];
  90. struct _starpu_data_descr *dyn_ordered_buffers;
  91. struct _starpu_task_wrapper_dlist *dyn_dep_slots;
  92. /* If a tag is associated to the job, this points to the internal data
  93. * structure that describes the tag status. */
  94. struct _starpu_tag *tag;
  95. /* Maintain a list of all the completion groups that depend on the job.
  96. * */
  97. struct _starpu_cg_list job_successors;
  98. /* Task whose termination depends on this task */
  99. struct starpu_task *end_rdep;
  100. /* For tasks with cl==NULL but submitted with explicit data dependency,
  101. * the handle for this dependency, so as to remove the task from the
  102. * last_writer/readers */
  103. starpu_data_handle_t implicit_dep_handle;
  104. struct _starpu_task_wrapper_dlist implicit_dep_slot;
  105. /* Indicates whether the task associated to that job has already been
  106. * submitted to StarPU (1) or not (0) (using starpu_task_submit).
  107. * Becomes and stays 2 when the task is submitted several times.
  108. *
  109. * Protected by j->sync_mutex.
  110. */
  111. unsigned submitted:2;
  112. /* Indicates whether the task associated to this job is terminated or
  113. * not.
  114. *
  115. * Protected by j->sync_mutex.
  116. */
  117. unsigned terminated:2;
  118. #ifdef STARPU_OPENMP
  119. /* Job is a continuation or a regular task. */
  120. unsigned continuation;
  121. /* If 0, the prepared continuation is not resubmitted automatically
  122. * when going to sleep, if 1, the prepared continuation is immediately
  123. * resubmitted when going to sleep. */
  124. unsigned continuation_resubmit;
  125. /* Callback function called when:
  126. * - The continuation starpu task is ready to be submitted again if
  127. * continuation_resubmit = 0;
  128. * - The continuation starpu task has just been re-submitted if
  129. * continuation_resubmit = 1. */
  130. void (*continuation_callback_on_sleep)(void *arg);
  131. void *continuation_callback_on_sleep_arg;
  132. void (*omp_cleanup_callback)(void *arg);
  133. void *omp_cleanup_callback_arg;
  134. /* Job has been stopped at least once. */
  135. unsigned discontinuous;
  136. /* Cumulated execution time for discontinuous jobs */
  137. struct timespec cumulated_ts;
  138. /* Cumulated energy consumption for discontinuous jobs */
  139. double cumulated_energy_consumed;
  140. #endif
  141. /* The value of the footprint that identifies the job may be stored in
  142. * this structure. */
  143. uint32_t footprint;
  144. unsigned footprint_is_computed:1;
  145. /* Should that task appear in the debug tools ? (eg. the DAG generated
  146. * with dot) */
  147. unsigned exclude_from_dag:1;
  148. /* Is that task internal to StarPU? */
  149. unsigned internal:1;
  150. /* Did that task use sequential consistency for its data? */
  151. unsigned sequential_consistency:1;
  152. /* During the reduction of a handle, StarPU may have to submit tasks to
  153. * perform the reduction itself: those task should not be stalled while
  154. * other tasks are blocked until the handle has been properly reduced,
  155. * so we need a flag to differentiate them from "normal" tasks. */
  156. unsigned reduction_task:1;
  157. /* The implementation associated to the job */
  158. unsigned nimpl;
  159. /* Number of workers executing that task (>1 if the task is parallel)
  160. * */
  161. int task_size;
  162. /* In case we have assigned this job to a combined workerid */
  163. int combined_workerid;
  164. /* How many workers are currently running an alias of that job (for
  165. * parallel tasks only). */
  166. int active_task_alias_count;
  167. struct bound_task *bound_task;
  168. /* Parallel workers may have to synchronize before/after the execution of a parallel task. */
  169. starpu_pthread_barrier_t before_work_barrier;
  170. starpu_pthread_barrier_t after_work_barrier;
  171. unsigned after_work_busy_barrier;
  172. struct _starpu_graph_node *graph_node;
  173. #ifdef STARPU_DEBUG
  174. /* Linked-list of all jobs, for debugging */
  175. struct _starpu_job_multilist_all_submitted all_submitted;
  176. #endif
  177. };
  178. #ifdef STARPU_DEBUG
  179. MULTILIST_CREATE_INLINES(struct _starpu_job, _starpu_job, all_submitted)
  180. #endif
  181. void _starpu_job_init(void);
  182. void _starpu_job_fini(void);
  183. /* Create an internal struct _starpu_job *structure to encapsulate the task. */
  184. struct _starpu_job* _starpu_job_create(struct starpu_task *task) STARPU_ATTRIBUTE_MALLOC;
  185. /* Destroy the data structure associated to the job structure */
  186. void _starpu_job_destroy(struct _starpu_job *j);
  187. /* Test for the termination of the job */
  188. int _starpu_job_finished(struct _starpu_job *j);
  189. /* Wait for the termination of the job */
  190. void _starpu_wait_job(struct _starpu_job *j);
  191. #ifdef STARPU_OPENMP
  192. /* Test for the termination of the job */
  193. int _starpu_test_job_termination(struct _starpu_job *j);
  194. /* Prepare the job for accepting new dependencies before becoming a continuation. */
  195. void _starpu_job_prepare_for_continuation_ext(struct _starpu_job *j, unsigned continuation_resubmit,
  196. void (*continuation_callback_on_sleep)(void *arg), void *continuation_callback_on_sleep_arg);
  197. void _starpu_job_prepare_for_continuation(struct _starpu_job *j);
  198. void _starpu_job_set_omp_cleanup_callback(struct _starpu_job *j,
  199. void (*omp_cleanup_callback)(void *arg), void *omp_cleanup_callback_arg);
  200. #endif
  201. /* Specify that the task should not appear in the DAG generated by debug tools. */
  202. void _starpu_exclude_task_from_dag(struct starpu_task *task);
  203. /* try to submit job j, enqueue it if it's not schedulable yet. The job's sync mutex is supposed to be held already */
  204. unsigned _starpu_enforce_deps_and_schedule(struct _starpu_job *j);
  205. unsigned _starpu_enforce_deps_starting_from_task(struct _starpu_job *j);
  206. #ifdef STARPU_OPENMP
  207. /* When waking up a continuation, we only enforce new task dependencies */
  208. unsigned _starpu_reenforce_task_deps_and_schedule(struct _starpu_job *j);
  209. #endif
  210. void _starpu_enforce_deps_notify_job_ready_soon(struct _starpu_job *j, _starpu_notify_job_start_data *data, int tag);
  211. /* Called at the submission of the job */
  212. void _starpu_handle_job_submission(struct _starpu_job *j);
  213. /* This function must be called after the execution of a job, this triggers all
  214. * job's dependencies and perform the callback function if any. */
  215. void _starpu_handle_job_termination(struct _starpu_job *j);
  216. /* Get the sum of the size of the data accessed by the job. */
  217. size_t _starpu_job_get_data_size(struct starpu_perfmodel *model, struct starpu_perfmodel_arch* arch, unsigned nimpl, struct _starpu_job *j);
  218. /* Get a task from the local pool of tasks that were explicitly attributed to
  219. * that worker. */
  220. struct starpu_task *_starpu_pop_local_task(struct _starpu_worker *worker);
  221. /* Put a task into the pool of tasks that are explicitly attributed to the
  222. * specified worker. If "back" is set, the task is put at the back of the list.
  223. * Considering the tasks are popped from the back, this value should be 0 to
  224. * enforce a FIFO ordering. */
  225. int _starpu_push_local_task(struct _starpu_worker *worker, struct starpu_task *task, int prio);
  226. #define _STARPU_JOB_GET_ORDERED_BUFFER_INDEX(job, i) ((job->dyn_ordered_buffers) ? job->dyn_ordered_buffers[i].index : job->ordered_buffers[i].index)
  227. #define _STARPU_JOB_GET_ORDERED_BUFFER_HANDLE(job, i) ((job->dyn_ordered_buffers) ? job->dyn_ordered_buffers[i].handle : job->ordered_buffers[i].handle)
  228. #define _STARPU_JOB_GET_ORDERED_BUFFER_MODE(job, i) ((job->dyn_ordered_buffers) ? job->dyn_ordered_buffers[i].mode : job->ordered_buffers[i].mode)
  229. #define _STARPU_JOB_GET_ORDERED_BUFFER_NODE(job, i) ((job->dyn_ordered_buffers) ? job->dyn_ordered_buffers[i].node : job->ordered_buffers[i].node)
  230. #define _STARPU_JOB_SET_ORDERED_BUFFER_INDEX(job, __index, i) do { if (job->dyn_ordered_buffers) job->dyn_ordered_buffers[i].index = (__index); else job->ordered_buffers[i].index = (__index);} while(0)
  231. #define _STARPU_JOB_SET_ORDERED_BUFFER_HANDLE(job, __handle, i) do { if (job->dyn_ordered_buffers) job->dyn_ordered_buffers[i].handle = (__handle); else job->ordered_buffers[i].handle = (__handle);} while(0)
  232. #define _STARPU_JOB_SET_ORDERED_BUFFER_MODE(job, __mode, i) do { if (job->dyn_ordered_buffers) job->dyn_ordered_buffers[i].mode = __mode; else job->ordered_buffers[i].mode = __mode;} while(0)
  233. #define _STARPU_JOB_SET_ORDERED_BUFFER_NODE(job, __node, i) do { if (job->dyn_ordered_buffers) job->dyn_ordered_buffers[i].node = __node; else job->ordered_buffers[i].node = __node;} while(0)
  234. #define _STARPU_JOB_SET_ORDERED_BUFFER(job, buffer, i) do { if (job->dyn_ordered_buffers) job->dyn_ordered_buffers[i] = buffer; else job->ordered_buffers[i] = buffer;} while(0)
  235. #define _STARPU_JOB_GET_ORDERED_BUFFERS(job) ((job->dyn_ordered_buffers) ? job->dyn_ordered_buffers : &job->ordered_buffers[0])
  236. #define _STARPU_JOB_GET_DEP_SLOTS(job) (((job)->dyn_dep_slots) ? (job)->dyn_dep_slots : (job)->dep_slots)
  237. #endif // __JOBS_H__