/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2008-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#ifndef __COHERENCY__H__
#define __COHERENCY__H__

/** @file */

#include <starpu.h>
#include <common/config.h>

#include <common/starpu_spinlock.h>
#include <common/rwlock.h>
#include <common/timing.h>
#include <common/fxt.h>
#include <common/list.h>

#include <datawizard/interfaces/data_interface.h>
#include <datawizard/datastats.h>
#include <datawizard/memstats.h>
#include <datawizard/data_request.h>
enum _starpu_cache_state
{
	STARPU_OWNER,
	STARPU_SHARED,
	STARPU_INVALID
};
/** This contains the information relative to a given data replicate */
struct _starpu_data_replicate
{
	starpu_data_handle_t handle;

	/** describes the actual data layout, as manipulated by data interfaces in *_interface.c */
	void *data_interface;

	/** How many requests or tasks are currently working with this replicate */
	int refcnt;

	char memory_node;

	/** describes the state of the local data in terms of coherency */
	enum _starpu_cache_state state: 2;

	/** A buffer that is used for SCRATCH or reduction cannot be used with
	 * filters. */
	unsigned relaxed_coherency:2;

	/** We may need to initialize the replicate with some value before using it. */
	unsigned initialized:1;

	/** is the data locally allocated? */
	unsigned allocated:1;

	/** was it automatically allocated? (otherwise it is the application-provided
	 * buffer, don't ever try to free it!) */
	/** perhaps the allocation was performed higher in the hierarchy;
	 * for now this is just translated into !automatically_allocated
	 */
	unsigned automatically_allocated:1;

	/** To help the scheduling policies to make some decision, we
	   may keep track of the tasks that are likely to request
	   this data on the current node.
	   It is the responsibility of the scheduling _policy_ to set that
	   flag when it assigns a task to a queue; policies which do not
	   use this hint can simply ignore it.
	 */
	uint32_t requested;
	struct _starpu_data_request *request[STARPU_MAXNODES];

	/** Pointer to memchunk for LRU strategy */
	struct _starpu_mem_chunk * mc;
};
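/* Illustrative sketch only, not part of StarPU: how the MSI-like states above
 * (OWNER / SHARED / INVALID) are intended to evolve when a node gains access to
 * a replicate.  The real transition logic lives in _starpu_update_data_state()
 * (declared below, implemented in coherency.c); the helper name here is
 * hypothetical and the block is compiled out. */
#if 0
static void example_msi_transition(starpu_data_handle_t handle,
				   unsigned requesting_node,
				   enum starpu_data_access_mode mode)
{
	unsigned node;
	if (mode & STARPU_W)
	{
		/* A writer becomes the single owner: every other replicate
		 * now holds stale data and is marked invalid. */
		for (node = 0; node < STARPU_MAXNODES; node++)
			handle->per_node[node].state = STARPU_INVALID;
		handle->per_node[requesting_node].state = STARPU_OWNER;
	}
	else
	{
		/* A reader shares the data: a former owner (if any) is
		 * downgraded so that all valid copies are in the shared state. */
		for (node = 0; node < STARPU_MAXNODES; node++)
			if (handle->per_node[node].state == STARPU_OWNER)
				handle->per_node[node].state = STARPU_SHARED;
		handle->per_node[requesting_node].state = STARPU_SHARED;
	}
}
#endif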
struct _starpu_data_requester_prio_list;

struct _starpu_jobid_list
{
	unsigned long id;
	struct _starpu_jobid_list *next;
};

/** This structure describes a singly-linked list of tasks */
struct _starpu_task_wrapper_list
{
	struct starpu_task *task;
	struct _starpu_task_wrapper_list *next;
};

/** This structure describes a doubly-linked list of tasks */
struct _starpu_task_wrapper_dlist
{
	struct starpu_task *task;
	struct _starpu_task_wrapper_dlist *next;
	struct _starpu_task_wrapper_dlist *prev;
};

extern int _starpu_has_not_important_data;

typedef void (*_starpu_data_handle_unregister_hook)(starpu_data_handle_t);

/** This is initialized in both _starpu_register_new_data and _starpu_data_partition */
struct _starpu_data_state
{
	int magic;
	struct _starpu_data_requester_prio_list req_list;
	/** the number of requests currently in the scheduling engine (not in
	 * the req_list anymore), i.e. the number of holders of the
	 * current_mode rwlock */
	unsigned refcnt;
	/** whether we are already unlocking data requests */
	unsigned unlocking_reqs;
	/** Current access mode. Is always either STARPU_R, STARPU_W,
	 * STARPU_SCRATCH or STARPU_REDUX, but never a combination such as
	 * STARPU_RW. */
	enum starpu_data_access_mode current_mode;
	/** protect meta data */
	struct _starpu_spinlock header_lock;

	/** Condition to make the application wait for all transfers before freeing the handle */
	/** busy_count is the sum of handle->refcnt, handle->per_node[*]->refcnt,
	 * the number of starpu_data_requesters, and the number of tasks that have
	 * released the handle but are still registered on the implicit data dependency lists. */
	/** Core code which releases busy_count has to call
	 * _starpu_data_check_not_busy to let starpu_data_unregister proceed */
	unsigned busy_count;
	/** Is starpu_data_unregister waiting for busy_count? */
	unsigned busy_waiting;
	starpu_pthread_mutex_t busy_mutex;
	starpu_pthread_cond_t busy_cond;

	/** In case we use filters, the handle may describe a piece of sub-data */
	struct _starpu_data_state *root_handle; /** root of the tree */
	struct _starpu_data_state *father_handle; /** father of the node, NULL if the current node is the root */
	starpu_data_handle_t *active_children; /** The currently active set of read-write children */
	unsigned active_nchildren;

	starpu_data_handle_t **active_readonly_children; /** The currently active sets of read-only children */
	unsigned *active_readonly_nchildren; /** Size of each active_readonly_children[i] array */
	unsigned nactive_readonly_children; /** Size of the active_readonly_children and active_readonly_nchildren arrays. Actual use is given by 'partitioned' */

	/** Our siblings in the father partitioning */
	unsigned nsiblings; /** How many siblings */
	starpu_data_handle_t *siblings;
	unsigned sibling_index; /** indicates which child this node is from the father's perspective (if any) */
	unsigned depth; /** what's the depth of the tree? */
	/** Synchronous partitioning */
	starpu_data_handle_t children;
	unsigned nchildren;
	/** How many partition plans this handle has */
	unsigned nplans;
	/** Switch codelet for asynchronous partitioning */
	struct starpu_codelet *switch_cl;
	/** size of dyn_nodes recorded in switch_cl */
	unsigned switch_cl_nparts;
	/** Whether a partition plan is currently submitted and the
	 * corresponding unpartition has not been submitted yet
	 *
	 * Or the number of partition plans currently submitted in readonly
	 * mode.
	 */
	unsigned partitioned;
	/** Whether a partition plan is currently submitted in readonly mode */
	unsigned part_readonly:1;

	/** Whether our father is currently partitioned into ourself */
	unsigned active:1;
	unsigned active_ro:1;

	/** describes the state of the data in terms of coherency.
	 * This is execution-time state. */
	struct _starpu_data_replicate per_node[STARPU_MAXNODES];
	struct _starpu_data_replicate *per_worker;

	struct starpu_data_interface_ops *ops;

	/** Footprint which identifies data layout */
	uint32_t footprint;

	/** where is the data home, i.e. which node was it registered from? -1 if none yet */
	int home_node;

	/** what is the default write-through mask for that data? */
	uint32_t wt_mask;

	/** for a readonly handle, the number of times that we have returned the
	    same handle again, and thus the number of times we have to ignore unregistration requests */
	unsigned aliases;
	/** for a non-readonly handle, a readonly duplicate that we can
	    return from starpu_data_dup_ro */
	starpu_data_handle_t readonly_dup;
	/** for a readonly handle, the non-readonly handle that references it
	    in its readonly_dup field. */
	starpu_data_handle_t readonly_dup_of;

	/** in some cases, the application may explicitly tell StarPU that a
	 * piece of data is not likely to be used soon again */
	unsigned is_not_important:1;

	/** Does StarPU have to enforce some implicit data-dependencies? */
	unsigned sequential_consistency:1;

	/** Is the data initialized, or is a task already submitted to initialize it?
	 * This is submission-time initialization state. */
	unsigned initialized:1;

	/** Whether we shall not ever write to this handle, thus allowing various optimizations */
	unsigned readonly:1;

	/** Can the data be pushed to the disk? */
	unsigned ooc:1;

	/** Whether lazy unregistration was requested through starpu_data_unregister_submit */
	unsigned lazy_unregister:1;

	/** Whether automatic planned partitioning/unpartitioning should not be done */
	int partition_automatic_disabled:1;

#ifdef STARPU_OPENMP
	unsigned removed_from_context_hash:1;
#endif
	/** This lock should protect any operation to enforce
	 * sequential_consistency */
	starpu_pthread_mutex_t sequential_consistency_mutex;

	/** The last submitted task (or application data request) that declared
	 * it would modify the piece of data. Any task accessing the data in a
	 * read-only mode should depend on that task implicitly if the
	 * sequential_consistency flag is enabled. */
	enum starpu_data_access_mode last_submitted_mode;
	struct starpu_task *last_sync_task;
	struct _starpu_task_wrapper_dlist last_submitted_accessors;

	/** If FxT is enabled, we keep track of "ghost dependencies": that is to
	 * say the dependencies that are not needed anymore, but that should
	 * appear in the post-mortem DAG. For instance if we have the sequence
	 * f(Aw) g(Aw), and g is submitted after the termination of f, we
	 * want to have f->g appear in the DAG even if StarPU does not need to
	 * enforce this dependency anymore. */
	unsigned last_submitted_ghost_sync_id_is_valid;
	unsigned long last_submitted_ghost_sync_id;
	struct _starpu_jobid_list *last_submitted_ghost_accessors_id;

	/** protected by sequential_consistency_mutex */
	struct _starpu_task_wrapper_list *post_sync_tasks;
	unsigned post_sync_tasks_cnt;

	/*
	 *	Reductions
	 */

	/** During a reduction we need some specific methods: redux_func performs
	 * the reduction of an interface into another one (e.g. "+="), and init_func
	 * initializes the data interface to a default value that is stable by
	 * reduction (e.g. 0 for +=). See the application-level sketch after this
	 * structure for an example. */
	struct starpu_codelet *redux_cl;
	struct starpu_codelet *init_cl;

	/** Are we currently performing a reduction on that handle? If so, the
	 * reduction_refcnt should be non-zero as long as there are pending tasks
	 * that are performing the reduction. */
	unsigned reduction_refcnt;

	/** List of requesters that are specific to the pending reduction. This
	 * list is used when the requests in the req_list list are frozen until
	 * the end of the reduction. */
	struct _starpu_data_requester_prio_list reduction_req_list;

	starpu_data_handle_t *reduction_tmp_handles;

	/** Final request for write invalidation */
	struct _starpu_data_request *write_invalidation_req;

	/** Used for MPI */
	void *mpi_data;

	_starpu_memory_stats_t memory_stats;

	unsigned int mf_node; //XXX

	/** hook to be called when unregistering the data */
	_starpu_data_handle_unregister_hook unregister_hook;

	struct starpu_arbiter *arbiter;
	/** This is protected by the arbiter mutex */
	struct _starpu_data_requester_prio_list arbitered_req_list;

	/** Data maintained by schedulers themselves */
	/** Last worker that took this data in locality mode, or -1 if nobody
	 * took it yet */
	int last_locality;

	/** Application-provided coordinates. The maximum dimension (5) is
	 * relatively arbitrary. */
	unsigned dimensions;
	int coordinates[5];

	/** A generic pointer to data in user land (could be anything; this
	 * is not managed by StarPU) */
	void *user_data;
};
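/* Illustrative sketch only, not part of StarPU: how an application would
 * typically provide the redux_cl/init_cl pair mentioned above, here for a
 * simple "+=" reduction on a scalar registered with the variable interface.
 * Function and variable names are examples and the block is compiled out;
 * tasks would then access the handle in STARPU_REDUX mode. */
#if 0
static void sum_redux_cpu(void *buffers[], void *cl_arg)
{
	/* dst += src: merges one per-worker contribution into the accumulator */
	long *dst = (long *) STARPU_VARIABLE_GET_PTR(buffers[0]);
	long *src = (long *) STARPU_VARIABLE_GET_PTR(buffers[1]);
	(void) cl_arg;
	*dst += *src;
}

static void sum_init_cpu(void *buffers[], void *cl_arg)
{
	/* Neutral element of "+=": a value that is stable by the reduction */
	long *var = (long *) STARPU_VARIABLE_GET_PTR(buffers[0]);
	(void) cl_arg;
	*var = 0;
}

static struct starpu_codelet sum_redux_cl =
{
	.cpu_funcs = { sum_redux_cpu },
	.modes = { STARPU_RW, STARPU_R },
	.nbuffers = 2,
	.name = "sum_redux"
};

static struct starpu_codelet sum_init_cl =
{
	.cpu_funcs = { sum_init_cpu },
	.modes = { STARPU_W },
	.nbuffers = 1,
	.name = "sum_init"
};

static void example_register_sum(long *var, starpu_data_handle_t *handle)
{
	starpu_variable_data_register(handle, STARPU_MAIN_RAM,
				      (uintptr_t) var, sizeof(*var));
	/* StarPU will use init_cl to initialize per-worker copies and
	 * redux_cl to merge them back at the end of the reduction. */
	starpu_data_set_reduction_methods(*handle, &sum_redux_cl, &sum_init_cl);
}
#endif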
/** This does not take a reference on the handle, the caller has to do it,
 * e.g. through _starpu_attempt_to_submit_data_request_from_apps()
 * detached means that the core is allowed to drop the request. The caller
 * should thus *not* take a reference since it cannot know whether the request will complete
 * async means that _starpu_fetch_data_on_node will not wait for completion of the request
 */
int _starpu_fetch_data_on_node(starpu_data_handle_t handle, int node, struct _starpu_data_replicate *replicate,
			       enum starpu_data_access_mode mode, unsigned detached, enum _starpu_is_prefetch is_prefetch, unsigned async,
			       void (*callback_func)(void *), void *callback_arg, int prio, const char *origin);
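/* Illustrative sketch only, not part of StarPU: a blocking read fetch of the
 * local replicate of a handle, as an internal caller might issue it after
 * having taken its own reference on the handle (see the comment above).
 * The helper name is hypothetical; STARPU_FETCH is assumed to be the
 * plain (non-prefetch) value of enum _starpu_is_prefetch from
 * datawizard/data_request.h.  The block is compiled out. */
#if 0
static int example_blocking_read_fetch(starpu_data_handle_t handle, int node)
{
	struct _starpu_data_replicate *replicate = &handle->per_node[node];
	/* detached = 0: the caller keeps its reference on the handle;
	 * async = 0: the call returns only once the replicate is valid on node. */
	return _starpu_fetch_data_on_node(handle, node, replicate, STARPU_R,
					  0 /* detached */, STARPU_FETCH, 0 /* async */,
					  NULL /* callback_func */, NULL /* callback_arg */,
					  STARPU_DEFAULT_PRIO, "example");
}
#endif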
/** This releases a reference on the handle */
void _starpu_release_data_on_node(struct _starpu_data_state *state, uint32_t default_wt_mask,
				  enum starpu_data_access_mode down_to_mode,
				  struct _starpu_data_replicate *replicate);

void _starpu_update_data_state(starpu_data_handle_t handle,
			       struct _starpu_data_replicate *requesting_replicate,
			       enum starpu_data_access_mode mode);

uint32_t _starpu_get_data_refcnt(struct _starpu_data_state *state, unsigned node);

size_t _starpu_data_get_size(starpu_data_handle_t handle);
size_t _starpu_data_get_alloc_size(starpu_data_handle_t handle);
starpu_ssize_t _starpu_data_get_max_size(starpu_data_handle_t handle);

uint32_t _starpu_data_get_footprint(starpu_data_handle_t handle);

void __starpu_push_task_output(struct _starpu_job *j);
/** Version with driver trace */
void _starpu_push_task_output(struct _starpu_job *j);

void _starpu_release_nowhere_task_output(struct _starpu_job *j);
struct _starpu_worker;

STARPU_ATTRIBUTE_WARN_UNUSED_RESULT
/** Fetch the data parameters for task \p task
 * Setting \p async to 1 allows one to only start the fetches, and to call
 * \p _starpu_fetch_task_input_tail later when the transfers are finished */
int _starpu_fetch_task_input(struct starpu_task *task, struct _starpu_job *j, int async);
void _starpu_fetch_task_input_tail(struct starpu_task *task, struct _starpu_job *j, struct _starpu_worker *worker);
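/* Illustrative sketch only, not part of StarPU: the two-phase input fetch
 * described above, as a driver might use it.  The transfers are first started
 * with async set to 1, and _starpu_fetch_task_input_tail() is called once the
 * driver has been notified that they are all finished.  The function name and
 * control flow are examples and the block is compiled out. */
#if 0
static int example_two_phase_fetch(struct starpu_task *task, struct _starpu_job *j,
				   struct _starpu_worker *worker)
{
	/* Start the fetches without blocking the worker */
	int ret = _starpu_fetch_task_input(task, j, 1 /* async */);
	if (ret != 0)
		return ret;

	/* ... the worker may process other work while the transfers progress ... */

	/* Once all transfers are finished, bind the data interfaces to the task */
	_starpu_fetch_task_input_tail(task, j, worker);
	return 0;
}
#endif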
void _starpu_fetch_nowhere_task_input(struct _starpu_job *j);

int _starpu_select_src_node(struct _starpu_data_state *state, unsigned destination);

int _starpu_determine_request_path(starpu_data_handle_t handle,
				   int src_node, int dst_node,
				   enum starpu_data_access_mode mode, int max_len,
				   unsigned *src_nodes, unsigned *dst_nodes,
				   unsigned *handling_nodes, unsigned write_invalidation);

/** is_prefetch is whether the DSM may drop the request (when there is not enough memory, for instance)
 * async is whether the caller wants a reference on the last request, to be
 * able to wait for it (which will release that reference).
 */
struct _starpu_data_request *_starpu_create_request_to_fetch_data(starpu_data_handle_t handle,
								  struct _starpu_data_replicate *dst_replicate,
								  enum starpu_data_access_mode mode, enum _starpu_is_prefetch is_prefetch,
								  unsigned async,
								  void (*callback_func)(void *), void *callback_arg, int prio, const char *origin);

void _starpu_redux_init_data_replicate(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, int workerid);
void _starpu_data_start_reduction_mode(starpu_data_handle_t handle);
void _starpu_data_end_reduction_mode(starpu_data_handle_t handle);
void _starpu_data_end_reduction_mode_terminate(starpu_data_handle_t handle);

void _starpu_data_set_unregister_hook(starpu_data_handle_t handle, _starpu_data_handle_unregister_hook func);

struct _starpu_data_replicate *get_replicate(starpu_data_handle_t handle, enum starpu_data_access_mode mode, int workerid, unsigned node);

#endif // __COHERENCY__H__