graph.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2016 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * This stores the task graph structure, to be used by the schedulers which
 * need it. We do not always enable it since it is costly. To avoid
 * interfering too much with execution, it may be a bit outdated, i.e. still
 * contain jobs which have completed very recently.
 *
 * This is because we drop nodes lazily: when a job terminates, we just add
 * the node to the dropped list (to avoid having to take the mutex on the
 * whole graph). The graph gets updated whenever the graph mutex becomes
 * available.
 */
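
/*
 * Typical use, for illustration only (a hypothetical scheduler would do
 * something along these lines):
 *
 *   _starpu_graph_add_job(job);               // when the job is created
 *   _starpu_graph_add_job_dep(job, prev_job); // for each dependency
 *   ...
 *   _starpu_graph_drop_job(job);              // when the job terminates
 *
 * Readers walk the graph under _starpu_graph_rdlock()/_starpu_graph_rdunlock().
 */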

#include <starpu.h>
#include <core/jobs.h>
#include <common/graph.h>

/* Protects the whole task graph except the dropped list */
static starpu_pthread_rwlock_t graph_lock;

/* Whether we should enable recording the task graph */
int _starpu_graph_record;

/* This list contains all nodes without incoming dependency */
struct _starpu_graph_node_multilist_top top;

/* This list contains all nodes without outgoing dependency */
struct _starpu_graph_node_multilist_bottom bottom;

/* This list contains all nodes */
struct _starpu_graph_node_multilist_all all;

/* Protects the dropped list, always taken before graph lock */
static starpu_pthread_mutex_t dropped_lock;
/* This list contains all dropped nodes, i.e. nodes whose corresponding job
 * has terminated but which are still in the graph */
struct _starpu_graph_node_multilist_dropped dropped;

void _starpu_graph_init(void)
{
	STARPU_PTHREAD_RWLOCK_INIT(&graph_lock, NULL);
	_starpu_graph_node_multilist_init_top(&top);
	_starpu_graph_node_multilist_init_bottom(&bottom);
	_starpu_graph_node_multilist_init_all(&all);
	STARPU_PTHREAD_MUTEX_INIT(&dropped_lock, NULL);
	_starpu_graph_node_multilist_init_dropped(&dropped);
}

/* LockWR the graph lock */
void _starpu_graph_wrlock(void)
{
	STARPU_PTHREAD_RWLOCK_WRLOCK(&graph_lock);
}

void _starpu_graph_drop_node(struct _starpu_graph_node *node);

/* This flushes the list of nodes to be dropped. Both dropped_lock and the
 * graph wrlock have to be held on entry, and both are released on return. */
void _starpu_graph_drop_dropped_nodes(void)
{
	struct _starpu_graph_node_multilist_dropped dropping;

	/* Pick up the list of dropped nodes */
	_starpu_graph_node_multilist_move_dropped(&dropped, &dropping);
	STARPU_PTHREAD_MUTEX_UNLOCK(&dropped_lock);

	/* And now process it if it's not empty */
	if (!_starpu_graph_node_multilist_empty_dropped(&dropping))
	{
		struct _starpu_graph_node *node, *next;
		for (node = _starpu_graph_node_multilist_begin_dropped(&dropping);
		     node != _starpu_graph_node_multilist_end_dropped(&dropping);
		     node = next)
		{
			next = _starpu_graph_node_multilist_next_dropped(node);
			_starpu_graph_drop_node(node);
		}
	}
	STARPU_PTHREAD_RWLOCK_UNLOCK(&graph_lock);
}

/* UnlockWR the graph lock, flushing any nodes queued for dropping */
void _starpu_graph_wrunlock(void)
{
	STARPU_PTHREAD_MUTEX_LOCK(&dropped_lock);
	_starpu_graph_drop_dropped_nodes();
}

/* LockRD the graph lock */
void _starpu_graph_rdlock(void)
{
	STARPU_PTHREAD_RWLOCK_RDLOCK(&graph_lock);
}

/* UnlockRD the graph lock */
void _starpu_graph_rdunlock(void)
{
	STARPU_PTHREAD_RWLOCK_UNLOCK(&graph_lock);
	/* Take the opportunity to try to take it WR */
	if (STARPU_PTHREAD_RWLOCK_TRYWRLOCK(&graph_lock) == 0)
		/* Good, flush dropped nodes */
		_starpu_graph_wrunlock();
}

static void __starpu_graph_foreach(void (*func)(void *data, struct _starpu_graph_node *node), void *data)
{
	struct _starpu_graph_node *node;

	for (node = _starpu_graph_node_multilist_begin_all(&all);
	     node != _starpu_graph_node_multilist_end_all(&all);
	     node = _starpu_graph_node_multilist_next_all(node))
		func(data, node);
}

/* Add a node to the graph */
void _starpu_graph_add_job(struct _starpu_job *job)
{
	struct _starpu_graph_node *node;
	_STARPU_CALLOC(node, 1, sizeof(*node));
	node->job = job;
	job->graph_node = node;
	STARPU_PTHREAD_MUTEX_INIT(&node->mutex, NULL);

	_starpu_graph_wrlock();
	/* It does not have any dependency yet, add to all lists */
	_starpu_graph_node_multilist_push_back_top(&top, node);
	_starpu_graph_node_multilist_push_back_bottom(&bottom, node);
	_starpu_graph_node_multilist_push_back_all(&all, node);
	_starpu_graph_wrunlock();
}

/* Add a node to an array of nodes */
static unsigned add_node(struct _starpu_graph_node *node, struct _starpu_graph_node ***nodes, unsigned *n_nodes, unsigned *alloc_nodes, unsigned **slot)
{
	unsigned ret;
	if (*n_nodes == *alloc_nodes)
	{
		if (*alloc_nodes)
			*alloc_nodes *= 2;
		else
			*alloc_nodes = 4;
		_STARPU_REALLOC(*nodes, *alloc_nodes * sizeof(**nodes));
		if (slot)
		{
			_STARPU_REALLOC(*slot, *alloc_nodes * sizeof(**slot));
		}
	}
	ret = (*n_nodes)++;
	(*nodes)[ret] = node;
	return ret;
}
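
/*
 * Usage sketch, for illustration only (the variable names here are
 * hypothetical): the capacity grows geometrically (4, 8, 16, ...), so
 * appends are amortized O(1).
 *
 *   struct _starpu_graph_node **set = NULL;
 *   unsigned n = 0, alloc = 0;
 *   unsigned rank = add_node(some_node, &set, &n, &alloc, NULL);
 *   // now set[rank] == some_node, n == 1, alloc == 4
 *   free(set);
 */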

/* Add a dependency between nodes */
void _starpu_graph_add_job_dep(struct _starpu_job *job, struct _starpu_job *prev_job)
{
	unsigned rank_incoming, rank_outgoing;
	_starpu_graph_wrlock();
	struct _starpu_graph_node *node = job->graph_node;
	struct _starpu_graph_node *prev_node = prev_job->graph_node;

	if (!node || !prev_node)
	{
		/* One of the jobs is not in the graph any more; do not leak
		 * the write lock on this early return */
		_starpu_graph_wrunlock();
		return;
	}

	if (_starpu_graph_node_multilist_queued_bottom(prev_node))
		/* Previous node is not at bottom any more */
		_starpu_graph_node_multilist_erase_bottom(&bottom, prev_node);

	if (_starpu_graph_node_multilist_queued_top(node))
		/* Next node is not at top any more */
		_starpu_graph_node_multilist_erase_top(&top, node);

	rank_incoming = add_node(prev_node, &node->incoming, &node->n_incoming, &node->alloc_incoming, NULL);
	rank_outgoing = add_node(node, &prev_node->outgoing, &prev_node->n_outgoing, &prev_node->alloc_outgoing, &prev_node->outgoing_slot);
	prev_node->outgoing_slot[rank_outgoing] = rank_incoming;

	_starpu_graph_wrunlock();
}
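
/*
 * For illustration, the invariants established by the call above:
 *
 *   node->incoming[rank_incoming] == prev_node
 *   prev_node->outgoing[rank_outgoing] == node
 *   prev_node->outgoing_slot[rank_outgoing] == rank_incoming
 *
 * The outgoing_slot array is what lets _starpu_graph_drop_node() clear its
 * entry in each successor's incoming array in O(1).
 */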

/* Drop a node, and thus its dependencies */
void _starpu_graph_drop_node(struct _starpu_graph_node *node)
{
	unsigned i;
	STARPU_ASSERT(!node->job);

	if (_starpu_graph_node_multilist_queued_bottom(node))
		_starpu_graph_node_multilist_erase_bottom(&bottom, node);
	if (_starpu_graph_node_multilist_queued_top(node))
		_starpu_graph_node_multilist_erase_top(&top, node);
	if (_starpu_graph_node_multilist_queued_all(node))
		_starpu_graph_node_multilist_erase_all(&all, node);

	/* Remove ourselves from the incoming arrays of our outgoing nodes */
	for (i = 0; i < node->n_outgoing; i++)
	{
		struct _starpu_graph_node *next = node->outgoing[i];
		next->incoming[node->outgoing_slot[i]] = NULL;
	}
	node->n_outgoing = 0;
	free(node->outgoing);
	node->outgoing = NULL;
	free(node->outgoing_slot);
	node->outgoing_slot = NULL;
	node->alloc_outgoing = 0;

	node->n_incoming = 0;
	free(node->incoming);
	node->incoming = NULL;
	node->alloc_incoming = 0;

	free(node);
}

/* Drop a job */
void _starpu_graph_drop_job(struct _starpu_job *job)
{
	struct _starpu_graph_node *node = job->graph_node;
	job->graph_node = NULL;
	if (!node)
		return;

	STARPU_PTHREAD_MUTEX_LOCK(&node->mutex);
	/* Will not be able to use the job any more */
	node->job = NULL;
	STARPU_PTHREAD_MUTEX_UNLOCK(&node->mutex);

	STARPU_PTHREAD_MUTEX_LOCK(&dropped_lock);
	/* Queue for removal when the graph lock becomes available */
	_starpu_graph_node_multilist_push_back_dropped(&dropped, node);
	if (STARPU_PTHREAD_RWLOCK_TRYWRLOCK(&graph_lock) == 0)
	{
		/* Graph wrlock is available, drop nodes immediately */
		_starpu_graph_drop_dropped_nodes();
	}
	else
		STARPU_PTHREAD_MUTEX_UNLOCK(&dropped_lock);
}
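
/*
 * Note on the lock ordering: while dropped_lock is held, this file only ever
 * acquires the graph lock with TRYWRLOCK, never with a blocking lock. This is
 * why the two locks can be taken in both orders (_starpu_graph_wrunlock()
 * takes dropped_lock while already holding the graph wrlock) without risking
 * a deadlock.
 */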

static void _starpu_graph_set_n(void *data, struct _starpu_graph_node *node)
{
	int value = (intptr_t) data;
	node->graph_n = value;
}

/* Call func for each vertex of the task graph, from bottom to top, in topological order */
static void _starpu_graph_compute_bottom_up(void (*func)(struct _starpu_graph_node *next_node, struct _starpu_graph_node *prev_node, void *data), void *data)
{
	struct _starpu_graph_node *node, *node2;
	struct _starpu_graph_node **current_set = NULL, **next_set = NULL, **swap_set;
	unsigned current_n, next_n, i, j;
	unsigned current_alloc = 0, next_alloc = 0, swap_alloc;

	/* Classical flow algorithm: start from bottom, and propagate depths to top */

	/* Set number of processed outgoing edges to 0 for each node */
	__starpu_graph_foreach(_starpu_graph_set_n, (void*) 0);

	/* Start with the bottom of the graph */
	current_n = 0;
	for (node = _starpu_graph_node_multilist_begin_bottom(&bottom);
	     node != _starpu_graph_node_multilist_end_bottom(&bottom);
	     node = _starpu_graph_node_multilist_next_bottom(node))
		add_node(node, &current_set, &current_n, &current_alloc, NULL);

	/* Now propagate to top as long as we have current nodes */
	while (current_n)
	{
		/* Next set is initially empty */
		next_n = 0;

		/* For each node in the current set */
		for (i = 0; i < current_n; i++)
		{
			node = current_set[i];
			/* For each parent of this node */
			for (j = 0; j < node->n_incoming; j++)
			{
				node2 = node->incoming[j];
				if (!node2)
					continue;
				node2->graph_n++;
				func(node, node2, data);

				if ((unsigned) node2->graph_n == node2->n_outgoing)
					/* All outgoing edges were processed, can now add to next set */
					add_node(node2, &next_set, &next_n, &next_alloc, NULL);
			}
		}

		/* Swap next set with current set */
		swap_set = next_set;
		swap_alloc = next_alloc;
		next_set = current_set;
		next_alloc = current_alloc;
		current_set = swap_set;
		current_alloc = swap_alloc;
		current_n = next_n;
	}
	free(current_set);
	free(next_set);
}
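
/*
 * Illustration on a hypothetical diamond graph with edges A->B, A->C, B->D,
 * C->D: the bottom set is {D}. The first wave calls func(D, B, data) and
 * func(D, C, data); B and C then have all their outgoing edges counted, so
 * the next set is {B, C}. The second wave calls func(B, A, data) and
 * func(C, A, data); A is only added once both of its outgoing edges have
 * been processed, which is what guarantees the topological order.
 */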

static void compute_depth(struct _starpu_graph_node *next_node, struct _starpu_graph_node *prev_node, void *data STARPU_ATTRIBUTE_UNUSED)
{
	if (prev_node->depth < next_node->depth + 1)
		prev_node->depth = next_node->depth + 1;
}

void _starpu_graph_compute_depths(void)
{
	struct _starpu_graph_node *node;

	_starpu_graph_wrlock();

	/* The bottom of the graph has depth 0 */
	for (node = _starpu_graph_node_multilist_begin_bottom(&bottom);
	     node != _starpu_graph_node_multilist_end_bottom(&bottom);
	     node = _starpu_graph_node_multilist_next_bottom(node))
		node->depth = 0;

	_starpu_graph_compute_bottom_up(compute_depth, NULL);

	_starpu_graph_wrunlock();
}
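
/*
 * Example on a hypothetical chain A->B->C: C has no outgoing edge, so it
 * sits at the bottom with depth 0; the bottom-up pass then sets B->depth
 * to 1 and A->depth to 2. A node's depth is thus the length of the longest
 * path from it to the bottom of the graph.
 */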

void _starpu_graph_compute_descendants(void)
{
	struct _starpu_graph_node *node, *node2, *node3;
	struct _starpu_graph_node **current_set = NULL, **next_set = NULL, **swap_set;
	unsigned current_n, next_n, i, j;
	unsigned current_alloc = 0, next_alloc = 0, swap_alloc;

	_starpu_graph_wrlock();

	/* Yes, this is O(|V|.(|V|+|E|)) :( */
	/* We could get O(|V|.|E|) by doing a topological sort first.
	 *
	 * |E| is usually O(|V|), though (bounded number of data dependencies,
	 * and we use synchronization tasks) */
	for (node = _starpu_graph_node_multilist_begin_all(&all);
	     node != _starpu_graph_node_multilist_end_all(&all);
	     node = _starpu_graph_node_multilist_next_all(node))
	{
		unsigned descendants;

		/* Mark all nodes as unseen */
		for (node2 = _starpu_graph_node_multilist_begin_all(&all);
		     node2 != _starpu_graph_node_multilist_end_all(&all);
		     node2 = _starpu_graph_node_multilist_next_all(node2))
			node2->graph_n = 0;

		/* Start with the node we want to compute the number of descendants of */
		current_n = 0;
		add_node(node, &current_set, &current_n, &current_alloc, NULL);
		node->graph_n = 1;

		descendants = 0;

		/* While we have descendants, count their descendants */
		while (current_n)
		{
			/* Next set is initially empty */
			next_n = 0;

			/* For each node in the current set */
			for (i = 0; i < current_n; i++)
			{
				node2 = current_set[i];
				/* For each child of this node2 */
				for (j = 0; j < node2->n_outgoing; j++)
				{
					node3 = node2->outgoing[j];
					if (!node3)
						continue;
					if (node3->graph_n)
						/* Already seen */
						continue;
					/* Add this node */
					node3->graph_n = 1;
					descendants++;
					add_node(node3, &next_set, &next_n, &next_alloc, NULL);
				}
			}

			/* Swap next set with current set */
			swap_set = next_set;
			swap_alloc = next_alloc;
			next_set = current_set;
			next_alloc = current_alloc;
			current_set = swap_set;
			current_alloc = swap_alloc;
			current_n = next_n;
		}
		node->descendants = descendants;
	}

	_starpu_graph_wrunlock();

	free(current_set);
	free(next_set);
}
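
/*
 * Illustration on the same hypothetical diamond (A->B, A->C, B->D, C->D):
 * the breadth-first search from A visits {B, C} and then {D}; D is counted
 * only once thanks to the graph_n "seen" mark, so A->descendants ends up
 * as 3, not 4.
 */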

void _starpu_graph_foreach(void (*func)(void *data, struct _starpu_graph_node *node), void *data)
{
	_starpu_graph_wrlock();
	__starpu_graph_foreach(func, data);
	_starpu_graph_wrunlock();
}