  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2016-2017 Université de Bordeaux
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
/*
 * This stores the task graph structure, to be used by the schedulers which
 * need it. We do not always enable it since it is costly. To avoid
 * interfering too much with execution, it may be a bit outdated, i.e. still
 * contain jobs which have completed very recently.
 *
 * This is because we drop nodes lazily: when a job terminates, we just add the
 * node to the dropped list (to avoid having to take the mutex on the whole
 * graph). The graph gets updated whenever the graph mutex becomes available.
 */
#include <starpu.h>
#include <core/jobs.h>
#include <common/graph.h>

/* Protects the whole task graph except the dropped list */
static starpu_pthread_rwlock_t graph_lock;

/* Whether we should enable recording the task graph */
int _starpu_graph_record;

/* This list contains all nodes without incoming dependency */
struct _starpu_graph_node_multilist_top top;

/* This list contains all nodes without outgoing dependency */
struct _starpu_graph_node_multilist_bottom bottom;

/* This list contains all nodes */
struct _starpu_graph_node_multilist_all all;

/* Protects the dropped list, always taken before graph lock */
static starpu_pthread_mutex_t dropped_lock;

/* This list contains all dropped nodes, i.e. nodes whose job has terminated
 * but which are still in the graph until the next flush */
struct _starpu_graph_node_multilist_dropped dropped;
  43. void _starpu_graph_init(void)
  44. {
  45. STARPU_PTHREAD_RWLOCK_INIT(&graph_lock, NULL);
  46. _starpu_graph_node_multilist_init_top(&top);
  47. _starpu_graph_node_multilist_init_bottom(&bottom);
  48. _starpu_graph_node_multilist_init_all(&all);
  49. STARPU_PTHREAD_MUTEX_INIT(&dropped_lock, NULL);
  50. _starpu_graph_node_multilist_init_dropped(&dropped);
  51. }
/* LockWR the graph lock (exclusive access to the whole graph) */
void _starpu_graph_wrlock(void)
{
	STARPU_PTHREAD_RWLOCK_WRLOCK(&graph_lock);
}
  57. void _starpu_graph_drop_node(struct _starpu_graph_node *node);
  58. /* This flushes the list of nodes to be dropped. Both the dropped_lock and
  59. * graph_lock mutexes have to be held on entry, and are released. */
  60. void _starpu_graph_drop_dropped_nodes(void)
  61. {
  62. struct _starpu_graph_node_multilist_dropped dropping;
  63. /* Pick up the list of dropped nodes */
  64. _starpu_graph_node_multilist_move_dropped(&dropped, &dropping);
  65. STARPU_PTHREAD_MUTEX_UNLOCK(&dropped_lock);
  66. /* And now process it if it's not empty. */
  67. if (!_starpu_graph_node_multilist_empty_dropped(&dropping))
  68. {
  69. struct _starpu_graph_node *node, *next;
  70. for (node = _starpu_graph_node_multilist_begin_dropped(&dropping);
  71. node != _starpu_graph_node_multilist_end_dropped(&dropping);
  72. node = next)
  73. {
  74. next = _starpu_graph_node_multilist_next_dropped(node);
  75. _starpu_graph_drop_node(node);
  76. }
  77. }
  78. STARPU_PTHREAD_RWLOCK_UNLOCK(&graph_lock);
  79. }
/* UnlockWR the graph lock */
void _starpu_graph_wrunlock(void)
{
	/* Take the opportunity to flush nodes queued for removal; dropped_lock
	 * is always taken before the graph lock, and the flush releases both. */
	STARPU_PTHREAD_MUTEX_LOCK(&dropped_lock);
	_starpu_graph_drop_dropped_nodes();
}
/* LockRD the graph lock (shared read access to the graph) */
void _starpu_graph_rdlock(void)
{
	STARPU_PTHREAD_RWLOCK_RDLOCK(&graph_lock);
}
/* UnlockRD the graph lock */
void _starpu_graph_rdunlock(void)
{
	STARPU_PTHREAD_RWLOCK_UNLOCK(&graph_lock);
	/* Take the opportunity to try to take it WR, so that nodes dropped
	 * lazily do not pile up when the graph is mostly read */
	if (STARPU_PTHREAD_RWLOCK_TRYWRLOCK(&graph_lock) == 0)
		/* Good, flush dropped nodes (this releases the wrlock again) */
		_starpu_graph_wrunlock();
}
  100. static void __starpu_graph_foreach(void (*func)(void *data, struct _starpu_graph_node *node), void *data)
  101. {
  102. struct _starpu_graph_node *node;
  103. for (node = _starpu_graph_node_multilist_begin_all(&all);
  104. node != _starpu_graph_node_multilist_end_all(&all);
  105. node = _starpu_graph_node_multilist_next_all(node))
  106. func(data, node);
  107. }
  108. /* Add a node to the graph */
  109. void _starpu_graph_add_job(struct _starpu_job *job)
  110. {
  111. struct _starpu_graph_node *node;
  112. _STARPU_CALLOC(node, 1, sizeof(*node));
  113. node->job = job;
  114. job->graph_node = node;
  115. STARPU_PTHREAD_MUTEX_INIT(&node->mutex, NULL);
  116. _starpu_graph_wrlock();
  117. /* It does not have any dependency yet, add to all lists */
  118. _starpu_graph_node_multilist_push_back_top(&top, node);
  119. _starpu_graph_node_multilist_push_back_bottom(&bottom, node);
  120. _starpu_graph_node_multilist_push_back_all(&all, node);
  121. _starpu_graph_wrunlock();
  122. }
  123. /* Add a node to an array of nodes */
  124. static unsigned add_node(struct _starpu_graph_node *node, struct _starpu_graph_node ***nodes, unsigned *n_nodes, unsigned *alloc_nodes, unsigned **slot)
  125. {
  126. unsigned ret;
  127. if (*n_nodes == *alloc_nodes)
  128. {
  129. if (*alloc_nodes)
  130. *alloc_nodes *= 2;
  131. else
  132. *alloc_nodes = 4;
  133. _STARPU_REALLOC(*nodes, *alloc_nodes * sizeof(**nodes));
  134. if (slot)
  135. {
  136. _STARPU_REALLOC(*slot, *alloc_nodes * sizeof(**slot));
  137. }
  138. }
  139. ret = (*n_nodes)++;
  140. (*nodes)[ret] = node;
  141. return ret;
  142. }
  143. /* Add a dependency between nodes */
  144. void _starpu_graph_add_job_dep(struct _starpu_job *job, struct _starpu_job *prev_job)
  145. {
  146. unsigned rank_incoming, rank_outgoing;
  147. _starpu_graph_wrlock();
  148. struct _starpu_graph_node *node = job->graph_node;
  149. struct _starpu_graph_node *prev_node = prev_job->graph_node;
  150. if (!node || !prev_node)
  151. {
  152. /* Already gone */
  153. _starpu_graph_wrunlock();
  154. return;
  155. }
  156. if (_starpu_graph_node_multilist_queued_bottom(prev_node))
  157. /* Previous node is not at bottom any more */
  158. _starpu_graph_node_multilist_erase_bottom(&bottom, prev_node);
  159. if (_starpu_graph_node_multilist_queued_top(node))
  160. /* Next node is not at top any more */
  161. _starpu_graph_node_multilist_erase_top(&top, node);
  162. rank_incoming = add_node(prev_node, &node->incoming, &node->n_incoming, &node->alloc_incoming, &node->incoming_slot);
  163. rank_outgoing = add_node(node, &prev_node->outgoing, &prev_node->n_outgoing, &prev_node->alloc_outgoing, &prev_node->outgoing_slot);
  164. prev_node->outgoing_slot[rank_outgoing] = rank_incoming;
  165. node->incoming_slot[rank_incoming] = rank_outgoing;
  166. _starpu_graph_wrunlock();
  167. }
  168. /* Drop a node, and thus its dependencies */
  169. void _starpu_graph_drop_node(struct _starpu_graph_node *node)
  170. {
  171. unsigned i;
  172. STARPU_ASSERT(!node->job);
  173. if (_starpu_graph_node_multilist_queued_bottom(node))
  174. _starpu_graph_node_multilist_erase_bottom(&bottom, node);
  175. if (_starpu_graph_node_multilist_queued_top(node))
  176. _starpu_graph_node_multilist_erase_top(&top, node);
  177. if (_starpu_graph_node_multilist_queued_all(node))
  178. _starpu_graph_node_multilist_erase_all(&all, node);
  179. /* Drop ourself from the incoming part of the outgoing nodes. */
  180. for (i = 0; i < node->n_outgoing; i++)
  181. {
  182. struct _starpu_graph_node *next = node->outgoing[i];
  183. if (next)
  184. next->incoming[node->outgoing_slot[i]] = NULL;
  185. }
  186. /* Drop ourself from the outgoing part of the incoming nodes,
  187. * in case we happen to get dropped before it. */
  188. for (i = 0; i < node->n_incoming; i++)
  189. {
  190. struct _starpu_graph_node *prev = node->incoming[i];
  191. if (prev)
  192. prev->outgoing[node->incoming_slot[i]] = NULL;
  193. }
  194. node->n_outgoing = 0;
  195. free(node->outgoing);
  196. node->outgoing = NULL;
  197. free(node->outgoing_slot);
  198. node->outgoing_slot = NULL;
  199. node->alloc_outgoing = 0;
  200. node->n_incoming = 0;
  201. free(node->incoming);
  202. node->incoming = NULL;
  203. free(node->incoming_slot);
  204. node->incoming_slot = NULL;
  205. node->alloc_incoming = 0;
  206. free(node);
  207. }
  208. /* Drop a job */
  209. void _starpu_graph_drop_job(struct _starpu_job *job)
  210. {
  211. struct _starpu_graph_node *node = job->graph_node;
  212. job->graph_node = NULL;
  213. if (!node)
  214. return;
  215. STARPU_PTHREAD_MUTEX_LOCK(&node->mutex);
  216. /* Will not be able to use the job any more */
  217. node->job = NULL;
  218. STARPU_PTHREAD_MUTEX_UNLOCK(&node->mutex);
  219. STARPU_PTHREAD_MUTEX_LOCK(&dropped_lock);
  220. /* Queue for removal when lock becomes available */
  221. _starpu_graph_node_multilist_push_back_dropped(&dropped, node);
  222. if (STARPU_PTHREAD_RWLOCK_TRYWRLOCK(&graph_lock) == 0)
  223. {
  224. /* Graph wrlock is available, drop nodes immediately */
  225. _starpu_graph_drop_dropped_nodes();
  226. }
  227. else
  228. STARPU_PTHREAD_MUTEX_UNLOCK(&dropped_lock);
  229. }
  230. static void _starpu_graph_set_n(void *data, struct _starpu_graph_node *node)
  231. {
  232. int value = (intptr_t) data;
  233. node->graph_n = value;
  234. }
/* Call func for each vertex of the task graph, from bottom to top, in topological order.
 * func is called once per edge, as func(successor, predecessor, data).
 * The graph lock must be held by the caller. */
static void _starpu_graph_compute_bottom_up(void (*func)(struct _starpu_graph_node *next_node, struct _starpu_graph_node *prev_node, void *data), void *data)
{
	struct _starpu_graph_node *node, *node2;
	/* Two work arrays, swapped at each level so allocations are recycled */
	struct _starpu_graph_node **current_set = NULL, **next_set = NULL, **swap_set;
	unsigned current_n, next_n, i, j;
	unsigned current_alloc = 0, next_alloc = 0, swap_alloc;
	/* Classical flow algorithm: start from bottom, and propagate depths to top */
	/* Set number of processed outgoing edges to 0 for each node
	 * (graph_n is reused as that per-node counter) */
	__starpu_graph_foreach(_starpu_graph_set_n, (void*) 0);
	/* Start with the bottom of the graph */
	current_n = 0;
	for (node = _starpu_graph_node_multilist_begin_bottom(&bottom);
	     node != _starpu_graph_node_multilist_end_bottom(&bottom);
	     node = _starpu_graph_node_multilist_next_bottom(node))
		add_node(node, &current_set, &current_n, &current_alloc, NULL);
	/* Now propagate to top as long as we have current nodes */
	while (current_n)
	{
		/* Next set is initially empty */
		next_n = 0;
		/* For each node in the current set */
		for (i = 0; i < current_n; i++)
		{
			node = current_set[i];
			/* For each parent of this node */
			for (j = 0; j < node->n_incoming; j++)
			{
				node2 = node->incoming[j];
				if (!node2)
					/* Edge was severed by a dropped node */
					continue;
				node2->graph_n++;
				func(node, node2, data);
				if ((unsigned) node2->graph_n == node2->n_outgoing)
					/* All outgoing edges were processed, can now add to next set */
					add_node(node2, &next_set, &next_n, &next_alloc, NULL);
			}
		}
		/* Swap next set with current set */
		swap_set = next_set;
		swap_alloc = next_alloc;
		next_set = current_set;
		next_alloc = current_alloc;
		current_set = swap_set;
		current_alloc = swap_alloc;
		current_n = next_n;
	}
	free(current_set);
	free(next_set);
}
  285. static void compute_depth(struct _starpu_graph_node *next_node, struct _starpu_graph_node *prev_node, void *data STARPU_ATTRIBUTE_UNUSED)
  286. {
  287. if (prev_node->depth < next_node->depth + 1)
  288. prev_node->depth = next_node->depth + 1;
  289. }
  290. void _starpu_graph_compute_depths(void)
  291. {
  292. struct _starpu_graph_node *node;
  293. _starpu_graph_wrlock();
  294. /* The bottom of the graph has depth 0 */
  295. for (node = _starpu_graph_node_multilist_begin_bottom(&bottom);
  296. node != _starpu_graph_node_multilist_end_bottom(&bottom);
  297. node = _starpu_graph_node_multilist_next_bottom(node))
  298. node->depth = 0;
  299. _starpu_graph_compute_bottom_up(compute_depth, NULL);
  300. _starpu_graph_wrunlock();
  301. }
/* Fill node->descendants for every node: the number of distinct nodes
 * reachable from it through outgoing edges (one traversal per node). */
void _starpu_graph_compute_descendants(void)
{
	struct _starpu_graph_node *node, *node2, *node3;
	/* Two work arrays, swapped at each traversal level and reused across
	 * outer iterations so allocations are recycled */
	struct _starpu_graph_node **current_set = NULL, **next_set = NULL, **swap_set;
	unsigned current_n, next_n, i, j;
	unsigned current_alloc = 0, next_alloc = 0, swap_alloc;
	_starpu_graph_wrlock();
	/* Yes, this is O(|V|.(|V|+|E|)) :( */
	/* We could get O(|V|.|E|) by doing a topological sort first.
	 *
	 * |E| is usually O(|V|), though (bounded number of data dependencies,
	 * and we use synchronization tasks) */
	for (node = _starpu_graph_node_multilist_begin_all(&all);
	     node != _starpu_graph_node_multilist_end_all(&all);
	     node = _starpu_graph_node_multilist_next_all(node))
	{
		unsigned descendants;
		/* Mark all nodes as unseen (graph_n is reused as the "visited" flag) */
		for (node2 = _starpu_graph_node_multilist_begin_all(&all);
		     node2 != _starpu_graph_node_multilist_end_all(&all);
		     node2 = _starpu_graph_node_multilist_next_all(node2))
			node2->graph_n = 0;
		/* Start with the node we want to compute the number of descendants of */
		current_n = 0;
		add_node(node, &current_set, &current_n, &current_alloc, NULL);
		node->graph_n = 1;
		descendants = 0;
		/* While we have descendants, count their descendants */
		while (current_n) {
			/* Next set is initially empty */
			next_n = 0;
			/* For each node in the current set */
			for (i = 0; i < current_n; i++)
			{
				node2 = current_set[i];
				/* For each child of this node2 */
				for (j = 0; j < node2->n_outgoing; j++)
				{
					node3 = node2->outgoing[j];
					if (!node3)
						/* Edge was severed by a dropped node */
						continue;
					if (node3->graph_n)
						/* Already seen */
						continue;
					/* Add this node */
					node3->graph_n = 1;
					descendants++;
					add_node(node3, &next_set, &next_n, &next_alloc, NULL);
				}
			}
			/* Swap next set with current set */
			swap_set = next_set;
			swap_alloc = next_alloc;
			next_set = current_set;
			next_alloc = current_alloc;
			current_set = swap_set;
			current_alloc = swap_alloc;
			current_n = next_n;
		}
		node->descendants = descendants;
	}
	_starpu_graph_wrunlock();
	free(current_set);
	free(next_set);
}
/* Apply func(data, node) to every node of the graph, under the graph write
 * lock (public wrapper around __starpu_graph_foreach). */
void _starpu_graph_foreach(void (*func)(void *data, struct _starpu_graph_node *node), void *data)
{
	_starpu_graph_wrlock();
	__starpu_graph_foreach(func, data);
	_starpu_graph_wrunlock();
}