graph.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2016-2017 Université de Bordeaux
  4. * Copyright (C) 2017 Inria
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
/*
 * This stores the task graph structure, to be used by the schedulers which
 * need it. We do not always enable it since it is costly. To avoid
 * interfering too much with execution, it may be a bit outdated, i.e. still
 * contain jobs which have completed very recently.
 *
 * This is because we drop nodes lazily: when a job terminates, we just add the
 * node to the dropped list (to avoid having to take the mutex on the whole
 * graph). The graph gets updated whenever the graph mutex becomes available.
 */
  27. #include <starpu.h>
  28. #include <core/jobs.h>
  29. #include <common/graph.h>
  30. #include <core/workers.h>
/* Protects the whole task graph except the dropped list */
static starpu_pthread_rwlock_t graph_lock;
/* Whether we should enable recording the task graph */
int _starpu_graph_record;
/* This list contains all nodes without incoming dependency */
struct _starpu_graph_node_multilist_top top;
/* This list contains all nodes without outgoing dependency */
struct _starpu_graph_node_multilist_bottom bottom;
/* This list contains all nodes */
struct _starpu_graph_node_multilist_all all;
/* Protects the dropped list, always taken before graph lock */
static starpu_pthread_mutex_t dropped_lock;
/* This list contains all dropped nodes, i.e. nodes whose job has terminated
 * but which are still in the graph, waiting to be removed lazily */
struct _starpu_graph_node_multilist_dropped dropped;
  45. void _starpu_graph_init(void)
  46. {
  47. STARPU_PTHREAD_RWLOCK_INIT(&graph_lock, NULL);
  48. _starpu_graph_node_multilist_init_top(&top);
  49. _starpu_graph_node_multilist_init_bottom(&bottom);
  50. _starpu_graph_node_multilist_init_all(&all);
  51. STARPU_PTHREAD_MUTEX_INIT(&dropped_lock, NULL);
  52. _starpu_graph_node_multilist_init_dropped(&dropped);
  53. }
/* LockWR the graph lock */
void _starpu_graph_wrlock(void)
{
	/* The relax pair tells the worker machinery we may block while
	 * waiting for the write lock, so we are not counted as busy. */
	_starpu_worker_relax_on();
	STARPU_PTHREAD_RWLOCK_WRLOCK(&graph_lock);
	_starpu_worker_relax_off();
}
void _starpu_graph_drop_node(struct _starpu_graph_node *node);

/* This flushes the list of nodes to be dropped. Both the dropped_lock and
 * graph_lock mutexes have to be held on entry, and are released. */
void _starpu_graph_drop_dropped_nodes(void)
{
	struct _starpu_graph_node_multilist_dropped dropping;

	/* Pick up the list of dropped nodes into a local list (this empties
	 * the shared one), so dropped_lock can be released right away. */
	_starpu_graph_node_multilist_move_dropped(&dropped, &dropping);
	STARPU_PTHREAD_MUTEX_UNLOCK(&dropped_lock);

	/* And now process it if it's not empty. */
	if (!_starpu_graph_node_multilist_empty_dropped(&dropping))
	{
		struct _starpu_graph_node *node, *next;
		for (node = _starpu_graph_node_multilist_begin_dropped(&dropping);
		     node != _starpu_graph_node_multilist_end_dropped(&dropping);
		     node = next)
		{
			/* Fetch the successor first: dropping frees `node`. */
			next = _starpu_graph_node_multilist_next_dropped(node);
			_starpu_graph_drop_node(node);
		}
	}
	STARPU_PTHREAD_RWLOCK_UNLOCK(&graph_lock);
}
/* UnlockWR the graph lock */
void _starpu_graph_wrunlock(void)
{
	/* Take dropped_lock so the nodes queued for removal can be flushed
	 * before the write lock is given up; the relax pair tells the worker
	 * machinery we may block here. */
	_starpu_worker_relax_on();
	STARPU_PTHREAD_MUTEX_LOCK(&dropped_lock);
	_starpu_worker_relax_off();
	/* Releases both dropped_lock and graph_lock. */
	_starpu_graph_drop_dropped_nodes();
}
/* LockRD the graph lock */
void _starpu_graph_rdlock(void)
{
	/* The relax pair tells the worker machinery we may block while
	 * waiting for the read lock, so we are not counted as busy. */
	_starpu_worker_relax_on();
	STARPU_PTHREAD_RWLOCK_RDLOCK(&graph_lock);
	_starpu_worker_relax_off();
}
/* UnlockRD the graph lock */
void _starpu_graph_rdunlock(void)
{
	STARPU_PTHREAD_RWLOCK_UNLOCK(&graph_lock);
	/* Take the opportunity to try to take it WR */
	if (STARPU_PTHREAD_RWLOCK_TRYWRLOCK(&graph_lock) == 0)
		/* Good, flush dropped nodes; _starpu_graph_wrunlock releases
		 * the write lock we just acquired. */
		_starpu_graph_wrunlock();
}
  108. static void __starpu_graph_foreach(void (*func)(void *data, struct _starpu_graph_node *node), void *data)
  109. {
  110. struct _starpu_graph_node *node;
  111. for (node = _starpu_graph_node_multilist_begin_all(&all);
  112. node != _starpu_graph_node_multilist_end_all(&all);
  113. node = _starpu_graph_node_multilist_next_all(node))
  114. func(data, node);
  115. }
  116. /* Add a node to the graph */
  117. void _starpu_graph_add_job(struct _starpu_job *job)
  118. {
  119. struct _starpu_graph_node *node;
  120. _STARPU_CALLOC(node, 1, sizeof(*node));
  121. node->job = job;
  122. job->graph_node = node;
  123. STARPU_PTHREAD_MUTEX_INIT(&node->mutex, NULL);
  124. _starpu_graph_wrlock();
  125. /* It does not have any dependency yet, add to all lists */
  126. _starpu_graph_node_multilist_push_back_top(&top, node);
  127. _starpu_graph_node_multilist_push_back_bottom(&bottom, node);
  128. _starpu_graph_node_multilist_push_back_all(&all, node);
  129. _starpu_graph_wrunlock();
  130. }
  131. /* Add a node to an array of nodes */
  132. static unsigned add_node(struct _starpu_graph_node *node, struct _starpu_graph_node ***nodes, unsigned *n_nodes, unsigned *alloc_nodes, unsigned **slot)
  133. {
  134. unsigned ret;
  135. if (*n_nodes == *alloc_nodes)
  136. {
  137. if (*alloc_nodes)
  138. *alloc_nodes *= 2;
  139. else
  140. *alloc_nodes = 4;
  141. _STARPU_REALLOC(*nodes, *alloc_nodes * sizeof(**nodes));
  142. if (slot)
  143. {
  144. _STARPU_REALLOC(*slot, *alloc_nodes * sizeof(**slot));
  145. }
  146. }
  147. ret = (*n_nodes)++;
  148. (*nodes)[ret] = node;
  149. return ret;
  150. }
  151. /* Add a dependency between nodes */
  152. void _starpu_graph_add_job_dep(struct _starpu_job *job, struct _starpu_job *prev_job)
  153. {
  154. unsigned rank_incoming, rank_outgoing;
  155. _starpu_graph_wrlock();
  156. struct _starpu_graph_node *node = job->graph_node;
  157. struct _starpu_graph_node *prev_node = prev_job->graph_node;
  158. if (!node || !prev_node)
  159. {
  160. /* Already gone */
  161. _starpu_graph_wrunlock();
  162. return;
  163. }
  164. if (_starpu_graph_node_multilist_queued_bottom(prev_node))
  165. /* Previous node is not at bottom any more */
  166. _starpu_graph_node_multilist_erase_bottom(&bottom, prev_node);
  167. if (_starpu_graph_node_multilist_queued_top(node))
  168. /* Next node is not at top any more */
  169. _starpu_graph_node_multilist_erase_top(&top, node);
  170. rank_incoming = add_node(prev_node, &node->incoming, &node->n_incoming, &node->alloc_incoming, &node->incoming_slot);
  171. rank_outgoing = add_node(node, &prev_node->outgoing, &prev_node->n_outgoing, &prev_node->alloc_outgoing, &prev_node->outgoing_slot);
  172. prev_node->outgoing_slot[rank_outgoing] = rank_incoming;
  173. node->incoming_slot[rank_incoming] = rank_outgoing;
  174. _starpu_graph_wrunlock();
  175. }
  176. /* Drop a node, and thus its dependencies */
  177. void _starpu_graph_drop_node(struct _starpu_graph_node *node)
  178. {
  179. unsigned i;
  180. STARPU_ASSERT(!node->job);
  181. if (_starpu_graph_node_multilist_queued_bottom(node))
  182. _starpu_graph_node_multilist_erase_bottom(&bottom, node);
  183. if (_starpu_graph_node_multilist_queued_top(node))
  184. _starpu_graph_node_multilist_erase_top(&top, node);
  185. if (_starpu_graph_node_multilist_queued_all(node))
  186. _starpu_graph_node_multilist_erase_all(&all, node);
  187. /* Drop ourself from the incoming part of the outgoing nodes. */
  188. for (i = 0; i < node->n_outgoing; i++)
  189. {
  190. struct _starpu_graph_node *next = node->outgoing[i];
  191. if (next)
  192. next->incoming[node->outgoing_slot[i]] = NULL;
  193. }
  194. /* Drop ourself from the outgoing part of the incoming nodes,
  195. * in case we happen to get dropped before it. */
  196. for (i = 0; i < node->n_incoming; i++)
  197. {
  198. struct _starpu_graph_node *prev = node->incoming[i];
  199. if (prev)
  200. prev->outgoing[node->incoming_slot[i]] = NULL;
  201. }
  202. node->n_outgoing = 0;
  203. free(node->outgoing);
  204. node->outgoing = NULL;
  205. free(node->outgoing_slot);
  206. node->outgoing_slot = NULL;
  207. node->alloc_outgoing = 0;
  208. node->n_incoming = 0;
  209. free(node->incoming);
  210. node->incoming = NULL;
  211. free(node->incoming_slot);
  212. node->incoming_slot = NULL;
  213. node->alloc_incoming = 0;
  214. free(node);
  215. }
/* Drop a job: detach it from its graph node and queue the node for lazy
 * removal from the graph. */
void _starpu_graph_drop_job(struct _starpu_job *job)
{
	struct _starpu_graph_node *node = job->graph_node;
	job->graph_node = NULL;
	if (!node)
		/* The job was never recorded in the graph */
		return;

	/* Detach the job from the node under the node mutex, so concurrent
	 * users of the node stop dereferencing the job. */
	_starpu_worker_relax_on();
	STARPU_PTHREAD_MUTEX_LOCK(&node->mutex);
	_starpu_worker_relax_off();
	/* Will not be able to use the job any more */
	node->job = NULL;
	STARPU_PTHREAD_MUTEX_UNLOCK(&node->mutex);

	_starpu_worker_relax_on();
	STARPU_PTHREAD_MUTEX_LOCK(&dropped_lock);
	_starpu_worker_relax_off();
	/* Queue for removal when lock becomes available */
	_starpu_graph_node_multilist_push_back_dropped(&dropped, node);
	if (STARPU_PTHREAD_RWLOCK_TRYWRLOCK(&graph_lock) == 0)
	{
		/* Graph wrlock is available, drop nodes immediately
		 * (this releases both dropped_lock and graph_lock) */
		_starpu_graph_drop_dropped_nodes();
	}
	else
		/* Somebody holds the graph lock; they will flush the dropped
		 * list when releasing it (see _starpu_graph_wrunlock) */
		STARPU_PTHREAD_MUTEX_UNLOCK(&dropped_lock);
}
  242. static void _starpu_graph_set_n(void *data, struct _starpu_graph_node *node)
  243. {
  244. int value = (intptr_t) data;
  245. node->graph_n = value;
  246. }
/* Call func for each vertex of the task graph, from bottom to top, in topological order */
static void _starpu_graph_compute_bottom_up(void (*func)(struct _starpu_graph_node *next_node, struct _starpu_graph_node *prev_node, void *data), void *data)
{
	struct _starpu_graph_node *node, *node2;
	/* Two working sets of nodes, swapped at each level of the traversal */
	struct _starpu_graph_node **current_set = NULL, **next_set = NULL, **swap_set;
	unsigned current_n, next_n, i, j;
	unsigned current_alloc = 0, next_alloc = 0, swap_alloc;

	/* Classical flow algorithm: start from bottom, and propagate depths to top */

	/* Set number of processed outgoing edges to 0 for each node
	 * (graph_n is reused here as a per-node edge counter) */
	__starpu_graph_foreach(_starpu_graph_set_n, (void*) 0);

	/* Start with the bottom of the graph */
	current_n = 0;
	for (node = _starpu_graph_node_multilist_begin_bottom(&bottom);
	     node != _starpu_graph_node_multilist_end_bottom(&bottom);
	     node = _starpu_graph_node_multilist_next_bottom(node))
		add_node(node, &current_set, &current_n, &current_alloc, NULL);

	/* Now propagate to top as long as we have current nodes */
	while (current_n)
	{
		/* Next set is initially empty */
		next_n = 0;

		/* For each node in the current set */
		for (i = 0; i < current_n; i++)
		{
			node = current_set[i];
			/* For each parent of this node */
			for (j = 0; j < node->n_incoming; j++)
			{
				node2 = node->incoming[j];
				if (!node2)
					/* Edge already dropped */
					continue;
				node2->graph_n++;
				func(node, node2, data);
				if ((unsigned) node2->graph_n == node2->n_outgoing)
					/* All outgoing edges were processed, can now add to next set */
					add_node(node2, &next_set, &next_n, &next_alloc, NULL);
			}
		}

		/* Swap next set with current set (the arrays are reused to
		 * avoid reallocating at each level) */
		swap_set = next_set;
		swap_alloc = next_alloc;
		next_set = current_set;
		next_alloc = current_alloc;
		current_set = swap_set;
		current_alloc = swap_alloc;
		current_n = next_n;
	}
	free(current_set);
	free(next_set);
}
  297. static void compute_depth(struct _starpu_graph_node *next_node, struct _starpu_graph_node *prev_node, void *data STARPU_ATTRIBUTE_UNUSED)
  298. {
  299. if (prev_node->depth < next_node->depth + 1)
  300. prev_node->depth = next_node->depth + 1;
  301. }
  302. void _starpu_graph_compute_depths(void)
  303. {
  304. struct _starpu_graph_node *node;
  305. _starpu_graph_wrlock();
  306. /* The bottom of the graph has depth 0 */
  307. for (node = _starpu_graph_node_multilist_begin_bottom(&bottom);
  308. node != _starpu_graph_node_multilist_end_bottom(&bottom);
  309. node = _starpu_graph_node_multilist_next_bottom(node))
  310. node->depth = 0;
  311. _starpu_graph_compute_bottom_up(compute_depth, NULL);
  312. _starpu_graph_wrunlock();
  313. }
/* Compute, for every node of the graph, the number of its (transitive)
 * descendants, stored in node->descendants. Runs one BFS per node. */
void _starpu_graph_compute_descendants(void)
{
	struct _starpu_graph_node *node, *node2, *node3;
	/* Two working sets of nodes, swapped at each BFS level */
	struct _starpu_graph_node **current_set = NULL, **next_set = NULL, **swap_set;
	unsigned current_n, next_n, i, j;
	unsigned current_alloc = 0, next_alloc = 0, swap_alloc;

	_starpu_graph_wrlock();
	/* Yes, this is O(|V|.(|V|+|E|)) :( */
	/* We could get O(|V|.|E|) by doing a topological sort first.
	 *
	 * |E| is usually O(|V|), though (bounded number of data dependencies,
	 * and we use synchronization tasks) */
	for (node = _starpu_graph_node_multilist_begin_all(&all);
	     node != _starpu_graph_node_multilist_end_all(&all);
	     node = _starpu_graph_node_multilist_next_all(node))
	{
		unsigned descendants;

		/* Mark all nodes as unseen (graph_n doubles as a "visited" flag
		 * for this BFS) */
		for (node2 = _starpu_graph_node_multilist_begin_all(&all);
		     node2 != _starpu_graph_node_multilist_end_all(&all);
		     node2 = _starpu_graph_node_multilist_next_all(node2))
			node2->graph_n = 0;

		/* Start with the node we want to compute the number of descendants of */
		current_n = 0;
		add_node(node, &current_set, &current_n, &current_alloc, NULL);
		node->graph_n = 1;
		descendants = 0;

		/* While we have descendants, count their descendants */
		while (current_n)
		{
			/* Next set is initially empty */
			next_n = 0;
			/* For each node in the current set */
			for (i = 0; i < current_n; i++)
			{
				node2 = current_set[i];
				/* For each child of this node2 */
				for (j = 0; j < node2->n_outgoing; j++)
				{
					node3 = node2->outgoing[j];
					if (!node3)
						/* Edge already dropped */
						continue;
					if (node3->graph_n)
						/* Already seen */
						continue;
					/* Add this node */
					node3->graph_n = 1;
					descendants++;
					add_node(node3, &next_set, &next_n, &next_alloc, NULL);
				}
			}
			/* Swap next set with current set (arrays are reused
			 * across BFS levels and across nodes) */
			swap_set = next_set;
			swap_alloc = next_alloc;
			next_set = current_set;
			next_alloc = current_alloc;
			current_set = swap_set;
			current_alloc = swap_alloc;
			current_n = next_n;
		}
		node->descendants = descendants;
	}
	_starpu_graph_wrunlock();
	free(current_set);
	free(next_set);
}
/* Apply func to every node of the graph, taking (and releasing) the graph
 * write lock around the traversal. */
void _starpu_graph_foreach(void (*func)(void *data, struct _starpu_graph_node *node), void *data)
{
	_starpu_graph_wrlock();
	__starpu_graph_foreach(func, data);
	_starpu_graph_wrunlock();
}