graph_builder.go 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620
  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package garbagecollector
  14. import (
  15. "fmt"
  16. "reflect"
  17. "sync"
  18. "time"
  19. "k8s.io/klog"
  20. "k8s.io/apimachinery/pkg/api/meta"
  21. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  22. "k8s.io/apimachinery/pkg/runtime/schema"
  23. utilerrors "k8s.io/apimachinery/pkg/util/errors"
  24. utilruntime "k8s.io/apimachinery/pkg/util/runtime"
  25. "k8s.io/apimachinery/pkg/util/sets"
  26. "k8s.io/apimachinery/pkg/util/wait"
  27. "k8s.io/client-go/metadata"
  28. "k8s.io/client-go/tools/cache"
  29. "k8s.io/client-go/util/workqueue"
  30. "k8s.io/kubernetes/pkg/controller"
  31. "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
  32. )
  33. type eventType int
  34. func (e eventType) String() string {
  35. switch e {
  36. case addEvent:
  37. return "add"
  38. case updateEvent:
  39. return "update"
  40. case deleteEvent:
  41. return "delete"
  42. default:
  43. return fmt.Sprintf("unknown(%d)", int(e))
  44. }
  45. }
  46. const (
  47. addEvent eventType = iota
  48. updateEvent
  49. deleteEvent
  50. )
// event is the unit of work produced by the informer event handlers and
// consumed by processGraphChanges.
type event struct {
	eventType eventType
	obj       interface{}
	// the update event comes with an old object, but it's not used by the garbage collector.
	oldObj interface{}
	// gvk identifies the kind of obj; it is the kind the originating monitor watches.
	gvk schema.GroupVersionKind
}
// GraphBuilder processes events supplied by the informers, updates uidToNode,
// a graph that caches the dependencies as we know, and enqueues
// items to the attemptToDelete and attemptToOrphan.
type GraphBuilder struct {
	// restMapper resolves a GroupVersionResource to its GroupVersionKind when
	// new monitors are created in syncMonitors.
	restMapper meta.RESTMapper
	// each monitor list/watches a resource, the results are funneled to the
	// dependencyGraphBuilder
	monitors monitors
	// monitorLock guards monitors, running, and stopCh.
	monitorLock sync.RWMutex
	// informersStarted is closed after all of the controllers have been initialized and are running.
	// After that it is safe to start them here, before that it is not.
	informersStarted <-chan struct{}
	// stopCh drives shutdown. When a receive from it unblocks, monitors will shut down.
	// This channel is also protected by monitorLock.
	stopCh <-chan struct{}
	// running tracks whether Run() has been called.
	// it is protected by monitorLock.
	running bool
	// metadataClient is not used within this file; presumably used by other
	// GraphBuilder methods elsewhere — TODO confirm.
	metadataClient metadata.Interface
	// monitors are the producer of the graphChanges queue, graphBuilder alters
	// the in-memory graph according to the changes.
	graphChanges workqueue.RateLimitingInterface
	// uidToNode doesn't require a lock to protect, because only the
	// single-threaded GraphBuilder.processGraphChanges() reads/writes it.
	uidToNode *concurrentUIDToNode
	// GraphBuilder is the producer of attemptToDelete and attemptToOrphan, GC is the consumer.
	attemptToDelete workqueue.RateLimitingInterface
	attemptToOrphan workqueue.RateLimitingInterface
	// GraphBuilder and GC share the absentOwnerCache. Objects that are known to
	// be non-existent are added to the cache.
	absentOwnerCache *UIDCache
	// sharedInformers supplies the informer for each monitored resource.
	sharedInformers controller.InformerFactory
	// ignoredResources are never monitored; see DefaultIgnoredResources.
	ignoredResources map[schema.GroupResource]struct{}
}
// monitor runs a Controller with a local stop channel.
type monitor struct {
	controller cache.Controller
	store      cache.Store
	// stopCh stops Controller. If stopCh is nil, the monitor is considered to be
	// not yet started.
	stopCh chan struct{}
}

// Run is intended to be called in a goroutine. Multiple calls of this is an
// error.
func (m *monitor) Run() {
	// Blocks until m.stopCh is closed.
	m.controller.Run(m.stopCh)
}

// monitors maps each watched resource to the monitor that list/watches it.
type monitors map[schema.GroupVersionResource]*monitor
  106. func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind schema.GroupVersionKind) (cache.Controller, cache.Store, error) {
  107. handlers := cache.ResourceEventHandlerFuncs{
  108. // add the event to the dependencyGraphBuilder's graphChanges.
  109. AddFunc: func(obj interface{}) {
  110. event := &event{
  111. eventType: addEvent,
  112. obj: obj,
  113. gvk: kind,
  114. }
  115. gb.graphChanges.Add(event)
  116. },
  117. UpdateFunc: func(oldObj, newObj interface{}) {
  118. // TODO: check if there are differences in the ownerRefs,
  119. // finalizers, and DeletionTimestamp; if not, ignore the update.
  120. event := &event{
  121. eventType: updateEvent,
  122. obj: newObj,
  123. oldObj: oldObj,
  124. gvk: kind,
  125. }
  126. gb.graphChanges.Add(event)
  127. },
  128. DeleteFunc: func(obj interface{}) {
  129. // delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
  130. if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
  131. obj = deletedFinalStateUnknown.Obj
  132. }
  133. event := &event{
  134. eventType: deleteEvent,
  135. obj: obj,
  136. gvk: kind,
  137. }
  138. gb.graphChanges.Add(event)
  139. },
  140. }
  141. shared, err := gb.sharedInformers.ForResource(resource)
  142. if err != nil {
  143. klog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
  144. return nil, nil, err
  145. }
  146. klog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
  147. // need to clone because it's from a shared cache
  148. shared.Informer().AddEventHandlerWithResyncPeriod(handlers, ResourceResyncTime)
  149. return shared.Informer().GetController(), shared.Informer().GetStore(), nil
  150. }
  151. // syncMonitors rebuilds the monitor set according to the supplied resources,
  152. // creating or deleting monitors as necessary. It will return any error
  153. // encountered, but will make an attempt to create a monitor for each resource
  154. // instead of immediately exiting on an error. It may be called before or after
  155. // Run. Monitors are NOT started as part of the sync. To ensure all existing
  156. // monitors are started, call startMonitors.
  157. func (gb *GraphBuilder) syncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
  158. gb.monitorLock.Lock()
  159. defer gb.monitorLock.Unlock()
  160. toRemove := gb.monitors
  161. if toRemove == nil {
  162. toRemove = monitors{}
  163. }
  164. current := monitors{}
  165. errs := []error{}
  166. kept := 0
  167. added := 0
  168. for resource := range resources {
  169. if _, ok := gb.ignoredResources[resource.GroupResource()]; ok {
  170. continue
  171. }
  172. if m, ok := toRemove[resource]; ok {
  173. current[resource] = m
  174. delete(toRemove, resource)
  175. kept++
  176. continue
  177. }
  178. kind, err := gb.restMapper.KindFor(resource)
  179. if err != nil {
  180. errs = append(errs, fmt.Errorf("couldn't look up resource %q: %v", resource, err))
  181. continue
  182. }
  183. c, s, err := gb.controllerFor(resource, kind)
  184. if err != nil {
  185. errs = append(errs, fmt.Errorf("couldn't start monitor for resource %q: %v", resource, err))
  186. continue
  187. }
  188. current[resource] = &monitor{store: s, controller: c}
  189. added++
  190. }
  191. gb.monitors = current
  192. for _, monitor := range toRemove {
  193. if monitor.stopCh != nil {
  194. close(monitor.stopCh)
  195. }
  196. }
  197. klog.V(4).Infof("synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
  198. // NewAggregate returns nil if errs is 0-length
  199. return utilerrors.NewAggregate(errs)
  200. }
  201. // startMonitors ensures the current set of monitors are running. Any newly
  202. // started monitors will also cause shared informers to be started.
  203. //
  204. // If called before Run, startMonitors does nothing (as there is no stop channel
  205. // to support monitor/informer execution).
  206. func (gb *GraphBuilder) startMonitors() {
  207. gb.monitorLock.Lock()
  208. defer gb.monitorLock.Unlock()
  209. if !gb.running {
  210. return
  211. }
  212. // we're waiting until after the informer start that happens once all the controllers are initialized. This ensures
  213. // that they don't get unexpected events on their work queues.
  214. <-gb.informersStarted
  215. monitors := gb.monitors
  216. started := 0
  217. for _, monitor := range monitors {
  218. if monitor.stopCh == nil {
  219. monitor.stopCh = make(chan struct{})
  220. gb.sharedInformers.Start(gb.stopCh)
  221. go monitor.Run()
  222. started++
  223. }
  224. }
  225. klog.V(4).Infof("started %d new monitors, %d currently running", started, len(monitors))
  226. }
  227. // IsSynced returns true if any monitors exist AND all those monitors'
  228. // controllers HasSynced functions return true. This means IsSynced could return
  229. // true at one time, and then later return false if all monitors were
  230. // reconstructed.
  231. func (gb *GraphBuilder) IsSynced() bool {
  232. gb.monitorLock.Lock()
  233. defer gb.monitorLock.Unlock()
  234. if len(gb.monitors) == 0 {
  235. klog.V(4).Info("garbage controller monitor not synced: no monitors")
  236. return false
  237. }
  238. for resource, monitor := range gb.monitors {
  239. if !monitor.controller.HasSynced() {
  240. klog.V(4).Infof("garbage controller monitor not yet synced: %+v", resource)
  241. return false
  242. }
  243. }
  244. return true
  245. }
  246. // Run sets the stop channel and starts monitor execution until stopCh is
  247. // closed. Any running monitors will be stopped before Run returns.
  248. func (gb *GraphBuilder) Run(stopCh <-chan struct{}) {
  249. klog.Infof("GraphBuilder running")
  250. defer klog.Infof("GraphBuilder stopping")
  251. // Set up the stop channel.
  252. gb.monitorLock.Lock()
  253. gb.stopCh = stopCh
  254. gb.running = true
  255. gb.monitorLock.Unlock()
  256. // Start monitors and begin change processing until the stop channel is
  257. // closed.
  258. gb.startMonitors()
  259. wait.Until(gb.runProcessGraphChanges, 1*time.Second, stopCh)
  260. // Stop any running monitors.
  261. gb.monitorLock.Lock()
  262. defer gb.monitorLock.Unlock()
  263. monitors := gb.monitors
  264. stopped := 0
  265. for _, monitor := range monitors {
  266. if monitor.stopCh != nil {
  267. stopped++
  268. close(monitor.stopCh)
  269. }
  270. }
  271. // reset monitors so that the graph builder can be safely re-run/synced.
  272. gb.monitors = nil
  273. klog.Infof("stopped %d of %d monitors", stopped, len(monitors))
  274. }
  275. var ignoredResources = map[schema.GroupResource]struct{}{
  276. {Group: "", Resource: "events"}: {},
  277. }
  278. // DefaultIgnoredResources returns the default set of resources that the garbage collector controller
  279. // should ignore. This is exposed so downstream integrators can have access to the defaults, and add
  280. // to them as necessary when constructing the controller.
  281. func DefaultIgnoredResources() map[schema.GroupResource]struct{} {
  282. return ignoredResources
  283. }
// enqueueVirtualDeleteEvent is used to add a virtual delete event to be processed for virtual nodes
// once it is determined they do not have backing objects in storage
func (gb *GraphBuilder) enqueueVirtualDeleteEvent(ref objectReference) {
	// The MetadataOnlyObject carries just enough identity (APIVersion/Kind,
	// namespace, name, UID) for processGraphChanges to find and remove the node.
	gb.graphChanges.Add(&event{
		eventType: deleteEvent,
		obj: &metaonly.MetadataOnlyObject{
			TypeMeta:   metav1.TypeMeta{APIVersion: ref.APIVersion, Kind: ref.Kind},
			ObjectMeta: metav1.ObjectMeta{Namespace: ref.Namespace, UID: ref.UID, Name: ref.Name},
		},
	})
}
  295. // addDependentToOwners adds n to owners' dependents list. If the owner does not
  296. // exist in the gb.uidToNode yet, a "virtual" node will be created to represent
  297. // the owner. The "virtual" node will be enqueued to the attemptToDelete, so that
  298. // attemptToDeleteItem() will verify if the owner exists according to the API server.
  299. func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerReference) {
  300. for _, owner := range owners {
  301. ownerNode, ok := gb.uidToNode.Read(owner.UID)
  302. if !ok {
  303. // Create a "virtual" node in the graph for the owner if it doesn't
  304. // exist in the graph yet.
  305. ownerNode = &node{
  306. identity: objectReference{
  307. OwnerReference: owner,
  308. Namespace: n.identity.Namespace,
  309. },
  310. dependents: make(map[*node]struct{}),
  311. virtual: true,
  312. }
  313. klog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity)
  314. gb.uidToNode.Write(ownerNode)
  315. }
  316. ownerNode.addDependent(n)
  317. if !ok {
  318. // Enqueue the virtual node into attemptToDelete.
  319. // The garbage processor will enqueue a virtual delete
  320. // event to delete it from the graph if API server confirms this
  321. // owner doesn't exist.
  322. gb.attemptToDelete.Add(ownerNode)
  323. }
  324. }
  325. }
  326. // insertNode insert the node to gb.uidToNode; then it finds all owners as listed
  327. // in n.owners, and adds the node to their dependents list.
  328. func (gb *GraphBuilder) insertNode(n *node) {
  329. gb.uidToNode.Write(n)
  330. gb.addDependentToOwners(n, n.owners)
  331. }
  332. // removeDependentFromOwners remove n from owners' dependents list.
  333. func (gb *GraphBuilder) removeDependentFromOwners(n *node, owners []metav1.OwnerReference) {
  334. for _, owner := range owners {
  335. ownerNode, ok := gb.uidToNode.Read(owner.UID)
  336. if !ok {
  337. continue
  338. }
  339. ownerNode.deleteDependent(n)
  340. }
  341. }
  342. // removeNode removes the node from gb.uidToNode, then finds all
  343. // owners as listed in n.owners, and removes n from their dependents list.
  344. func (gb *GraphBuilder) removeNode(n *node) {
  345. gb.uidToNode.Delete(n.identity.UID)
  346. gb.removeDependentFromOwners(n, n.owners)
  347. }
  348. type ownerRefPair struct {
  349. oldRef metav1.OwnerReference
  350. newRef metav1.OwnerReference
  351. }
  352. // TODO: profile this function to see if a naive N^2 algorithm performs better
  353. // when the number of references is small.
  354. func referencesDiffs(old []metav1.OwnerReference, new []metav1.OwnerReference) (added []metav1.OwnerReference, removed []metav1.OwnerReference, changed []ownerRefPair) {
  355. oldUIDToRef := make(map[string]metav1.OwnerReference)
  356. for _, value := range old {
  357. oldUIDToRef[string(value.UID)] = value
  358. }
  359. oldUIDSet := sets.StringKeySet(oldUIDToRef)
  360. for _, value := range new {
  361. newUID := string(value.UID)
  362. if oldUIDSet.Has(newUID) {
  363. if !reflect.DeepEqual(oldUIDToRef[newUID], value) {
  364. changed = append(changed, ownerRefPair{oldRef: oldUIDToRef[newUID], newRef: value})
  365. }
  366. oldUIDSet.Delete(newUID)
  367. } else {
  368. added = append(added, value)
  369. }
  370. }
  371. for oldUID := range oldUIDSet {
  372. removed = append(removed, oldUIDToRef[oldUID])
  373. }
  374. return added, removed, changed
  375. }
  376. func deletionStartsWithFinalizer(oldObj interface{}, newAccessor metav1.Object, matchingFinalizer string) bool {
  377. // if the new object isn't being deleted, or doesn't have the finalizer we're interested in, return false
  378. if !beingDeleted(newAccessor) || !hasFinalizer(newAccessor, matchingFinalizer) {
  379. return false
  380. }
  381. // if the old object is nil, or wasn't being deleted, or didn't have the finalizer, return true
  382. if oldObj == nil {
  383. return true
  384. }
  385. oldAccessor, err := meta.Accessor(oldObj)
  386. if err != nil {
  387. utilruntime.HandleError(fmt.Errorf("cannot access oldObj: %v", err))
  388. return false
  389. }
  390. return !beingDeleted(oldAccessor) || !hasFinalizer(oldAccessor, matchingFinalizer)
  391. }
  392. func beingDeleted(accessor metav1.Object) bool {
  393. return accessor.GetDeletionTimestamp() != nil
  394. }
  395. func hasDeleteDependentsFinalizer(accessor metav1.Object) bool {
  396. return hasFinalizer(accessor, metav1.FinalizerDeleteDependents)
  397. }
  398. func hasOrphanFinalizer(accessor metav1.Object) bool {
  399. return hasFinalizer(accessor, metav1.FinalizerOrphanDependents)
  400. }
  401. func hasFinalizer(accessor metav1.Object, matchingFinalizer string) bool {
  402. finalizers := accessor.GetFinalizers()
  403. for _, finalizer := range finalizers {
  404. if finalizer == matchingFinalizer {
  405. return true
  406. }
  407. }
  408. return false
  409. }
// startsWaitingForDependentsDeleted reports whether this update begins
// foreground deletion (deletion with the FinalizerDeleteDependents finalizer).
// this function takes newAccessor directly because the caller already
// instantiates an accessor for the newObj.
func startsWaitingForDependentsDeleted(oldObj interface{}, newAccessor metav1.Object) bool {
	return deletionStartsWithFinalizer(oldObj, newAccessor, metav1.FinalizerDeleteDependents)
}

// startsWaitingForDependentsOrphaned reports whether this update begins
// orphaning (deletion with the FinalizerOrphanDependents finalizer).
// this function takes newAccessor directly because the caller already
// instantiates an accessor for the newObj.
func startsWaitingForDependentsOrphaned(oldObj interface{}, newAccessor metav1.Object) bool {
	return deletionStartsWithFinalizer(oldObj, newAccessor, metav1.FinalizerOrphanDependents)
}
  420. // if an blocking ownerReference points to an object gets removed, or gets set to
  421. // "BlockOwnerDeletion=false", add the object to the attemptToDelete queue.
  422. func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerReference, changed []ownerRefPair) {
  423. for _, ref := range removed {
  424. if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion {
  425. node, found := gb.uidToNode.Read(ref.UID)
  426. if !found {
  427. klog.V(5).Infof("cannot find %s in uidToNode", ref.UID)
  428. continue
  429. }
  430. gb.attemptToDelete.Add(node)
  431. }
  432. }
  433. for _, c := range changed {
  434. wasBlocked := c.oldRef.BlockOwnerDeletion != nil && *c.oldRef.BlockOwnerDeletion
  435. isUnblocked := c.newRef.BlockOwnerDeletion == nil || (c.newRef.BlockOwnerDeletion != nil && !*c.newRef.BlockOwnerDeletion)
  436. if wasBlocked && isUnblocked {
  437. node, found := gb.uidToNode.Read(c.newRef.UID)
  438. if !found {
  439. klog.V(5).Infof("cannot find %s in uidToNode", c.newRef.UID)
  440. continue
  441. }
  442. gb.attemptToDelete.Add(node)
  443. }
  444. }
  445. }
  446. func (gb *GraphBuilder) processTransitions(oldObj interface{}, newAccessor metav1.Object, n *node) {
  447. if startsWaitingForDependentsOrphaned(oldObj, newAccessor) {
  448. klog.V(5).Infof("add %s to the attemptToOrphan", n.identity)
  449. gb.attemptToOrphan.Add(n)
  450. return
  451. }
  452. if startsWaitingForDependentsDeleted(oldObj, newAccessor) {
  453. klog.V(2).Infof("add %s to the attemptToDelete, because it's waiting for its dependents to be deleted", n.identity)
  454. // if the n is added as a "virtual" node, its deletingDependents field is not properly set, so always set it here.
  455. n.markDeletingDependents()
  456. for dep := range n.dependents {
  457. gb.attemptToDelete.Add(dep)
  458. }
  459. gb.attemptToDelete.Add(n)
  460. }
  461. }
  462. func (gb *GraphBuilder) runProcessGraphChanges() {
  463. for gb.processGraphChanges() {
  464. }
  465. }
// Dequeueing an event from graphChanges, updating graph, populating dirty_queue.
// Returns false only when the queue has been shut down.
func (gb *GraphBuilder) processGraphChanges() bool {
	item, quit := gb.graphChanges.Get()
	if quit {
		return false
	}
	defer gb.graphChanges.Done(item)
	event, ok := item.(*event)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", item))
		return true
	}
	obj := event.obj
	accessor, err := meta.Accessor(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
		return true
	}
	klog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
	// Check if the node already exists
	existingNode, found := gb.uidToNode.Read(accessor.GetUID())
	if found {
		// this marks the node as having been observed via an informer event
		// 1. this depends on graphChanges only containing add/update events from the actual informer
		// 2. this allows things tracking virtual nodes' existence to stop polling and rely on informer events
		existingNode.markObserved()
	}
	switch {
	// add/update for an object not yet in the graph: create its node.
	case (event.eventType == addEvent || event.eventType == updateEvent) && !found:
		newNode := &node{
			identity: objectReference{
				OwnerReference: metav1.OwnerReference{
					APIVersion: event.gvk.GroupVersion().String(),
					Kind:       event.gvk.Kind,
					UID:        accessor.GetUID(),
					Name:       accessor.GetName(),
				},
				Namespace: accessor.GetNamespace(),
			},
			dependents:         make(map[*node]struct{}),
			owners:             accessor.GetOwnerReferences(),
			deletingDependents: beingDeleted(accessor) && hasDeleteDependentsFinalizer(accessor),
			beingDeleted:       beingDeleted(accessor),
		}
		gb.insertNode(newNode)
		// the underlying delta_fifo may combine a creation and a deletion into
		// one event, so we need to further process the event.
		gb.processTransitions(event.oldObj, accessor, newNode)
	// add/update for an object already in the graph: reconcile its edges.
	case (event.eventType == addEvent || event.eventType == updateEvent) && found:
		// handle changes in ownerReferences
		added, removed, changed := referencesDiffs(existingNode.owners, accessor.GetOwnerReferences())
		if len(added) != 0 || len(removed) != 0 || len(changed) != 0 {
			// check if the changed dependency graph unblock owners that are
			// waiting for the deletion of their dependents.
			gb.addUnblockedOwnersToDeleteQueue(removed, changed)
			// update the node itself
			existingNode.owners = accessor.GetOwnerReferences()
			// Add the node to its new owners' dependent lists.
			gb.addDependentToOwners(existingNode, added)
			// remove the node from the dependent list of node that are no longer in
			// the node's owners list.
			gb.removeDependentFromOwners(existingNode, removed)
		}
		if beingDeleted(accessor) {
			existingNode.markBeingDeleted()
		}
		gb.processTransitions(event.oldObj, accessor, existingNode)
	case event.eventType == deleteEvent:
		if !found {
			klog.V(5).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID())
			return true
		}
		// removeNode updates the graph
		gb.removeNode(existingNode)
		existingNode.dependentsLock.RLock()
		defer existingNode.dependentsLock.RUnlock()
		if len(existingNode.dependents) > 0 {
			// remember that this owner is gone, so dependents being processed
			// later can skip the API-server existence check.
			gb.absentOwnerCache.Add(accessor.GetUID())
		}
		// re-examine each dependent now that its owner is gone.
		for dep := range existingNode.dependents {
			gb.attemptToDelete.Add(dep)
		}
		for _, owner := range existingNode.owners {
			ownerNode, found := gb.uidToNode.Read(owner.UID)
			if !found || !ownerNode.isDeletingDependents() {
				continue
			}
			// this is to let attemptToDeleteItem check if all the owner's
			// dependents are deleted, if so, the owner will be deleted.
			gb.attemptToDelete.Add(ownerNode)
		}
	}
	return true
}