garbagecollector.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
    "fmt"
    "reflect"
    "sync"
    "time"

    "k8s.io/klog"

    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/kubernetes/pkg/controller"

    // import known versions
    _ "k8s.io/client-go/kubernetes"
)

const ResourceResyncTime time.Duration = 0
// GarbageCollector runs reflectors to watch for changes of managed API
// objects, and funnels the results to a single-threaded dependencyGraphBuilder,
// which builds a graph caching the dependencies among objects. Triggered by the
// graph changes, the dependencyGraphBuilder enqueues objects that can
// potentially be garbage-collected to the `attemptToDelete` queue, and enqueues
// objects whose dependents need to be orphaned to the `attemptToOrphan` queue.
// The GarbageCollector has workers that consume these two queues and send
// requests to the API server to delete/update the objects accordingly.
// Note that having the dependencyGraphBuilder notify the garbage collector
// ensures that the garbage collector operates with a graph that is at least as
// up to date as the time the notification was sent.
type GarbageCollector struct {
    restMapper    resettableRESTMapper
    dynamicClient dynamic.Interface
    // garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe.
    attemptToDelete workqueue.RateLimitingInterface
    // garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items.
    attemptToOrphan        workqueue.RateLimitingInterface
    dependencyGraphBuilder *GraphBuilder
    // GC caches the owners that do not exist according to the API server.
    absentOwnerCache *UIDCache
    sharedInformers  controller.InformerFactory

    workerLock sync.RWMutex
}
func NewGarbageCollector(
    dynamicClient dynamic.Interface,
    mapper resettableRESTMapper,
    deletableResources map[schema.GroupVersionResource]struct{},
    ignoredResources map[schema.GroupResource]struct{},
    sharedInformers controller.InformerFactory,
    informersStarted <-chan struct{},
) (*GarbageCollector, error) {
    attemptToDelete := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete")
    attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan")
    absentOwnerCache := NewUIDCache(500)
    gc := &GarbageCollector{
        dynamicClient:    dynamicClient,
        restMapper:       mapper,
        attemptToDelete:  attemptToDelete,
        attemptToOrphan:  attemptToOrphan,
        absentOwnerCache: absentOwnerCache,
    }
    gb := &GraphBuilder{
        informersStarted: informersStarted,
        restMapper:       mapper,
        graphChanges:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
        uidToNode: &concurrentUIDToNode{
            uidToNode: make(map[types.UID]*node),
        },
        attemptToDelete:  attemptToDelete,
        attemptToOrphan:  attemptToOrphan,
        absentOwnerCache: absentOwnerCache,
        sharedInformers:  sharedInformers,
        ignoredResources: ignoredResources,
    }
    if err := gb.syncMonitors(deletableResources); err != nil {
        utilruntime.HandleError(fmt.Errorf("failed to sync all monitors: %v", err))
    }
    gc.dependencyGraphBuilder = gb

    return gc, nil
}
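
// A minimal wiring sketch (illustrative only, not taken from this package):
// the dynamic client, REST mapper, informer factory, resource maps, channels,
// and worker count are assumed to be supplied by the caller, as
// kube-controller-manager does when it wires up this controller.
//
//	gc, err := NewGarbageCollector(dynamicClient, restMapper, deletableResources, ignoredResources, sharedInformers, informersStarted)
//	if err != nil {
//		// handle construction error
//	}
//	go gc.Run(workers, stopCh)
//	go gc.Sync(discoveryClient, 30*time.Second, stopCh)
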
// resyncMonitors starts or stops resource monitors as needed to ensure that all
// (and only) those resources present in the map are monitored.
func (gc *GarbageCollector) resyncMonitors(deletableResources map[schema.GroupVersionResource]struct{}) error {
    if err := gc.dependencyGraphBuilder.syncMonitors(deletableResources); err != nil {
        return err
    }
    gc.dependencyGraphBuilder.startMonitors()
    return nil
}

func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer gc.attemptToDelete.ShutDown()
    defer gc.attemptToOrphan.ShutDown()
    defer gc.dependencyGraphBuilder.graphChanges.ShutDown()

    klog.Infof("Starting garbage collector controller")
    defer klog.Infof("Shutting down garbage collector controller")

    go gc.dependencyGraphBuilder.Run(stopCh)

    if !controller.WaitForCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.IsSynced) {
        return
    }

    klog.Infof("Garbage collector: all resource monitors have synced. Proceeding to collect garbage")

    // gc workers
    for i := 0; i < workers; i++ {
        go wait.Until(gc.runAttemptToDeleteWorker, 1*time.Second, stopCh)
        go wait.Until(gc.runAttemptToOrphanWorker, 1*time.Second, stopCh)
    }

    <-stopCh
}
// resettableRESTMapper is a RESTMapper which is capable of resetting itself
// from discovery.
type resettableRESTMapper interface {
    meta.RESTMapper
    Reset()
}
// Sync periodically resyncs the garbage collector when new resources are
// observed from discovery. When new resources are detected, Sync will stop all
// GC workers, reset gc.restMapper, and resync the monitors.
//
// Note that discoveryClient should NOT be shared with gc.restMapper, otherwise
// the mapper's underlying discovery client will be unnecessarily reset during
// the course of detecting new resources.
func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterface, period time.Duration, stopCh <-chan struct{}) {
    oldResources := make(map[schema.GroupVersionResource]struct{})
    wait.Until(func() {
        // Get the current resource list from discovery.
        newResources := GetDeletableResources(discoveryClient)

        // This can occur if there is an internal error in GetDeletableResources.
        if len(newResources) == 0 {
            klog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
            return
        }

        // Decide whether discovery has reported a change.
        if reflect.DeepEqual(oldResources, newResources) {
            klog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
            return
        }

        // Ensure workers are paused to avoid processing events before informers
        // have resynced.
        gc.workerLock.Lock()
        defer gc.workerLock.Unlock()

        // Once we get here, we should not unpause workers until we've successfully synced
        attempt := 0
        wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
            attempt++

            // On a reattempt, check if available resources have changed
            if attempt > 1 {
                newResources = GetDeletableResources(discoveryClient)
                if len(newResources) == 0 {
                    klog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
                    return false, nil
                }
            }

            klog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))

            // Resetting the REST mapper will also invalidate the underlying discovery
            // client. This is a leaky abstraction and assumes behavior about the REST
            // mapper, but we'll deal with it for now.
            gc.restMapper.Reset()
            klog.V(4).Infof("reset restmapper")

            // Perform the monitor resync and wait for controllers to report cache sync.
            //
            // NOTE: It's possible that newResources will diverge from the resources
            // discovered by restMapper during the call to Reset, since they are
            // distinct discovery clients invalidated at different times. For example,
            // newResources may contain resources not returned in the restMapper's
            // discovery call if the resources appeared in-between the calls. In that
            // case, the restMapper will fail to map some of newResources until the next
            // attempt.
            if err := gc.resyncMonitors(newResources); err != nil {
                utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors (attempt %d): %v", attempt, err))
                return false, nil
            }
            klog.V(4).Infof("resynced monitors")

            // wait for caches to fill for a while (our sync period) before attempting to rediscover resources and retry syncing.
            // this protects us from deadlocks where available resources changed and one of our informer caches will never fill.
            // informers keep attempting to sync in the background, so retrying doesn't interrupt them.
            // the call to resyncMonitors on the reattempt will no-op for resources that still exist.
            // note that workers stay paused until we successfully resync.
            if !controller.WaitForCacheSync("garbage collector", waitForStopOrTimeout(stopCh, period), gc.dependencyGraphBuilder.IsSynced) {
                utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync (attempt %d)", attempt))
                return false, nil
            }

            // success, break out of the loop
            return true, nil
        }, stopCh)

        // Finally, keep track of our new state. Do this after all preceding steps
        // have succeeded to ensure we'll retry on subsequent syncs if an error
        // occurred.
        oldResources = newResources
        klog.V(2).Infof("synced garbage collector")
    }, period, stopCh)
}
// printDiff returns a human-readable summary of what resources were added and removed
func printDiff(oldResources, newResources map[schema.GroupVersionResource]struct{}) string {
    removed := sets.NewString()
    for oldResource := range oldResources {
        if _, ok := newResources[oldResource]; !ok {
            removed.Insert(fmt.Sprintf("%+v", oldResource))
        }
    }
    added := sets.NewString()
    for newResource := range newResources {
        if _, ok := oldResources[newResource]; !ok {
            added.Insert(fmt.Sprintf("%+v", newResource))
        }
    }
    return fmt.Sprintf("added: %v, removed: %v", added.List(), removed.List())
}
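
// For illustration only (an assumed rendering based on the %+v and %v verbs
// used above, not a captured log line), a diff might look like:
//
//	added: [{Group:apps Version:v1 Resource:deployments}], removed: []
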
// waitForStopOrTimeout returns a stop channel that closes when the provided stop channel closes or when the specified timeout is reached
func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan struct{} {
    stopChWithTimeout := make(chan struct{})
    go func() {
        select {
        case <-stopCh:
        case <-time.After(timeout):
        }
        close(stopChWithTimeout)
    }()
    return stopChWithTimeout
}
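
// Usage within this file (see Sync above): the cache-sync wait is bounded to
// one sync period by passing the derived channel instead of the raw stopCh:
//
//	controller.WaitForCacheSync("garbage collector", waitForStopOrTimeout(stopCh, period), gc.dependencyGraphBuilder.IsSynced)
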
func (gc *GarbageCollector) IsSynced() bool {
    return gc.dependencyGraphBuilder.IsSynced()
}

func (gc *GarbageCollector) runAttemptToDeleteWorker() {
    for gc.attemptToDeleteWorker() {
    }
}

func (gc *GarbageCollector) attemptToDeleteWorker() bool {
    item, quit := gc.attemptToDelete.Get()
    gc.workerLock.RLock()
    defer gc.workerLock.RUnlock()
    if quit {
        return false
    }
    defer gc.attemptToDelete.Done(item)
    n, ok := item.(*node)
    if !ok {
        utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", item))
        return true
    }
    err := gc.attemptToDeleteItem(n)
    if err != nil {
        if _, ok := err.(*restMappingError); ok {
            // There are at least two ways this can happen:
            // 1. The reference is to an object of a custom type that has not yet been
            //    recognized by gc.restMapper (this is a transient error).
            // 2. The reference is to an invalid group/version. We don't currently
            //    have a way to distinguish this from a valid type we will recognize
            //    after the next discovery sync.
            // For now, record the error and retry.
            klog.V(5).Infof("error syncing item %s: %v", n, err)
        } else {
            utilruntime.HandleError(fmt.Errorf("error syncing item %s: %v", n, err))
        }
        // retry if garbage collection of an object failed.
        gc.attemptToDelete.AddRateLimited(item)
    } else if !n.isObserved() {
        // requeue if item hasn't been observed via an informer event yet.
        // otherwise a virtual node for an item added AND removed during watch reestablishment can get stuck in the graph and never removed.
        // see https://issue.k8s.io/56121
        klog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity)
        gc.attemptToDelete.AddRateLimited(item)
    }
    return true
}
// isDangling checks whether a reference is pointing to an object that doesn't
// exist. If isDangling looks up the referenced object at the API server, it
// also returns its latest state.
func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *node) (
    dangling bool, owner *unstructured.Unstructured, err error) {
    if gc.absentOwnerCache.Has(reference.UID) {
        klog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
        return true, nil, nil
    }
    // TODO: we need to verify the reference resource is supported by the
    // system. If it's not a valid resource, the garbage collector should i)
    // ignore the reference when deciding whether the object should be deleted, and
    // ii) should update the object to remove such references. This is to
    // prevent objects having references to an old resource from being
    // deleted during a cluster upgrade.
    resource, namespaced, err := gc.apiResource(reference.APIVersion, reference.Kind)
    if err != nil {
        return false, nil, err
    }
    // TODO: It's only necessary to talk to the API server if the owner node
    // is a "virtual" node. The local graph could lag behind the real
    // status, but in practice, the difference is small.
    owner, err = gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(reference.Name, metav1.GetOptions{})
    switch {
    case errors.IsNotFound(err):
        gc.absentOwnerCache.Add(reference.UID)
        klog.V(5).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
        return true, nil, nil
    case err != nil:
        return false, nil, err
    }

    if owner.GetUID() != reference.UID {
        klog.V(5).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
        gc.absentOwnerCache.Add(reference.UID)
        return true, nil, nil
    }
    return false, owner, nil
}
// classifyReferences classifies the latestReferences into three categories:
// solid: the owner exists, and is not "waitingForDependentsDeletion"
// dangling: the owner does not exist
// waitingForDependentsDeletion: the owner exists, its deletionTimestamp is non-nil, and it has
// FinalizerDeletingDependents
// This function communicates with the server.
func (gc *GarbageCollector) classifyReferences(item *node, latestReferences []metav1.OwnerReference) (
    solid, dangling, waitingForDependentsDeletion []metav1.OwnerReference, err error) {
    for _, reference := range latestReferences {
        isDangling, owner, err := gc.isDangling(reference, item)
        if err != nil {
            return nil, nil, nil, err
        }
        if isDangling {
            dangling = append(dangling, reference)
            continue
        }

        ownerAccessor, err := meta.Accessor(owner)
        if err != nil {
            return nil, nil, nil, err
        }
        if ownerAccessor.GetDeletionTimestamp() != nil && hasDeleteDependentsFinalizer(ownerAccessor) {
            waitingForDependentsDeletion = append(waitingForDependentsDeletion, reference)
        } else {
            solid = append(solid, reference)
        }
    }
    return solid, dangling, waitingForDependentsDeletion, nil
}

func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
    var ret []types.UID
    for _, ref := range refs {
        ret = append(ret, ref.UID)
    }
    return ret
}
func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
    klog.V(2).Infof("processing item %s", item.identity)
    // "being deleted" is a one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents.
    if item.isBeingDeleted() && !item.isDeletingDependents() {
        klog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
        return nil
    }
    // TODO: It's only necessary to talk to the API server if this is a
    // "virtual" node. The local graph could lag behind the real status, but in
    // practice, the difference is small.
    latest, err := gc.getObject(item.identity)
    switch {
    case errors.IsNotFound(err):
        // the GraphBuilder can add "virtual" node for an owner that doesn't
        // exist yet, so we need to enqueue a virtual Delete event to remove
        // the virtual node from GraphBuilder.uidToNode.
        klog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity)
        gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
        // since we're manually inserting a delete event to remove this node,
        // we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
        item.markObserved()
        return nil
    case err != nil:
        return err
    }

    if latest.GetUID() != item.identity.UID {
        klog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
        gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
        // since we're manually inserting a delete event to remove this node,
        // we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
        item.markObserved()
        return nil
    }

    // TODO: attemptToOrphanWorker() routine is similar. Consider merging
    // attemptToOrphanWorker() into attemptToDeleteItem() as well.
    if item.isDeletingDependents() {
        return gc.processDeletingDependentsItem(item)
    }

    // compute if we should delete the item
    ownerReferences := latest.GetOwnerReferences()
    if len(ownerReferences) == 0 {
        klog.V(2).Infof("object %s doesn't have an owner, continue on next item", item.identity)
        return nil
    }

    solid, dangling, waitingForDependentsDeletion, err := gc.classifyReferences(item, ownerReferences)
    if err != nil {
        return err
    }
    klog.V(5).Infof("classify references of %s.\nsolid: %#v\ndangling: %#v\nwaitingForDependentsDeletion: %#v\n", item.identity, solid, dangling, waitingForDependentsDeletion)

    switch {
    case len(solid) != 0:
        klog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", item.identity, solid)
        if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
            return nil
        }
        klog.V(2).Infof("remove dangling references %#v and waiting references %#v for object %s", dangling, waitingForDependentsDeletion, item.identity)
        // waitingForDependentsDeletion needs to be deleted from the
        // ownerReferences, otherwise the referenced objects will be stuck with
        // the FinalizerDeletingDependents and never get deleted.
        ownerUIDs := append(ownerRefsToUIDs(dangling), ownerRefsToUIDs(waitingForDependentsDeletion)...)
        patch := deleteOwnerRefStrategicMergePatch(item.identity.UID, ownerUIDs...)
        _, err = gc.patch(item, patch, func(n *node) ([]byte, error) {
            return gc.deleteOwnerRefJSONMergePatch(n, ownerUIDs...)
        })
        return err
    case len(waitingForDependentsDeletion) != 0 && item.dependentsLength() != 0:
        deps := item.getDependents()
        for _, dep := range deps {
            if dep.isDeletingDependents() {
                // this cycle detection has false positives; we need to
                // apply a more rigorous detection if this turns out to be a
                // problem.
                // since multiple workers run attemptToDeleteItem in
                // parallel, the cycle detection can fail in a race condition.
                klog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
                patch, err := item.unblockOwnerReferencesStrategicMergePatch()
                if err != nil {
                    return err
                }
                if _, err := gc.patch(item, patch, gc.unblockOwnerReferencesJSONMergePatch); err != nil {
                    return err
                }
                break
            }
        }
        klog.V(2).Infof("at least one owner of object %s has FinalizerDeletingDependents, and the object itself has dependents, so it is going to be deleted in Foreground", item.identity)
        // the deletion event will be observed by the graphBuilder, so the item
        // will be processed again in processDeletingDependentsItem. If it
        // doesn't have dependents, the function will remove the
        // FinalizerDeletingDependents from the item, resulting in the final
        // deletion of the item.
        policy := metav1.DeletePropagationForeground
        return gc.deleteObject(item.identity, &policy)
    default:
        // item doesn't have any solid owner, so it needs to be garbage
        // collected. Also, none of item's owners is waiting for the deletion of
        // the dependents, so set propagationPolicy based on existing finalizers.
        var policy metav1.DeletionPropagation
        switch {
        case hasOrphanFinalizer(latest):
            // if an existing orphan finalizer is already on the object, honor it.
            policy = metav1.DeletePropagationOrphan
        case hasDeleteDependentsFinalizer(latest):
            // if an existing foreground finalizer is already on the object, honor it.
            policy = metav1.DeletePropagationForeground
        default:
            // otherwise, default to background.
            policy = metav1.DeletePropagationBackground
        }
        klog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
        return gc.deleteObject(item.identity, &policy)
    }
}
// process item that's waiting for its dependents to be deleted
func (gc *GarbageCollector) processDeletingDependentsItem(item *node) error {
    blockingDependents := item.blockingDependents()
    if len(blockingDependents) == 0 {
        klog.V(2).Infof("remove DeleteDependents finalizer for item %s", item.identity)
        return gc.removeFinalizer(item, metav1.FinalizerDeleteDependents)
    }
    for _, dep := range blockingDependents {
        if !dep.isDeletingDependents() {
            klog.V(2).Infof("adding %s to attemptToDelete, because its owner %s is deletingDependents", dep.identity, item.identity)
            gc.attemptToDelete.Add(dep)
        }
    }
    return nil
}
// dependents are copies of pointers to the owner's dependents, so they don't need to be locked.
func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents []*node) error {
    errCh := make(chan error, len(dependents))
    wg := sync.WaitGroup{}
    wg.Add(len(dependents))
    for i := range dependents {
        go func(dependent *node) {
            defer wg.Done()
            // the dependent.identity.UID is used as a precondition
            patch := deleteOwnerRefStrategicMergePatch(dependent.identity.UID, owner.UID)
            _, err := gc.patch(dependent, patch, func(n *node) ([]byte, error) {
                return gc.deleteOwnerRefJSONMergePatch(n, owner.UID)
            })
            // note that if the target ownerReference doesn't exist in the
            // dependent, strategic merge patch will NOT return an error.
            if err != nil && !errors.IsNotFound(err) {
                errCh <- fmt.Errorf("orphaning %s failed, %v", dependent.identity, err)
            }
        }(dependents[i])
    }
    wg.Wait()
    close(errCh)

    var errorsSlice []error
    for e := range errCh {
        errorsSlice = append(errorsSlice, e)
    }

    if len(errorsSlice) != 0 {
        return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
    }
    klog.V(5).Infof("successfully updated all dependents of owner %s", owner)
    return nil
}
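
// For orientation: deleteOwnerRefStrategicMergePatch and
// deleteOwnerRefJSONMergePatch are defined elsewhere in this package. The
// strategic-merge variant is expected to produce a payload roughly of the
// following shape (an illustrative assumption, not a verbatim copy), with the
// dependent's UID used as a precondition and the owner UID marked for removal:
//
//	{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"<owner-uid>"}],"uid":"<dependent-uid>"}}
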
func (gc *GarbageCollector) runAttemptToOrphanWorker() {
    for gc.attemptToOrphanWorker() {
    }
}

// attemptToOrphanWorker dequeues a node from the attemptToOrphan queue, then finds its
// dependents based on the graph maintained by the GC, then removes it from the
// OwnerReferences of its dependents, and finally updates the owner to remove
// the "Orphan" finalizer. The node is added back into the attemptToOrphan queue if any of
// these steps fail.
func (gc *GarbageCollector) attemptToOrphanWorker() bool {
    item, quit := gc.attemptToOrphan.Get()
    gc.workerLock.RLock()
    defer gc.workerLock.RUnlock()
    if quit {
        return false
    }
    defer gc.attemptToOrphan.Done(item)
    owner, ok := item.(*node)
    if !ok {
        utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", item))
        return true
    }
    // we don't need to lock each element, because they never get updated
    owner.dependentsLock.RLock()
    dependents := make([]*node, 0, len(owner.dependents))
    for dependent := range owner.dependents {
        dependents = append(dependents, dependent)
    }
    owner.dependentsLock.RUnlock()

    err := gc.orphanDependents(owner.identity, dependents)
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("orphanDependents for %s failed with %v", owner.identity, err))
        gc.attemptToOrphan.AddRateLimited(item)
        return true
    }
    // update the owner, remove the "orphaningFinalizer" from its finalizers list
    err = gc.removeFinalizer(owner, metav1.FinalizerOrphanDependents)
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("removeOrphanFinalizer for %s failed with %v", owner.identity, err))
        gc.attemptToOrphan.AddRateLimited(item)
    }
    return true
}
// *FOR TEST USE ONLY*
// GraphHasUID returns whether the GraphBuilder has a particular UID stored in its
// uidToNode graph. It's useful for debugging.
// This method is used by integration tests.
func (gc *GarbageCollector) GraphHasUID(u types.UID) bool {
    _, ok := gc.dependencyGraphBuilder.uidToNode.Read(u)
    return ok
}
// GetDeletableResources returns all resources from discoveryClient that the
// garbage collector should recognize and work with. More specifically, all
// preferred resources which support the 'delete', 'list', and 'watch' verbs.
//
// All discovery errors are considered temporary. Upon encountering any error,
// GetDeletableResources will log and return any discovered resources it was
// able to process (which may be none).
func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) map[schema.GroupVersionResource]struct{} {
    preferredResources, err := discoveryClient.ServerPreferredResources()
    if err != nil {
        if discovery.IsGroupDiscoveryFailedError(err) {
            klog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
        } else {
            klog.Warningf("failed to discover preferred resources: %v", err)
        }
    }
    if preferredResources == nil {
        return map[schema.GroupVersionResource]struct{}{}
    }

    // This is extracted from discovery.GroupVersionResources to allow tolerating
    // failures on a per-resource basis.
    deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete", "list", "watch"}}, preferredResources)
    deletableGroupVersionResources := map[schema.GroupVersionResource]struct{}{}
    for _, rl := range deletableResources {
        gv, err := schema.ParseGroupVersion(rl.GroupVersion)
        if err != nil {
            klog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
            continue
        }
        for i := range rl.APIResources {
            deletableGroupVersionResources[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{}
        }
    }

    return deletableGroupVersionResources
}
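
// A standalone usage sketch (illustrative only; the discovery client here is an
// assumption, e.g. obtained from a client-go Clientset via its Discovery()
// method):
//
//	deletable := GetDeletableResources(clientset.Discovery())
//	for gvr := range deletable {
//		fmt.Println(gvr.String())
//	}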