- /*
- Copyright 2016 The Kubernetes Authors.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- */
- // ### ATTENTION ###
- //
- // This code implements both ReplicaSet and ReplicationController.
- //
- // For RC, the objects are converted on the way in and out (see ../replication/),
- // as if ReplicationController were just an older API version of ReplicaSet.
- // However, RC and RS still have separate storage and separate instantiations
- // of the ReplicaSetController object.
- //
- // Use rsc.Kind in log messages rather than hard-coding "ReplicaSet".
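- //
- // For example, log lines in this file are written as
- //
- //   klog.V(4).Infof("%v %v updated.", rsc.Kind, rs.Name)
- //
- // so the same code reports "ReplicaSet" or "ReplicationController"
- // depending on which controller instantiated it.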
- package replicaset
- import (
- "fmt"
- "reflect"
- "sort"
- "strings"
- "sync"
- "time"
- apps "k8s.io/api/apps/v1"
- "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime/schema"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/apimachinery/pkg/util/wait"
- appsinformers "k8s.io/client-go/informers/apps/v1"
- coreinformers "k8s.io/client-go/informers/core/v1"
- clientset "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/kubernetes/scheme"
- v1core "k8s.io/client-go/kubernetes/typed/core/v1"
- appslisters "k8s.io/client-go/listers/apps/v1"
- corelisters "k8s.io/client-go/listers/core/v1"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/tools/record"
- "k8s.io/client-go/util/workqueue"
- "k8s.io/klog"
- podutil "k8s.io/kubernetes/pkg/api/v1/pod"
- "k8s.io/kubernetes/pkg/controller"
- "k8s.io/kubernetes/pkg/util/metrics"
- "k8s.io/utils/integer"
- )
- const (
- // Realistic value for the burstReplicas field of the replica set controller,
- // based on performance requirements for Kubernetes 1.0.
- BurstReplicas = 500
- // The number of times we retry updating a ReplicaSet's status.
- statusUpdateRetries = 1
- )
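- // To illustrate the burst semantics: with BurstReplicas = 500, scaling a
- // ReplicaSet from 0 to 1200 replicas proceeds in waves of at most 500
- // creations (500, 500, then 200); manageReplicas clamps each sync's
- // create/delete count to burstReplicas, and the next wave only starts once
- // the watch events for the previous one have been observed (tracked via the
- // expectations mechanism below).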
- // ReplicaSetController is responsible for synchronizing ReplicaSet objects stored
- // in the system with actual running pods.
- type ReplicaSetController struct {
- // GroupVersionKind indicates the controller type.
- // Different instances of this struct may handle different GVKs.
- // For example, this struct can be used (with adapters) to handle ReplicationController.
- schema.GroupVersionKind
- kubeClient clientset.Interface
- podControl controller.PodControlInterface
- // A ReplicaSet is temporarily suspended after creating/deleting this many replicas.
- // It resumes normal action after observing the watch events for them.
- burstReplicas int
- // To allow injection of syncReplicaSet for testing.
- syncHandler func(rsKey string) error
- // A TTLCache of pod creates/deletes that each replica set expects to see.
- expectations *controller.UIDTrackingControllerExpectations
- // A store of ReplicaSets, populated by the shared informer passed to NewReplicaSetController
- rsLister appslisters.ReplicaSetLister
- // rsListerSynced returns true if the ReplicaSet store has been synced at least once.
- // Added as a member to the struct to allow injection for testing.
- rsListerSynced cache.InformerSynced
- // A store of pods, populated by the shared informer passed to NewReplicaSetController
- podLister corelisters.PodLister
- // podListerSynced returns true if the pod store has been synced at least once.
- // Added as a member to the struct to allow injection for testing.
- podListerSynced cache.InformerSynced
- // ReplicaSets that need to be synced, keyed by namespace/name.
- queue workqueue.RateLimitingInterface
- }
- // NewReplicaSetController configures a ReplicaSet controller, wiring up an
- // event broadcaster and recorder against the given client.
- func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
- eventBroadcaster := record.NewBroadcaster()
- eventBroadcaster.StartLogging(klog.Infof)
- eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
- return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas,
- apps.SchemeGroupVersion.WithKind("ReplicaSet"),
- "replicaset_controller",
- "replicaset",
- controller.RealPodControl{
- KubeClient: kubeClient,
- Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replicaset-controller"}),
- },
- )
- }
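- // A minimal wiring sketch (hypothetical; the real wiring lives in
- // kube-controller-manager) would construct the controller from a shared
- // informer factory and run it with a worker count and stop channel:
- //
- //   factory := informers.NewSharedInformerFactory(kubeClient, 30*time.Second)
- //   rsc := replicaset.NewReplicaSetController(
- //       factory.Apps().V1().ReplicaSets(),
- //       factory.Core().V1().Pods(),
- //       kubeClient,
- //       replicaset.BurstReplicas,
- //   )
- //   factory.Start(stopCh)
- //   go rsc.Run(5, stopCh)
- //
- // The 30s resync period and 5 workers are illustrative values, not
- // recommendations from this package.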
- // NewBaseController is the implementation of NewReplicaSetController with additional injected
- // parameters so that it can also serve as the implementation of NewReplicationController.
- func NewBaseController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
- gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface) *ReplicaSetController {
- if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
- metrics.RegisterMetricAndTrackRateLimiterUsage(metricOwnerName, kubeClient.CoreV1().RESTClient().GetRateLimiter())
- }
- rsc := &ReplicaSetController{
- GroupVersionKind: gvk,
- kubeClient: kubeClient,
- podControl: podControl,
- burstReplicas: burstReplicas,
- expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
- queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), queueName),
- }
- rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
- AddFunc: rsc.enqueueReplicaSet,
- UpdateFunc: rsc.updateRS,
- // This will enter the sync loop and no-op, because the replica set has been deleted from the store.
- // Note that deleting a replica set immediately after scaling it to 0 will not work. The recommended
- // way of achieving this is by performing a `stop` operation on the replica set.
- DeleteFunc: rsc.enqueueReplicaSet,
- })
- rsc.rsLister = rsInformer.Lister()
- rsc.rsListerSynced = rsInformer.Informer().HasSynced
- podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
- AddFunc: rsc.addPod,
- // This invokes the ReplicaSet sync for every pod change, e.g. host assignment. Though this might
- // seem like overkill, the most frequent pod update is status, and the associated ReplicaSet will
- // only list from local storage, so it should be OK.
- UpdateFunc: rsc.updatePod,
- DeleteFunc: rsc.deletePod,
- })
- rsc.podLister = podInformer.Lister()
- rsc.podListerSynced = podInformer.Informer().HasSynced
- rsc.syncHandler = rsc.syncReplicaSet
- return rsc
- }
- // SetEventRecorder replaces the event recorder used by the ReplicaSetController
- // with the given recorder. Only used for testing.
- func (rsc *ReplicaSetController) SetEventRecorder(recorder record.EventRecorder) {
- // TODO: Hack. We can't cleanly shut down the event recorder, so benchmarks
- // need to pass in a fake.
- rsc.podControl = controller.RealPodControl{KubeClient: rsc.kubeClient, Recorder: recorder}
- }
- // Run begins watching and syncing.
- func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
- defer utilruntime.HandleCrash()
- defer rsc.queue.ShutDown()
- controllerName := strings.ToLower(rsc.Kind)
- klog.Infof("Starting %v controller", controllerName)
- defer klog.Infof("Shutting down %v controller", controllerName)
- if !controller.WaitForCacheSync(rsc.Kind, stopCh, rsc.podListerSynced, rsc.rsListerSynced) {
- return
- }
- for i := 0; i < workers; i++ {
- go wait.Until(rsc.worker, time.Second, stopCh)
- }
- <-stopCh
- }
- // getPodReplicaSets returns a list of ReplicaSets matching the given pod.
- func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*apps.ReplicaSet {
- rss, err := rsc.rsLister.GetPodReplicaSets(pod)
- if err != nil {
- return nil
- }
- if len(rss) > 1 {
- // ControllerRef will ensure we don't do anything crazy, but more than one
- // item in this list nevertheless constitutes user error.
- utilruntime.HandleError(fmt.Errorf("user error! more than one %v is selecting pods with labels: %+v", rsc.Kind, pod.Labels))
- }
- return rss
- }
- // resolveControllerRef returns the controller referenced by a ControllerRef,
- // or nil if the ControllerRef could not be resolved to a matching controller
- // of the correct Kind.
- func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.ReplicaSet {
- // We can't look up by UID, so look up by Name and then verify UID.
- // Don't even try to look up by Name if it's the wrong Kind.
- if controllerRef.Kind != rsc.Kind {
- return nil
- }
- rs, err := rsc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name)
- if err != nil {
- return nil
- }
- if rs.UID != controllerRef.UID {
- // The controller we found with this Name is not the same one that the
- // ControllerRef points to.
- return nil
- }
- return rs
- }
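- // For example, if a ReplicaSet named "web" is deleted and immediately
- // recreated, the new object has the same Name but a different UID; a pod
- // whose ControllerRef still carries the old UID will resolve to nil here
- // rather than being attributed to the new ReplicaSet.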
- // updateRS is the callback invoked when a ReplicaSet is updated.
- func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
- oldRS := old.(*apps.ReplicaSet)
- curRS := cur.(*apps.ReplicaSet)
- // You might imagine that we only really need to enqueue the
- // replica set when Spec changes, but it is safer to sync any
- // time this function is triggered. That way a full informer
- // resync can requeue any replica sets that don't yet have pods
- // but whose last attempts at creating a pod have failed (since
- // we don't block on creation of pods) instead of those
- // replica sets stalling indefinitely. Enqueueing every time
- // does result in some spurious syncs (like when Status.Replicas
- // is updated and the watch notification from it retriggers
- // this function), but in general extra resyncs shouldn't be
- // that bad as ReplicaSets that haven't met expectations yet won't
- // sync, and all the listing is done using local stores.
- if *(oldRS.Spec.Replicas) != *(curRS.Spec.Replicas) {
- klog.V(4).Infof("%v %v updated. Desired pod count change: %d->%d", rsc.Kind, curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas))
- }
- rsc.enqueueReplicaSet(cur)
- }
- // When a pod is created, enqueue the replica set that manages it and update its expectations.
- func (rsc *ReplicaSetController) addPod(obj interface{}) {
- pod := obj.(*v1.Pod)
- if pod.DeletionTimestamp != nil {
- // On a restart of the controller manager, it's possible for a new pod to show up in a state that
- // is already pending deletion. Prevent the pod from being counted as a creation observation.
- rsc.deletePod(pod)
- return
- }
- // If it has a ControllerRef, that's all that matters.
- if controllerRef := metav1.GetControllerOf(pod); controllerRef != nil {
- rs := rsc.resolveControllerRef(pod.Namespace, controllerRef)
- if rs == nil {
- return
- }
- rsKey, err := controller.KeyFunc(rs)
- if err != nil {
- return
- }
- klog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)
- rsc.expectations.CreationObserved(rsKey)
- rsc.enqueueReplicaSet(rs)
- return
- }
- // Otherwise, it's an orphan. Get a list of all matching ReplicaSets and sync
- // them to see if anyone wants to adopt it.
- // DO NOT observe creation because no controller should be waiting for an
- // orphan.
- rss := rsc.getPodReplicaSets(pod)
- if len(rss) == 0 {
- return
- }
- klog.V(4).Infof("Orphan Pod %s created: %#v.", pod.Name, pod)
- for _, rs := range rss {
- rsc.enqueueReplicaSet(rs)
- }
- }
- // When a pod is updated, figure out what replica set(s) manage it and wake them
- // up. If the labels of the pod have changed, we need to awaken both the old
- // and new replica sets. old and cur must be *v1.Pod types.
- func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
- curPod := cur.(*v1.Pod)
- oldPod := old.(*v1.Pod)
- if curPod.ResourceVersion == oldPod.ResourceVersion {
- // Periodic resync will send update events for all known pods.
- // Two different versions of the same pod will always have different RVs.
- return
- }
- labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
- if curPod.DeletionTimestamp != nil {
- // when a pod is deleted gracefully, its deletion timestamp is first modified to reflect a grace period,
- // and after such time has passed, the kubelet actually deletes it from the store. We receive an update
- // for modification of the deletion timestamp and expect an rs to create more replicas asap, not wait
- // until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because
- // an rs never initiates a phase change, and so is never asleep waiting for the same.
- rsc.deletePod(curPod)
- if labelChanged {
- // we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
- rsc.deletePod(oldPod)
- }
- return
- }
- curControllerRef := metav1.GetControllerOf(curPod)
- oldControllerRef := metav1.GetControllerOf(oldPod)
- controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
- if controllerRefChanged && oldControllerRef != nil {
- // The ControllerRef was changed. Sync the old controller, if any.
- if rs := rsc.resolveControllerRef(oldPod.Namespace, oldControllerRef); rs != nil {
- rsc.enqueueReplicaSet(rs)
- }
- }
- // If it has a ControllerRef, that's all that matters.
- if curControllerRef != nil {
- rs := rsc.resolveControllerRef(curPod.Namespace, curControllerRef)
- if rs == nil {
- return
- }
- klog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
- rsc.enqueueReplicaSet(rs)
- // TODO: MinReadySeconds in the Pod will generate an Available condition to be added in
- // the Pod status which in turn will trigger a requeue of the owning replica set thus
- // having its status updated with the newly available replica. For now, we can fake the
- // update by resyncing the controller MinReadySeconds after it is requeued because
- // a Pod transitioned to Ready.
- // Note that this still suffers from #29229; we are just moving the problem one level
- // "closer" to kubelet (from the deployment to the replica set controller).
- if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 {
- klog.V(2).Infof("%v %q will be enqueued after %ds for availability check", rsc.Kind, rs.Name, rs.Spec.MinReadySeconds)
- // Add a second to avoid millisecond skew in AddAfter.
- // See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
- rsc.enqueueReplicaSetAfter(rs, (time.Duration(rs.Spec.MinReadySeconds)*time.Second)+time.Second)
- }
- return
- }
- // Otherwise, it's an orphan. If anything changed, sync matching controllers
- // to see if anyone wants to adopt it now.
- if labelChanged || controllerRefChanged {
- rss := rsc.getPodReplicaSets(curPod)
- if len(rss) == 0 {
- return
- }
- klog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
- for _, rs := range rss {
- rsc.enqueueReplicaSet(rs)
- }
- }
- }
- // When a pod is deleted, enqueue the replica set that manages the pod and update its expectations.
- // obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
- func (rsc *ReplicaSetController) deletePod(obj interface{}) {
- pod, ok := obj.(*v1.Pod)
- // When a delete is dropped, the relist will notice a pod in the store not
- // in the list, leading to the insertion of a tombstone object which contains
- // the deleted key/value. Note that this value might be stale. If the pod
- // changed labels, the new ReplicaSet will not be woken up until the periodic resync.
- if !ok {
- tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
- if !ok {
- utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %+v", obj))
- return
- }
- pod, ok = tombstone.Obj.(*v1.Pod)
- if !ok {
- utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod %#v", obj))
- return
- }
- }
- controllerRef := metav1.GetControllerOf(pod)
- if controllerRef == nil {
- // No controller should care about orphans being deleted.
- return
- }
- rs := rsc.resolveControllerRef(pod.Namespace, controllerRef)
- if rs == nil {
- return
- }
- rsKey, err := controller.KeyFunc(rs)
- if err != nil {
- return
- }
- klog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod)
- rsc.expectations.DeletionObserved(rsKey, controller.PodKey(pod))
- rsc.enqueueReplicaSet(rs)
- }
- // obj could be an *apps.ReplicaSet, or a DeletionFinalStateUnknown marker item.
- func (rsc *ReplicaSetController) enqueueReplicaSet(obj interface{}) {
- key, err := controller.KeyFunc(obj)
- if err != nil {
- utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
- return
- }
- rsc.queue.Add(key)
- }
- // obj could be an *apps.ReplicaSet, or a DeletionFinalStateUnknown marker item.
- func (rsc *ReplicaSetController) enqueueReplicaSetAfter(obj interface{}, after time.Duration) {
- key, err := controller.KeyFunc(obj)
- if err != nil {
- utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
- return
- }
- rsc.queue.AddAfter(key, after)
- }
- // worker runs a worker thread that just dequeues items, processes them, and marks them done.
- // It enforces that the syncHandler is never invoked concurrently with the same key.
- func (rsc *ReplicaSetController) worker() {
- for rsc.processNextWorkItem() {
- }
- }
- func (rsc *ReplicaSetController) processNextWorkItem() bool {
- key, quit := rsc.queue.Get()
- if quit {
- return false
- }
- defer rsc.queue.Done(key)
- err := rsc.syncHandler(key.(string))
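- // Standard workqueue retry pattern: a successful sync clears the key's
- // rate-limit history via Forget, while a failed sync is requeued with
- // backoff via AddRateLimited (the default controller rate limiter), so a
- // persistently failing ReplicaSet does not hotloop.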
- if err == nil {
- rsc.queue.Forget(key)
- return true
- }
- utilruntime.HandleError(fmt.Errorf("Sync %q failed with %v", key, err))
- rsc.queue.AddRateLimited(key)
- return true
- }
- // manageReplicas checks and updates replicas for the given ReplicaSet.
- // Does NOT modify <filteredPods>.
- // It will requeue the replica set in case of an error while creating/deleting pods.
- func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps.ReplicaSet) error {
- diff := len(filteredPods) - int(*(rs.Spec.Replicas))
- rsKey, err := controller.KeyFunc(rs)
- if err != nil {
- utilruntime.HandleError(fmt.Errorf("Couldn't get key for %v %#v: %v", rsc.Kind, rs, err))
- return nil
- }
- if diff < 0 {
- diff *= -1
- if diff > rsc.burstReplicas {
- diff = rsc.burstReplicas
- }
- // TODO: Track UIDs of creates just like deletes. The problem currently
- // is we'd need to wait on the result of a create to record the pod's
- // UID, which would require locking *across* the create, which will turn
- // into a performance bottleneck. We should generate a UID for the pod
- // beforehand and store it via ExpectCreations.
- rsc.expectations.ExpectCreations(rsKey, diff)
- klog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
- // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
- // and double with each successful iteration in a kind of "slow start".
- // This handles attempts to start large numbers of pods that would
- // likely all fail with the same error. For example a project with a
- // low quota that attempts to create a large number of pods will be
- // prevented from spamming the API service with the pod create requests
- // after one of its pods fails. Conveniently, this also prevents the
- // event spam that those failures would generate.
- successfulCreations, err := slowStartBatch(diff, controller.SlowStartInitialBatchSize, func() error {
- err := rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, metav1.NewControllerRef(rs, rsc.GroupVersionKind))
- if err != nil && errors.IsTimeout(err) {
- // Pod is created but its initialization has timed out.
- // If the initialization is successful eventually, the
- // controller will observe the creation via the informer.
- // If the initialization fails, or if the pod keeps
- // uninitialized for a long time, the informer will not
- // receive any update, and the controller will create a new
- // pod when the expectation expires.
- return nil
- }
- return err
- })
- // Any skipped pods that we never attempted to start shouldn't be expected.
- // The skipped pods will be retried later. The next controller resync will
- // retry the slow start process.
- if skippedPods := diff - successfulCreations; skippedPods > 0 {
- klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for %v %v/%v", skippedPods, rsc.Kind, rs.Namespace, rs.Name)
- for i := 0; i < skippedPods; i++ {
- // Decrement the expected number of creates because the informer won't observe this pod
- rsc.expectations.CreationObserved(rsKey)
- }
- }
- return err
- } else if diff > 0 {
- if diff > rsc.burstReplicas {
- diff = rsc.burstReplicas
- }
- klog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
- // Choose which Pods to delete, preferring those in earlier phases of startup.
- podsToDelete := getPodsToDelete(filteredPods, diff)
- // Snapshot the UIDs (ns/name) of the pods we're expecting to see
- // deleted, so we know to record their expectations exactly once either
- // when we see it as an update of the deletion timestamp, or as a delete.
- // Note that if the labels on a pod/rs change in a way that the pod gets
- // orphaned, the rs will only wake up after the expectations have
- // expired even if other pods are deleted.
- rsc.expectations.ExpectDeletions(rsKey, getPodKeys(podsToDelete))
- errCh := make(chan error, diff)
- var wg sync.WaitGroup
- wg.Add(diff)
- for _, pod := range podsToDelete {
- go func(targetPod *v1.Pod) {
- defer wg.Done()
- if err := rsc.podControl.DeletePod(rs.Namespace, targetPod.Name, rs); err != nil {
- // Decrement the expected number of deletes because the informer won't observe this deletion
- podKey := controller.PodKey(targetPod)
- klog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name)
- rsc.expectations.DeletionObserved(rsKey, podKey)
- errCh <- err
- }
- }(pod)
- }
- wg.Wait()
- select {
- case err := <-errCh:
- // all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit.
- if err != nil {
- return err
- }
- default:
- }
- }
- return nil
- }
- // syncReplicaSet will sync the ReplicaSet with the given key if it has had its expectations fulfilled,
- // meaning it did not expect to see any more of its pods created or deleted. This function is not meant to be
- // invoked concurrently with the same key.
- func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
- startTime := time.Now()
- defer func() {
- klog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime))
- }()
- namespace, name, err := cache.SplitMetaNamespaceKey(key)
- if err != nil {
- return err
- }
- rs, err := rsc.rsLister.ReplicaSets(namespace).Get(name)
- if errors.IsNotFound(err) {
- klog.V(4).Infof("%v %v has been deleted", rsc.Kind, key)
- rsc.expectations.DeleteExpectations(key)
- return nil
- }
- if err != nil {
- return err
- }
- rsNeedsSync := rsc.expectations.SatisfiedExpectations(key)
- selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
- if err != nil {
- utilruntime.HandleError(fmt.Errorf("Error converting pod selector to selector: %v", err))
- return nil
- }
- // List all pods to include the pods that don't match the rs's selector
- // anymore but have a stale controller ref.
- // TODO: Do the List and Filter in a single pass, or use an index.
- allPods, err := rsc.podLister.Pods(rs.Namespace).List(labels.Everything())
- if err != nil {
- return err
- }
- // Ignore inactive pods.
- filteredPods := controller.FilterActivePods(allPods)
- // NOTE: filteredPods are pointing to objects from cache - if you need to
- // modify them, you need to copy them first.
- filteredPods, err = rsc.claimPods(rs, selector, filteredPods)
- if err != nil {
- return err
- }
- var manageReplicasErr error
- if rsNeedsSync && rs.DeletionTimestamp == nil {
- manageReplicasErr = rsc.manageReplicas(filteredPods, rs)
- }
- rs = rs.DeepCopy()
- newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)
- // Always updates status as pods come up or die.
- updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace), rs, newStatus)
- if err != nil {
- // Multiple things could lead to this update failing. Returning an error
- // causes a requeue without forcing a hotloop.
- return err
- }
- // Resync the ReplicaSet after MinReadySeconds as a last line of defense to guard against clock-skew.
- if manageReplicasErr == nil && updatedRS.Spec.MinReadySeconds > 0 &&
- updatedRS.Status.ReadyReplicas == *(updatedRS.Spec.Replicas) &&
- updatedRS.Status.AvailableReplicas != *(updatedRS.Spec.Replicas) {
- rsc.enqueueReplicaSetAfter(updatedRS, time.Duration(updatedRS.Spec.MinReadySeconds)*time.Second)
- }
- return manageReplicasErr
- }
- func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
- // If any adoptions are attempted, we should first recheck for deletion with
- // an uncached quorum read sometime after listing Pods (see #42639).
- canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
- fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
- if err != nil {
- return nil, err
- }
- if fresh.UID != rs.UID {
- return nil, fmt.Errorf("original %v %v/%v is gone: got uid %v, wanted %v", rsc.Kind, rs.Namespace, rs.Name, fresh.UID, rs.UID)
- }
- return fresh, nil
- })
- cm := controller.NewPodControllerRefManager(rsc.podControl, rs, selector, rsc.GroupVersionKind, canAdoptFunc)
- return cm.ClaimPods(filteredPods)
- }
- // slowStartBatch tries to call the provided function a total of 'count' times,
- // starting slow to check for errors, then speeding up if calls succeed.
- //
- // It groups the calls into batches, starting with a group of initialBatchSize.
- // Within each batch, it may call the function multiple times concurrently.
- //
- // If a whole batch succeeds, the next batch may get exponentially larger.
- // If there are any failures in a batch, all remaining batches are skipped
- // after waiting for the current batch to complete.
- //
- // It returns the number of successful calls to the function.
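- //
- // For example, with count=24, initialBatchSize=1, and no failures, the
- // batch sizes are 1, 2, 4, 8, and finally 9 (capped by the remaining
- // count), for a total of 24 calls.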
- func slowStartBatch(count int, initialBatchSize int, fn func() error) (int, error) {
- remaining := count
- successes := 0
- for batchSize := integer.IntMin(remaining, initialBatchSize); batchSize > 0; batchSize = integer.IntMin(2*batchSize, remaining) {
- errCh := make(chan error, batchSize)
- var wg sync.WaitGroup
- wg.Add(batchSize)
- for i := 0; i < batchSize; i++ {
- go func() {
- defer wg.Done()
- if err := fn(); err != nil {
- errCh <- err
- }
- }()
- }
- wg.Wait()
- curSuccesses := batchSize - len(errCh)
- successes += curSuccesses
- if len(errCh) > 0 {
- return successes, <-errCh
- }
- remaining -= batchSize
- }
- return successes, nil
- }
- func getPodsToDelete(filteredPods []*v1.Pod, diff int) []*v1.Pod {
- // No need to sort pods if we are about to delete all of them.
- // diff will always be <= len(filteredPods), so there is no need to handle the > case.
- if diff < len(filteredPods) {
- // Sort the pods in the order such that not-ready < ready, unscheduled
- // < scheduled, and pending < running. This ensures that we delete pods
- // in the earlier stages whenever possible.
- sort.Sort(controller.ActivePods(filteredPods))
- }
- return filteredPods[:diff]
- }
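- // For example, given five candidate pods of which two are Pending and three
- // are Running and ready, a diff of 2 selects the two Pending pods, since
- // ActivePods sorts pods in earlier lifecycle stages first.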
- func getPodKeys(pods []*v1.Pod) []string {
- podKeys := make([]string, 0, len(pods))
- for _, pod := range pods {
- podKeys = append(podKeys, controller.PodKey(pod))
- }
- return podKeys
- }