/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package deployment

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"strconv"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/controller"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	labelsutil "k8s.io/kubernetes/pkg/util/labels"
)

// syncStatusOnly only updates the Deployment's Status and doesn't take any mutating actions.
func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
	if err != nil {
		return err
	}

	allRSs := append(oldRSs, newRS)
	return dc.syncDeploymentStatus(allRSs, newRS, d)
}

// sync is responsible for reconciling deployments on scaling events or when they
// are paused.
func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
	if err != nil {
		return err
	}
	if err := dc.scale(d, newRS, oldRSs); err != nil {
		// If we get an error while trying to scale, the deployment will be requeued
		// so we can abort this resync.
		return err
	}

	// Clean up the deployment when it's paused and no rollback is in flight.
	if d.Spec.Paused && getRollbackTo(d) == nil {
		if err := dc.cleanupDeployment(oldRSs, d); err != nil {
			return err
		}
	}

	allRSs := append(oldRSs, newRS)
	return dc.syncDeploymentStatus(allRSs, newRS, d)
}

// checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition.
// These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
// that were paused for longer than progressDeadlineSeconds.
func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
	if !deploymentutil.HasProgressDeadline(d) {
		return nil
	}
	cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
	if cond != nil && cond.Reason == deploymentutil.TimedOutReason {
		// If we have reported lack of progress, do not overwrite it with a paused condition.
		return nil
	}
	pausedCondExists := cond != nil && cond.Reason == deploymentutil.PausedDeployReason

	needsUpdate := false
	if d.Spec.Paused && !pausedCondExists {
		condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
		deploymentutil.SetDeploymentCondition(&d.Status, *condition)
		needsUpdate = true
	} else if !d.Spec.Paused && pausedCondExists {
		condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
		deploymentutil.SetDeploymentCondition(&d.Status, *condition)
		needsUpdate = true
	}

	if !needsUpdate {
		return nil
	}

	var err error
	d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
	return err
}

// getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with new RS's and deployment's revision updated.
//
// rsList should come from getReplicaSetsForDeployment(d).
//
// 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV).
// 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1),
//    only if its revision number is smaller than (maxOldV + 1). If this step fails, we'll update it in the next deployment sync loop.
// 3. Copy new RS's revision number to deployment (update deployment's revision). If this step fails, we'll update it in the next deployment sync loop.
//
// Note that currently the deployment controller is using caches to avoid querying the server for reads.
// This may lead to stale reads of replica sets, thus incorrect deployment status.
func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
	_, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)

	// Get new replica set with the updated revision number
	newRS, err := dc.getNewReplicaSet(d, rsList, allOldRSs, createIfNotExisted)
	if err != nil {
		return nil, nil, err
	}

	return newRS, allOldRSs, nil
}

const (
	// limit revision history length to 100 elements (~2000 chars)
	maxRevHistoryLengthInChars = 2000
)

// Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet.
// 1. Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's).
// 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
// 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas.
//    Note that the pod-template-hash will be added to adopted RSes and pods.
func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) {
	existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList)

	// Calculate the max revision number among all old RSes
	maxOldRevision := deploymentutil.MaxRevision(oldRSs)
	// Calculate revision number for this new replica set
	newRevision := strconv.FormatInt(maxOldRevision+1, 10)
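	// For example, if the old replica sets carry revision annotations "1" and "3", maxOldRevision is 3
	// and the new replica set (and eventually the deployment) is stamped with revision "4".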

	// Latest replica set exists. We need to sync its annotations (includes copying all but
	// annotationsToSkip from the parent deployment, and update revision, desiredReplicas,
	// and maxReplicas) and also update the revision annotation in the deployment with the
	// latest revision.
	if existingNewRS != nil {
		rsCopy := existingNewRS.DeepCopy()

		// Set existing new replica set's annotation
		annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(d, rsCopy, newRevision, true, maxRevHistoryLengthInChars)
		minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
		if annotationsUpdated || minReadySecondsNeedsUpdate {
			rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
			return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
		}

		// Should use the revision in existingNewRS's annotation, since it was set beforehand.
		needsUpdate := deploymentutil.SetDeploymentRevision(d, rsCopy.Annotations[deploymentutil.RevisionAnnotation])
		// If no other Progressing condition has been recorded and we need to estimate the progress
		// of this deployment then it is likely that old users started caring about progress. In that
		// case we need to take into account the first time we noticed their new replica set.
		cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
		if deploymentutil.HasProgressDeadline(d) && cond == nil {
			msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
			condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
			deploymentutil.SetDeploymentCondition(&d.Status, *condition)
			needsUpdate = true
		}

		if needsUpdate {
			var err error
			if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}); err != nil {
				return nil, err
			}
		}
		return rsCopy, nil
	}

	if !createIfNotExisted {
		return nil, nil
	}

	// new ReplicaSet does not exist, create one.
	newRSTemplate := *d.Spec.Template.DeepCopy()
	podTemplateSpecHash := controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount)
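	// The hash doubles as the pod-template-hash label value and as the ReplicaSet name suffix below,
	// so repeated syncs of an unchanged template converge on the same ReplicaSet. For illustration
	// (hash value is only an example), a Deployment named "frontend" whose template hashes to
	// "7bffc8ddfc" yields a ReplicaSet named "frontend-7bffc8ddfc".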
	newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
	// Add podTemplateHash label to selector.
	newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)

	// Create new ReplicaSet
	newRS := apps.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			// Make the name deterministic, to ensure idempotence
			Name:            d.Name + "-" + podTemplateSpecHash,
			Namespace:       d.Namespace,
			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
			Labels:          newRSTemplate.Labels,
		},
		Spec: apps.ReplicaSetSpec{
			Replicas:        new(int32),
			MinReadySeconds: d.Spec.MinReadySeconds,
			Selector:        newRSSelector,
			Template:        newRSTemplate,
		},
	}
	allRSs := append(oldRSs, &newRS)
	newReplicasCount, err := deploymentutil.NewRSNewReplicas(d, allRSs, &newRS)
	if err != nil {
		return nil, err
	}

	*(newRS.Spec.Replicas) = newReplicasCount
	// Set new replica set's annotation
	deploymentutil.SetNewReplicaSetAnnotations(d, &newRS, newRevision, false, maxRevHistoryLengthInChars)
	// Create the new ReplicaSet. If it already exists, then we need to check for possible
	// hash collisions. If there is any other error, we need to report it in the status of
	// the Deployment.
	alreadyExists := false
	createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS, metav1.CreateOptions{})
	switch {
	// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
	case errors.IsAlreadyExists(err):
		alreadyExists = true

		// Fetch a copy of the ReplicaSet.
		rs, rsErr := dc.rsLister.ReplicaSets(newRS.Namespace).Get(newRS.Name)
		if rsErr != nil {
			return nil, rsErr
		}

		// If the Deployment owns the ReplicaSet and the ReplicaSet's PodTemplateSpec is semantically
		// deep equal to the PodTemplateSpec of the Deployment, it's the Deployment's new ReplicaSet.
		// Otherwise, this is a hash collision and we need to increment the collisionCount field in
		// the status of the Deployment and requeue to try the creation in the next sync.
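		// Because ComputeHash mixes collisionCount into the hash input, bumping the count below makes
		// the next sync produce a different pod-template-hash, and therefore a different ReplicaSet
		// name that no longer collides.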
		controllerRef := metav1.GetControllerOf(rs)
		if controllerRef != nil && controllerRef.UID == d.UID && deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
			createdRS = rs
			err = nil
			break
		}

		// Matching ReplicaSet is not equal - increment the collisionCount in the DeploymentStatus
		// and requeue the Deployment.
		if d.Status.CollisionCount == nil {
			d.Status.CollisionCount = new(int32)
		}
		preCollisionCount := *d.Status.CollisionCount
		*d.Status.CollisionCount++
		// Update the collisionCount for the Deployment and let it requeue by returning the original
		// error.
		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
		if dErr == nil {
			klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
		}
		return nil, err
	case errors.HasStatusCause(err, v1.NamespaceTerminatingCause):
		// if the namespace is terminating, all subsequent creates will fail and we can safely do nothing
		return nil, err
	case err != nil:
		msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
		if deploymentutil.HasProgressDeadline(d) {
			cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
			deploymentutil.SetDeploymentCondition(&d.Status, *cond)
			// We don't really care about this error at this point, since we have a bigger issue to report.
			// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
			// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
			_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
		}
		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
		return nil, err
	}

	if !alreadyExists && newReplicasCount > 0 {
		dc.eventRecorder.Eventf(d, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
	}

	needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision)
	if !alreadyExists && deploymentutil.HasProgressDeadline(d) {
		msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
		condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
		deploymentutil.SetDeploymentCondition(&d.Status, *condition)
		needsUpdate = true
	}
	if needsUpdate {
		_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
	}
	return createdRS, err
}

// scale scales proportionally in order to mitigate risk. Otherwise, scaling up can increase the size
// of the new replica set and scaling down can decrease the sizes of the old ones, both of which would
// have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable
// replicas in the event of a problem with the rolled out template. Should run only on scaling events or
// when a deployment is paused and not during the normal rollout process.
func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error {
	// If there is only one active replica set then we should scale that up to the full count of the
	// deployment. If there is no active replica set, then we should scale up the newest replica set.
	if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
		if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {
			return nil
		}
		_, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment)
		return err
	}

	// If the new replica set is saturated, old replica sets should be fully scaled down.
	// This case handles replica set adoption during a saturated new replica set.
	if deploymentutil.IsSaturated(deployment, newRS) {
		for _, old := range controller.FilterActiveReplicaSets(oldRSs) {
			if _, _, err := dc.scaleReplicaSetAndRecordEvent(old, 0, deployment); err != nil {
				return err
			}
		}
		return nil
	}

	// There are old replica sets with pods and the new replica set is not saturated.
	// We need to proportionally scale all replica sets (new and old) in case of a
	// rolling deployment.
	if deploymentutil.IsRollingUpdate(deployment) {
		allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS))
		allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)

		allowedSize := int32(0)
		if *(deployment.Spec.Replicas) > 0 {
			allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
		}

		// Number of additional replicas that can be either added or removed from the total
		// replicas count. These replicas should be distributed proportionally to the active
		// replica sets.
		deploymentReplicasToAdd := allowedSize - allRSsReplicas

		// The additional replicas should be distributed proportionally amongst the active
		// replica sets from the larger to the smaller in size replica set. Scaling direction
		// drives what happens in case we are trying to scale replica sets of the same size.
		// In such a case when scaling up, we should scale up newer replica sets first, and
		// when scaling down, we should scale down older replica sets first.
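		//
		// As a rough illustration (numbers are only an example): scaling a deployment from 10 to 13
		// replicas with maxSurge=0 and two active replica sets of size 8 and 2 gives
		// deploymentReplicasToAdd=3; proportional distribution grows them to roughly 10 and 3,
		// preserving the existing 80/20 split, with any rounding leftover applied to the largest
		// replica set further below.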
		var scalingOperation string
		switch {
		case deploymentReplicasToAdd > 0:
			sort.Sort(controller.ReplicaSetsBySizeNewer(allRSs))
			scalingOperation = "up"

		case deploymentReplicasToAdd < 0:
			sort.Sort(controller.ReplicaSetsBySizeOlder(allRSs))
			scalingOperation = "down"
		}

		// Iterate over all active replica sets and estimate proportions for each of them.
		// The absolute value of deploymentReplicasAdded should never exceed the absolute
		// value of deploymentReplicasToAdd.
		deploymentReplicasAdded := int32(0)
		nameToSize := make(map[string]int32)
		for i := range allRSs {
			rs := allRSs[i]

			// Estimate proportions if we have replicas to add, otherwise simply populate
			// nameToSize with the current sizes for each replica set.
			if deploymentReplicasToAdd != 0 {
				proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded)

				nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion
				deploymentReplicasAdded += proportion
			} else {
				nameToSize[rs.Name] = *(rs.Spec.Replicas)
			}
		}

		// Update all replica sets
		for i := range allRSs {
			rs := allRSs[i]

			// Add/remove any leftovers to the largest replica set.
			if i == 0 && deploymentReplicasToAdd != 0 {
				leftover := deploymentReplicasToAdd - deploymentReplicasAdded
				nameToSize[rs.Name] = nameToSize[rs.Name] + leftover
				if nameToSize[rs.Name] < 0 {
					nameToSize[rs.Name] = 0
				}
			}

			// TODO: Use transactions when we have them.
			if _, _, err := dc.scaleReplicaSet(rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil {
				// Return as soon as we fail, the deployment is requeued
				return err
			}
		}
	}
	return nil
}

func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) {
	// No need to scale
	if *(rs.Spec.Replicas) == newScale {
		return false, rs, nil
	}
	var scalingOperation string
	if *(rs.Spec.Replicas) < newScale {
		scalingOperation = "up"
	} else {
		scalingOperation = "down"
	}
	scaled, newRS, err := dc.scaleReplicaSet(rs, newScale, deployment, scalingOperation)
	return scaled, newRS, err
}

func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) {
	sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale
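	// The desired-replicas and max-replicas annotations record the deployment's current size and its
	// size plus maxSurge on every replica set; later syncs read them back in GetProportion and
	// isScalingEvent.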
	annotationsNeedUpdate := deploymentutil.ReplicasAnnotationsNeedUpdate(rs, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))

	scaled := false
	var err error
	if sizeNeedsUpdate || annotationsNeedUpdate {
		rsCopy := rs.DeepCopy()
		*(rsCopy.Spec.Replicas) = newScale
		deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
		rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
		if err == nil && sizeNeedsUpdate {
			scaled = true
			dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
		}
	}
	return scaled, rs, err
}

// cleanupDeployment is responsible for cleaning up a deployment, i.e. it removes all old replica sets
// except the latest N, where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the
// pod template of a deployment kept around by default 1) for historical reasons and 2) for the ability
// to roll back a deployment.
func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
	if !deploymentutil.HasRevisionHistoryLimit(deployment) {
		return nil
	}

	// Avoid deleting replica sets with a deletion timestamp set
	aliveFilter := func(rs *apps.ReplicaSet) bool {
		return rs != nil && rs.ObjectMeta.DeletionTimestamp == nil
	}
	cleanableRSes := controller.FilterReplicaSets(oldRSs, aliveFilter)

	diff := int32(len(cleanableRSes)) - *deployment.Spec.RevisionHistoryLimit
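	// For example, with revisionHistoryLimit=2 and 5 cleanable old replica sets, diff is 3 and the
	// 3 oldest fully scaled-down replica sets are deleted below.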
	if diff <= 0 {
		return nil
	}

	sort.Sort(controller.ReplicaSetsByCreationTimestamp(cleanableRSes))
	klog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)

	for i := int32(0); i < diff; i++ {
		rs := cleanableRSes[i]
		// Avoid deleting replica sets with non-zero replica counts
		if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration || rs.DeletionTimestamp != nil {
			continue
		}
		klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(context.TODO(), rs.Name, nil); err != nil && !errors.IsNotFound(err) {
			// Return error instead of aggregating and continuing DELETEs on the theory
			// that we may be overloading the api server.
			return err
		}
	}

	return nil
}

// syncDeploymentStatus checks if the status is up-to-date and syncs it if necessary
func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
	newStatus := calculateStatus(allRSs, newRS, d)

	if reflect.DeepEqual(d.Status, newStatus) {
		return nil
	}

	newDeployment := d
	newDeployment.Status = newStatus
	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{})
	return err
}

// calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets.
func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) apps.DeploymentStatus {
	availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
	totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
	unavailableReplicas := totalReplicas - availableReplicas
	// If unavailableReplicas is negative, then that means the Deployment has more available replicas running than
	// desired, e.g. whenever it scales down. In such a case we should simply default unavailableReplicas to zero.
	if unavailableReplicas < 0 {
		unavailableReplicas = 0
	}

	status := apps.DeploymentStatus{
		// TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value.
		ObservedGeneration:  deployment.Generation,
		Replicas:            deploymentutil.GetActualReplicaCountForReplicaSets(allRSs),
		UpdatedReplicas:     deploymentutil.GetActualReplicaCountForReplicaSets([]*apps.ReplicaSet{newRS}),
		ReadyReplicas:       deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs),
		AvailableReplicas:   availableReplicas,
		UnavailableReplicas: unavailableReplicas,
		CollisionCount:      deployment.Status.CollisionCount,
	}

	// Copy conditions one by one so we won't mutate the original object.
	conditions := deployment.Status.Conditions
	for i := range conditions {
		status.Conditions = append(status.Conditions, conditions[i])
	}

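	// For example (illustrative numbers): with 10 desired replicas and maxUnavailable=25% (which
	// rounds down to 2), the deployment is considered Available once at least 8 replicas are
	// available.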
	if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) {
		minAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
		deploymentutil.SetDeploymentCondition(&status, *minAvailability)
	} else {
		noMinAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
		deploymentutil.SetDeploymentCondition(&status, *noMinAvailability)
	}

	return status
}

// isScalingEvent checks whether the provided deployment has been updated with a scaling event
// by looking at the desired-replicas annotation in the active replica sets of the deployment.
//
// rsList should come from getReplicaSetsForDeployment(d).
func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet) (bool, error) {
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
	if err != nil {
		return false, err
	}
	allRSs := append(oldRSs, newRS)
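	// The desired-replicas annotation on each replica set records the deployment's spec.replicas at
	// the time that replica set was last synced; an active replica set whose annotation differs from
	// the current spec.replicas indicates the user changed only the scale, not the pod template.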
	for _, rs := range controller.FilterActiveReplicaSets(allRSs) {
		desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
		if !ok {
			continue
		}
		if desired != *(d.Spec.Replicas) {
			return true, nil
		}
	}
	return false, nil
}