  1. /*
  2. Copyright 2015 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package apps
  14. import (
  15. "context"
  16. "fmt"
  17. "math/rand"
  18. "time"
  19. "github.com/davecgh/go-spew/spew"
  20. "github.com/onsi/ginkgo"
  21. "github.com/onsi/gomega"
  22. appsv1 "k8s.io/api/apps/v1"
  23. v1 "k8s.io/api/core/v1"
  24. apierrors "k8s.io/apimachinery/pkg/api/errors"
  25. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  26. "k8s.io/apimachinery/pkg/labels"
  27. "k8s.io/apimachinery/pkg/types"
  28. "k8s.io/apimachinery/pkg/util/intstr"
  29. "k8s.io/apimachinery/pkg/util/wait"
  30. "k8s.io/apimachinery/pkg/watch"
  31. clientset "k8s.io/client-go/kubernetes"
  32. appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
  33. watchtools "k8s.io/client-go/tools/watch"
  34. appsinternal "k8s.io/kubernetes/pkg/apis/apps"
  35. deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
  36. "k8s.io/kubernetes/test/e2e/framework"
  37. e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
  38. e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  39. "k8s.io/kubernetes/test/e2e/framework/replicaset"
  40. e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
  41. e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  42. testutil "k8s.io/kubernetes/test/utils"
  43. utilpointer "k8s.io/utils/pointer"
  44. )
const (
	// poll is the interval between checks in short wait loops.
	poll = 2 * time.Second
	// pollLongTimeout bounds the longer wait loops in this file.
	pollLongTimeout = 5 * time.Minute
	// dRetryPeriod/dRetryTimeout configure retries for deployment
	// state-condition waits.
	dRetryPeriod  = 2 * time.Second
	dRetryTimeout = 5 * time.Minute
)
var (
	// nilRs is a typed nil used with ExpectNotEqual to assert that a
	// *appsv1.ReplicaSet value is non-nil.
	nilRs *appsv1.ReplicaSet
)
// Deployment e2e specs. The per-spec client and namespace are captured in
// BeforeEach and consumed by failureTrap after each spec.
var _ = SIGDescribe("Deployment", func() {
	var ns string
	var c clientset.Interface
	// Dump deployments, ReplicaSets, and pods after every spec so failures
	// leave diagnostics behind. NOTE(review): this AfterEach is registered
	// before the framework below is constructed — presumably so the dump
	// runs while the test namespace still exists; confirm against ginkgo's
	// AfterEach ordering semantics.
	ginkgo.AfterEach(func() {
		failureTrap(c, ns)
	})
	f := framework.NewDefaultFramework("deployment")
	// Capture the client and namespace the framework creates for each spec.
	ginkgo.BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})
	ginkgo.It("deployment reaping should cascade to its replica sets and pods", func() {
		testDeleteDeployment(f)
	})
	/*
		Testname: Deployment RollingUpdate
		Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
	*/
	framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() {
		testRollingUpdateDeployment(f)
	})
	/*
		Testname: Deployment Recreate
		Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
	*/
	framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() {
		testRecreateDeployment(f)
	})
	/*
		Testname: Deployment RevisionHistoryLimit
		Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
		the Deployment's `.spec.revisionHistoryLimit`.
	*/
	framework.ConformanceIt("deployment should delete old replica sets", func() {
		testDeploymentCleanUpPolicy(f)
	})
	/*
		Testname: Deployment Rollover
		Description: A conformant Kubernetes distribution MUST support Deployment rollover,
		i.e. allow arbitrary number of changes to desired state during rolling update
		before the rollout finishes.
	*/
	framework.ConformanceIt("deployment should support rollover", func() {
		testRolloverDeployment(f)
	})
	ginkgo.It("iterative rollouts should eventually progress", func() {
		testIterativeDeployments(f)
	})
	ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
		testDeploymentsControllerRef(f)
	})
	/*
		Testname: Deployment Proportional Scaling
		Description: A conformant Kubernetes distribution MUST support Deployment
		proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
		when a Deployment is scaled.
	*/
	framework.ConformanceIt("deployment should support proportional scaling", func() {
		testProportionalScalingDeployment(f)
	})
	ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func() {
		// Local-traffic load balancers only exist on these cloud providers.
		e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
		testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f)
	})
	// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
	// See https://github.com/kubernetes/kubernetes/issues/29229
})
  121. func failureTrap(c clientset.Interface, ns string) {
  122. deployments, err := c.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
  123. if err != nil {
  124. framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
  125. return
  126. }
  127. for i := range deployments.Items {
  128. d := deployments.Items[i]
  129. framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
  130. _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1())
  131. if err != nil {
  132. framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
  133. return
  134. }
  135. testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
  136. rsList := allOldRSs
  137. if newRS != nil {
  138. rsList = append(rsList, newRS)
  139. }
  140. testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
  141. }
  142. // We need print all the ReplicaSets if there are no Deployment object created
  143. if len(deployments.Items) != 0 {
  144. return
  145. }
  146. framework.Logf("Log out all the ReplicaSets if there is no deployment created")
  147. rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
  148. if err != nil {
  149. framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
  150. return
  151. }
  152. for _, rs := range rss.Items {
  153. framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
  154. selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
  155. if err != nil {
  156. framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
  157. }
  158. options := metav1.ListOptions{LabelSelector: selector.String()}
  159. podList, err := c.CoreV1().Pods(rs.Namespace).List(context.TODO(), options)
  160. if err != nil {
  161. framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
  162. continue
  163. }
  164. for _, pod := range podList.Items {
  165. framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
  166. }
  167. }
  168. }
  169. func intOrStrP(num int) *intstr.IntOrString {
  170. intstr := intstr.FromInt(num)
  171. return &intstr
  172. }
  173. func stopDeployment(c clientset.Interface, ns, deploymentName string) {
  174. deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
  175. framework.ExpectNoError(err)
  176. framework.Logf("Deleting deployment %s", deploymentName)
  177. err = framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)
  178. framework.ExpectNoError(err)
  179. framework.Logf("Ensuring deployment %s was deleted", deploymentName)
  180. _, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
  181. framework.ExpectError(err)
  182. framework.ExpectEqual(apierrors.IsNotFound(err), true)
  183. framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
  184. selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
  185. framework.ExpectNoError(err)
  186. options := metav1.ListOptions{LabelSelector: selector.String()}
  187. rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options)
  188. framework.ExpectNoError(err)
  189. gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
  190. framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
  191. var pods *v1.PodList
  192. if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
  193. pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options)
  194. if err != nil {
  195. return false, err
  196. }
  197. // Pods may be created by overlapping deployments right after this deployment is deleted, ignore them
  198. if len(pods.Items) == 0 {
  199. return true, nil
  200. }
  201. return false, nil
  202. }); err != nil {
  203. framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
  204. }
  205. }
  206. func testDeleteDeployment(f *framework.Framework) {
  207. ns := f.Namespace.Name
  208. c := f.ClientSet
  209. deploymentName := "test-new-deployment"
  210. podLabels := map[string]string{"name": WebserverImageName}
  211. replicas := int32(1)
  212. framework.Logf("Creating simple deployment %s", deploymentName)
  213. d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
  214. d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
  215. deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
  216. framework.ExpectNoError(err)
  217. // Wait for it to be updated to revision 1
  218. err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
  219. framework.ExpectNoError(err)
  220. err = e2edeploy.WaitForDeploymentComplete(c, deploy)
  221. framework.ExpectNoError(err)
  222. deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
  223. framework.ExpectNoError(err)
  224. newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
  225. framework.ExpectNoError(err)
  226. framework.ExpectNotEqual(newRS, nilRs)
  227. stopDeployment(c, ns, deploymentName)
  228. }
  229. func testRollingUpdateDeployment(f *framework.Framework) {
  230. ns := f.Namespace.Name
  231. c := f.ClientSet
  232. // Create webserver pods.
  233. deploymentPodLabels := map[string]string{"name": "sample-pod"}
  234. rsPodLabels := map[string]string{
  235. "name": "sample-pod",
  236. "pod": WebserverImageName,
  237. }
  238. rsName := "test-rolling-update-controller"
  239. replicas := int32(1)
  240. rsRevision := "3546343826724305832"
  241. annotations := make(map[string]string)
  242. annotations[deploymentutil.RevisionAnnotation] = rsRevision
  243. rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
  244. rs.Annotations = annotations
  245. framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
  246. _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
  247. framework.ExpectNoError(err)
  248. // Verify that the required pods have come up.
  249. err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
  250. framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
  251. // Create a deployment to delete webserver pods and instead bring up agnhost pods.
  252. deploymentName := "test-rolling-update-deployment"
  253. framework.Logf("Creating deployment %q", deploymentName)
  254. d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
  255. deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
  256. framework.ExpectNoError(err)
  257. // Wait for it to be updated to revision 3546343826724305833.
  258. framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
  259. err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", AgnhostImage)
  260. framework.ExpectNoError(err)
  261. framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
  262. err = e2edeploy.WaitForDeploymentComplete(c, deploy)
  263. framework.ExpectNoError(err)
  264. // There should be 1 old RS (webserver-controller, which is adopted)
  265. framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
  266. deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
  267. framework.ExpectNoError(err)
  268. _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
  269. framework.ExpectNoError(err)
  270. framework.ExpectEqual(len(allOldRSs), 1)
  271. }
  272. func testRecreateDeployment(f *framework.Framework) {
  273. ns := f.Namespace.Name
  274. c := f.ClientSet
  275. // Create a deployment that brings up agnhost pods.
  276. deploymentName := "test-recreate-deployment"
  277. framework.Logf("Creating deployment %q", deploymentName)
  278. d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
  279. deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
  280. framework.ExpectNoError(err)
  281. // Wait for it to be updated to revision 1
  282. framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
  283. err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", AgnhostImage)
  284. framework.ExpectNoError(err)
  285. framework.Logf("Waiting deployment %q to complete", deploymentName)
  286. err = e2edeploy.WaitForDeploymentComplete(c, deployment)
  287. framework.ExpectNoError(err)
  288. // Update deployment to delete agnhost pods and bring up webserver pods.
  289. framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
  290. deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
  291. update.Spec.Template.Spec.Containers[0].Name = WebserverImageName
  292. update.Spec.Template.Spec.Containers[0].Image = WebserverImage
  293. })
  294. framework.ExpectNoError(err)
  295. framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
  296. err = watchRecreateDeployment(c, deployment)
  297. framework.ExpectNoError(err)
  298. }
  299. // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
  300. func testDeploymentCleanUpPolicy(f *framework.Framework) {
  301. ns := f.Namespace.Name
  302. c := f.ClientSet
  303. // Create webserver pods.
  304. deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
  305. rsPodLabels := map[string]string{
  306. "name": "cleanup-pod",
  307. "pod": WebserverImageName,
  308. }
  309. rsName := "test-cleanup-controller"
  310. replicas := int32(1)
  311. revisionHistoryLimit := utilpointer.Int32Ptr(0)
  312. _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
  313. framework.ExpectNoError(err)
  314. // Verify that the required pods have come up.
  315. err = e2epod.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
  316. framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
  317. // Create a deployment to delete webserver pods and instead bring up agnhost pods.
  318. deploymentName := "test-cleanup-deployment"
  319. framework.Logf("Creating deployment %s", deploymentName)
  320. pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
  321. framework.ExpectNoError(err, "Failed to query for pods: %v", err)
  322. options := metav1.ListOptions{
  323. ResourceVersion: pods.ListMeta.ResourceVersion,
  324. }
  325. stopCh := make(chan struct{})
  326. defer close(stopCh)
  327. w, err := c.CoreV1().Pods(ns).Watch(context.TODO(), options)
  328. framework.ExpectNoError(err)
  329. go func() {
  330. // There should be only one pod being created, which is the pod with the agnhost image.
  331. // The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
  332. numPodCreation := 1
  333. for {
  334. select {
  335. case event, _ := <-w.ResultChan():
  336. if event.Type != watch.Added {
  337. continue
  338. }
  339. numPodCreation--
  340. if numPodCreation < 0 {
  341. framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
  342. }
  343. pod, ok := event.Object.(*v1.Pod)
  344. if !ok {
  345. framework.Failf("Expect event Object to be a pod")
  346. }
  347. if pod.Spec.Containers[0].Name != AgnhostImageName {
  348. framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", AgnhostImageName, pod)
  349. }
  350. case <-stopCh:
  351. return
  352. }
  353. }
  354. }()
  355. d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
  356. d.Spec.RevisionHistoryLimit = revisionHistoryLimit
  357. _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
  358. framework.ExpectNoError(err)
  359. ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
  360. err = waitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
  361. framework.ExpectNoError(err)
  362. }
  363. // testRolloverDeployment tests that deployment supports rollover.
  364. // i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
  365. func testRolloverDeployment(f *framework.Framework) {
  366. ns := f.Namespace.Name
  367. c := f.ClientSet
  368. podName := "rollover-pod"
  369. deploymentPodLabels := map[string]string{"name": podName}
  370. rsPodLabels := map[string]string{
  371. "name": podName,
  372. "pod": WebserverImageName,
  373. }
  374. rsName := "test-rollover-controller"
  375. rsReplicas := int32(1)
  376. _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
  377. framework.ExpectNoError(err)
  378. // Verify that the required pods have come up.
  379. err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
  380. framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
  381. // Wait for replica set to become ready before adopting it.
  382. framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
  383. err = replicaset.WaitForReadyReplicaSet(c, ns, rsName)
  384. framework.ExpectNoError(err)
  385. // Create a deployment to delete webserver pods and instead bring up redis-slave pods.
  386. // We use a nonexistent image here, so that we make sure it won't finish
  387. deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
  388. deploymentReplicas := int32(1)
  389. deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
  390. deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
  391. framework.Logf("Creating deployment %q", deploymentName)
  392. newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
  393. newDeployment.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
  394. MaxUnavailable: intOrStrP(0),
  395. MaxSurge: intOrStrP(1),
  396. }
  397. newDeployment.Spec.MinReadySeconds = int32(10)
  398. _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), newDeployment, metav1.CreateOptions{})
  399. framework.ExpectNoError(err)
  400. // Verify that the pods were scaled up and down as expected.
  401. deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
  402. framework.ExpectNoError(err)
  403. framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
  404. // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
  405. err = waitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
  406. // Check if it's updated to revision 1 correctly
  407. framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
  408. err = checkDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
  409. framework.ExpectNoError(err)
  410. framework.Logf("Ensure that both replica sets have 1 created replica")
  411. oldRS, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
  412. framework.ExpectNoError(err)
  413. ensureReplicas(oldRS, int32(1))
  414. newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
  415. framework.ExpectNoError(err)
  416. ensureReplicas(newRS, int32(1))
  417. // The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up agnhost pods.
  418. framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
  419. updatedDeploymentImageName, updatedDeploymentImage := AgnhostImageName, AgnhostImage
  420. deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
  421. update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
  422. update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
  423. })
  424. framework.ExpectNoError(err)
  425. // Use observedGeneration to determine if the controller noticed the pod template update.
  426. framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
  427. err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
  428. framework.ExpectNoError(err)
  429. // Wait for it to be updated to revision 2
  430. framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
  431. err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
  432. framework.ExpectNoError(err)
  433. framework.Logf("Make sure deployment %q is complete", deploymentName)
  434. err = waitForDeploymentCompleteAndCheckRolling(c, deployment)
  435. framework.ExpectNoError(err)
  436. framework.Logf("Ensure that both old replica sets have no replicas")
  437. oldRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
  438. framework.ExpectNoError(err)
  439. ensureReplicas(oldRS, int32(0))
  440. // Not really the new replica set anymore but we GET by name so that's fine.
  441. newRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), newRS.Name, metav1.GetOptions{})
  442. framework.ExpectNoError(err)
  443. ensureReplicas(newRS, int32(0))
  444. }
  445. func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
  446. framework.ExpectEqual(*rs.Spec.Replicas, replicas)
  447. framework.ExpectEqual(rs.Status.Replicas, replicas)
  448. }
  449. func randomScale(d *appsv1.Deployment, i int) {
  450. switch r := rand.Float32(); {
  451. case r < 0.3:
  452. framework.Logf("%02d: scaling up", i)
  453. *(d.Spec.Replicas)++
  454. case r < 0.6:
  455. if *(d.Spec.Replicas) > 1 {
  456. framework.Logf("%02d: scaling down", i)
  457. *(d.Spec.Replicas)--
  458. }
  459. }
  460. }
  461. func testIterativeDeployments(f *framework.Framework) {
  462. ns := f.Namespace.Name
  463. c := f.ClientSet
  464. podLabels := map[string]string{"name": WebserverImageName}
  465. replicas := int32(6)
  466. zero := int64(0)
  467. two := int32(2)
  468. // Create a webserver deployment.
  469. deploymentName := "webserver"
  470. thirty := int32(30)
  471. d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
  472. d.Spec.ProgressDeadlineSeconds = &thirty
  473. d.Spec.RevisionHistoryLimit = &two
  474. d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
  475. framework.Logf("Creating deployment %q", deploymentName)
  476. deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
  477. framework.ExpectNoError(err)
  478. iterations := 20
  479. for i := 0; i < iterations; i++ {
  480. if r := rand.Float32(); r < 0.6 {
  481. time.Sleep(time.Duration(float32(i) * r * float32(time.Second)))
  482. }
  483. switch n := rand.Float32(); {
  484. case n < 0.2:
  485. // trigger a new deployment
  486. framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
  487. deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
  488. newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
  489. update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
  490. randomScale(update, i)
  491. })
  492. framework.ExpectNoError(err)
  493. case n < 0.4:
  494. // rollback to the previous version
  495. framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
  496. deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
  497. if update.Annotations == nil {
  498. update.Annotations = make(map[string]string)
  499. }
  500. update.Annotations[appsv1.DeprecatedRollbackTo] = "0"
  501. })
  502. framework.ExpectNoError(err)
  503. case n < 0.6:
  504. // just scaling
  505. framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
  506. deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
  507. randomScale(update, i)
  508. })
  509. framework.ExpectNoError(err)
  510. case n < 0.8:
  511. // toggling the deployment
  512. if deployment.Spec.Paused {
  513. framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
  514. deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
  515. update.Spec.Paused = true
  516. randomScale(update, i)
  517. })
  518. framework.ExpectNoError(err)
  519. } else {
  520. framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
  521. deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
  522. update.Spec.Paused = false
  523. randomScale(update, i)
  524. })
  525. framework.ExpectNoError(err)
  526. }
  527. default:
  528. // arbitrarily delete deployment pods
  529. framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
  530. selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
  531. framework.ExpectNoError(err)
  532. opts := metav1.ListOptions{LabelSelector: selector.String()}
  533. podList, err := c.CoreV1().Pods(ns).List(context.TODO(), opts)
  534. framework.ExpectNoError(err)
  535. if len(podList.Items) == 0 {
  536. framework.Logf("%02d: no deployment pods to delete", i)
  537. continue
  538. }
  539. for p := range podList.Items {
  540. if rand.Float32() < 0.5 {
  541. continue
  542. }
  543. name := podList.Items[p].Name
  544. framework.Logf("%02d: deleting deployment pod %q", i, name)
  545. err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil)
  546. if err != nil && !apierrors.IsNotFound(err) {
  547. framework.ExpectNoError(err)
  548. }
  549. }
  550. }
  551. }
  552. // unpause the deployment if we end up pausing it
  553. deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
  554. framework.ExpectNoError(err)
  555. if deployment.Spec.Paused {
  556. deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
  557. update.Spec.Paused = false
  558. })
  559. }
  560. framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
  561. err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
  562. framework.ExpectNoError(err)
  563. framework.Logf("Waiting for deployment %q status", deploymentName)
  564. err = e2edeploy.WaitForDeploymentComplete(c, deployment)
  565. framework.ExpectNoError(err)
  566. framework.Logf("Checking deployment %q for a complete condition", deploymentName)
  567. err = waitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, appsv1.DeploymentProgressing)
  568. framework.ExpectNoError(err)
  569. }
// testDeploymentsControllerRef verifies ownership semantics between a
// Deployment and its ReplicaSet: deleting a Deployment while orphaning its
// dependents leaves the ReplicaSet behind without a controllerRef, and a new
// Deployment with a matching selector then adopts that same ReplicaSet
// instead of creating a new one.
func testDeploymentsControllerRef(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	deploymentName := "test-orphan-deployment"
	framework.Logf("Creating Deployment %q", deploymentName)
	podLabels := map[string]string{"name": WebserverImageName}
	replicas := int32(1)
	d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
	deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	err = e2edeploy.WaitForDeploymentComplete(c, deploy)
	framework.ExpectNoError(err)

	framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
	rsList := listDeploymentReplicaSets(c, ns, podLabels)
	framework.ExpectEqual(len(rsList.Items), 1)

	framework.Logf("Obtaining the ReplicaSet's UID")
	// Remember the UID so we can later prove the adopted ReplicaSet is the
	// very same object, not a re-created one.
	orphanedRSUID := rsList.Items[0].UID

	framework.Logf("Checking the ReplicaSet has the right controllerRef")
	err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
	framework.ExpectNoError(err)

	framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
	err = orphanDeploymentReplicaSets(c, deploy)
	framework.ExpectNoError(err)

	ginkgo.By("Wait for the ReplicaSet to be orphaned")
	err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
	framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")

	// A second Deployment with the same pod labels should adopt the orphan
	// rather than spawn a fresh ReplicaSet.
	deploymentName = "test-adopt-deployment"
	framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
	d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
	deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	err = e2edeploy.WaitForDeploymentComplete(c, deploy)
	framework.ExpectNoError(err)

	framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
	err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
	framework.ExpectNoError(err)

	framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
	rsList = listDeploymentReplicaSets(c, ns, podLabels)
	framework.ExpectEqual(len(rsList.Items), 1)

	framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
	framework.ExpectEqual(rsList.Items[0].UID, orphanedRSUID)
}
// testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
// of a rollout (either in progress or paused), then the Deployment will balance additional replicas
// in existing active ReplicaSets (ReplicaSets with more than 0 replica) in order to mitigate risk.
func testProportionalScalingDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	podLabels := map[string]string{"name": WebserverImageName}
	replicas := int32(10)

	// Create a webserver deployment with maxSurge=3 and maxUnavailable=2 so
	// that a blocked rollout leaves both ReplicaSets with nonzero replicas.
	deploymentName := "webserver-deployment"
	d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
	d.Spec.Strategy.RollingUpdate = new(appsv1.RollingUpdateDeployment)
	d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
	d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)

	framework.Logf("Creating deployment %q", deploymentName)
	deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	framework.Logf("Waiting for observed generation %d", deployment.Generation)
	err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	framework.ExpectNoError(err)

	// Verify that the required pods have come up.
	framework.Logf("Waiting for all required pods to come up")
	err = e2epod.VerifyPodsRunning(c, ns, WebserverImageName, false, *(deployment.Spec.Replicas))
	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

	framework.Logf("Waiting for deployment %q to complete", deployment.Name)
	err = e2edeploy.WaitForDeploymentComplete(c, deployment)
	framework.ExpectNoError(err)

	firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
	framework.ExpectNoError(err)

	// Update the deployment with a non-existent image so that the new replica set
	// will be blocked to simulate a partial rollout.
	framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
	deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = "webserver:404"
	})
	framework.ExpectNoError(err)

	framework.Logf("Waiting for observed generation %d", deployment.Generation)
	err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	framework.ExpectNoError(err)

	// Checking state of first rollout's replicaset.
	maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
	framework.ExpectNoError(err)

	// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
	minAvailableReplicas := replicas - int32(maxUnavailable)
	framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
	err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
	framework.ExpectNoError(err)

	// First rollout's replicaset should have .spec.replicas = 8 too.
	framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
	err = waitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)
	framework.ExpectNoError(err)

	// The desired replicas wait makes sure that the RS controller has created expected number of pods.
	framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
	firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	err = waitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
	framework.ExpectNoError(err)

	// Checking state of second rollout's replicaset.
	secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
	framework.ExpectNoError(err)
	maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
	framework.ExpectNoError(err)

	// Second rollout's replicaset should have 0 available replicas.
	framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
	framework.ExpectEqual(secondRS.Status.AvailableReplicas, int32(0))

	// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
	newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
	framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
	err = waitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)
	framework.ExpectNoError(err)

	// The desired replicas wait makes sure that the RS controller has created expected number of pods.
	framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
	secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	err = waitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
	framework.ExpectNoError(err)

	// Check the deployment's minimum availability.
	framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
	if deployment.Status.AvailableReplicas < minAvailableReplicas {
		err = fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)
		framework.ExpectNoError(err)
	}

	// Scale the deployment to 30 replicas while the rollout is stuck; the
	// extra replicas should be distributed proportionally across both RSes.
	newReplicas = int32(30)
	framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
	deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
		update.Spec.Replicas = &newReplicas
	})
	framework.ExpectNoError(err)

	framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
	firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)

	// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
	// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
	framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
	err = waitForReplicaSetTargetSpecReplicas(c, firstRS, 20)
	framework.ExpectNoError(err)

	// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
	// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
	framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
	err = waitForReplicaSetTargetSpecReplicas(c, secondRS, 13)
	framework.ExpectNoError(err)
}
  717. func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
  718. rsList := listDeploymentReplicaSets(c, ns, label)
  719. for _, rs := range rsList.Items {
  720. // This rs is adopted only when its controller ref is update
  721. if controllerRef := metav1.GetControllerOf(&rs); controllerRef == nil || controllerRef.UID != uid {
  722. return fmt.Errorf("ReplicaSet %s has unexpected controllerRef %v", rs.Name, controllerRef)
  723. }
  724. }
  725. return nil
  726. }
  727. func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
  728. return func() (bool, error) {
  729. rsList := listDeploymentReplicaSets(c, ns, label)
  730. for _, rs := range rsList.Items {
  731. // This rs is orphaned only when controller ref is cleared
  732. if controllerRef := metav1.GetControllerOf(&rs); controllerRef != nil {
  733. return false, nil
  734. }
  735. }
  736. return true, nil
  737. }
  738. }
  739. func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList {
  740. selector := labels.Set(label).AsSelector()
  741. options := metav1.ListOptions{LabelSelector: selector.String()}
  742. rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options)
  743. framework.ExpectNoError(err)
  744. gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0))
  745. return rsList
  746. }
  747. func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error {
  748. trueVar := true
  749. deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
  750. deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
  751. return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions)
  752. }
// testRollingUpdateDeploymentWithLocalTrafficLoadBalancer verifies that a
// rolling update of a Deployment behind a LoadBalancer Service with
// externalTrafficPolicy=Local keeps the set of nodes hosting local endpoints
// unchanged across several rollouts, so locally-routed traffic is never
// interrupted.
func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	name := "test-rolling-update-with-lb"
	framework.Logf("Creating Deployment %q", name)
	podLabels := map[string]string{"name": name}
	replicas := int32(3)
	d := e2edeploy.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
	// NewDeployment assigned the same value to both d.Spec.Selector and
	// d.Spec.Template.Labels, so mutating the one would mutate the other.
	// Thus we need to set d.Spec.Template.Labels to a new value if we want
	// to mutate it alone.
	d.Spec.Template.Labels = map[string]string{
		"iteration": "0",
		"name":      name,
	}
	d.Spec.Template.Spec.Containers[0].Args = []string{"netexec", "--http-port=80", "--udp-port=80"}
	// To ensure that a node that had a local endpoint prior to a rolling
	// update continues to have a local endpoint throughout the rollout, we
	// need an affinity policy that will cause pods to be scheduled on the
	// same nodes as old pods, and we need the deployment to scale up a new
	// pod before deleting an old pod. This affinity policy will define
	// inter-pod affinity for pods of different rollouts and anti-affinity
	// for pods of the same rollout, so it will need to be updated when
	// performing a rollout.
	setAffinities(d, false)
	// MaxSurge=1/MaxUnavailable=0 forces scale-up-before-delete.
	d.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
		MaxSurge:       intOrStrP(1),
		MaxUnavailable: intOrStrP(0),
	}
	deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	err = e2edeploy.WaitForDeploymentComplete(c, deployment)
	framework.ExpectNoError(err)

	framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns)
	jig := e2eservice.NewTestJig(c, ns, name)
	jig.Labels = podLabels
	service, err := jig.CreateLoadBalancerService(e2eservice.GetServiceLoadBalancerCreationTimeout(c), func(svc *v1.Service) {
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
	})
	framework.ExpectNoError(err)

	lbNameOrAddress := e2eservice.GetIngressPoint(&service.Status.LoadBalancer.Ingress[0])
	svcPort := int(service.Spec.Ports[0].Port)

	framework.Logf("Hitting the replica set's pods through the service's load balancer")
	timeout := e2eservice.LoadBalancerLagTimeoutDefault
	if framework.ProviderIs("aws") {
		// AWS load balancers need a longer lag timeout.
		timeout = e2eservice.LoadBalancerLagTimeoutAWS
	}
	e2eservice.TestReachableHTTP(lbNameOrAddress, svcPort, timeout)

	framework.Logf("Starting a goroutine to watch the service's endpoints in the background")
	done := make(chan struct{})
	failed := make(chan struct{})
	defer close(done)
	go func() {
		defer ginkgo.GinkgoRecover()
		expectedNodes, err := jig.GetEndpointNodeNames()
		framework.ExpectNoError(err)
		// The affinity policy should ensure that before an old pod is
		// deleted, a new pod will have been created on the same node.
		// Thus the set of nodes with local endpoints for the service
		// should remain unchanged.
		wait.Until(func() {
			actualNodes, err := jig.GetEndpointNodeNames()
			framework.ExpectNoError(err)
			if !actualNodes.Equal(expectedNodes) {
				framework.Logf("The set of nodes with local endpoints changed; started with %v, now have %v", expectedNodes.List(), actualNodes.List())
				failed <- struct{}{}
			}
		}, framework.Poll, done)
	}()

	framework.Logf("Triggering a rolling deployment several times")
	for i := 1; i <= 3; i++ {
		framework.Logf("Updating label deployment %q pod spec (iteration #%d)", name, i)
		// Bump the "iteration" label and enable cross-iteration affinity so
		// new pods land on the nodes running the previous iteration's pods.
		deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
			update.Spec.Template.Labels["iteration"] = fmt.Sprintf("%d", i)
			setAffinities(update, true)
		})
		framework.ExpectNoError(err)

		framework.Logf("Waiting for observed generation %d", deployment.Generation)
		err = waitForObservedDeployment(c, ns, name, deployment.Generation)
		framework.ExpectNoError(err)

		framework.Logf("Make sure deployment %q is complete", name)
		err = waitForDeploymentCompleteAndCheckRolling(c, deployment)
		framework.ExpectNoError(err)
	}

	// Give the background watcher one more minute to report a violation
	// before declaring success.
	select {
	case <-failed:
		framework.Failf("Connectivity to the load balancer was interrupted")
	case <-time.After(1 * time.Minute):
	}
}
  844. // setAffinities set PodAntiAffinity across pods from the same generation
  845. // of Deployment and if, explicitly requested, also affinity with pods
  846. // from other generations.
  847. // It is required to make those "Required" so that in large clusters where
  848. // scheduler may not score all nodes if a lot of them are feasible, the
  849. // test will also have a chance to pass.
  850. func setAffinities(d *appsv1.Deployment, setAffinity bool) {
  851. affinity := &v1.Affinity{
  852. PodAntiAffinity: &v1.PodAntiAffinity{
  853. RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
  854. {
  855. TopologyKey: "kubernetes.io/hostname",
  856. LabelSelector: &metav1.LabelSelector{
  857. MatchExpressions: []metav1.LabelSelectorRequirement{
  858. {
  859. Key: "name",
  860. Operator: metav1.LabelSelectorOpIn,
  861. Values: []string{d.Spec.Template.Labels["name"]},
  862. },
  863. {
  864. Key: "iteration",
  865. Operator: metav1.LabelSelectorOpIn,
  866. Values: []string{d.Spec.Template.Labels["iteration"]},
  867. },
  868. },
  869. },
  870. },
  871. },
  872. },
  873. }
  874. if setAffinity {
  875. affinity.PodAffinity = &v1.PodAffinity{
  876. RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
  877. {
  878. TopologyKey: "kubernetes.io/hostname",
  879. LabelSelector: &metav1.LabelSelector{
  880. MatchExpressions: []metav1.LabelSelectorRequirement{
  881. {
  882. Key: "name",
  883. Operator: metav1.LabelSelectorOpIn,
  884. Values: []string{d.Spec.Template.Labels["name"]},
  885. },
  886. {
  887. Key: "iteration",
  888. Operator: metav1.LabelSelectorOpNotIn,
  889. Values: []string{d.Spec.Template.Labels["iteration"]},
  890. },
  891. },
  892. },
  893. },
  894. },
  895. }
  896. }
  897. d.Spec.Template.Spec.Affinity = affinity
  898. }
// watchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
// old pods. It returns an error if the deployment does not use the Recreate
// strategy, if updated and non-updated replicas are observed coexisting, or
// if the deployment does not complete within two minutes.
func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error {
	if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
		return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
	}

	// Watch only this deployment, resuming from the caller's resourceVersion.
	w, err := c.AppsV1().Deployments(d.Namespace).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
	if err != nil {
		return err
	}

	// status captures the most recently observed status so the timeout error
	// below can report it.
	status := d.Status

	condition := func(event watch.Event) (bool, error) {
		d := event.Object.(*appsv1.Deployment)
		status = d.Status

		// With Recreate, updated pods must never run alongside old ones: if
		// some but not all replicas are updated, the strategy is broken. Log
		// the deployment's ReplicaSets and pods for debugging before failing.
		if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
			_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
			newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
			if err == nil && nerr == nil {
				framework.Logf("%+v", d)
				testutil.LogReplicaSetsOfDeployment(d, allOldRSs, newRS, framework.Logf)
				testutil.LogPodsOfDeployment(c, d, append(allOldRSs, newRS), framework.Logf)
			}
			return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
		}

		// Done once every replica is updated and the controller has observed
		// the current generation.
		return *(d.Spec.Replicas) == d.Status.Replicas &&
			*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
			d.Generation <= d.Status.ObservedGeneration, nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
	}
	return err
}
  935. // waitForDeploymentOldRSsNum waits for the deployment to clean up old rcs.
  936. func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
  937. var oldRSs []*appsv1.ReplicaSet
  938. var d *appsv1.Deployment
  939. pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
  940. deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
  941. if err != nil {
  942. return false, err
  943. }
  944. d = deployment
  945. _, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
  946. if err != nil {
  947. return false, err
  948. }
  949. return len(oldRSs) == desiredRSNum, nil
  950. })
  951. if pollErr == wait.ErrWaitTimeout {
  952. pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
  953. testutil.LogReplicaSetsOfDeployment(d, oldRSs, nil, framework.Logf)
  954. }
  955. return pollErr
  956. }
  957. // waitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
  958. func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
  959. desiredGeneration := replicaSet.Generation
  960. err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
  961. rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{})
  962. if err != nil {
  963. return false, err
  964. }
  965. return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(replicaSet.Spec.Replicas) && rs.Status.Replicas == *(rs.Spec.Replicas), nil
  966. })
  967. if err == wait.ErrWaitTimeout {
  968. err = fmt.Errorf("replicaset %q never had desired number of replicas", replicaSet.Name)
  969. }
  970. return err
  971. }
  972. // waitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
  973. func waitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
  974. desiredGeneration := replicaSet.Generation
  975. err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
  976. rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{})
  977. if err != nil {
  978. return false, err
  979. }
  980. return rs.Status.ObservedGeneration >= desiredGeneration && *rs.Spec.Replicas == targetReplicaNum, nil
  981. })
  982. if err == wait.ErrWaitTimeout {
  983. err = fmt.Errorf("replicaset %q never had desired number of .spec.replicas", replicaSet.Name)
  984. }
  985. return err
  986. }
// checkDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
// Thin wrapper around testutil to keep call sites in this file short.
func checkDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
	return testutil.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}
// waitForObservedDeployment waits for the specified deployment generation.
// Thin wrapper around testutil to keep call sites in this file short.
func waitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
	return testutil.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}
// waitForDeploymentWithCondition waits for the specified deployment condition.
// Thin wrapper around testutil using this file's poll/pollLongTimeout settings.
func waitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType appsv1.DeploymentConditionType) error {
	return testutil.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, framework.Logf, poll, pollLongTimeout)
}
// waitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
// Rolling update strategy should not be broken during a rolling update.
// Thin wrapper around testutil using this file's poll/pollLongTimeout settings.
func waitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *appsv1.Deployment) error {
	return testutil.WaitForDeploymentCompleteAndCheckRolling(c, d, framework.Logf, poll, pollLongTimeout)
}
// waitForDeploymentUpdatedReplicasGTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
// Thin wrapper around testutil using this file's poll/pollLongTimeout settings.
func waitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
	return testutil.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, poll, pollLongTimeout)
}