123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508 |
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
- package deployment
- import (
- "fmt"
- "net/http/httptest"
- "sync"
- "testing"
- "time"
- apps "k8s.io/api/apps/v1"
- "k8s.io/api/core/v1"
- extensions "k8s.io/api/extensions/v1beta1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/intstr"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/informers"
- clientset "k8s.io/client-go/kubernetes"
- restclient "k8s.io/client-go/rest"
- podutil "k8s.io/kubernetes/pkg/api/v1/pod"
- "k8s.io/kubernetes/pkg/controller/deployment"
- deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
- "k8s.io/kubernetes/pkg/controller/replicaset"
- "k8s.io/kubernetes/test/integration/framework"
- testutil "k8s.io/kubernetes/test/utils"
- )
const (
	// pollInterval is how often the polling helpers in this file re-check a condition.
	pollInterval = 100 * time.Millisecond
	// pollTimeout is how long the polling helpers wait before giving up.
	pollTimeout = 60 * time.Second

	// fakeContainerName and fakeImage are placeholder container values used by
	// newDeployment/newReplicaSet; the integration tests never run the image.
	fakeContainerName = "fake-name"
	fakeImage         = "fakeimage"
)
// pauseFn is a deployment update function that pauses the rollout.
var pauseFn = func(update *apps.Deployment) {
	update.Spec.Paused = true
}

// resumeFn is a deployment update function that resumes a paused rollout.
var resumeFn = func(update *apps.Deployment) {
	update.Spec.Paused = false
}
// deploymentTester bundles the testing handle, a clientset, and the Deployment
// under test so that the helper methods below can poll and mutate it.
type deploymentTester struct {
	t          *testing.T
	c          clientset.Interface
	deployment *apps.Deployment // the Deployment the tester was created with; helpers compare cluster state against it
}
// testLabels returns the label set shared by every test Deployment,
// ReplicaSet, and pod template in this package.
func testLabels() map[string]string {
	labels := make(map[string]string, 1)
	labels["name"] = "test"
	return labels
}
- // newDeployment returns a RollingUpdate Deployment with a fake container image
- func newDeployment(name, ns string, replicas int32) *apps.Deployment {
- return &apps.Deployment{
- TypeMeta: metav1.TypeMeta{
- Kind: "Deployment",
- APIVersion: "apps/v1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: ns,
- Name: name,
- },
- Spec: apps.DeploymentSpec{
- Replicas: &replicas,
- Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
- Strategy: apps.DeploymentStrategy{
- Type: apps.RollingUpdateDeploymentStrategyType,
- RollingUpdate: new(apps.RollingUpdateDeployment),
- },
- Template: v1.PodTemplateSpec{
- ObjectMeta: metav1.ObjectMeta{
- Labels: testLabels(),
- },
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {
- Name: fakeContainerName,
- Image: fakeImage,
- },
- },
- },
- },
- },
- }
- }
- func newReplicaSet(name, ns string, replicas int32) *apps.ReplicaSet {
- return &apps.ReplicaSet{
- TypeMeta: metav1.TypeMeta{
- Kind: "ReplicaSet",
- APIVersion: "apps/v1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: ns,
- Name: name,
- Labels: testLabels(),
- },
- Spec: apps.ReplicaSetSpec{
- Selector: &metav1.LabelSelector{
- MatchLabels: testLabels(),
- },
- Replicas: &replicas,
- Template: v1.PodTemplateSpec{
- ObjectMeta: metav1.ObjectMeta{
- Labels: testLabels(),
- },
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {
- Name: fakeContainerName,
- Image: fakeImage,
- },
- },
- },
- },
- },
- }
- }
- func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
- return &extensions.DeploymentRollback{
- Name: name,
- UpdatedAnnotations: annotations,
- RollbackTo: extensions.RollbackConfig{Revision: revision},
- }
- }
- // dcSetup sets up necessities for Deployment integration test, including master, apiserver, informers, and clientset
- func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
- masterConfig := framework.NewIntegrationTestMasterConfig()
- _, s, closeFn := framework.RunAMaster(masterConfig)
- config := restclient.Config{Host: s.URL}
- clientSet, err := clientset.NewForConfig(&config)
- if err != nil {
- t.Fatalf("error in create clientset: %v", err)
- }
- resyncPeriod := 12 * time.Hour
- informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)
- dc, err := deployment.NewDeploymentController(
- informers.Apps().V1().Deployments(),
- informers.Apps().V1().ReplicaSets(),
- informers.Core().V1().Pods(),
- clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
- )
- if err != nil {
- t.Fatalf("error creating Deployment controller: %v", err)
- }
- rm := replicaset.NewReplicaSetController(
- informers.Apps().V1().ReplicaSets(),
- informers.Core().V1().Pods(),
- clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
- replicaset.BurstReplicas,
- )
- return s, closeFn, rm, dc, informers, clientSet
- }
- // dcSimpleSetup sets up necessities for Deployment integration test, including master, apiserver,
- // and clientset, but not controllers and informers
- func dcSimpleSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, clientset.Interface) {
- masterConfig := framework.NewIntegrationTestMasterConfig()
- _, s, closeFn := framework.RunAMaster(masterConfig)
- config := restclient.Config{Host: s.URL}
- clientSet, err := clientset.NewForConfig(&config)
- if err != nil {
- t.Fatalf("error in create clientset: %v", err)
- }
- return s, closeFn, clientSet
- }
- // addPodConditionReady sets given pod status to ready at given time
- func addPodConditionReady(pod *v1.Pod, time metav1.Time) {
- pod.Status = v1.PodStatus{
- Phase: v1.PodRunning,
- Conditions: []v1.PodCondition{
- {
- Type: v1.PodReady,
- Status: v1.ConditionTrue,
- LastTransitionTime: time,
- },
- },
- }
- }
- func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error {
- if err := testutil.WaitForDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image, d.t.Logf, pollInterval, pollTimeout); err != nil {
- return fmt.Errorf("failed to wait for Deployment revision %s: %v", d.deployment.Name, err)
- }
- return nil
- }
- func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
- addPodConditionReady(pod, metav1.Now())
- _, err := c.CoreV1().Pods(ns).UpdateStatus(pod)
- return err
- }
- func intOrStrP(num int) *intstr.IntOrString {
- intstr := intstr.FromInt(num)
- return &intstr
- }
- // markUpdatedPodsReady manually marks updated Deployment pods status to ready,
- // until the deployment is complete
- func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
- defer wg.Done()
- ns := d.deployment.Namespace
- err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- // We're done when the deployment is complete
- if completed, err := d.deploymentComplete(); err != nil {
- return false, err
- } else if completed {
- return true, nil
- }
- // Otherwise, mark remaining pods as ready
- pods, err := d.listUpdatedPods()
- if err != nil {
- d.t.Log(err)
- return false, nil
- }
- d.t.Logf("%d/%d of deployment pods are created", len(pods), *d.deployment.Spec.Replicas)
- for i := range pods {
- pod := pods[i]
- if podutil.IsPodReady(&pod) {
- continue
- }
- if err = markPodReady(d.c, ns, &pod); err != nil {
- d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
- }
- }
- return false, nil
- })
- if err != nil {
- d.t.Fatalf("failed to mark updated Deployment pods to ready: %v", err)
- }
- }
- func (d *deploymentTester) deploymentComplete() (bool, error) {
- latest, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
- if err != nil {
- return false, err
- }
- return deploymentutil.DeploymentComplete(d.deployment, &latest.Status), nil
- }
- // Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
- // Rolling update strategy should not be broken during a rolling update.
- func (d *deploymentTester) waitForDeploymentCompleteAndCheckRolling() error {
- return testutil.WaitForDeploymentCompleteAndCheckRolling(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
- }
- // Waits for the deployment to complete, and don't check if rolling update strategy is broken.
- // Rolling update strategy is used only during a rolling update, and can be violated in other situations,
- // such as shortly after a scaling event or the deployment is just created.
- func (d *deploymentTester) waitForDeploymentComplete() error {
- return testutil.WaitForDeploymentComplete(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
- }
- // waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady waits for the Deployment to complete
- // while marking updated Deployment pods as ready at the same time.
- // Uses hard check to make sure rolling update strategy is not violated at any times.
- func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error {
- var wg sync.WaitGroup
- // Manually mark updated Deployment pods as ready in a separate goroutine
- wg.Add(1)
- go d.markUpdatedPodsReady(&wg)
- // Wait for goroutine to finish, for all return paths.
- defer wg.Wait()
- // Wait for the Deployment status to complete while Deployment pods are becoming ready
- err := d.waitForDeploymentCompleteAndCheckRolling()
- if err != nil {
- return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err)
- }
- return nil
- }
- // waitForDeploymentCompleteAndMarkPodsReady waits for the Deployment to complete
- // while marking updated Deployment pods as ready at the same time.
- func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {
- var wg sync.WaitGroup
- // Manually mark updated Deployment pods as ready in a separate goroutine
- wg.Add(1)
- go d.markUpdatedPodsReady(&wg)
- // Wait for the Deployment status to complete using soft check, while Deployment pods are becoming ready
- err := d.waitForDeploymentComplete()
- if err != nil {
- return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
- }
- // Wait for goroutine to finish
- wg.Wait()
- return nil
- }
- func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*apps.Deployment, error) {
- return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
- }
- func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {
- if err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {
- return fmt.Errorf("failed waiting for ObservedGeneration of deployment %s to become %d: %v", d.deployment.Name, desiredGeneration, err)
- }
- return nil
- }
- func (d *deploymentTester) getNewReplicaSet() (*apps.ReplicaSet, error) {
- deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
- if err != nil {
- return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err)
- }
- rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.AppsV1())
- if err != nil {
- return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
- }
- return rs, nil
- }
- func (d *deploymentTester) expectNoNewReplicaSet() error {
- rs, err := d.getNewReplicaSet()
- if err != nil {
- return err
- }
- if rs != nil {
- return fmt.Errorf("expected deployment %s not to create a new replicaset, got %v", d.deployment.Name, rs)
- }
- return nil
- }
- func (d *deploymentTester) expectNewReplicaSet() (*apps.ReplicaSet, error) {
- rs, err := d.getNewReplicaSet()
- if err != nil {
- return nil, err
- }
- if rs == nil {
- return nil, fmt.Errorf("expected deployment %s to create a new replicaset, got nil", d.deployment.Name)
- }
- return rs, nil
- }
- func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
- return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
- }
- func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
- return testutil.UpdateReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout)
- }
- // waitForDeploymentRollbackCleared waits for deployment either started rolling back or doesn't need to rollback.
- func (d *deploymentTester) waitForDeploymentRollbackCleared() error {
- return testutil.WaitForDeploymentRollbackCleared(d.c, d.deployment.Namespace, d.deployment.Name, pollInterval, pollTimeout)
- }
- // checkDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
- func (d *deploymentTester) checkDeploymentRevisionAndImage(revision, image string) error {
- return testutil.CheckDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image)
- }
- func (d *deploymentTester) waitForDeploymentUpdatedReplicasGTE(minUpdatedReplicas int32) error {
- return testutil.WaitForDeploymentUpdatedReplicasGTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
- }
- func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType apps.DeploymentConditionType) error {
- return testutil.WaitForDeploymentWithCondition(d.c, d.deployment.Namespace, d.deployment.Name, reason, condType, d.t.Logf, pollInterval, pollTimeout)
- }
- func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
- selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector)
- if err != nil {
- return nil, fmt.Errorf("failed to parse deployment selector: %v", err)
- }
- pods, err := d.c.CoreV1().Pods(d.deployment.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
- if err != nil {
- return nil, fmt.Errorf("failed to list deployment pods, will retry later: %v", err)
- }
- newRS, err := d.getNewReplicaSet()
- if err != nil {
- return nil, fmt.Errorf("failed to get new replicaset of deployment %q: %v", d.deployment.Name, err)
- }
- if newRS == nil {
- return nil, fmt.Errorf("unable to find new replicaset of deployment %q", d.deployment.Name)
- }
- var ownedPods []v1.Pod
- for _, pod := range pods.Items {
- rs := metav1.GetControllerOf(&pod)
- if rs.UID == newRS.UID {
- ownedPods = append(ownedPods, pod)
- }
- }
- return ownedPods, nil
- }
- func (d *deploymentTester) waitRSStable(replicaset *apps.ReplicaSet) error {
- return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
- }
- func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
- var err error
- d.deployment, err = d.updateDeployment(func(update *apps.Deployment) {
- update.Spec.Replicas = &newReplicas
- })
- if err != nil {
- return fmt.Errorf("failed updating deployment %q: %v", d.deployment.Name, err)
- }
- if err := d.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
- return err
- }
- rs, err := d.expectNewReplicaSet()
- if err != nil {
- return err
- }
- if *rs.Spec.Replicas != newReplicas {
- return fmt.Errorf("expected new replicaset replicas = %d, got %d", newReplicas, *rs.Spec.Replicas)
- }
- return nil
- }
- // waitForReadyReplicas waits for number of ready replicas to equal number of replicas.
- func (d *deploymentTester) waitForReadyReplicas() error {
- if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
- if err != nil {
- return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
- }
- return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas, nil
- }); err != nil {
- return fmt.Errorf("failed to wait for .readyReplicas to equal .replicas: %v", err)
- }
- return nil
- }
- // markUpdatedPodsReadyWithoutComplete marks updated Deployment pods as ready without waiting for deployment to complete.
- func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error {
- if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- pods, err := d.listUpdatedPods()
- if err != nil {
- return false, err
- }
- for i := range pods {
- pod := pods[i]
- if podutil.IsPodReady(&pod) {
- continue
- }
- if err = markPodReady(d.c, d.deployment.Namespace, &pod); err != nil {
- d.t.Logf("failed to update Deployment pod %q, will retry later: %v", pod.Name, err)
- return false, nil
- }
- }
- return true, nil
- }); err != nil {
- return fmt.Errorf("failed to mark all updated pods as ready: %v", err)
- }
- return nil
- }
- // Verify all replicas fields of DeploymentStatus have desired count.
- // Immediately return an error when found a non-matching replicas field.
- func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error {
- deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
- if err != nil {
- return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
- }
- if deployment.Status.Replicas != replicas {
- return fmt.Errorf("unexpected .replicas: expect %d, got %d", replicas, deployment.Status.Replicas)
- }
- if deployment.Status.UpdatedReplicas != updatedReplicas {
- return fmt.Errorf("unexpected .updatedReplicas: expect %d, got %d", updatedReplicas, deployment.Status.UpdatedReplicas)
- }
- if deployment.Status.ReadyReplicas != readyReplicas {
- return fmt.Errorf("unexpected .readyReplicas: expect %d, got %d", readyReplicas, deployment.Status.ReadyReplicas)
- }
- if deployment.Status.AvailableReplicas != availableReplicas {
- return fmt.Errorf("unexpected .replicas: expect %d, got %d", availableReplicas, deployment.Status.AvailableReplicas)
- }
- if deployment.Status.UnavailableReplicas != unavailableReplicas {
- return fmt.Errorf("unexpected .replicas: expect %d, got %d", unavailableReplicas, deployment.Status.UnavailableReplicas)
- }
- return nil
- }
|