#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Strict mode: abort on unhandled errors, unset variables, and failed
# pipeline stages.
set -o errexit
set -o nounset
set -o pipefail
# Exercises basic DaemonSet behavior via kubectl: apply idempotency,
# generation bumps from each `kubectl set` mutation, and rollout restart.
# Relies on helpers (create_and_use_new_namespace, kube::test::*) and
# variables (id_field, generation_field, kube_flags) defined by the
# test framework that sources this file.
run_daemonset_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets)"

  ### Create a rolling update DaemonSet
  # Pre-condition: no DaemonSet exists
  kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
  # Template Generation should be 1
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '1'
  # Re-applying an identical manifest is a no-op...
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
  # ...so Template Generation should stay 1
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '1'
  # Test set commands: each spec mutation bumps the generation by one
  kubectl set image daemonsets/bind "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd"
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '2'
  kubectl set env daemonsets/bind "${kube_flags[@]:?}" foo=bar
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '3'
  kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '4'
  # Rollout restart should change generation
  kubectl rollout restart daemonset/bind "${kube_flags[@]:?}"
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '5'

  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"

  set +o nounset
  set +o errexit
}
# Exercises DaemonSet rollout history and rollback: ControllerRevisions
# recording the apply command (via --record), `rollout undo` to a specific
# revision, dry-run undo (client and server), undo to a nonexistent
# revision (must fail), and undo to the last revision.
run_daemonset_history_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"

  ### Test rolling back a DaemonSet
  # Pre-condition: no DaemonSet or its pods exists
  kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  # Command
  # Create a DaemonSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]:?}"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"

  # Rollback to revision 1 - should be no-op
  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}"
  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"

  # Update the DaemonSet (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]:?}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"

  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo daemonset --dry-run=client "${kube_flags[@]:?}"
  kubectl rollout undo daemonset --dry-run=server "${kube_flags[@]:?}"
  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"

  # Rollback to revision 1
  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"

  # Rollback to revision 1000000 - should fail and leave the object untouched
  output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" "unable to find specified revision"
  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"

  # Rollback to last revision
  kubectl rollout undo daemonset "${kube_flags[@]:?}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"

  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"

  set +o nounset
  set +o errexit
}
# Exercises `kubectl apply` against Deployments: propagation of user-defined
# null values (label removal), conflict handling on resourceVersion, and
# --force recreation semantics including restore-on-invalid behavior.
run_kubectl_apply_deployments_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply deployments"

  ## kubectl apply should propagate user defined null values
  # Pre-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply base deployment
  kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]:?}"
  # check right deployment exists
  kube::test::get_object_assert 'deployments my-depl' "{{${id_field:?}}}" 'my-depl'
  # check right labels exists
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
  # apply new deployment with new template labels
  kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]:?}"
  # template/selector labels survive; the explicit null in l2 removes the
  # metadata label, so it should now be absent
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
  # cleanup
  # need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
  kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
  # Post-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::wait_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::wait_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  # kubectl apply deployment --overwrite=true --force=true
  # Pre-Condition: no deployment exists
  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply deployment nginx
  kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]:?}"
  # check right deployment exists
  kube::test::get_object_assert 'deployment nginx' "{{${id_field:?}}}" 'nginx'
  # apply deployment with new labels and a conflicting resourceVersion
  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
  # apply deployment with --force and --overwrite will succeed
  kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
  # check the changed deployment
  output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]:?}" |grep nginx2)
  kube::test::if_has_string "${output_message}" '"name": "nginx2"'
  # applying a resource (with --force) that is both conflicting and invalid will
  # cause the server to only return a "Conflict" error when we attempt to patch.
  # This means that we will delete the existing resource after receiving 5 conflict
  # errors in a row from the server, and will attempt to create the modified
  # resource that we are passing to "apply". Since the modified resource is also
  # invalid, we will receive an invalid error when we attempt to create it, after
  # having deleted the old resource. Ensure that when this case is reached, the
  # old resource is restored once again, and the validation error is printed.
  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'Invalid value'
  # Ensure that the old object has been restored
  kube::test::get_object_assert 'deployment nginx' "{{${template_labels:?}}}" 'nginx2'

  # cleanup
  kubectl delete deployments --all --grace-period=10

  set +o nounset
  set +o errexit
}
# Broad Deployment test suite: create (old/new generators, with command,
# UnixUserID validation), cascading vs. orphaning deletion, autoscale,
# rollback/pause/resume/restart of rollouts, `kubectl set image`, and
# `kubectl set env` from configmaps/secrets.
run_deployment_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing deployments"

  # Test kubectl create deployment (using default - old generator)
  kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
  # Post-Condition: Deployment "nginx" is created.
  kube::test::get_object_assert 'deploy test-nginx-extensions' "{{${container_name_field:?}}}" 'nginx'
  # and old generator was used, iow. old defaults are applied
  output_message=$(kubectl get deployment.apps/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_not_string "${output_message}" '2'
  # Ensure we can interact with deployments through apps endpoints
  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'apps/v1'
  # Clean up
  kubectl delete deployment test-nginx-extensions "${kube_flags[@]:?}"

  # Test kubectl create deployment
  kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1
  # Post-Condition: Deployment "nginx" is created.
  kube::test::get_object_assert 'deploy test-nginx-apps' "{{${container_name_field:?}}}" 'nginx'
  # and new generator was used, iow. new defaults are applied
  output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_string "${output_message}" '10'
  # Ensure we can interact with deployments through apps endpoints
  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'apps/v1'
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
  # Clean up
  kubectl delete deployment test-nginx-apps "${kube_flags[@]:?}"

  ### Test kubectl create deployment with image and command
  # Pre-Condition: No deployment exists.
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Dry-run command: neither client nor server dry-run may persist anything
  kubectl create deployment nginx-with-command --dry-run=client --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
  kubectl create deployment nginx-with-command --dry-run=server --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
  # Post-Condition: Deployment "nginx" is created.
  kube::test::get_object_assert 'deploy nginx-with-command' "{{${container_name_field:?}}}" 'nginx'
  # Clean up
  kubectl delete deployment nginx-with-command "${kube_flags[@]:?}"

  ### Test kubectl create deployment should not fail validation
  # Pre-Condition: No deployment exists.
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]:?}"
  # Post-Condition: Deployment "deployment-with-unixuserid" is created.
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'deployment-with-unixuserid:'
  # Clean up
  kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]:?}"

  ### Test cascading deletion
  ## Test that rs is deleted when deployment is deleted.
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create deployment
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '3'
  # Deleting the deployment should delete the rs.
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"
  kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  ## Test that rs is not deleted when deployment is deleted with cascade set to false.
  # Pre-condition: no deployment and rs exist
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create deployment
  kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1'
  # Delete the deployment with cascade set to false.
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" --cascade=false
  # Wait for the deployment to be deleted and then verify that rs is not
  # deleted.
  kube::test::wait_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1'
  # Cleanup
  # Find the name of the rs to be deleted.
  output_message=$(kubectl get rs "${kube_flags[@]:?}" -o template --template="{{range.items}}{{${id_field:?}}}{{end}}")
  kubectl delete rs "${output_message}" "${kube_flags[@]:?}"

  ### Auto scale deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Pre-condition: no hpa exists
  kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
  # Dry-run autoscale: no hpa should be persisted
  kubectl-with-retry autoscale deployment nginx-deployment --dry-run=client "${kube_flags[@]:?}" --min=2 --max=3
  kubectl-with-retry autoscale deployment nginx-deployment --dry-run=server "${kube_flags[@]:?}" --min=2 --max=3
  kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':'
  # autoscale 2~3 pods, no CPU utilization specified
  kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3
  kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
  # Clean up
  # Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
  kubectl delete hpa nginx-deployment "${kube_flags[@]:?}"
  kubectl delete deployment.apps nginx-deployment "${kube_flags[@]:?}"

  ### Rollback a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  # Create a deployment (revision 1)
  kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx:'
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Update the deployment (revision 2)
  kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo deployment nginx --dry-run=client "${kube_flags[@]:?}" | grep "test-cmd"
  kubectl rollout undo deployment nginx --dry-run=server "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1
  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}"
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1000000 - should be no-op
  ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}" || exit 1
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to last revision
  kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Pause the deployment
  kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]:?}"
  # A paused deployment cannot be rolled back
  ! kubectl rollout undo deployment nginx "${kube_flags[@]:?}" || exit 1
  # A paused deployment cannot be restarted
  ! kubectl rollout restart deployment nginx "${kube_flags[@]:?}" || exit 1
  # Resume the deployment
  kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]:?}"
  # The resumed deployment can now be rolled back
  kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
  # Check that the new replica set has all old revisions stored in an annotation
  newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
  kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
  # Check that trying to watch the status of a superseded revision returns an error
  ! kubectl rollout status deployment/nginx --revision=3 || exit 1
  # Restarting the deployment creates a new replicaset
  kubectl rollout restart deployment/nginx
  sleep 1
  newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
  rs="$(kubectl get rs "${newrs}" -o yaml)"
  kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\""
  # Create a second deployment from the same manifest under a new name
  ${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}"
  # Deletion of both deployments should not be blocked
  kubectl delete deployment nginx2 "${kube_flags[@]:?}"
  # Clean up
  kubectl delete deployment nginx "${kube_flags[@]:?}"

  ### Set image of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Dry-run set the deployment's image: images must be unchanged afterwards
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" --dry-run=client "${kube_flags[@]:?}"
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" --dry-run=server "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set the deployment's image
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set non-existing container should fail
  ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}" || exit 1
  # Set image of deployments without specifying name
  kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a deployment specified by file
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a local file without talking to the server
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" --local -o yaml
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of all containers of the deployment
  kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Set image of all containers of the deployment again when image not change
  kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"

  ### Set env of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}"
  kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]:?}"
  kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
  #configmap is special here due to controller will create kube-root-ca.crt for each namespace automatically
  kube::test::get_object_assert 'configmaps/test-set-env-config' "{{${id_field:?}}}" 'test-set-env-config'
  kube::test::get_object_assert secret "{{range.items}}{{${id_field:?}}}:{{end}}" 'test-set-env-secret:'
  # Set env of deployments by configmap from keys
  kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]:?}"
  # Assert correct value in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2'
  # Assert single value in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
  # Dry-run set env: env list length must stay 1
  kubectl set env deployment nginx-deployment --dry-run=client --from=configmap/test-set-env-config "${kube_flags[@]:?}"
  kubectl set env deployment nginx-deployment --dry-run=server --from=configmap/test-set-env-config "${kube_flags[@]:?}"
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
  # Set env of deployments by configmap
  kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]:?}"
  # Assert all values in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2'
  # Set env of deployments for all container
  kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]:?}"
  # Set env of deployments for specific container
  kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]:?}"
  # Set env of deployments by secret from keys
  kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]:?}"
  # Set env of deployments by secret
  kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]:?}"
  # Remove specific env of deployment (trailing '-' deletes the var)
  kubectl set env deployment nginx-deployment env-

  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"
  kubectl delete configmap test-set-env-config "${kube_flags[@]:?}"
  kubectl delete secret test-set-env-secret "${kube_flags[@]:?}"

  set +o nounset
  set +o errexit
}
# Exercises StatefulSet rollout history and rollback, mirroring the DaemonSet
# history tests: ControllerRevision recording, undo to a revision, dry-run
# undo, undo to a nonexistent revision (must fail), and undo to last revision.
run_statefulset_history_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"

  ### Test rolling back a StatefulSet
  # Pre-condition: no statefulset or its pods exists
  kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  # Command
  # Create a StatefulSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]:?}"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"

  # Rollback to revision 1 - should be no-op
  kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}"
  kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"

  # Update the statefulset (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]:?}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"

  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo statefulset --dry-run=client "${kube_flags[@]:?}"
  kubectl rollout undo statefulset --dry-run=server "${kube_flags[@]:?}"
  kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"

  # Rollback to revision 1
  kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"

  # Rollback to revision 1000000 - should fail and leave the object untouched
  output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" "unable to find specified revision"
  kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"

  # Rollback to last revision
  kubectl rollout undo statefulset "${kube_flags[@]:?}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"

  # Clean up - delete newest configuration
  kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]:?}"
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit
}
- # Exercises kubectl against v1 StatefulSets: create, scale up from 0 to 1,
- # rollout-restart, and delete, asserting replica counts and observedGeneration
- # bookkeeping at each step. Relies on kube_flags / template field vars set by
- # the surrounding test harness.
- run_stateful_set_tests() {
- set -o nounset
- set -o errexit
- create_and_use_new_namespace
- kube::log::status "Testing kubectl(v1:statefulsets)"
- ### Create and stop statefulset, make sure it doesn't leak pods
- # Pre-condition: no statefulset exists
- kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- # Command: create statefulset
- kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}"
- ### Scale statefulset test with current-replicas and replicas
- # Pre-condition: 0 replicas
- kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '0'
- kube::test::wait_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '1'
- # Command: Scale up
- kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]:?}"
- # Post-condition: 1 replica, named nginx-0
- kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '1'
- kube::test::wait_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '2'
- # Typically we'd wait and confirm that N>1 replicas are up, but this framework
- # doesn't start the scheduler, so pet-0 will block all others.
- # TODO: test robust scaling in an e2e.
- wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"
- # Rollout restart should change generation
- # (use the guarded ${kube_flags[@]:?} expansion like every other call here)
- kubectl rollout restart statefulset nginx "${kube_flags[@]:?}"
- kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '3'
- ### Clean up
- kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}"
- # Post-condition: no pods from statefulset controller
- wait-for-pods-with-label "app=nginx-statefulset" ""
- set +o nounset
- set +o errexit
- }
- # Exercises kubectl against v1 ReplicaSets: create/delete (cascading and
- # orphaning), describe, scale (by current-replicas, by selector, --all),
- # expose as a service, `kubectl set` generation bumps (with client/server
- # dry-run no-op checks), multi-delete, and autoscale (HPA) when supported.
- # Relies on kube_flags / template field vars set by the test harness.
- run_rs_tests() {
- set -o nounset
- set -o errexit
- create_and_use_new_namespace
- kube::log::status "Testing kubectl(v1:replicasets)"
- ### Create and stop a replica set, make sure it doesn't leak pods
- # Pre-condition: no replica set exists
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- # Command
- kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
- kube::log::status "Deleting rs"
- kubectl delete rs frontend "${kube_flags[@]:?}"
- # Post-condition: no pods from frontend replica set
- kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- ### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
- # Pre-condition: no replica set exists
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- # Command (guarded ${kube_flags[@]:?} expansion, consistent with the rest of this file)
- kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
- # wait for all 3 pods to be set up
- kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
- kube::log::status "Deleting rs"
- kubectl delete rs frontend "${kube_flags[@]:?}" --cascade=false
- # Wait for the rs to be deleted.
- kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- # Post-condition: All 3 pods still remain from frontend replica set
- kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
- # Cleanup
- kubectl delete pods -l "tier=frontend" "${kube_flags[@]:?}"
- kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- ### Create replica set frontend from YAML
- # Pre-condition: no replica set exists
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- # Command
- kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
- # Post-condition: frontend replica set is created
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
- # Describe command should print detailed information
- kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
- # Describe command should print events information by default
- kube::test::describe_object_events_assert rs 'frontend'
- # Describe command should not print events information when show-events=false
- kube::test::describe_object_events_assert rs 'frontend' false
- # Describe command should print events information when show-events=true
- kube::test::describe_object_events_assert rs 'frontend' true
- # Describe command (resource only) should print detailed information
- kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
- # Describe command should print events information by default
- kube::test::describe_resource_events_assert rs
- # Describe command should not print events information when show-events=false
- kube::test::describe_resource_events_assert rs false
- # Describe command should print events information when show-events=true
- kube::test::describe_resource_events_assert rs true
- # Describe command (resource only) should print detailed information
- kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
- ### Scale replica set frontend with current-replicas and replicas
- # Pre-condition: 3 replicas
- kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
- # Command
- kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]:?}"
- # Post-condition: 2 replicas
- kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '2'
- # Set up three deploy, two deploy have same label
- kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]:?}"
- kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]:?}"
- kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]:?}"
- kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
- kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
- kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
- # Test kubectl scale --selector
- kubectl scale deploy --replicas=2 -l run=hello
- kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
- kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
- kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
- # Test kubectl scale --all
- kubectl scale deploy --replicas=3 --all
- kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
- kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
- kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
- # Clean-up
- kubectl delete rs frontend "${kube_flags[@]:?}"
- kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]:?}"
- ### Expose replica set as service
- kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
- # Pre-condition: 3 replicas
- kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
- # Command
- kubectl expose rs frontend --port=80 "${kube_flags[@]:?}"
- # Post-condition: service exists and the port is unnamed
- kube::test::get_object_assert 'service frontend' "{{${port_name:?}}} {{${port_field:?}}}" '<no value> 80'
- # Create a service using service/v1 generator
- kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]:?}"
- # Post-condition: service exists and the port is named default.
- kube::test::get_object_assert 'service frontend-2' "{{${port_name:?}}} {{${port_field:?}}}" 'default 80'
- # Cleanup services
- kubectl delete service frontend{,-2} "${kube_flags[@]:?}"
- # Test set commands
- # Pre-condition: frontend replica set exists at generation 1
- kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '1'
- kubectl set image rs/frontend "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd"
- kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '2'
- kubectl set env rs/frontend "${kube_flags[@]:?}" foo=bar
- kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3'
- # Dry-run (client and server) must not bump the generation
- kubectl set resources rs/frontend --dry-run=client "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
- kubectl set resources rs/frontend --dry-run=server "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
- kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3'
- kubectl set resources rs/frontend "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
- kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4'
- kubectl set serviceaccount rs/frontend --dry-run=client "${kube_flags[@]:?}" serviceaccount1
- kubectl set serviceaccount rs/frontend --dry-run=server "${kube_flags[@]:?}" serviceaccount1
- kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4'
- kubectl set serviceaccount rs/frontend "${kube_flags[@]:?}" serviceaccount1
- kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '5'
- ### Delete replica set with id
- # Pre-condition: frontend replica set exists
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
- # Command
- kubectl delete rs frontend "${kube_flags[@]:?}"
- # Post-condition: no replica set exists
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- ### Create two replica sets
- # Pre-condition: no replica set exists
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- # Command
- kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
- kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]:?}"
- # Post-condition: frontend and redis-slave
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:'
- ### Delete multiple replica sets at once
- # Pre-condition: frontend and redis-slave
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:'
- # Command
- kubectl delete rs frontend redis-slave "${kube_flags[@]:?}" # delete multiple replica sets at once
- # Post-condition: no replica set exists
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- if kube::test::if_supports_resource "horizontalpodautoscalers" ; then
- ### Auto scale replica set
- # Pre-condition: no replica set exists
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
- # Command
- kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
- kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
- # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
- kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" --max=2 --cpu-percent=70
- kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '1 2 70'
- kubectl delete hpa frontend "${kube_flags[@]:?}"
- # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
- kubectl autoscale rs frontend "${kube_flags[@]:?}" --min=2 --max=3
- kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
- kubectl delete hpa frontend "${kube_flags[@]:?}"
- # autoscale without specifying --max should fail
- ! kubectl autoscale rs frontend "${kube_flags[@]:?}" || exit 1
- # Clean up
- kubectl delete rs frontend "${kube_flags[@]:?}"
- fi
- set +o nounset
- set +o errexit
- }
|