  1. #!/usr/bin/env bash
  2. # Copyright 2018 The Kubernetes Authors.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. set -o errexit
  16. set -o nounset
  17. set -o pipefail
  18. run_cluster_management_tests() {
  19. set -o nounset
  20. set -o errexit
  21. kube::log::status "Testing cluster-management commands"
  22. kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
  23. # create test pods we can work with
  24. kubectl create -f - "${kube_flags[@]}" << __EOF__
  25. {
  26. "kind": "Pod",
  27. "apiVersion": "v1",
  28. "metadata": {
  29. "name": "test-pod-1",
  30. "labels": {
  31. "e": "f"
  32. }
  33. },
  34. "spec": {
  35. "containers": [
  36. {
  37. "name": "container-1",
  38. "resources": {},
  39. "image": "test-image"
  40. }
  41. ]
  42. }
  43. }
  44. __EOF__
  45. kubectl create -f - "${kube_flags[@]}" << __EOF__
  46. {
  47. "kind": "Pod",
  48. "apiVersion": "v1",
  49. "metadata": {
  50. "name": "test-pod-2",
  51. "labels": {
  52. "c": "d"
  53. }
  54. },
  55. "spec": {
  56. "containers": [
  57. {
  58. "name": "container-1",
  59. "resources": {},
  60. "image": "test-image"
  61. }
  62. ]
  63. }
  64. }
  65. __EOF__
  66. # taint/untaint
  67. # Pre-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
  68. kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
  69. # taint can add a taint
  70. kubectl taint node 127.0.0.1 dedicated=foo:PreferNoSchedule
  71. kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=foo:PreferNoSchedule"
  72. # taint can remove a taint
  73. kubectl taint node 127.0.0.1 dedicated-
  74. # Post-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
  75. kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
  76. ### kubectl cordon update with --dry-run does not mark node unschedulable
  77. # Pre-condition: node is schedulable
  78. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  79. kubectl cordon "127.0.0.1" --dry-run
  80. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  81. ### kubectl drain update with --dry-run does not mark node unschedulable
  82. # Pre-condition: node is schedulable
  83. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  84. kubectl drain "127.0.0.1" --dry-run
  85. # Post-condition: node still exists, node is still schedulable
  86. kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
  87. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  88. ### kubectl drain with --pod-selector only evicts pods that match the given selector
  89. # Pre-condition: node is schedulable
  90. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  91. # Pre-condition: test-pod-1 and test-pod-2 exist
  92. kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
  93. kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
  94. # only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
  95. kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
  96. # delete pod no longer in use
  97. kubectl delete pod/test-pod-2
  98. # Post-condition: node is schedulable
  99. kubectl uncordon "127.0.0.1"
  100. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  101. ### kubectl uncordon update with --dry-run is a no-op
  102. # Pre-condition: node is already schedulable
  103. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  104. response=$(kubectl uncordon "127.0.0.1" --dry-run)
  105. kube::test::if_has_string "${response}" 'already uncordoned'
  106. # Post-condition: node is still schedulable
  107. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  108. ### kubectl drain command fails when both --selector and a node argument are given
  109. # Pre-condition: node exists and contains label test=label
  110. kubectl label node "127.0.0.1" "test=label"
  111. kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
  112. response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
  113. kube::test::if_has_string "${response}" 'cannot specify both a node name'
  114. ### kubectl cordon command fails when no arguments are passed
  115. # Pre-condition: node exists
  116. response=$(! kubectl cordon 2>&1)
  117. kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'
  118. ### kubectl cordon selects no nodes with an empty --selector=
  119. # Pre-condition: node "127.0.0.1" is uncordoned
  120. kubectl uncordon "127.0.0.1"
  121. response=$(! kubectl cordon --selector= 2>&1)
  122. kube::test::if_has_string "${response}" 'must provide one or more resources'
  123. # test=label matches our node
  124. response=$(kubectl cordon --selector test=label)
  125. kube::test::if_has_string "${response}" 'node/127.0.0.1 cordoned'
  126. # invalid=label does not match any nodes
  127. response=$(kubectl cordon --selector invalid=label)
  128. kube::test::if_has_not_string "${response}" 'cordoned'
  129. # Post-condition: node "127.0.0.1" is cordoned
  130. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
  131. set +o nounset
  132. set +o errexit
  133. }