#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

run_cluster_management_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing cluster-management commands"

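  # (Note: as I understand the test helper, kube::test::get_object_assert
  # renders the object with the given Go template — roughly `kubectl get
  # <resource> -o go-template=<tmpl>` — and compares the output against the
  # expected string.)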
  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'

  # create test pods we can work with
  kubectl create -f - "${kube_flags[@]:?}" << __EOF__
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "test-pod-1",
    "labels": {
      "e": "f"
    }
  },
  "spec": {
    "containers": [
      {
        "name": "container-1",
        "resources": {},
        "image": "test-image"
      }
    ]
  }
}
__EOF__

  kubectl create -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "test-pod-2",
    "labels": {
      "c": "d"
    }
  },
  "spec": {
    "containers": [
      {
        "name": "container-1",
        "resources": {},
        "image": "test-image"
      }
    ]
  }
}
__EOF__
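
  # (The two pods are identical except for name and labels — e=f vs c=d — so a
  # label selector can single one of them out in the drain tests below.)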

  # taint/untaint
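  # Taint syntax, for reference:
  #   kubectl taint node NODE KEY=VALUE:EFFECT   # add or update a taint
  #   kubectl taint node NODE KEY:EFFECT         # add a taint with an empty value
  #   kubectl taint node NODE KEY-               # remove every taint with key KEY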
  # Pre-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
  kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
  # taint can add a taint (<key>=<value>:<effect>)
  kubectl taint node 127.0.0.1 dedicated=foo:PreferNoSchedule
  kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=foo:PreferNoSchedule"
  # taint can remove a taint
  kubectl taint node 127.0.0.1 dedicated-
  # taint can add a taint (<key>:<effect>)
  kubectl taint node 127.0.0.1 dedicated:PreferNoSchedule
  kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=<no value>:PreferNoSchedule"
  # taint can remove a taint
  kubectl taint node 127.0.0.1 dedicated-
  # Post-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
  kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output

  ### kubectl cordon update with --dry-run does not mark node unschedulable
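  # (--dry-run here is the boolean, client-side form: the command only prints
  # what it would do; nothing is persisted to the API server.)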
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl cordon "127.0.0.1" --dry-run
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl drain update with --dry-run does not mark node unschedulable
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl drain "127.0.0.1" --dry-run
  # Post-condition: node still exists, node is still schedulable
  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl drain with --pod-selector only evicts pods that match the given selector
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  # Pre-condition: test-pod-1 and test-pod-2 exist
  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
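  # ('e in (f)' is a set-based label selector, equivalent here to the equality
  # selector e=f, so only test-pod-1 matches.)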
  kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
  # only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
  kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
  # delete pod no longer in use
  kubectl delete pod/test-pod-2
  # Post-condition: node is schedulable
  kubectl uncordon "127.0.0.1"
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl uncordon update with --dry-run is a no-op
  # Pre-condition: node is already schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  response=$(kubectl uncordon "127.0.0.1" --dry-run)
  kube::test::if_has_string "${response}" 'already uncordoned'
  # Post-condition: node is still schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl drain command fails when both --selector and a node argument are given
  # Pre-condition: node exists and contains label test=label
  kubectl label node "127.0.0.1" "test=label"
  kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
  response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
  kube::test::if_has_string "${response}" 'cannot specify both a node name'

  ### kubectl cordon command fails when no arguments are passed
  # Pre-condition: node exists
  response=$(! kubectl cordon 2>&1)
  kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'

  ### kubectl cordon selects no nodes with an empty --selector=
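  # (An explicit empty --selector= appears to be treated as supplying no
  # selector at all, so kubectl demands explicit resources rather than
  # cordoning every node.)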
  # Pre-condition: node "127.0.0.1" is uncordoned
  kubectl uncordon "127.0.0.1"
  response=$(! kubectl cordon --selector= 2>&1)
  kube::test::if_has_string "${response}" 'must provide one or more resources'
  # test=label matches our node
  response=$(kubectl cordon --selector test=label)
  kube::test::if_has_string "${response}" 'node/127.0.0.1 cordoned'
  # invalid=label does not match any nodes
  response=$(kubectl cordon --selector invalid=label)
  kube::test::if_has_not_string "${response}" 'cordoned'
  # Post-condition: node "127.0.0.1" is cordoned
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'

  set +o nounset
  set +o errexit
}