#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail
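
# Note for readers: the kube::test::get_object_assert / wait_object_assert helpers
# (defined elsewhere in the test framework) render the given go-template against
# `kubectl get <resource>` output and compare the result with the expected string,
# while the describe_*_assert helpers check that `kubectl describe` output contains
# the given substrings. The doubled backslash-escaped quotes in some templates
# appear to be there so the quotes survive an extra round of shell expansion inside
# those helpers.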
run_configmap_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing configmaps"
  kubectl create -f test/fixtures/doc-yaml/user-guide/configmap/configmap.yaml
  kube::test::get_object_assert 'configmap/test-configmap' "{{${id_field:?}}}" 'test-configmap'
  kubectl delete configmap test-configmap "${kube_flags[@]:?}"

  ### Create a new namespace
  # Pre-condition: the test-configmaps namespace does not exist
  kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"test-configmaps\\\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create namespace test-configmaps
  # Post-condition: namespace 'test-configmaps' is created.
  kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'

  ### Create a generic configmap in a specific namespace
  # Pre-condition: configmaps test-configmap and test-binary-configmap do not exist
  kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':'
  kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-binary-configmap\\\" }}found{{end}}{{end}}:" ':'
  # Dry-run command
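  # (--dry-run=client validates and prints the object locally without contacting the
  # API server; --dry-run=server submits the request but the server does not persist
  # the object, which is why the asserts below still expect nothing to exist.)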
  kubectl create configmap test-configmap --dry-run=client --from-literal=key1=value1 --namespace=test-configmaps
  kubectl create configmap test-configmap --dry-run=server --from-literal=key1=value1 --namespace=test-configmaps
  kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
  kubectl create configmap test-binary-configmap --from-file <( head -c 256 /dev/urandom ) --namespace=test-configmaps
  # Post-condition: configmap exists and has expected values
  kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
  kube::test::get_object_assert 'configmap/test-binary-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-binary-configmap'
  grep -q "key1: value1" <<< "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}")"
  grep -q "binaryData" <<< "$(kubectl get configmap/test-binary-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}")"
  # Clean-up
  kubectl delete configmap test-configmap --namespace=test-configmaps
  kubectl delete configmap test-binary-configmap --namespace=test-configmaps
  kubectl delete namespace test-configmaps

  set +o nounset
  set +o errexit
}

# Runs all pod related tests.
run_pod_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl(v1:pods)"

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod POD is created
  kubectl get "${kube_flags[@]}" pods -o json
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
  kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
  kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
  # Repeat above test using jsonpath template
  kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
  # Describe command should print detailed information
  kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert pods 'valid-pod'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert pods 'valid-pod' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert pods 'valid-pod' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert pods
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert pods false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert pods true

  ### Validate Export ###
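  # (--export strips cluster-specific fields such as metadata.namespace from the
  # returned object, which is why the namespace renders as '<no value>' below; the
  # flag has since been deprecated.)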
  kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" '<no value> valid-pod' "--export=true"

  ### Dump current valid-pod POD
  output_pod=$(kubectl get pod valid-pod -o yaml "${kube_flags[@]}")

  ### Delete POD valid-pod by id
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Delete POD valid-pod by id with --now
  # Pre-condition: valid-pod POD exists
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pod valid-pod "${kube_flags[@]}" --now
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Delete POD valid-pod by id with --grace-period=0
  # Pre-condition: valid-pod POD exists
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command succeeds without --force by waiting
  kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from dumped YAML
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  echo "${output_pod}" | ${SED} '/namespace:/d' | kubectl create -f - "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete POD valid-pod from JSON
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete POD valid-pod with label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from YAML
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  output_message=$(kubectl get pods --field-selector metadata.name=valid-pod "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" "valid-pod"
  # Command
  phase=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .status.phase }}')
  output_message=$(kubectl get pods --field-selector status.phase="${phase}" "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" "valid-pod"

  ### Deleting PODs with no parameter mustn't kill everything
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  ! kubectl delete pods "${kube_flags[@]}" || exit 1
  # Post-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Deleting PODs with --all and a label selector is not permitted
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  ! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}" || exit 1
  # Post-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete all PODs
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 --force # --all removes all the pods
  # Post-condition: no POD exists
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" "{{range.items}}{{$id_field}}:{{end}}" ''

  # Detailed tests for describe pod output
  ### Create a new namespace
  # Pre-condition: the test-kubectl-describe-pod namespace does not exist
  kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"test-kubectl-describe-pod\\\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create namespace test-kubectl-describe-pod
  # Post-condition: namespace 'test-kubectl-describe-pod' is created.
  kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod'

  ### Create a generic secret
  # Pre-condition: no SECRET exists
  kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Dry-run command
  kubectl create secret generic test-secret --dry-run=client --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
  kubectl create secret generic test-secret --dry-run=server --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
  kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
  # Post-condition: secret exists and has expected values
  kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret'
  kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{${secret_type:?}}}" 'test-type'

  ### Create a generic configmap
  # Pre-condition: CONFIGMAP test-configmap does not exist
  #kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" ''
  kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':'
  #kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod
  # Post-condition: configmap exists and has expected values
  kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'

  ### Create a pod disruption budget with minAvailable
  # Pre-condition: pdb does not exist
  kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-pdb-1\\\" }}found{{end}}{{end}}:" ':'
  # Dry-run command
  kubectl create pdb test-pdb-1 --dry-run=client --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
  kubectl create pdb test-pdb-1 --dry-run=server --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
  kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-pdb-1\\\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-1 --namespace=test-kubectl-describe-pod' "{{${pdb_min_available:?}}}" '2'
  # Command
  kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'

  ### Create a pod disruption budget with maxUnavailable
  # Command
  kubectl create pdb test-pdb-3 --selector=app=rails --max-unavailable=2 --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-3 --namespace=test-kubectl-describe-pod' "{{${pdb_max_unavailable:?}}}" '2'
  # Command
  kubectl create pdb test-pdb-4 --selector=app=rails --max-unavailable=50% --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'

  ### Fail creating a pod disruption budget if both maxUnavailable and minAvailable specified
  ! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod || exit 1

  # Create a pod that consumes secret, configmap, and downward API keys as envs
  kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod
  kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
  # Describe command (resource only) should print detailed information about environment variables
  kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
  # Clean-up
  kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod
  kubectl delete secret test-secret --namespace=test-kubectl-describe-pod
  kubectl delete configmap test-configmap --namespace=test-kubectl-describe-pod
  kubectl delete pdb/test-pdb-1 pdb/test-pdb-2 pdb/test-pdb-3 pdb/test-pdb-4 --namespace=test-kubectl-describe-pod
  kubectl delete namespace test-kubectl-describe-pod

  ### Priority Class
  kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" ':'
  # Dry-run command
  kubectl create priorityclass test-priorityclass --dry-run=client
  kubectl create priorityclass test-priorityclass --dry-run=server
  kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create priorityclass test-priorityclass
  kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" 'found:'
  kubectl delete priorityclass test-priorityclass

  ### Create two PODs
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  kubectl create -f test/e2e/testing-manifests/kubectl/agnhost-master-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod and agnhost-master PODs are created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'agnhost-master:valid-pod:'

  ### Delete multiple PODs at once
  # Pre-condition: valid-pod and agnhost-master PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'agnhost-master:valid-pod:'
  # Command
  kubectl delete pods valid-pod agnhost-master "${kube_flags[@]}" --grace-period=0 --force # delete multiple pods at once
  # Post-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create valid-pod POD
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Dry-run label the valid-pod POD
  # Pre-condition: valid-pod is not labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'
  # Command
  kubectl label pods valid-pod new-name=new-valid-pod --dry-run=client "${kube_flags[@]}"
  kubectl label pods valid-pod new-name=new-valid-pod --dry-run=server "${kube_flags[@]}"
  # Post-condition: valid-pod is not labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'

  ### Label the valid-pod POD
  # Pre-condition: valid-pod is not labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'
  # Command
  kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
  # Post-condition: valid-pod is labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'

  ### Label the valid-pod POD with empty label value
  # Pre-condition: valid-pod does not have label "emptylabel"
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
  # Command
  kubectl label pods valid-pod emptylabel="" "${kube_flags[@]}"
  # Post-condition: valid pod contains "emptylabel" with no value
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''

  ### Dry-run annotate the valid-pod POD with empty annotation value
  # Pre-condition: valid-pod does not have annotation "emptyannotation"
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'
  # Command
  kubectl annotate pods valid-pod emptyannotation="" --dry-run=client "${kube_flags[@]}"
  kubectl annotate pods valid-pod emptyannotation="" --dry-run=server "${kube_flags[@]}"
  # Post-condition: valid-pod does not have annotation "emptyannotation"
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'

  ### Annotate the valid-pod POD with empty annotation value
  # Pre-condition: valid-pod does not have annotation "emptyannotation"
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'
  # Command
  kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
  # Post-condition: valid pod contains "emptyannotation" with no value
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''

  ### Record label change
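  # (--record stores the invoking command in the kubernetes.io/change-cause
  # annotation, which is what the assertions below match against.)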
  # Pre-condition: valid-pod does not have record annotation
  kube::test::get_object_assert 'pod valid-pod' "{{range.items}}{{$annotations_field}}:{{end}}" ''
  # Command
  kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
  # Post-condition: valid-pod has record annotation
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"

  ### Do not record label change
  # Command
  kubectl label pods valid-pod no-record-change=true --record=false "${kube_flags[@]}"
  # Post-condition: valid-pod's record annotation still contains command with --record=true
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"

  ### Record label change with specified flag and previous change already recorded
  ### we are no longer tricked by data from another user into revealing more information about our client
  # Command
  kubectl label pods valid-pod new-record-change=true --record=true "${kube_flags[@]}"
  # Post-condition: valid-pod's record annotation contains new change
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*new-record-change=true.*"

  ### Delete POD by label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 --force "${kube_flags[@]}"
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create pod-with-precision POD
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}"
  # Post-condition: pod-with-precision POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:'

  ## Patch preserves precision
  # Command
  kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}'
  # Post-condition: pod-with-precision POD has patched annotation
  kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue'
  # Command
  kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}"
  # Post-condition: pod-with-precision POD has label
  kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue'
  # Command
  kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}"
  # Post-condition: pod-with-precision POD has annotation
  kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
  # Cleanup
  kubectl delete pod pod-with-precision "${kube_flags[@]}"

  ### Annotate POD YAML file locally without affecting the live pod.
  kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
  # Command
  kubectl annotate -f hack/testdata/pod.yaml annotatekey=annotatevalue "${kube_flags[@]}"
  # Pre-condition: annotatekey is annotatevalue
  kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
  # Command
  output_message=$(kubectl annotate --local -f hack/testdata/pod.yaml annotatekey=localvalue -o yaml "${kube_flags[@]}")
  echo "$output_message"
  # Post-condition: annotatekey is still annotatevalue in the live pod, but command output is the new value
  kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
  kube::test::if_has_string "${output_message}" "localvalue"
  # Cleanup
  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"

  ### Create valid-pod POD
  # Pre-condition: no services and no rcs exist
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  ## kubectl create --edit can update the label field of multiple resources. tmp-editor.sh is a fake editor
  TEMP=$(mktemp /tmp/tmp-editor-XXXXXXXX.sh)
  echo -e "#!/usr/bin/env bash\n${SED} -i \"s/mock/modified/g\" \$1" > "${TEMP}"
  chmod +x "${TEMP}"
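  # (Pointing EDITOR at the generated script makes `kubectl create --edit` run
  # non-interactively; the script rewrites every occurrence of "mock" to "modified"
  # in the manifest it is handed.)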
  # Command
  EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-json.json "${kube_flags[@]}"
  # Post-condition: service named modified and rc named modified are created
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  # Clean up
  kubectl delete service/modified "${kube_flags[@]}"
  kubectl delete rc/modified "${kube_flags[@]}"
  # Pre-condition: no services and no rcs exist
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-list.json "${kube_flags[@]}"
  # Post-condition: service named modified and rc named modified are created
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  # Clean up
  rm "${TEMP}"
  kubectl delete service/modified "${kube_flags[@]}"
  kubectl delete rc/modified "${kube_flags[@]}"

  ## kubectl create --edit won't create anything if user makes no changes
  grep -q 'Edit cancelled' <<< "$(EDITOR="cat" kubectl create --edit -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json 2>&1)"

  ## Create valid-pod POD
  # Pre-condition: no POD exists
  kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ## Patch can modify a local object
  kubectl patch --local -f test/fixtures/pkg/kubectl/cmd/patch/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o yaml | grep -q "Never"

  ## Patch fails with type restore error and exit code 1
  output_message=$(! kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"metadata":{"labels":"invalid"}}' 2>&1)
  kube::test::if_has_string "${output_message}" 'cannot restore map from string'

  ## Patch prints the message "patched (no change)" and exits 0 when the patch is a no-op
  output_message=$(kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"metadata":{"labels":{"name":"valid-pod"}}}' 2>&1)
  kube::test::if_has_string "${output_message}" 'patched (no change)'

  ## Patch pod can change image
  # Command
  kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{${image_field:?}}}:{{end}}" 'nginx:'
  # Post-condition: valid-pod has the record annotation
  kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation:?}"
  # prove that patch can use different types
  kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]'
  # Post-condition: valid-pod POD has image nginx2
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:'
  # prove that patch can use different types
  kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
  # Dry-run change image
  kubectl patch "${kube_flags[@]}" pod valid-pod --record --dry-run=client -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "not-nginx"}]}}'
  kubectl patch "${kube_flags[@]}" pod valid-pod --record --dry-run=server -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "not-nginx"}]}}'
  # Post-condition: valid-pod POD still has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
  # prove that yaml input works too
  YAML_PATCH=$'spec:\n  containers:\n  - name: kubernetes-serve-hostname\n    image: changed-with-yaml\n'
  kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
  # Post-condition: valid-pod POD has image changed-with-yaml
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'

  ## Patch pod from JSON can change image
  # Command
  kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "k8s.gcr.io/pause:3.2"}]}}'
  # Post-condition: valid-pod POD has expected image
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause:3.2:'

  ## If resourceVersion is specified in the patch, it is treated as a precondition, i.e. if the resourceVersion is different from the one stored in the server, the patch should be rejected
  ERROR_FILE="${KUBE_TEMP}/conflict-error"
  ## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
  # Command
  # Needs to retry because other party may change the resource.
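  # (Up to four attempts with exponential backoff: on a conflict the loop sleeps
  # 2^count seconds and re-reads the current resourceVersion before retrying.)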
  for count in {0..3}; do
    resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
    kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'"${resourceVersion}"'"}}' 2> "${ERROR_FILE}" || true
    if grep -q "the object has been modified" "${ERROR_FILE}"; then
      kube::log::status "retry ${count}, error: $(cat "${ERROR_FILE}")"
      rm "${ERROR_FILE}"
      sleep $((2**count))
    else
      rm "${ERROR_FILE}"
      kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
      break
    fi
  done

  ## If the resourceVersion is different from the one stored in the server, the patch will be rejected.
  resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
  ((resourceVersion+=100))
  # Command
  kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
  # Post-condition: should get an error reporting the conflict
  if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
    kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat "${ERROR_FILE}")"
  else
    kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat "${ERROR_FILE}")"
    exit 1
  fi
  rm "${ERROR_FILE}"

  ## --force replace pod can change other field, e.g., spec.container.name
  # Command
  kubectl get "${kube_flags[@]}" pod valid-pod -o json | ${SED} 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
  kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
  # Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
  kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'

  ## check replace --grace-period requires --force
  output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
  kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'

  ## check replace --timeout requires --force
  output_message=$(! kubectl replace "${kube_flags[@]}" --timeout=1s -f /tmp/tmp-valid-pod.json 2>&1)
  kube::test::if_has_string "${output_message}" '\-\-timeout must have \-\-force specified'
  # Clean up
  rm /tmp/tmp-valid-pod.json

  ## replace of a cluster-scoped resource can succeed
  # Pre-condition: a node exists
  kubectl create -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test"
  }
}
__EOF__
  kube::test::get_object_assert "node node-v1-test" "{{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:" ':'
  # Dry-run command
  kubectl replace --dry-run=server -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test",
    "annotations": {"a":"b"},
    "resourceVersion": "0"
  }
}
__EOF__
  kubectl replace --dry-run=client -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test",
    "annotations": {"a":"b"},
    "resourceVersion": "0"
  }
}
__EOF__
  kube::test::get_object_assert "node node-v1-test" "{{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:" ':'
  # Command
  kubectl replace -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test",
    "annotations": {"a":"b"},
    "resourceVersion": "0"
  }
}
__EOF__
  # Post-condition: the node command succeeds
  kube::test::get_object_assert "node node-v1-test" "{{.metadata.annotations.a}}" 'b'
  kubectl delete node node-v1-test "${kube_flags[@]}"

  ## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
  echo -e "#!/usr/bin/env bash\n${SED} -i \"s/nginx/k8s.gcr.io\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
  chmod +x /tmp/tmp-editor.sh
  # Pre-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
  grep -q 'Patch:' <<< "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true)"
  # Post-condition: valid-pod POD has image k8s.gcr.io/serve_hostname
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/serve_hostname:'
  # Clean up
  rm /tmp/tmp-editor.sh

  ## kubectl edit should work on Windows
  grep -q 'Edit cancelled' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod 2>&1)"
  grep -q 'name: valid-pod' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod)"
  grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings pod/valid-pod | file - )"
  ! grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings=false pod/valid-pod | file - )" || exit 1
  grep -q 'kind: List' <<< "$(EDITOR="cat" kubectl edit ns)"

  ### Label POD YAML file locally without affecting the live pod.
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  output_message=$(kubectl label --local --overwrite -f hack/testdata/pod.yaml name=localonlyvalue -o yaml "${kube_flags[@]}")
  echo "$output_message"
  # Post-condition: name is still valid-pod in the live pod, but command output is the new value
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  kube::test::if_has_string "${output_message}" "localonlyvalue"

  ### Overwriting an existing label is not permitted
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  ! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}" || exit 1
  # Post-condition: name is still valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'

  ### --overwrite must be used to overwrite existing label, can be applied to all resources
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
  # Post-condition: name is valid-pod-super-sayan
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'

  ### Delete POD by label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 --force "${kube_flags[@]}"
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two PODs from 1 yaml file
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
  # Post-condition: redis-master and valid-pod PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'

  ### Delete two PODs from 1 yaml file
  # Pre-condition: redis-master and valid-pod PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
  # Command
  kubectl delete -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
  # Post-condition: no PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ## kubectl apply should update configuration annotations only if apply is already called
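  ## (The kubectl.kubernetes.io/last-applied-configuration annotation is what
  ## `kubectl apply` uses for its three-way merge; create and replace never add it
  ## on their own, they only carry over whatever their input already contains.)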
  ## 1. kubectl create doesn't set the annotation
  # Pre-Condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create a pod "test-pod"
  kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is created
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
  # Post-Condition: pod "test-pod" doesn't have configuration annotation
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )" || exit 1

  ## 2. kubectl replace doesn't set the annotation
  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
  # Command: replace the pod "test-pod"
  kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is replaced
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
  # Post-Condition: pod "test-pod" doesn't have configuration annotation
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1

  ## 3. kubectl apply does set the annotation
  # Command: apply the pod "test-pod"
  kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is applied
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
  # Post-Condition: pod "test-pod" has configuration annotation
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration

  ## 4. kubectl replace updates an existing annotation
  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
  # Command: replace the pod "test-pod"
  kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is replaced
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
  # Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )"
  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
  ! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]] || exit 1
  # Clean up
  rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
  kubectl delete pods test-pod "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}

# runs specific kubectl create tests
run_create_secret_tests() {
  set -o nounset
  set -o errexit

  ### Create generic secret with explicit namespace
  # Pre-condition: secret 'mysecret' does not exist
  output_message=$(! kubectl get secrets mysecret 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'secrets "mysecret" not found'
  # Command
  output_message=$(kubectl create "${kube_flags[@]}" secret generic mysecret --dry-run --from-literal=foo=bar -o jsonpath='{.metadata.namespace}' --namespace=user-specified)
  # Post-condition: mysecret still not created since --dry-run was used
  # Output from 'create' command should contain the specified --namespace value
  failure_message=$(! kubectl get secrets mysecret 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${failure_message}" 'secrets "mysecret" not found'
  kube::test::if_has_string "${output_message}" 'user-specified'
  # Command
  output_message=$(kubectl create "${kube_flags[@]}" secret generic mysecret --dry-run --from-literal=foo=bar -o jsonpath='{.metadata.namespace}')
  # Post-condition: jsonpath for .metadata.namespace should be empty for object since --namespace was not explicitly specified
  kube::test::if_empty_string "${output_message}"

  # check to make sure that replace correctly PUTs to a URL
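  # (With --raw, create POSTs and replace PUTs the piped object body directly to the
  # given API path instead of inferring the endpoint from the object.)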
  kubectl create configmap tester-update-cm -o json --dry-run=client | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps -f -
  output_message=$(kubectl create configmap tester-update-cm --from-literal=key1=config1 -o json --dry-run | kubectl replace "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps/tester-update-cm -f -)
  # the message should show the body returned which will include a UID not present in the input
  kube::test::if_has_string "${output_message}" 'uid'
  # if the PUT was well-formed, the server will now have a key and value we can retrieve on GET
  output_message=$(kubectl get "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps/tester-update-cm 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'config1'
  # if DELETE raw works correctly, this will delete the configmap
  kubectl delete "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps/tester-update-cm
  output_message=$(! kubectl get "${kube_flags[@]}" configmap tester-update-cm 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'configmaps "tester-update-cm" not found'

  set +o nounset
  set +o errexit
}

run_secrets_test() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing secrets"

  # Ensure dry run succeeds and includes kind, apiVersion and data, and doesn't require a server connection
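  # ('key1: dmFsdWUx' is the base64 encoding of "value1"; Secret data is always
  # base64-encoded in the API object.)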
  output_message=$(kubectl create secret generic test --from-literal=key1=value1 --dry-run -o yaml --server=example.com --v=6)
  kube::test::if_has_string "${output_message}" 'kind: Secret'
  kube::test::if_has_string "${output_message}" 'apiVersion: v1'
  kube::test::if_has_string "${output_message}" 'key1: dmFsdWUx'
  kube::test::if_has_not_string "${output_message}" 'example.com'

  ### Create a new namespace
  # Pre-condition: the test-secrets namespace does not exist
  kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"test-secrets\\\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create namespace test-secrets
  # Post-condition: namespace 'test-secrets' is created.
  kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'

  ### Create a generic secret in a specific namespace
  # Pre-condition: no SECRET exists
  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
  # Post-condition: secret exists and has expected values
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
  grep -q 'key1: dmFsdWUx' <<< "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}")"
  # Clean-up
  kubectl delete secret test-secret --namespace=test-secrets

  ### Create a docker-registry secret in a specific namespace
  if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
    kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  fi
  # Pre-condition: no SECRET exists
  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='test-user@test.com' --namespace=test-secrets
  # Post-condition: secret exists and has expected values
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockerconfigjson'
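  # (The literal below is the base64-encoded .dockerconfigjson payload generated for
  # https://index.docker.io/v1/ from the username/password/email given above.)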
  735. grep -q '.dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ0ZXN0LXVzZXIiLCJwYXNzd29yZCI6InRlc3QtcGFzc3dvcmQiLCJlbWFpbCI6InRlc3QtdXNlckB0ZXN0LmNvbSIsImF1dGgiOiJkR1Z6ZEMxMWMyVnlPblJsYzNRdGNHRnpjM2R2Y21RPSJ9fX0=' <<< "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}")"
  736. # Clean-up
  737. kubectl delete secret test-secret --namespace=test-secrets
  738. ### Create a tls secret
  739. if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  740. kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  741. fi
  742. # Pre-condition: no SECRET exists
  743. kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  744. # Command
  745. kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
  746. kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
  747. kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
  748. # Clean-up
  749. kubectl delete secret test-secret --namespace=test-secrets
  750. # Command with process substitution
  751. kubectl create secret tls test-secret --namespace=test-secrets --key <(cat hack/testdata/tls.key) --cert <(cat hack/testdata/tls.crt)
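# Process substitution hands kubectl /dev/fd paths rather than regular files, so this also
# exercises --key/--cert with non-regular, non-seekable inputs.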
  752. kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
  753. kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
  754. # Clean-up
  755. kubectl delete secret test-secret --namespace=test-secrets
  756. # Create a secret using stringData
  757. kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
  758. {
  759. "kind": "Secret",
  760. "apiVersion": "v1",
  761. "metadata": {
  762. "name": "secret-string-data"
  763. },
  764. "data": {
  765. "k1":"djE=",
  766. "k2":""
  767. },
  768. "stringData": {
  769. "k2":"v2"
  770. }
  771. }
  772. __EOF__
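# Note: 'djE=' is base64("v1") and 'djI=' is base64("v2"). On write the apiserver merges stringData
# into data (stringData.k2 overrides the empty data.k2) and then drops stringData, which is what
# the next three assertions verify.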
  773. # Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
  774. kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
  775. kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
  776. kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' '<no value>'
  777. # Clean up
  778. kubectl delete secret secret-string-data --namespace=test-secrets
  779. ### Create a secret using output flags
  780. if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  781. kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  782. fi
  783. # Pre-condition: no secret exists
  784. kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  785. # Command
  786. grep -q 'test-secret:' <<< "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template="{{.metadata.name}}:")"
  787. ## Clean-up
  788. kubectl delete secret test-secret --namespace=test-secrets
  789. # Clean up
  790. kubectl delete namespace test-secrets
  791. set +o nounset
  792. set +o errexit
  793. }
  794. run_service_accounts_tests() {
  795. set -o nounset
  796. set -o errexit
  797. create_and_use_new_namespace
  798. kube::log::status "Testing service accounts"
  799. ### Create a new namespace
  800. # Pre-condition: the test-service-accounts namespace does not exist
  801. kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"test-service-accounts\\\" }}found{{end}}{{end}}:" ':'
  802. # Command
  803. kubectl create namespace test-service-accounts
  804. # Post-condition: namespace 'test-service-accounts' is created.
  805. kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'
  806. ### Create a service account in a specific namespace
  807. # Pre-condition: service account does not exist
  808. kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \\\"test-service-account\\\" }}found{{end}}{{end}}:" ':'
  809. # Dry-run command
  810. kubectl create serviceaccount test-service-account --dry-run=client --namespace=test-service-accounts
  811. kubectl create serviceaccount test-service-account --dry-run=server --namespace=test-service-accounts
  812. kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \\\"test-service-account\\\" }}found{{end}}{{end}}:" ':'
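# --dry-run=client never submits the request, while --dry-run=server submits it but does not
# persist the object; either way no ServiceAccount is created, as asserted above.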
  813. # Command
  814. kubectl create serviceaccount test-service-account --namespace=test-service-accounts
815. # Post-condition: service account exists and has the expected name
  816. kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
  817. # Clean-up
  818. kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
  819. # Clean up
  820. kubectl delete namespace test-service-accounts
  821. set +o nounset
  822. set +o errexit
  823. }
  824. run_service_tests() {
  825. set -o nounset
  826. set -o errexit
  827. # switch back to the default namespace
  828. kubectl config set-context "${CONTEXT}" --namespace=""
  829. kube::log::status "Testing kubectl(v1:services)"
  830. ### Create redis-master service from JSON
  831. # Pre-condition: Only the default kubernetes services exist
  832. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  833. # Command
  834. kubectl create -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml "${kube_flags[@]}"
  835. # Post-condition: redis-master service exists
  836. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  837. # Describe command should print detailed information
  838. kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
  839. # Describe command should print events information by default
  840. kube::test::describe_object_events_assert services 'redis-master'
  841. # Describe command should not print events information when show-events=false
  842. kube::test::describe_object_events_assert services 'redis-master' false
  843. # Describe command should print events information when show-events=true
  844. kube::test::describe_object_events_assert services 'redis-master' true
  845. # Describe command (resource only) should print detailed information
  846. kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
  847. # Describe command should print events information by default
  848. kube::test::describe_resource_events_assert services
  849. # Describe command should not print events information when show-events=false
  850. kube::test::describe_resource_events_assert services false
  851. # Describe command should print events information when show-events=true
  852. kube::test::describe_resource_events_assert services true
  853. ### set selector
  854. # prove role=master
  855. kube::test::get_object_assert 'services redis-master' "{{range${service_selector_field:?}}}{{.}}:{{end}}" "redis:master:backend:"
  856. # Set selector of a local file without talking to the server
  857. kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
858. kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run=client -o yaml "${kube_flags[@]}"
  859. # Set command to change the selector.
  860. kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan
  861. # prove role=padawan
  862. kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "padawan:"
  863. # Set command to reset the selector back to the original one.
  864. kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend
  865. # prove role=master
  866. kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
  867. # Show dry-run works on running selector
  868. kubectl set selector services redis-master role=padawan --dry-run=client -o yaml "${kube_flags[@]}"
  869. kubectl set selector services redis-master role=padawan --dry-run=server -o yaml "${kube_flags[@]}"
  870. ! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
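# --local operates only on file/stdin input, so combining it with a live resource name is
# rejected; the selector is asserted to be unchanged below.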
  871. kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
  872. # --resource-version=<current-resource-version> succeeds
  873. rv=$(kubectl get services redis-master -o jsonpath='{.metadata.resourceVersion}' "${kube_flags[@]}")
  874. kubectl set selector services redis-master rvtest1=true "--resource-version=${rv}" "${kube_flags[@]}"
  875. # --resource-version=<non-current-resource-version> fails
  876. output_message=$(! kubectl set selector services redis-master rvtest1=true --resource-version=1 2>&1 "${kube_flags[@]}")
  877. kube::test::if_has_string "${output_message}" 'Conflict'
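# The apiserver uses resourceVersion for optimistic concurrency: an update carrying a stale
# resourceVersion is rejected with a 409 Conflict, which the message above asserts.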
  878. ### Dump current redis-master service
  879. output_service=$(kubectl get service redis-master -o json "${kube_flags[@]}")
  880. ### Delete redis-master-service by id
  881. # Pre-condition: redis-master service exists
  882. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  883. # Command
  884. kubectl delete service redis-master "${kube_flags[@]}"
  885. if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  886. kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  887. fi
  888. # Post-condition: Only the default kubernetes services exist
  889. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  890. ### Create redis-master-service from dumped JSON
  891. # Pre-condition: Only the default kubernetes services exist
  892. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  893. # Command
  894. echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
  895. # Post-condition: redis-master service is created
  896. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  897. ### Create redis-master-v1-test service
  898. # Pre-condition: redis-master-service service exists
  899. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  900. # Command
  901. kubectl create -f - "${kube_flags[@]}" << __EOF__
  902. {
  903. "kind": "Service",
  904. "apiVersion": "v1",
  905. "metadata": {
  906. "name": "service-v1-test"
  907. },
  908. "spec": {
  909. "ports": [
  910. {
  911. "protocol": "TCP",
  912. "port": 80,
  913. "targetPort": 80
  914. }
  915. ]
  916. }
  917. }
  918. __EOF__
  919. # Post-condition: service-v1-test service is created
  920. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
  921. ### Identity
  922. kubectl get service "${kube_flags[@]}" service-v1-test -o json | kubectl replace "${kube_flags[@]}" -f -
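# Round-trip identity check: dumping the live object and replacing it with the same JSON
# should succeed without modification.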
  923. ### Delete services by id
  924. # Pre-condition: service-v1-test exists
  925. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
  926. # Command
  927. kubectl delete service redis-master "${kube_flags[@]}"
  928. kubectl delete service "service-v1-test" "${kube_flags[@]}"
  929. if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  930. kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  931. fi
  932. # Post-condition: Only the default kubernetes services exist
  933. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  934. ### Create two services
  935. # Pre-condition: Only the default kubernetes services exist
  936. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  937. # Command
  938. kubectl create -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml "${kube_flags[@]}"
  939. kubectl create -f test/e2e/testing-manifests/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
  940. # Post-condition: redis-master and redis-slave services are created
  941. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
  942. ### Custom columns can be specified
  943. # Pre-condition: generate output using custom columns
  944. output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
  945. # Post-condition: should contain name column
  946. kube::test::if_has_string "${output_message}" 'redis-master'
  947. ### Delete multiple services at once
  948. # Pre-condition: redis-master and redis-slave services exist
  949. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
  950. # Command
  951. kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
  952. if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  953. kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  954. fi
  955. # Post-condition: Only the default kubernetes services exist
  956. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  957. ### Create an ExternalName service
  958. # Pre-condition: Only the default kubernetes service exist
  959. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  960. # Dry-run command
  961. kubectl create service externalname beep-boop --dry-run=client --external-name bar.com
  962. kubectl create service externalname beep-boop --dry-run=server --external-name bar.com
  963. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  964. # Command
  965. kubectl create service externalname beep-boop --external-name bar.com
  966. # Post-condition: beep-boop service is created
  967. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
  968. ### Delete beep-boop service by id
  969. # Pre-condition: beep-boop service exists
  970. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
  971. # Command
  972. kubectl delete service beep-boop "${kube_flags[@]}"
  973. if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  974. kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  975. fi
  976. # Post-condition: Only the default kubernetes services exist
  977. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  978. ### Create pod and service
  979. # Pre-condition: no pod exists
  980. kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  981. # Pre-condition: Only the default kubernetes services exist
  982. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  983. # Dry-run command
  984. kubectl run testmetadata --image=nginx --port=80 --expose --dry-run=client --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
  985. kubectl run testmetadata --image=nginx --port=80 --expose --dry-run=server --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
  986. # Check only the default kubernetes services exist
  987. kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  988. # Command
  989. kubectl run testmetadata --image=nginx --port=80 --expose --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
  990. # Check result
  991. kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'testmetadata:'
  992. kube::test::get_object_assert 'service testmetadata' "{{.metadata.annotations}}" "map\[zone-context:home\]"
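# --expose creates a Service alongside the pod, and --service-overrides merges the supplied
# JSON fragment into that generated Service, which is why the annotation appears above.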
  993. ### Expose pod as a new service
  994. # Command
  995. kubectl expose pod testmetadata --port=1000 --target-port=80 --type=NodePort --name=exposemetadata --overrides='{ "metadata": { "annotations": { "zone-context": "work" } } } '
  996. # Check result
  997. kube::test::get_object_assert 'service exposemetadata' "{{.metadata.annotations}}" "map\[zone-context:work\]"
  998. # Clean-Up
  999. # Command
  1000. kubectl delete service exposemetadata testmetadata "${kube_flags[@]}"
  1001. if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  1002. kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1003. fi
  1004. kubectl delete pod testmetadata "${kube_flags[@]}"
  1005. if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
1006. kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  1007. fi
  1008. set +o nounset
  1009. set +o errexit
  1010. }
  1011. run_rc_tests() {
  1012. set -o nounset
  1013. set -o errexit
  1014. create_and_use_new_namespace
  1015. kube::log::status "Testing kubectl(v1:replicationcontrollers)"
  1016. ### Create and stop controller, make sure it doesn't leak pods
  1017. # Pre-condition: no replication controller exists
  1018. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1019. # Command
  1020. kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1021. kubectl delete rc frontend "${kube_flags[@]}"
  1022. # Post-condition: no pods from frontend controller
  1023. kube::test::wait_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
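# Deleting a replication controller cascades to its pods by default, so waiting for the
# label-selected pod list to drain proves no pods were leaked.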
  1024. ### Create replication controller frontend from JSON
  1025. # Pre-condition: no replication controller exists
  1026. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1027. # Command
  1028. kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1029. # Post-condition: frontend replication controller is created
  1030. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  1031. # Describe command should print detailed information
  1032. kube::test::describe_object_assert rc 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
  1033. # Describe command should print events information by default
  1034. kube::test::describe_object_events_assert rc 'frontend'
  1035. # Describe command should not print events information when show-events=false
  1036. kube::test::describe_object_events_assert rc 'frontend' false
  1037. # Describe command should print events information when show-events=true
  1038. kube::test::describe_object_events_assert rc 'frontend' true
  1039. # Describe command (resource only) should print detailed information
1040. kube::test::describe_resource_assert rc "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
  1041. # Describe command should print events information by default
  1042. kube::test::describe_resource_events_assert rc
  1043. # Describe command should not print events information when show-events=false
  1044. kube::test::describe_resource_events_assert rc false
  1045. # Describe command should print events information when show-events=true
  1046. kube::test::describe_resource_events_assert rc true
  1047. ### Scale replication controller frontend with current-replicas and replicas
  1048. # Pre-condition: 3 replicas
  1049. kube::test::get_object_assert 'rc frontend' "{{${rc_replicas_field:?}}}" '3'
  1050. # Command
  1051. kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
  1052. # Post-condition: 2 replicas
  1053. kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
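# --current-replicas is a precondition: the scale is applied only if the live replica count
# matches; the next case exercises the failure path when it does not.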
  1054. ### Scale replication controller frontend with (wrong) current-replicas and replicas
  1055. # Pre-condition: 2 replicas
  1056. kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  1057. # Command
  1058. ! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}" || exit 1
  1059. # Post-condition: nothing changed
  1060. kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  1061. ### Scale replication controller frontend with replicas only
  1062. # Pre-condition: 2 replicas
  1063. kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  1064. # Command
  1065. kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
  1066. # Post-condition: 3 replicas
  1067. kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  1068. ### Scale replication controller from JSON with replicas only
  1069. # Pre-condition: 3 replicas
  1070. kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  1071. # Command
  1072. kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1073. # Post-condition: 2 replicas
  1074. kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  1075. # Clean-up
  1076. kubectl delete rc frontend "${kube_flags[@]}"
  1077. ### Scale multiple replication controllers
  1078. kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
  1079. kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
  1080. # Command
  1081. kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
  1082. # Post-condition: 4 replicas each
  1083. kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
  1084. kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
  1085. # Clean-up
  1086. kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
  1087. ### Scale a deployment
  1088. kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  1089. # Command
  1090. kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
  1091. # Post-condition: 1 replica for nginx-deployment
  1092. kube::test::get_object_assert 'deployment nginx-deployment' "{{${deployment_replicas:?}}}" '1'
  1093. # Clean-up
  1094. kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
  1095. ### Expose deployments by creating a service
  1096. # Uses deployment selectors for created service
  1097. output_message=$(kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1deployment.yaml --port 80 2>&1 "${kube_flags[@]}")
  1098. # Post-condition: service created for deployment.
  1099. kube::test::if_has_string "${output_message}" 'service/expose-test-deployment exposed'
  1100. # Clean-up
  1101. kubectl delete service/expose-test-deployment "${kube_flags[@]}"
1102. # The manifest contains no selectors, so expose should fail.
  1103. output_message=$(! kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1deployment-no-selectors.yaml --port 80 2>&1 "${kube_flags[@]}")
1104. # Post-condition: expose fails with an error because the deployment has no selectors.
  1105. kube::test::if_has_string "${output_message}" 'invalid deployment: no selectors'
  1106. ### Expose a deployment as a service
  1107. kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  1108. # Pre-condition: 3 replicas
  1109. kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
  1110. # Command
  1111. kubectl expose deployment/nginx-deployment
  1112. # Post-condition: service exists and exposes deployment port (80)
  1113. kube::test::get_object_assert 'service nginx-deployment' "{{${port_field:?}}}" '80'
  1114. # Clean-up
  1115. kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
  1116. ### Expose replication controller as service
  1117. kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1118. # Pre-condition: 3 replicas
  1119. kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  1120. # Command
  1121. kubectl expose rc frontend --port=80 "${kube_flags[@]}"
  1122. # Post-condition: service exists and the port is unnamed
  1123. kube::test::get_object_assert 'service frontend' "{{${port_name:?}}} {{$port_field}}" '<no value> 80'
  1124. # Command
  1125. kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
  1126. # Post-condition: service exists and the port is unnamed
  1127. kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
  1128. # Command
  1129. kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  1130. kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
  1131. # Post-condition: service exists and the port is unnamed
  1132. kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
  1133. # Create a service using service/v1 generator
  1134. kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
  1135. # Post-condition: service exists and the port is named default.
  1136. kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
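# The service/v1 generator names the single port "default", whereas the default generator leaves
# it unnamed ('<no value>' in the earlier assertions).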
  1137. # Verify that expose service works without specifying a port.
  1138. kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
  1139. # Post-condition: service exists with the same port as the original service.
  1140. kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
  1141. # Cleanup services
  1142. kubectl delete pod valid-pod "${kube_flags[@]}"
  1143. kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
1144. ### Negative test: exposing a resource that cannot be exposed should fail
1145. # Pre-condition: none required
  1146. # Command
  1147. output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
  1148. # Post-condition: the error message has "cannot expose" string
  1149. kube::test::if_has_string "${output_message}" 'cannot expose'
  1150. ### Try to generate a service with invalid name (exceeding maximum valid size)
  1151. # Pre-condition: use --name flag
  1152. output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name-that-has-more-than-sixty-three-characters --port=8081 2>&1 "${kube_flags[@]}")
  1153. # Post-condition: should fail due to invalid name
  1154. kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
  1155. # Pre-condition: default run without --name flag; should succeed by truncating the inherited name
  1156. output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
  1157. # Post-condition: inherited name from pod has been truncated
  1158. kube::test::if_has_string "${output_message}" 'kubernetes-serve-hostname-testing-sixty-three-characters-in-len exposed'
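# Service names must be valid DNS labels (at most 63 characters); without --name, expose truncates
# the name inherited from the pod to fit, so creation succeeds.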
  1159. # Clean-up
  1160. kubectl delete svc kubernetes-serve-hostname-testing-sixty-three-characters-in-len "${kube_flags[@]}"
  1161. ### Expose multiport object as a new service
  1162. # Pre-condition: don't use --port flag
  1163. output_message=$(kubectl expose -f test/fixtures/doc-yaml/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
  1164. # Post-condition: expose succeeded
  1165. kube::test::if_has_string "${output_message}" 'etcd-server exposed'
  1166. # Post-condition: generated service has both ports from the exposed pod
  1167. kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
  1168. kube::test::get_object_assert 'service etcd-server' "{{${second_port_name:?}}} {{${second_port_field:?}}}" 'port-2 2379'
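# When --port is omitted, expose copies every port from the exposed object and auto-names them
# port-1, port-2, ..., as the two assertions above verify.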
  1169. # Clean-up
  1170. kubectl delete svc etcd-server "${kube_flags[@]}"
  1171. ### Delete replication controller with id
  1172. # Pre-condition: frontend replication controller exists
  1173. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  1174. # Command
  1175. kubectl delete rc frontend "${kube_flags[@]}"
  1176. # Post-condition: no replication controller exists
  1177. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1178. ### Create two replication controllers
  1179. # Pre-condition: no replication controller exists
  1180. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1181. # Command
  1182. kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1183. kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
1184. # Post-condition: frontend and redis-slave replication controllers are created
  1185. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  1186. ### Delete multiple controllers at once
1187. # Pre-condition: frontend and redis-slave replication controllers exist
  1188. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  1189. # Command
  1190. kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
  1191. # Post-condition: no replication controller exists
  1192. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1193. ### Auto scale replication controller
  1194. # Pre-condition: no replication controller exists
  1195. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1196. # Command
  1197. kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1198. kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  1199. # autoscale 1~2 pods, CPU utilization 70%, rc specified by file
  1200. kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
  1201. kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '1 2 70'
  1202. kubectl delete hpa frontend "${kube_flags[@]}"
  1203. # autoscale 2~3 pods, no CPU utilization specified, rc specified by name
  1204. kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
  1205. kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
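# When omitted, --min defaults to 1 and the CPU target defaults to 80%, matching the
# '1 2 70' and '2 3 80' assertions above.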
  1206. kubectl delete hpa frontend "${kube_flags[@]}"
  1207. # autoscale without specifying --max should fail
  1208. ! kubectl autoscale rc frontend "${kube_flags[@]}" || exit 1
  1209. # Clean up
  1210. kubectl delete rc frontend "${kube_flags[@]}"
  1211. ## Set resource limits/request of a deployment
  1212. # Pre-condition: no deployment exists
  1213. kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  1214. # Set resources of a local file without talking to the server
  1215. kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
1216. ! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run=client -o yaml "${kube_flags[@]}" || exit 1
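# Without --local, 'set resources -f' still needs to look up the live object on the server; no
# deployment exists yet (see the pre-condition above), so the dry-run invocation is expected to fail.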
  1217. # Create a deployment
  1218. kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
  1219. kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
  1220. kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  1221. kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  1222. # Set the deployment's cpu limits
  1223. kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m "${kube_flags[@]}"
  1224. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
  1225. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
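# With no -c/--containers filter, the limit applies to every container in the pod template,
# so both containers report 100m.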
1226. # Setting resources on a non-existent container should fail
  1227. ! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m || exit 1
  1228. # Set the limit of a specific container in deployment
  1229. kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
  1230. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  1231. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
  1232. # Set limits/requests of a deployment specified by a file
  1233. kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
  1234. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  1235. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
  1236. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
  1237. # Show dry-run works on running deployments
1238. kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run=client -o yaml "${kube_flags[@]}"
  1239. ! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}" || exit 1
  1240. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  1241. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
  1242. kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
  1243. # Clean up
  1244. kubectl delete deployment nginx-deployment-resources "${kube_flags[@]}"
  1245. set +o nounset
  1246. set +o errexit
  1247. }
  1248. run_namespace_tests() {
  1249. set -o nounset
  1250. set -o errexit
  1251. kube::log::status "Testing kubectl(v1:namespaces)"
  1252. ### Create a new namespace
  1253. # Pre-condition: test namespace does not exist
  1254. output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}")
  1255. kube::test::if_has_string "${output_message}" ' not found'
  1256. # Dry-run command
  1257. kubectl create namespace my-namespace --dry-run=client
  1258. kubectl create namespace my-namespace --dry-run=server
  1259. output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}")
  1260. kube::test::if_has_string "${output_message}" ' not found'
  1261. # Command
  1262. kubectl create namespace my-namespace
  1263. # Post-condition: namespace 'my-namespace' is created.
  1264. kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
  1265. # Clean up
  1266. kubectl delete namespace my-namespace --wait=false
  1267. # make sure that wait properly waits for finalization
  1268. kubectl wait --for=delete ns/my-namespace
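# Namespace deletion is asynchronous (finalizers run first), so the --wait=false delete returns
# immediately and 'kubectl wait --for=delete' blocks until the object is actually gone.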
  1269. output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}")
  1270. kube::test::if_has_string "${output_message}" ' not found'
  1271. kubectl create namespace my-namespace
  1272. kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
  1273. output_message=$(! kubectl delete namespace -n my-namespace --all 2>&1 "${kube_flags[@]}")
  1274. kube::test::if_has_string "${output_message}" 'warning: deleting cluster-scoped resources'
  1275. kube::test::if_has_string "${output_message}" 'namespace "my-namespace" deleted'
  1276. ### Quota
  1277. kubectl create namespace quotas
  1278. kube::test::get_object_assert 'namespaces/quotas' "{{$id_field}}" 'quotas'
  1279. kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" ':'
  1280. # Dry-run command
  1281. kubectl create quota test-quota --dry-run=client --namespace=quotas
  1282. kubectl create quota test-quota --dry-run=server --namespace=quotas
  1283. kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" ':'
  1284. # Command
  1285. kubectl create quota test-quota --namespace=quotas
  1286. kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" 'found:'
  1287. # Clean up
  1288. kubectl delete quota test-quota --namespace=quotas
  1289. kubectl delete namespace quotas
  1290. ######################
  1291. # Pods in Namespaces #
  1292. ######################
  1293. if kube::test::if_supports_resource "${pods:?}" ; then
  1294. ### Create a new namespace
  1295. # Pre-condition: the other namespace does not exist
  1296. kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"other\\\" }}found{{end}}{{end}}:" ':'
  1297. # Command
  1298. kubectl create namespace other
  1299. # Post-condition: namespace 'other' is created.
  1300. kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
  1301. ### Create POD valid-pod in specific namespace
  1302. # Pre-condition: no POD exists
  1303. kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
  1304. # Command
  1305. kubectl create "${kube_flags[@]}" --namespace=other -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  1306. # Post-condition: valid-pod POD is created
  1307. kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  1308. # Post-condition: verify shorthand `-n other` has the same results as `--namespace=other`
  1309. kube::test::get_object_assert 'pods -n other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  1310. # Post-condition: a resource cannot be retrieved by name across all namespaces
  1311. output_message=$(! kubectl get "${kube_flags[@]}" pod valid-pod --all-namespaces 2>&1)
  1312. kube::test::if_has_string "${output_message}" "a resource cannot be retrieved by name across all namespaces"
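# Names are only unique within a namespace, so requesting a specific name together with
# --all-namespaces is rejected with the error asserted above.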
  1313. ### Delete POD valid-pod in specific namespace
  1314. # Pre-condition: valid-pod POD exists
  1315. kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  1316. # Command
  1317. kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0 --force
  1318. # Post-condition: valid-pod POD doesn't exist
  1319. kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
  1320. # Clean up
  1321. kubectl delete namespace other
  1322. fi
  1323. set +o nounset
  1324. set +o errexit
  1325. }
  1326. run_nodes_tests() {
  1327. set -o nounset
  1328. set -o errexit
  1329. kube::log::status "Testing kubectl(v1:nodes)"
  1330. kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
  1331. kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  1332. # Describe command should print events information by default
  1333. kube::test::describe_object_events_assert nodes "127.0.0.1"
  1334. # Describe command should not print events information when show-events=false
  1335. kube::test::describe_object_events_assert nodes "127.0.0.1" false
  1336. # Describe command should print events information when show-events=true
  1337. kube::test::describe_object_events_assert nodes "127.0.0.1" true
  1338. # Describe command (resource only) should print detailed information
  1339. kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  1340. # Describe command should print events information by default
  1341. kube::test::describe_resource_events_assert nodes
  1342. # Describe command should not print events information when show-events=false
  1343. kube::test::describe_resource_events_assert nodes false
  1344. # Describe command should print events information when show-events=true
  1345. kube::test::describe_resource_events_assert nodes true
  1346. ### kubectl patch update can mark node unschedulable
  1347. # Pre-condition: node is schedulable
  1348. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  1349. kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
  1350. # Post-condition: node is unschedulable
  1351. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
  1352. kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
  1353. # Post-condition: node is schedulable
  1354. kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
1355. # Check the webhook token authentication endpoint. kubectl doesn't display the returned object,
1356. # so this isn't very useful, but it proves that the endpoint works.
  1357. kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1beta1.json --validate=false
  1358. kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1.json --validate=false
  1359. set +o nounset
  1360. set +o errexit
  1361. }
  1362. run_pod_templates_tests() {
  1363. set -o nounset
  1364. set -o errexit
  1365. create_and_use_new_namespace
  1366. kube::log::status "Testing pod templates"
  1367. ### Create PODTEMPLATE
  1368. # Pre-condition: no PODTEMPLATE
  1369. kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
  1370. # Command
  1371. kubectl create -f test/fixtures/doc-yaml/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
  1372. # Post-condition: nginx PODTEMPLATE is available
  1373. kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
  1374. ### Printing pod templates works
  1375. kubectl get podtemplates "${kube_flags[@]}"
  1376. grep -q nginx <<< "$(kubectl get podtemplates -o yaml "${kube_flags[@]}")"
  1377. ### Delete nginx pod template by name
  1378. # Pre-condition: nginx pod template is available
  1379. kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
  1380. # Command
  1381. kubectl delete podtemplate nginx "${kube_flags[@]}"
  1382. # Post-condition: No templates exist
  1383. kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
  1384. set +o nounset
  1385. set +o errexit
  1386. }