utils.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"context"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"math/rand"
	"path/filepath"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	clientexec "k8s.io/client-go/util/exec"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	imageutils "k8s.io/kubernetes/test/utils/image"
	uexec "k8s.io/utils/exec"
)

// KubeletOpt type definition
type KubeletOpt string

const (
	// NodeStateTimeout defines the timeout for a node state change
	NodeStateTimeout = 1 * time.Minute
	// KStart defines start value
	KStart KubeletOpt = "start"
	// KStop defines stop value
	KStop KubeletOpt = "stop"
	// KRestart defines restart value
	KRestart KubeletOpt = "restart"
)

const (
	// ClusterRole name for the e2e test privileged Pod Security Policy user
	podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)

// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in the target pod
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, error) {
	stdout, _, err := f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
	return stdout, err
}

// VerifyExecInPodSucceed verifies that a shell cmd in the target pod succeeds
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
	_, err := PodExec(f, pod, shExec)
	if err != nil {
		if exiterr, ok := err.(uexec.CodeExitError); ok {
			exitCode := exiterr.ExitStatus()
			framework.ExpectNoError(err,
				"%q should succeed, but failed with exit code %d and error message %q",
				shExec, exitCode, exiterr)
		} else {
			framework.ExpectNoError(err,
				"%q should succeed, but failed with error message %q",
				shExec, err)
		}
	}
}

// VerifyExecInPodFail verifies that a shell cmd in the target pod fails with a certain exit code
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
	_, err := PodExec(f, pod, shExec)
	if err != nil {
		if exiterr, ok := err.(clientexec.ExitError); ok {
			actualExitCode := exiterr.ExitStatus()
			framework.ExpectEqual(actualExitCode, exitCode,
				"%q should fail with exit code %d, but failed with exit code %d and error message %q",
				shExec, exitCode, actualExitCode, exiterr)
		} else {
			framework.ExpectNoError(err,
				"%q should fail with exit code %d, but failed with error message %q",
				shExec, exitCode, err)
		}
	}
	framework.ExpectError(err, "%q should fail with exit code %d, but exited without error", shExec, exitCode)
}

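// The following is an illustrative usage sketch, not part of the utilities themselves:
// assuming a test has a framework.Framework `f` and a running pod `pod` in scope (both
// names are assumptions of this example), the exec helpers above can be used like this:
//
//	// A command that exits 0 passes the success check.
//	VerifyExecInPodSucceed(f, pod, "ls /")
//	// A command that exits with a known non-zero code passes the failure check for that code.
//	VerifyExecInPodFail(f, pod, "exit 42", 42)
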
func isSudoPresent(nodeIP string, provider string) bool {
	framework.Logf("Checking if sudo command is present")
	sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
	framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
	if !strings.Contains(sshResult.Stderr, "command not found") {
		return true
	}
	return false
}

// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
	node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Try externalAddress first
	for _, address := range node.Status.Addresses {
		if address.Type == v1.NodeExternalIP {
			if address.Address != "" {
				return address.Address, nil
			}
		}
	}
	// If no externalAddress found, try internalAddress
	for _, address := range node.Status.Addresses {
		if address.Type == v1.NodeInternalIP {
			if address.Address != "" {
				return address.Address, nil
			}
		}
	}
	// If not found, return error
	return "", fmt.Errorf("No address for pod %v on node %v",
		p.Name, p.Spec.NodeName)
}

// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired state:
// - First issues the command via `systemctl`.
// - If `systemctl` returns stderr "command not found", issues the command via `service`.
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`.
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
	command := ""
	systemctlPresent := false
	kubeletPid := ""

	nodeIP, err := getHostAddress(c, pod)
	framework.ExpectNoError(err)
	nodeIP = nodeIP + ":22"

	framework.Logf("Checking if systemctl command is present")
	sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
	if !strings.Contains(sshResult.Stderr, "command not found") {
		command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
		systemctlPresent = true
	} else {
		command = fmt.Sprintf("service kubelet %s", string(kOp))
	}

	sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
	if sudoPresent {
		command = fmt.Sprintf("sudo %s", command)
	}

	if kOp == KRestart {
		kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
	}

	framework.Logf("Attempting `%s`", command)
	sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
	e2essh.LogResult(sshResult)
	gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)

	if kOp == KStop {
		if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
			framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
		}
	}
	if kOp == KRestart {
		// Wait up to a minute for the kubelet PID to change
		isPidChanged := false
		for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
			kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
			if kubeletPid != kubeletPidAfterRestart {
				isPidChanged = true
				break
			}
		}
		framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
		framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 seconds for kubelet to come back")
		time.Sleep(30 * time.Second)
	}
	if kOp == KStart || kOp == KRestart {
		// For kubelet start and restart operations, wait until the node becomes Ready
		if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
			framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
		}
	}
}

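// Illustrative usage sketch (assumes `c` is a clientset.Interface and `clientPod` a running
// pod provided by the calling test): disruptive tests typically pair a kubelet stop with a
// deferred start so the node is restored even if the test fails midway.
//
//	ginkgo.By("Stopping the kubelet")
//	KubeletCommand(KStop, c, clientPod)
//	defer KubeletCommand(KStart, c, clientPod) // always bring the kubelet back
//	// ... assertions that rely on the kubelet being down ...
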
// getKubeletMainPid returns the main PID of the kubelet process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
	command := ""
	if systemctlPresent {
		command = "systemctl status kubelet | grep 'Main PID'"
	} else {
		command = "service kubelet status | grep 'Main PID'"
	}
	if sudoPresent {
		command = fmt.Sprintf("sudo %s", command)
	}
	framework.Logf("Attempting `%s`", command)
	sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
	e2essh.LogResult(sshResult)
	gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
	gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet Main PID should not be empty")
	return sshResult.Stdout
}

// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after the kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	path := "/mnt/volume1"
	byteLen := 64
	seed := time.Now().UTC().UnixNano()

	ginkgo.By("Writing to the volume.")
	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

	ginkgo.By("Restarting kubelet")
	KubeletCommand(KRestart, c, clientPod)

	ginkgo.By("Testing that written file is accessible.")
	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

	framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
}

// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after the kubelet restarts
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	path := "/mnt/volume1"
	byteLen := 64
	seed := time.Now().UTC().UnixNano()

	ginkgo.By("Writing to the volume.")
	CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

	ginkgo.By("Restarting kubelet")
	KubeletCommand(KRestart, c, clientPod)

	ginkgo.By("Testing that written pv is accessible.")
	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

	framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
}

// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is deleted forcefully (with zero grace period).
// checkSubpath indicates whether a subpath mount should also be checked.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
	nodeIP, err := getHostAddress(c, clientPod)
	framework.ExpectNoError(err)
	nodeIP = nodeIP + ":22"

	ginkgo.By("Expecting the volume mount to be found.")
	result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

	if checkSubpath {
		ginkgo.By("Expecting the volume subpath mount to be found.")
		result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
		e2essh.LogResult(result)
		framework.ExpectNoError(err, "Encountered SSH error.")
		framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
	}

	// This deferred call makes sure the kubelet is started again after the test finishes, whether it fails or not.
	defer func() {
		KubeletCommand(KStart, c, clientPod)
	}()

	ginkgo.By("Stopping the kubelet.")
	KubeletCommand(KStop, c, clientPod)

	ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
	if forceDelete {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0))
	} else {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, &metav1.DeleteOptions{})
	}
	framework.ExpectNoError(err)

	ginkgo.By("Starting the kubelet and waiting for pod to delete.")
	KubeletCommand(KStart, c, clientPod)
	err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
	if err != nil {
		framework.ExpectNoError(err, "Expected pod to be not found.")
	}

	if forceDelete {
		// With forceDelete, since pods are immediately deleted from the API server, there is no way to be sure when volumes are torn down,
		// so wait some time for the teardown to finish
		time.Sleep(30 * time.Second)
	}

	ginkgo.By("Expecting the volume mount not to be found.")
	result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
	framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)

	if checkSubpath {
		ginkgo.By("Expecting the volume subpath mount not to be found.")
		result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
		e2essh.LogResult(result)
		framework.ExpectNoError(err, "Encountered SSH error.")
		gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
		framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
	}
}

// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
}

// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
}

// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is deleted forcefully (with zero grace period).
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
	nodeIP, err := getHostAddress(c, clientPod)
	framework.ExpectNoError(err, "Failed to get nodeIP.")
	nodeIP = nodeIP + ":22"

	// Creating command to check whether path exists
	podDirectoryCmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID)
	if isSudoPresent(nodeIP, framework.TestContext.Provider) {
		podDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", podDirectoryCmd)
	}
	// Directories in the global directory have unpredictable names, however, device symlinks
	// have the same name as pod.UID. So just find anything with pod.UID name.
	globalBlockDirectoryCmd := fmt.Sprintf("find /var/lib/kubelet/plugins -name %s", clientPod.UID)
	if isSudoPresent(nodeIP, framework.TestContext.Provider) {
		globalBlockDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalBlockDirectoryCmd)
	}

	ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.")
	result, err := e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

	ginkgo.By("Expecting the symlinks from global map path to be found.")
	result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))

	// This deferred call makes sure the kubelet is started again after the test finishes, whether it fails or not.
	defer func() {
		KubeletCommand(KStart, c, clientPod)
	}()

	ginkgo.By("Stopping the kubelet.")
	KubeletCommand(KStop, c, clientPod)

	ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
	if forceDelete {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0))
	} else {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, &metav1.DeleteOptions{})
	}
	framework.ExpectNoError(err, "Failed to delete pod.")

	ginkgo.By("Starting the kubelet and waiting for pod to delete.")
	KubeletCommand(KStart, c, clientPod)
	err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
	framework.ExpectNoError(err, "Expected pod to be not found.")

	if forceDelete {
		// With forceDelete, since pods are immediately deleted from the API server, there is no way to be sure when volumes are torn down,
		// so wait some time for the teardown to finish
		time.Sleep(30 * time.Second)
	}

	ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.")
	result, err = e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")

	ginkgo.By("Expecting the symlinks from global map path not to be found.")
	result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected find stdout to be empty.")

	framework.Logf("Volume unmapped on node %s", clientPod.Spec.NodeName)
}

// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
}

// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
}

// RunInPodWithVolume runs a command in a pod with the given claim mounted at the /mnt/test directory.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-volume-tester-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "volume-tester",
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", command},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "my-volume",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
							ReadOnly:  false,
						},
					},
				},
			},
		},
	}
	pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create pod: %v", err)
	defer func() {
		e2epod.DeletePodOrFail(c, ns, pod.Name)
	}()
	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}

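// Illustrative usage sketch (assumes `c`, `ns`, and a bound claim `pvc` from the calling
// test): because the pod mounts the claim at /mnt/test and uses RestartPolicy=Never, the
// helper waits for the command to run once to completion.
//
//	RunInPodWithVolume(c, ns, pvc.Name, "echo hello > /mnt/test/data && grep hello /mnt/test/data")
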
// StartExternalProvisioner creates an external provisioner pod
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
	podClient := c.CoreV1().Pods(ns)

	provisionerPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "external-provisioner-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "nfs-provisioner",
					Image: imageutils.GetE2EImage(imageutils.NFSProvisioner),
					SecurityContext: &v1.SecurityContext{
						Capabilities: &v1.Capabilities{
							Add: []v1.Capability{"DAC_READ_SEARCH"},
						},
					},
					Args: []string{
						"-provisioner=" + externalPluginName,
						"-grace-period=0",
					},
					Ports: []v1.ContainerPort{
						{Name: "nfs", ContainerPort: 2049},
						{Name: "mountd", ContainerPort: 20048},
						{Name: "rpcbind", ContainerPort: 111},
						{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
					},
					Env: []v1.EnvVar{
						{
							Name: "POD_IP",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "status.podIP",
								},
							},
						},
					},
					ImagePullPolicy: v1.PullIfNotPresent,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "export-volume",
							MountPath: "/export",
						},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "export-volume",
					VolumeSource: v1.VolumeSource{
						EmptyDir: &v1.EmptyDirVolumeSource{},
					},
				},
			},
		},
	}
	provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)

	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod))

	ginkgo.By("locating the provisioner pod")
	pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
	return pod
}

// PrivilegedTestPSPClusterRoleBinding tests Pod Security Policy role bindings by binding
// (or, on teardown, unbinding) the privileged PSP ClusterRole to the given service accounts.
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
	namespace string,
	teardown bool,
	saNames []string) {
	bindingString := "Binding"
	if teardown {
		bindingString = "Unbinding"
	}
	roleBindingClient := client.RbacV1().RoleBindings(namespace)
	for _, saName := range saNames {
		ginkgo.By(fmt.Sprintf("%v privileged Pod Security Policy to the service account %s", bindingString, saName))
		binding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "psp-" + saName,
				Namespace: namespace,
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      rbacv1.ServiceAccountKind,
					Name:      saName,
					Namespace: namespace,
				},
			},
			RoleRef: rbacv1.RoleRef{
				Kind:     "ClusterRole",
				Name:     podSecurityPolicyPrivilegedClusterRoleName,
				APIGroup: "rbac.authorization.k8s.io",
			},
		}
		roleBindingClient.Delete(context.TODO(), binding.GetName(), &metav1.DeleteOptions{})
		err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
			_, err := roleBindingClient.Get(context.TODO(), binding.GetName(), metav1.GetOptions{})
			return apierrors.IsNotFound(err), nil
		})
		framework.ExpectNoError(err, "Timed out waiting for RBAC binding %s deletion: %v", binding.GetName(), err)

		if teardown {
			continue
		}

		_, err = roleBindingClient.Create(context.TODO(), binding, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
	}
}

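// Illustrative usage sketch (assumes `client`, `namespace`, and the service account names
// come from the calling test): the same helper handles both setup and teardown, toggled by
// the `teardown` argument.
//
//	PrivilegedTestPSPClusterRoleBinding(client, namespace, false, []string{"default"})      // bind
//	defer PrivilegedTestPSPClusterRoleBinding(client, namespace, true, []string{"default"}) // unbind
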
// CheckVolumeModeOfPath checks the mode of the volume at the given path
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
	if volMode == v1.PersistentVolumeBlock {
		// Check if block device exists
		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))

		// Double check that it's not a directory
		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
	} else {
		// Check if directory exists
		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))

		// Double check that it's not a block device
		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
	}
}

// CheckReadWriteToPath checks that the path can be read and written
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
	if volMode == v1.PersistentVolumeBlock {
		// random -> file1
		VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
		// file1 -> dev (write to dev)
		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
		// dev -> file2 (read from dev)
		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
		// file1 == file2 (check contents)
		VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
		// Clean up temp files
		VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")

		// Check that writing a file to the block volume fails
		VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
	} else {
		// text -> file1 (write to file)
		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
		// grep file1 (read from file and check contents)
		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))

		// Check that writing to the directory as if it were a block device fails
		VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
	}
}

// genBinDataFromSeed generates binData from the given seed
func genBinDataFromSeed(len int, seed int64) []byte {
	binData := make([]byte, len)
	rand.Seed(seed)

	_, err := rand.Read(binData)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
	}

	return binData
}

// CheckReadFromPath validates that the file can be properly read.
func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
	var pathForVolMode string
	if volMode == v1.PersistentVolumeBlock {
		pathForVolMode = path
	} else {
		pathForVolMode = filepath.Join(path, "file1.txt")
	}

	sum := sha256.Sum256(genBinDataFromSeed(len, seed))

	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
}

// CheckWriteToPath validates that the file can be properly written.
func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
	var pathForVolMode string
	if volMode == v1.PersistentVolumeBlock {
		pathForVolMode = path
	} else {
		pathForVolMode = filepath.Join(path, "file1.txt")
	}

	encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))

	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
}

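// Illustrative usage sketch (assumes `f` and `pod` from the calling test): writing and then
// reading with the same byte length and seed lets CheckReadFromPath recompute the expected
// sha256 and confirm the data round-trips through the volume.
//
//	seed := time.Now().UTC().UnixNano()
//	CheckWriteToPath(f, pod, v1.PersistentVolumeFilesystem, "/mnt/volume1", 64, seed)
//	CheckReadFromPath(f, pod, v1.PersistentVolumeFilesystem, "/mnt/volume1", 64, seed)
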
// findMountPoints returns all mount points on given node under specified directory.
func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
	result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
	framework.ExpectNoError(err, "Encountered HostExec error.")
	var mountPoints []string
	if err == nil {
		for _, line := range strings.Split(result, "\n") {
			if line == "" {
				continue
			}
			mountPoints = append(mountPoints, strings.TrimSuffix(line, " is a mountpoint"))
		}
	}
	return mountPoints
}

// FindVolumeGlobalMountPoints returns all volume global mount points on the given node.
func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String {
	return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...)
}

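// Illustrative usage sketch (assumes `hostExec` and `node` come from the calling test, e.g.
// hostExec created via this package's HostExec helper): comparing the sets before and after
// a volume operation is one way to assert that no global mount points leaked.
//
//	before := FindVolumeGlobalMountPoints(hostExec, node)
//	// ... run the volume operation under test ...
//	after := FindVolumeGlobalMountPoints(hostExec, node)
//	gomega.Expect(after).To(gomega.Equal(before), "unexpected leftover global mount points")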