/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"math/rand"
	"path/filepath"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	imageutils "k8s.io/kubernetes/test/utils/image"
	uexec "k8s.io/utils/exec"
)
// KubeletOpt is an operation to perform on the kubelet: start, stop, or restart.
type KubeletOpt string

const (
	// NodeStateTimeout is how long to wait for a node to reach the expected Ready/NotReady state.
	NodeStateTimeout = 1 * time.Minute
	// KStart starts the kubelet.
	KStart KubeletOpt = "start"
	// KStop stops the kubelet.
	KStop KubeletOpt = "stop"
	// KRestart restarts the kubelet.
	KRestart KubeletOpt = "restart"
)
const (
	// podSecurityPolicyPrivilegedClusterRoleName is the ClusterRole name for the e2e test privileged Pod Security Policy user.
	podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)
// PodExec wraps RunKubectl to execute a bash command in the target pod.
func PodExec(pod *v1.Pod, bashExec string) (string, error) {
	return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
}
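
// Hypothetical usage sketch (not called by any test): assuming `pod` is a
// running pod whose image provides /bin/sh, PodExec can be used to inspect
// state inside the container.
func examplePodExec(pod *v1.Pod) {
	out, err := PodExec(pod, "ls -la /mnt")
	framework.ExpectNoError(err, "listing /mnt failed: %s", out)
	e2elog.Logf("contents of /mnt: %s", out)
}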
// VerifyExecInPodSucceed verifies that the given bash command succeeds in the target pod.
func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
	_, err := PodExec(pod, bashExec)
	if err != nil {
		if err, ok := err.(uexec.CodeExitError); ok {
			exitCode := err.ExitStatus()
			framework.ExpectNoError(err,
				"%q should succeed, but failed with exit code %d and error message %q",
				bashExec, exitCode, err)
		} else {
			framework.ExpectNoError(err,
				"%q should succeed, but failed with error message %q",
				bashExec, err)
		}
	}
}
// VerifyExecInPodFail verifies that the given bash command fails in the target pod with the given exit code.
func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
	_, err := PodExec(pod, bashExec)
	if err != nil {
		if err, ok := err.(uexec.CodeExitError); ok {
			actualExitCode := err.ExitStatus()
			gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
				"%q should fail with exit code %d, but failed with exit code %d and error message %q",
				bashExec, exitCode, actualExitCode, err)
		} else {
			framework.ExpectNoError(err,
				"%q should fail with exit code %d, but failed with error message %q",
				bashExec, exitCode, err)
		}
	}
	framework.ExpectError(err, "%q should fail with exit code %d, but exited without error", bashExec, exitCode)
}
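
// A minimal sketch of how the two helpers above pair up: a probe that must
// succeed and a probe that must fail with a known exit code. The path is an
// illustrative assumption, not a fixture from this file.
func exampleVerifyHelpers(pod *v1.Pod) {
	VerifyExecInPodSucceed(pod, "test -d /mnt") // the mount point must be a directory
	VerifyExecInPodFail(pod, "test -b /mnt", 1) // and must not be a block device
}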
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired state:
// - First issues the command via `systemctl`.
// - If `systemctl` returns stderr "command not found", issues the command via `service`.
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`.
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
	command := ""
	sudoPresent := false
	systemctlPresent := false
	kubeletPid := ""

	nodeIP, err := framework.GetHostExternalAddress(c, pod)
	framework.ExpectNoError(err)
	nodeIP = nodeIP + ":22"

	e2elog.Logf("Checking if sudo command is present")
	sshResult, err := e2essh.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
	if !strings.Contains(sshResult.Stderr, "command not found") {
		sudoPresent = true
	}

	e2elog.Logf("Checking if systemctl command is present")
	sshResult, err = e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
	if !strings.Contains(sshResult.Stderr, "command not found") {
		command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
		systemctlPresent = true
	} else {
		command = fmt.Sprintf("service kubelet %s", string(kOp))
	}
	if sudoPresent {
		command = fmt.Sprintf("sudo %s", command)
	}

	if kOp == KRestart {
		kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
	}

	e2elog.Logf("Attempting `%s`", command)
	sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
	e2essh.LogResult(sshResult)
	gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)

	if kOp == KStop {
		if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
			framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
		}
	}
	if kOp == KRestart {
		// Wait up to one minute for the kubelet PID to change.
		isPidChanged := false
		for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
			kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
			if kubeletPid != kubeletPidAfterRestart {
				isPidChanged = true
				break
			}
		}
		gomega.Expect(isPidChanged).To(gomega.BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet")
		e2elog.Logf("Kubelet PID changed; waiting 30 seconds for the kubelet to come back up")
		time.Sleep(30 * time.Second)
	}
	if kOp == KStart || kOp == KRestart {
		// For start and restart operations, wait until the node becomes Ready.
		if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
			framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
		}
	}
}
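
// Hypothetical usage sketch: restart the kubelet on the node hosting `pod`,
// relying on KubeletCommand's built-in wait for the node to become Ready again.
func exampleKubeletRestart(c clientset.Interface, pod *v1.Pod) {
	KubeletCommand(KRestart, c, pod)
	e2elog.Logf("kubelet on node %s restarted", pod.Spec.NodeName)
}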
// getKubeletMainPid returns the main PID of the kubelet process on the given node.
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
	command := ""
	if systemctlPresent {
		command = "systemctl status kubelet | grep 'Main PID'"
	} else {
		command = "service kubelet status | grep 'Main PID'"
	}
	if sudoPresent {
		command = fmt.Sprintf("sudo %s", command)
	}
	e2elog.Logf("Attempting `%s`", command)
	sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
	e2essh.LogResult(sshResult)
	gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
	gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet main PID should not be empty")
	return sshResult.Stdout
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted into a pod remains mounted after the kubelet is restarted.
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	ginkgo.By("Writing to the volume.")
	file := "/mnt/_SUCCESS"
	out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
	e2elog.Logf("%s", out)
	framework.ExpectNoError(err)

	ginkgo.By("Restarting kubelet")
	KubeletCommand(KRestart, c, clientPod)

	ginkgo.By("Testing that written file is accessible.")
	out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file))
	e2elog.Logf("%s", out)
	framework.ExpectNoError(err)
	e2elog.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
}
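
// Sketch of how a storage test might wire the helper above into a ginkgo spec
// (c, f, and clientPod are assumed to be prepared in the surrounding suite):
//
//	ginkgo.It("should remount the volume after a kubelet restart", func() {
//		TestKubeletRestartsAndRestoresMount(c, f, clientPod)
//	})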
// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// If forceDelete is true, the pod is deleted with a zero grace period; if checkSubpath is true, subpath mounts are verified as well.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
	nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
	framework.ExpectNoError(err)
	nodeIP = nodeIP + ":22"

	ginkgo.By("Expecting the volume mount to be found.")
	result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

	if checkSubpath {
		ginkgo.By("Expecting the volume subpath mount to be found.")
		result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
		e2essh.LogResult(result)
		framework.ExpectNoError(err, "Encountered SSH error.")
		gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
	}
	// Regardless of whether the test passes or fails, make sure the kubelet is started again once the test finishes.
	defer func() {
		KubeletCommand(KStart, c, clientPod)
	}()
	ginkgo.By("Stopping the kubelet.")
	KubeletCommand(KStop, c, clientPod)

	ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
	if forceDelete {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0))
	} else {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
	}
	framework.ExpectNoError(err)

	ginkgo.By("Starting the kubelet and waiting for pod to delete.")
	KubeletCommand(KStart, c, clientPod)
	err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
	framework.ExpectNoError(err, "Expected pod to be not found.")
	if forceDelete {
		// With forceDelete, the pod is removed from the API server immediately, so there is no
		// reliable signal for when its volumes are torn down; wait a fixed time instead.
		time.Sleep(30 * time.Second)
	}
	ginkgo.By("Expecting the volume mount not to be found.")
	result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
	e2elog.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)

	if checkSubpath {
		ginkgo.By("Expecting the volume subpath mount not to be found.")
		result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
		e2essh.LogResult(result)
		framework.ExpectNoError(err, "Encountered SSH error.")
		gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
		e2elog.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
	}
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
}
// RunInPodWithVolume runs a command in a pod with the given claim mounted at /mnt/test, and waits for the pod to succeed.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-volume-tester-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "volume-tester",
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", command},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "my-volume",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
							ReadOnly:  false,
						},
					},
				},
			},
		},
	}
	pod, err := c.CoreV1().Pods(ns).Create(pod)
	framework.ExpectNoError(err, "Failed to create pod: %v", err)
	defer func() {
		framework.DeletePodOrFail(c, ns, pod.Name)
	}()
	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
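
// Illustrative call (the claim name is an assumption): write a marker file
// through a PVC and block until the pod exits successfully.
func exampleRunInPodWithVolume(c clientset.Interface, ns string) {
	RunInPodWithVolume(c, ns, "test-claim", "echo ok > /mnt/test/ok.txt")
}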
// StartExternalProvisioner creates an external provisioner pod (an NFS provisioner) in the given namespace, waits for it to run, and returns it.
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
	podClient := c.CoreV1().Pods(ns)
	provisionerPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "external-provisioner-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "nfs-provisioner",
					Image: "quay.io/kubernetes_incubator/nfs-provisioner:v2.2.0-k8s1.12",
					SecurityContext: &v1.SecurityContext{
						Capabilities: &v1.Capabilities{
							Add: []v1.Capability{"DAC_READ_SEARCH"},
						},
					},
					Args: []string{
						"-provisioner=" + externalPluginName,
						"-grace-period=0",
					},
					Ports: []v1.ContainerPort{
						{Name: "nfs", ContainerPort: 2049},
						{Name: "mountd", ContainerPort: 20048},
						{Name: "rpcbind", ContainerPort: 111},
						{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
					},
					Env: []v1.EnvVar{
						{
							Name: "POD_IP",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "status.podIP",
								},
							},
						},
					},
					ImagePullPolicy: v1.PullIfNotPresent,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "export-volume",
							MountPath: "/export",
						},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "export-volume",
					VolumeSource: v1.VolumeSource{
						EmptyDir: &v1.EmptyDirVolumeSource{},
					},
				},
			},
		},
	}
	provisionerPod, err := podClient.Create(provisionerPod)
	framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)

	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))

	ginkgo.By("locating the provisioner pod")
	pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)

	return pod
}
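
// Hedged usage sketch with an illustrative plugin name; a real test would also
// create a StorageClass whose provisioner field matches this name.
func exampleStartExternalProvisioner(c clientset.Interface, ns string) {
	pod := StartExternalProvisioner(c, ns, "example.com/nfs")
	e2elog.Logf("external provisioner %s running on node %s", pod.Name, pod.Spec.NodeName)
}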
// PrivilegedTestPSPClusterRoleBinding binds (or, when teardown is true, unbinds) the privileged
// Pod Security Policy ClusterRole to the given service accounts in the namespace.
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
	namespace string,
	teardown bool,
	saNames []string) {
	bindingString := "Binding"
	if teardown {
		bindingString = "Unbinding"
	}
	roleBindingClient := client.RbacV1().RoleBindings(namespace)
	for _, saName := range saNames {
		ginkgo.By(fmt.Sprintf("%v privileged Pod Security Policy to the service account %s", bindingString, saName))
		binding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "psp-" + saName,
				Namespace: namespace,
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      rbacv1.ServiceAccountKind,
					Name:      saName,
					Namespace: namespace,
				},
			},
			RoleRef: rbacv1.RoleRef{
				Kind:     "ClusterRole",
				Name:     podSecurityPolicyPrivilegedClusterRoleName,
				APIGroup: "rbac.authorization.k8s.io",
			},
		}

		roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
		err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
			_, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
			return apierrs.IsNotFound(err), nil
		})
		framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

		if teardown {
			continue
		}

		_, err = roleBindingClient.Create(binding)
		framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
	}
}
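
// Sketch (the service-account name is an assumption): bind the privileged PSP
// ClusterRole for a test's service accounts during setup and unbind it on teardown.
func examplePSPBinding(c clientset.Interface, ns string) {
	saNames := []string{"default"}
	PrivilegedTestPSPClusterRoleBinding(c, ns, false, saNames)      // setup: bind
	defer PrivilegedTestPSPClusterRoleBinding(c, ns, true, saNames) // teardown: unbind
}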
// CheckVolumeModeOfPath checks that the given path inside the pod matches the expected volume mode:
// a block device for block volumes, a directory for filesystem volumes.
func CheckVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
	if volMode == v1.PersistentVolumeBlock {
		// Check if block device exists
		VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))

		// Double check that it's not a directory
		VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
	} else {
		// Check if directory exists
		VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))

		// Double check that it's not a block device
		VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
	}
}
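
// Minimal sketch, assuming a block-mode volume surfaced at the illustrative
// device path /dev/xvda inside the pod:
func exampleCheckVolumeMode(pod *v1.Pod) {
	CheckVolumeModeOfPath(pod, v1.PersistentVolumeBlock, "/dev/xvda")
}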
// CheckReadWriteToPath checks that the given path inside the pod can be written to and read back:
// via dd against the device for block volumes, via a text file for filesystem volumes.
func CheckReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
	if volMode == v1.PersistentVolumeBlock {
		// random -> file1
		VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
		// file1 -> dev (write to dev)
		VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
		// dev -> file2 (read from dev)
		VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
		// file1 == file2 (check contents)
		VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
		// Clean up temp files
		VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")

		// Check that writing a file to a block volume fails
		VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
	} else {
		// text -> file1 (write to file)
		VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
		// grep file1 (read from file and check contents)
		VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))

		// Check that writing to a directory as a block volume fails
		VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
	}
}
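
// Sketch: exercise the filesystem-mode read/write checks against the mount
// path used by RunInPodWithVolume above (the path is an assumption here).
func exampleCheckReadWrite(pod *v1.Pod) {
	CheckReadWriteToPath(pod, v1.PersistentVolumeFilesystem, "/mnt/test")
}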
// genBinDataFromSeed returns a deterministic pseudo-random byte slice of the given length;
// the same seed always yields the same data.
func genBinDataFromSeed(len int, seed int64) []byte {
	binData := make([]byte, len)
	rand.Seed(seed)

	// Discard the count returned by rand.Read instead of shadowing the len parameter.
	if _, err := rand.Read(binData); err != nil {
		fmt.Printf("Error: %v\n", err)
	}

	return binData
}
// CheckReadFromPath verifies that the first len bytes at the given path (the device itself for
// block volumes, file1.txt under the path for filesystem volumes) hash to the same SHA-256 as
// len bytes generated from seed.
func CheckReadFromPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
	var pathForVolMode string
	if volMode == v1.PersistentVolumeBlock {
		pathForVolMode = path
	} else {
		pathForVolMode = filepath.Join(path, "file1.txt")
	}

	sum := sha256.Sum256(genBinDataFromSeed(len, seed))

	VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
	VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
}
// CheckWriteToPath writes len bytes generated from seed to the given path (the device itself for
// block volumes, file1.txt under the path for filesystem volumes).
func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
	var pathForVolMode string
	if volMode == v1.PersistentVolumeBlock {
		pathForVolMode = path
	} else {
		pathForVolMode = filepath.Join(path, "file1.txt")
	}

	encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))

	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
}
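
// Sketch pairing the write and read helpers: because genBinDataFromSeed is
// deterministic, data written with a given length and seed must hash identically
// when read back. Length, seed, and path are illustrative assumptions.
func exampleWriteThenRead(pod *v1.Pod) {
	const size, seed = 1024, int64(42)
	CheckWriteToPath(pod, v1.PersistentVolumeFilesystem, "/mnt/test", size, seed)
	CheckReadFromPath(pod, v1.PersistentVolumeFilesystem, "/mnt/test", size, seed)
}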