- /*
- Copyright 2019 The Kubernetes Authors.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- */
- package storage
- import (
- "context"
- "crypto/sha256"
- "encoding/json"
- "fmt"
- "regexp"
- "strconv"
- "strings"
- "time"
- v1 "k8s.io/api/core/v1"
- storagev1 "k8s.io/api/storage/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- utilerrors "k8s.io/apimachinery/pkg/util/errors"
- "k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/apimachinery/pkg/util/wait"
- clientset "k8s.io/client-go/kubernetes"
- "k8s.io/kubernetes/test/e2e/framework"
- e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
- e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
- "k8s.io/kubernetes/test/e2e/storage/drivers"
- "k8s.io/kubernetes/test/e2e/storage/testsuites"
- "k8s.io/kubernetes/test/e2e/storage/utils"
- imageutils "k8s.io/kubernetes/test/utils/image"
- "github.com/onsi/ginkgo"
- "github.com/onsi/gomega"
- )
- type cleanupFuncs func()
- const (
- csiNodeLimitUpdateTimeout = 5 * time.Minute
- csiPodUnschedulableTimeout = 5 * time.Minute
- csiResizeWaitPeriod = 5 * time.Minute
- // how long to wait for Resizing Condition on PVC to appear
- csiResizingConditionWait = 2 * time.Minute
- )
- var _ = utils.SIGDescribe("CSI mock volume", func() {
- type testParameters struct {
- disableAttach bool
- attachLimit int
- registerDriver bool
- podInfo *bool
- scName string
- enableResizing bool // enable resizing for both CSI mock driver and storageClass.
- enableNodeExpansion bool // enable node expansion for CSI mock driver
- // disableResizingOnDriver disables resizing on the CSI mock driver only; it overrides the enableResizing flag for the driver.
- disableResizingOnDriver bool
- }
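- // mockDriverSetup records everything that a single test creates so that cleanup() can delete it again.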
- type mockDriverSetup struct {
- cs clientset.Interface
- config *testsuites.PerTestConfig
- testCleanups []cleanupFuncs
- pods []*v1.Pod
- pvcs []*v1.PersistentVolumeClaim
- sc map[string]*storagev1.StorageClass
- driver testsuites.TestDriver
- provisioner string
- tp testParameters
- }
- var m mockDriverSetup
- f := framework.NewDefaultFramework("csi-mock-volumes")
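- // init deploys the CSI mock driver with the given parameters and, if driver registration is requested, waits for the corresponding CSIDriver object to appear.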
- init := func(tp testParameters) {
- m = mockDriverSetup{
- cs: f.ClientSet,
- sc: make(map[string]*storagev1.StorageClass),
- tp: tp,
- }
- cs := f.ClientSet
- var err error
- driverOpts := drivers.CSIMockDriverOpts{
- RegisterDriver: tp.registerDriver,
- PodInfo: tp.podInfo,
- AttachLimit: tp.attachLimit,
- DisableAttach: tp.disableAttach,
- EnableResizing: tp.enableResizing,
- EnableNodeExpansion: tp.enableNodeExpansion,
- }
- // this just disables resizing on the driver, keeping resizing enabled on the StorageClass.
- if tp.disableResizingOnDriver {
- driverOpts.EnableResizing = false
- }
- m.driver = drivers.InitMockCSIDriver(driverOpts)
- config, testCleanup := m.driver.PrepareTest(f)
- m.testCleanups = append(m.testCleanups, testCleanup)
- m.config = config
- m.provisioner = config.GetUniqueDriverName()
- if tp.registerDriver {
- err = waitForCSIDriver(cs, m.config.GetUniqueDriverName())
- framework.ExpectNoError(err, "Failed to get CSIDriver : %v", err)
- m.testCleanups = append(m.testCleanups, func() {
- destroyCSIDriver(cs, m.config.GetUniqueDriverName())
- })
- }
- }
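- // createPod provisions a 1Gi volume, either through a PVC backed by the test StorageClass or as a CSI inline ephemeral volume, and starts a pause pod that mounts it.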
- createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
- ginkgo.By("Creating pod")
- var sc *storagev1.StorageClass
- if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
- sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
- }
- scTest := testsuites.StorageClassTest{
- Name: m.driver.GetDriverInfo().Name,
- Provisioner: sc.Provisioner,
- Parameters: sc.Parameters,
- ClaimSize: "1Gi",
- ExpectedSize: "1Gi",
- }
- if m.tp.scName != "" {
- scTest.StorageClassName = m.tp.scName
- }
- if m.tp.enableResizing {
- scTest.AllowVolumeExpansion = true
- }
- // The mock driver only works when everything runs on a single node.
- nodeSelection := m.config.ClientNodeSelection
- if ephemeral {
- pod = startPausePodInline(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
- if pod != nil {
- m.pods = append(m.pods, pod)
- }
- } else {
- class, claim, pod = startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
- if class != nil {
- m.sc[class.Name] = class
- }
- if claim != nil {
- m.pvcs = append(m.pvcs, claim)
- }
- if pod != nil {
- m.pods = append(m.pods, pod)
- }
- }
- return // result variables set above
- }
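- // createPodWithPVC starts an additional pause pod that mounts an already existing claim.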
- createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
- nodeSelection := m.config.ClientNodeSelection
- pod, err := startPausePodWithClaim(m.cs, pvc, nodeSelection, f.Namespace.Name)
- if pod != nil {
- m.pods = append(m.pods, pod)
- }
- return pod, err
- }
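- // cleanup deletes the pods, claims and storage classes created by the test and then runs all registered cleanup callbacks.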
- cleanup := func() {
- cs := f.ClientSet
- var errs []error
- for _, pod := range m.pods {
- ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
- errs = append(errs, e2epod.DeletePodWithWait(cs, pod))
- }
- for _, claim := range m.pvcs {
- ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name))
- claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
- if err == nil {
- cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil)
- framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute)
- }
- }
- for _, sc := range m.sc {
- ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name))
- cs.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, nil)
- }
- ginkgo.By("Cleaning up resources")
- for _, cleanupFunc := range m.testCleanups {
- cleanupFunc()
- }
- err := utilerrors.NewAggregate(errs)
- framework.ExpectNoError(err, "while cleaning up after test")
- }
- // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
- ginkgo.Context("CSI attach test using mock driver", func() {
- tests := []struct {
- name string
- disableAttach bool
- deployClusterRegistrar bool
- }{
- {
- name: "should not require VolumeAttach for drivers without attachment",
- disableAttach: true,
- deployClusterRegistrar: true,
- },
- {
- name: "should require VolumeAttach for drivers with attachment",
- deployClusterRegistrar: true,
- },
- {
- name: "should preserve attachment policy when no CSIDriver present",
- deployClusterRegistrar: false,
- },
- }
- for _, t := range tests {
- test := t
- ginkgo.It(t.name, func() {
- var err error
- init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
- defer cleanup()
- _, claim, pod := createPod(false)
- if pod == nil {
- return
- }
- err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
- framework.ExpectNoError(err, "Failed to start pod: %v", err)
- ginkgo.By("Checking if VolumeAttachment was created for the pod")
- handle := getVolumeHandle(m.cs, claim)
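- // The VolumeAttachment name is "csi-" followed by the hex-encoded SHA-256 hash of the volume handle, driver name and node name concatenated, matching how the attach/detach controller names these objects.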
- attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeSelection.Name)))
- attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
- _, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
- if err != nil {
- if apierrors.IsNotFound(err) {
- if !test.disableAttach {
- framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
- }
- } else {
- framework.ExpectNoError(err, "Failed to find VolumeAttachment")
- }
- }
- if test.disableAttach {
- framework.ExpectError(err, "Unexpected VolumeAttachment found")
- }
- })
- }
- })
- ginkgo.Context("CSI workload information using mock driver", func() {
- var (
- err error
- podInfoTrue = true
- podInfoFalse = false
- )
- tests := []struct {
- name string
- podInfoOnMount *bool
- deployClusterRegistrar bool
- expectPodInfo bool
- expectEphemeral bool
- }{
- {
- name: "should not be passed when podInfoOnMount=nil",
- podInfoOnMount: nil,
- deployClusterRegistrar: true,
- expectPodInfo: false,
- expectEphemeral: false,
- },
- {
- name: "should be passed when podInfoOnMount=true",
- podInfoOnMount: &podInfoTrue,
- deployClusterRegistrar: true,
- expectPodInfo: true,
- expectEphemeral: false,
- },
- {
- name: "contain ephemeral=true when using inline volume",
- podInfoOnMount: &podInfoTrue,
- deployClusterRegistrar: true,
- expectPodInfo: true,
- expectEphemeral: true,
- },
- {
- name: "should not be passed when podInfoOnMount=false",
- podInfoOnMount: &podInfoFalse,
- deployClusterRegistrar: true,
- expectPodInfo: false,
- expectEphemeral: false,
- },
- {
- name: "should not be passed when CSIDriver does not exist",
- deployClusterRegistrar: false,
- expectPodInfo: false,
- expectEphemeral: false,
- },
- }
- for _, t := range tests {
- test := t
- ginkgo.It(t.name, func() {
- init(testParameters{
- registerDriver: test.deployClusterRegistrar,
- scName: "csi-mock-sc-" + f.UniqueName,
- podInfo: test.podInfoOnMount})
- defer cleanup()
- _, _, pod := createPod(test.expectEphemeral)
- if pod == nil {
- return
- }
- err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
- framework.ExpectNoError(err, "Failed to start pod: %v", err)
- // If we expect an ephemeral volume, the feature has to be enabled.
- // Otherwise we need to check whether pod info is expected, because its
- // content depends on whether the feature is enabled or not.
- csiInlineVolumesEnabled := test.expectEphemeral
- if test.expectPodInfo {
- ginkgo.By("checking for CSIInlineVolumes feature")
- csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Namespace.Name)
- framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
- }
- ginkgo.By("Deleting the previously created pod")
- err = e2epod.DeletePodWithWait(m.cs, pod)
- framework.ExpectNoError(err, "while deleting")
- ginkgo.By("Checking CSI driver logs")
- // The driver is deployed as a statefulset with stable pod names
- driverPodName := "csi-mockplugin-0"
- err = checkPodLogs(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
- framework.ExpectNoError(err)
- })
- }
- })
- ginkgo.Context("CSI volume limit information using mock driver", func() {
- ginkgo.It("should report attach limit when limit is bigger than 0 [Slow]", func() {
- // define volume limit to be 2 for this test
- var err error
- init(testParameters{attachLimit: 2})
- defer cleanup()
- nodeName := m.config.ClientNodeSelection.Name
- driverName := m.config.GetUniqueDriverName()
- csiNodeAttachLimit, err := checkCSINodeForLimits(nodeName, driverName, m.cs)
- framework.ExpectNoError(err, "while checking limits in CSINode: %v", err)
- gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2))
- _, _, pod1 := createPod(false)
- gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
- err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
- framework.ExpectNoError(err, "Failed to start pod1: %v", err)
- _, _, pod2 := createPod(false)
- gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")
- err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
- framework.ExpectNoError(err, "Failed to start pod2: %v", err)
- _, _, pod3 := createPod(false)
- gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
- err = waitForMaxVolumeCondition(pod3, m.cs)
- framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
- })
- })
- ginkgo.Context("CSI Volume expansion", func() {
- tests := []struct {
- name string
- nodeExpansionRequired bool
- disableAttach bool
- disableResizingOnDriver bool
- expectFailure bool
- }{
- {
- name: "should expand volume without restarting pod if nodeExpansion=off",
- nodeExpansionRequired: false,
- },
- {
- name: "should expand volume by restarting pod if attach=on, nodeExpansion=on",
- nodeExpansionRequired: true,
- },
- {
- name: "should expand volume by restarting pod if attach=off, nodeExpansion=on",
- disableAttach: true,
- nodeExpansionRequired: true,
- },
- {
- name: "should not expand volume if resizingOnDriver=off, resizingOnSC=on",
- disableResizingOnDriver: true,
- expectFailure: true,
- },
- }
- for _, t := range tests {
- test := t
- ginkgo.It(t.name, func() {
- var err error
- tp := testParameters{
- enableResizing: true,
- enableNodeExpansion: test.nodeExpansionRequired,
- disableResizingOnDriver: test.disableResizingOnDriver,
- }
- // disabling attach requires the driver registration feature
- if test.disableAttach {
- tp.disableAttach = true
- tp.registerDriver = true
- }
- init(tp)
- defer cleanup()
- sc, pvc, pod := createPod(false)
- gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
- framework.ExpectEqual(*sc.AllowVolumeExpansion, true, "failed creating sc with allowed expansion")
- err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
- framework.ExpectNoError(err, "Failed to start pod1: %v", err)
- ginkgo.By("Expanding current pvc")
- newSize := resource.MustParse("6Gi")
- newPVC, err := testsuites.ExpandPVCSize(pvc, newSize, m.cs)
- framework.ExpectNoError(err, "While updating pvc for more size")
- pvc = newPVC
- gomega.Expect(pvc).NotTo(gomega.BeNil())
- pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
- if pvcSize.Cmp(newSize) != 0 {
- framework.Failf("error updating pvc size %q", pvc.Name)
- }
- if test.expectFailure {
- err = testsuites.WaitForResizingCondition(pvc, m.cs, csiResizingConditionWait)
- framework.ExpectError(err, "unexpected resizing condition on PVC")
- return
- }
- ginkgo.By("Waiting for persistent volume resize to finish")
- err = testsuites.WaitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod)
- framework.ExpectNoError(err, "While waiting for CSI PV resize to finish")
- checkPVCSize := func() {
- ginkgo.By("Waiting for PVC resize to finish")
- pvc, err = testsuites.WaitForFSResize(pvc, m.cs)
- framework.ExpectNoError(err, "while waiting for PVC resize to finish")
- pvcConditions := pvc.Status.Conditions
- framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
- }
- // if node expansion is not required, the PVC should already be fully resized at this point
- if !test.nodeExpansionRequired {
- checkPVCSize()
- } else {
- ginkgo.By("Checking for conditions on pvc")
- npvc, err := testsuites.WaitForPendingFSResizeCondition(pvc, m.cs)
- framework.ExpectNoError(err, "While waiting for pvc to have fs resizing condition")
- pvc = npvc
- inProgressConditions := pvc.Status.Conditions
- if len(inProgressConditions) > 0 {
- framework.ExpectEqual(inProgressConditions[0].Type, v1.PersistentVolumeClaimFileSystemResizePending, "pvc must have fs resizing condition")
- }
- ginkgo.By("Deleting the previously created pod")
- err = e2epod.DeletePodWithWait(m.cs, pod)
- framework.ExpectNoError(err, "while deleting pod for resizing")
- ginkgo.By("Creating a new pod with same volume")
- pod2, err := createPodWithPVC(pvc)
- gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating pod for csi resizing")
- framework.ExpectNoError(err, "while recreating pod for resizing")
- checkPVCSize()
- }
- })
- }
- })
- ginkgo.Context("CSI online volume expansion", func() {
- tests := []struct {
- name string
- disableAttach bool
- }{
- {
- name: "should expand volume without restarting pod if attach=on, nodeExpansion=on",
- },
- {
- name: "should expand volume without restarting pod if attach=off, nodeExpansion=on",
- disableAttach: true,
- },
- }
- for _, t := range tests {
- test := t
- ginkgo.It(test.name, func() {
- var err error
- params := testParameters{enableResizing: true, enableNodeExpansion: true}
- if test.disableAttach {
- params.disableAttach = true
- params.registerDriver = true
- }
- init(params)
- defer cleanup()
- sc, pvc, pod := createPod(false)
- gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
- framework.ExpectEqual(*sc.AllowVolumeExpansion, true, "failed creating sc with allowed expansion")
- err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
- framework.ExpectNoError(err, "Failed to start pod1: %v", err)
- ginkgo.By("Expanding current pvc")
- newSize := resource.MustParse("6Gi")
- newPVC, err := testsuites.ExpandPVCSize(pvc, newSize, m.cs)
- framework.ExpectNoError(err, "While updating pvc for more size")
- pvc = newPVC
- gomega.Expect(pvc).NotTo(gomega.BeNil())
- pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
- if pvcSize.Cmp(newSize) != 0 {
- framework.Failf("error updating pvc size %q", pvc.Name)
- }
- ginkgo.By("Waiting for persistent volume resize to finish")
- err = testsuites.WaitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod)
- framework.ExpectNoError(err, "While waiting for PV resize to finish")
- ginkgo.By("Waiting for PVC resize to finish")
- pvc, err = testsuites.WaitForFSResize(pvc, m.cs)
- framework.ExpectNoError(err, "while waiting for PVC to finish")
- pvcConditions := pvc.Status.Conditions
- framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
- })
- }
- })
- })
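- // waitForMaxVolumeCondition waits until the given pod reports an Unschedulable condition whose message mentions the "max volume count" limit, or times out.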
- func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
- reg, err := regexp.Compile(`max.+volume.+count`)
- if err != nil {
- return err
- }
- waitErr := wait.PollImmediate(10*time.Second, csiPodUnschedulableTimeout, func() (bool, error) {
- pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
- if err != nil {
- return false, err
- }
- conditions := pod.Status.Conditions
- for _, condition := range conditions {
- matched := reg.MatchString(condition.Message)
- if condition.Reason == v1.PodReasonUnschedulable && matched {
- return true, nil
- }
- }
- return false, nil
- })
- if waitErr != nil {
- return fmt.Errorf("error waiting for pod %s/%s to have max volume condition: %v", pod.Namespace, pod.Name, waitErr)
- }
- return nil
- }
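- // checkCSINodeForLimits polls the CSINode object of the given node until the named driver reports a non-zero volume limit, and returns that limit.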
- func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Interface) (int32, error) {
- var attachLimit int32
- waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) {
- csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
- if err != nil && !apierrors.IsNotFound(err) {
- return false, err
- }
- attachLimit = getVolumeLimitFromCSINode(csiNode, driverName)
- if attachLimit > 0 {
- return true, nil
- }
- return false, nil
- })
- if waitErr != nil {
- return 0, fmt.Errorf("error waiting for non-zero volume limit of driver %s on node %s: %v", driverName, nodeName, waitErr)
- }
- return attachLimit, nil
- }
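- // startPausePod creates the StorageClass if it does not exist yet, creates a PVC using it, waits for the PVC to be bound and then starts a pause pod that mounts the claim.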
- func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
- class := newStorageClass(t, ns, "")
- var err error
- _, err = cs.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{})
- if err != nil {
- class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
- framework.ExpectNoError(err, "Failed to create class : %v", err)
- }
- claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
- ClaimSize: t.ClaimSize,
- StorageClassName: &(class.Name),
- VolumeMode: &t.VolumeMode,
- }, ns)
- claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{})
- framework.ExpectNoError(err, "Failed to create claim: %v", err)
- pvcClaims := []*v1.PersistentVolumeClaim{claim}
- _, err = e2epv.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
- framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
- pod, err := startPausePodWithClaim(cs, claim, node, ns)
- framework.ExpectNoError(err, "Failed to create pod: %v", err)
- return class, claim, pod
- }
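- // startPausePodInline starts a pause pod with a CSI inline (ephemeral) volume served by the test provisioner.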
- func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, ns string) *v1.Pod {
- pod, err := startPausePodWithInlineVolume(cs,
- &v1.CSIVolumeSource{
- Driver: t.Provisioner,
- },
- node, ns)
- framework.ExpectNoError(err, "Failed to create pod: %v", err)
- return pod
- }
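- // startPausePodWithClaim starts a pause pod that mounts the given PVC read-write.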
- func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
- return startPausePodWithVolumeSource(cs,
- v1.VolumeSource{
- PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
- ClaimName: pvc.Name,
- ReadOnly: false,
- },
- },
- node, ns)
- }
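- // startPausePodWithInlineVolume starts a pause pod with the given CSI inline volume source.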
- func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
- return startPausePodWithVolumeSource(cs,
- v1.VolumeSource{
- CSI: inlineVolume,
- },
- node, ns)
- }
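- // startPausePodWithVolumeSource creates a pause pod that mounts the given volume source at /mnt/test on the selected node.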
- func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
- pod := &v1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "pvc-volume-tester-",
- },
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {
- Name: "volume-tester",
- Image: imageutils.GetE2EImage(imageutils.Pause),
- VolumeMounts: []v1.VolumeMount{
- {
- Name: "my-volume",
- MountPath: "/mnt/test",
- },
- },
- },
- },
- RestartPolicy: v1.RestartPolicyNever,
- Volumes: []v1.Volume{
- {
- Name: "my-volume",
- VolumeSource: volumeSource,
- },
- },
- },
- }
- e2epod.SetNodeSelection(pod, node)
- return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
- }
- // checkPodLogs tests that NodePublish was called with expected volume_context and (for ephemeral inline volumes)
- // has the matching NodeUnpublish
- func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
- expectedAttributes := map[string]string{
- "csi.storage.k8s.io/pod.name": pod.Name,
- "csi.storage.k8s.io/pod.namespace": namespace,
- "csi.storage.k8s.io/pod.uid": string(pod.UID),
- "csi.storage.k8s.io/serviceAccount.name": "default",
- }
- if csiInlineVolumesEnabled {
- // This attribute is only passed since Kubernetes 1.15, and only when the CSIInlineVolume feature gate is enabled.
- expectedAttributes["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(ephemeralVolume)
- }
- // Load logs of driver pod
- log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
- if err != nil {
- return fmt.Errorf("could not load CSI driver logs: %s", err)
- }
- framework.Logf("CSI driver logs:\n%s", log)
- // Find NodePublish in the logs
- foundAttributes := sets.NewString()
- logLines := strings.Split(log, "\n")
- numNodePublishVolume := 0
- numNodeUnpublishVolume := 0
- for _, line := range logLines {
- if !strings.HasPrefix(line, "gRPCCall:") {
- continue
- }
- line = strings.TrimPrefix(line, "gRPCCall:")
- // Dummy structure that parses just the volume_context out of a logged CSI call
- type MockCSICall struct {
- Method string
- Request struct {
- VolumeContext map[string]string `json:"volume_context"`
- }
- }
- var call MockCSICall
- err := json.Unmarshal([]byte(line), &call)
- if err != nil {
- framework.Logf("Could not parse CSI driver log line %q: %s", line, err)
- continue
- }
- switch call.Method {
- case "/csi.v1.Node/NodePublishVolume":
- numNodePublishVolume++
- if numNodePublishVolume == 1 {
- // Check that NodePublish had expected attributes for first volume
- for k, v := range expectedAttributes {
- vv, found := call.Request.VolumeContext[k]
- if found && v == vv {
- foundAttributes.Insert(k)
- framework.Logf("Found volume attribute %s: %s", k, v)
- }
- }
- }
- case "/csi.v1.Node/NodeUnpublishVolume":
- framework.Logf("Found NodeUnpublishVolume: %+v", call)
- numNodeUnpublishVolume++
- }
- }
- if numNodePublishVolume == 0 {
- return fmt.Errorf("NodePublish was never called")
- }
- if numNodeUnpublishVolume == 0 {
- return fmt.Errorf("NodeUnpublish was never called")
- }
- if expectPodInfo {
- if foundAttributes.Len() != len(expectedAttributes) {
- return fmt.Errorf("number of found volume attributes does not match, expected %d, got %d", len(expectedAttributes), foundAttributes.Len())
- }
- return nil
- }
- if foundAttributes.Len() != 0 {
- return fmt.Errorf("some unexpected volume attributes were found: %+v", foundAttributes.List())
- }
- return nil
- }
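- // waitForCSIDriver waits up to 4 minutes for the CSIDriver object with the given name to exist.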
- func waitForCSIDriver(cs clientset.Interface, driverName string) error {
- timeout := 4 * time.Minute
- framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
- for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
- _, err := cs.StorageV1beta1().CSIDrivers().Get(context.TODO(), driverName, metav1.GetOptions{})
- if !apierrors.IsNotFound(err) {
- return err
- }
- }
- return fmt.Errorf("gave up after waiting %v for CSIDriver %q", timeout, driverName)
- }
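- // destroyCSIDriver deletes the CSIDriver object with the given name, if it exists.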
- func destroyCSIDriver(cs clientset.Interface, driverName string) {
- driverGet, err := cs.StorageV1beta1().CSIDrivers().Get(context.TODO(), driverName, metav1.GetOptions{})
- if err == nil {
- framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
- // Uncomment the following line to get full dump of CSIDriver object
- // framework.Logf("%s", framework.PrettyPrint(driverGet))
- cs.StorageV1beta1().CSIDrivers().Delete(context.TODO(), driverName, nil)
- }
- }
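- // getVolumeHandle returns the CSI volume handle of the PV bound to the given claim.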
- func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string {
- // re-read the claim to get its latest state, including the name of the bound volume
- claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
- if err != nil {
- framework.ExpectNoError(err, "Cannot get PVC")
- return ""
- }
- pvName := claim.Spec.VolumeName
- pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
- if err != nil {
- framework.ExpectNoError(err, "Cannot get PV")
- return ""
- }
- if pv.Spec.CSI == nil {
- gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil())
- return ""
- }
- return pv.Spec.CSI.VolumeHandle
- }
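- // getVolumeLimitFromCSINode returns the allocatable volume count reported by the given driver in the CSINode object, or 0 if the driver or the limit is not present.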
- func getVolumeLimitFromCSINode(csiNode *storagev1.CSINode, driverName string) int32 {
- for _, d := range csiNode.Spec.Drivers {
- if d.Name != driverName {
- continue
- }
- if d.Allocatable != nil && d.Allocatable.Count != nil {
- return *d.Allocatable.Count
- }
- }
- return 0
- }