csi_mock_volume.go 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808
  1. /*
  2. Copyright 2019 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package storage
  14. import (
  15. "crypto/sha256"
  16. "encoding/json"
  17. "fmt"
  18. "regexp"
  19. "strings"
  20. "time"
  21. v1 "k8s.io/api/core/v1"
  22. storagev1 "k8s.io/api/storage/v1"
  23. "k8s.io/apimachinery/pkg/api/errors"
  24. "k8s.io/apimachinery/pkg/api/resource"
  25. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  26. utilerrors "k8s.io/apimachinery/pkg/util/errors"
  27. "k8s.io/apimachinery/pkg/util/sets"
  28. "k8s.io/apimachinery/pkg/util/wait"
  29. clientset "k8s.io/client-go/kubernetes"
  30. volumeutil "k8s.io/kubernetes/pkg/volume/util"
  31. "k8s.io/kubernetes/test/e2e/framework"
  32. e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  33. "k8s.io/kubernetes/test/e2e/storage/drivers"
  34. "k8s.io/kubernetes/test/e2e/storage/testsuites"
  35. "k8s.io/kubernetes/test/e2e/storage/utils"
  36. imageutils "k8s.io/kubernetes/test/utils/image"
  37. "github.com/onsi/ginkgo"
  38. "github.com/onsi/gomega"
  39. )
// cleanupFuncs is a deferred cleanup action registered during test setup
// and executed (in registration order) by the per-test cleanup closure.
type cleanupFuncs func()

const (
    // how long to wait for the node to publish an updated CSI attach limit
    csiNodeLimitUpdateTimeout = 5 * time.Minute
    // how long to wait for a pod to become Unschedulable due to volume limits
    csiPodUnschedulableTimeout = 5 * time.Minute
    // how long to wait for controller-side (PV) resize to complete
    csiResizeWaitPeriod = 5 * time.Minute
    // how long to wait for Resizing Condition on PVC to appear
    csiResizingConditionWait = 2 * time.Minute
)
// Registers the "CSI mock volume" test suite. All specs share the mockDriverSetup
// state in `m`, which is re-initialized by init() at the start of each spec and
// torn down by cleanup().
var _ = utils.SIGDescribe("CSI mock volume", func() {
    // testParameters configures how the mock CSI driver is deployed for a spec.
    type testParameters struct {
        disableAttach   bool
        attachLimit     int
        registerDriver  bool
        podInfo         *bool
        scName          string
        nodeSelectorKey string
        enableResizing  bool // enable resizing for both CSI mock driver and storageClass.
        enableNodeExpansion bool // enable node expansion for CSI mock driver
        // just disable resizing on driver it overrides enableResizing flag for CSI mock driver
        disableResizingOnDriver bool
    }
    // mockDriverSetup collects everything created during a spec so that
    // cleanup() can delete it afterwards.
    type mockDriverSetup struct {
        cs           clientset.Interface
        config       *testsuites.PerTestConfig
        testCleanups []cleanupFuncs
        pods         []*v1.Pod
        pvcs         []*v1.PersistentVolumeClaim
        sc           map[string]*storagev1.StorageClass
        driver       testsuites.TestDriver
        nodeLabel    map[string]string
        provisioner  string
        tp           testParameters
    }
    var m mockDriverSetup
    f := framework.NewDefaultFramework("csi-mock-volumes")
    // init deploys the mock CSI driver with the given parameters, optionally
    // labels the client node, and (if the cluster registrar is deployed)
    // waits for the CSIDriver object to appear.
    init := func(tp testParameters) {
        m = mockDriverSetup{
            cs: f.ClientSet,
            sc: make(map[string]*storagev1.StorageClass),
            tp: tp,
        }
        cs := f.ClientSet
        var err error
        driverOpts := drivers.CSIMockDriverOpts{
            RegisterDriver:      tp.registerDriver,
            PodInfo:             tp.podInfo,
            AttachLimit:         tp.attachLimit,
            DisableAttach:       tp.disableAttach,
            EnableResizing:      tp.enableResizing,
            EnableNodeExpansion: tp.enableNodeExpansion,
        }
        // this just disables resizing on the driver, keeping resizing on the SC enabled.
        if tp.disableResizingOnDriver {
            driverOpts.EnableResizing = false
        }
        m.driver = drivers.InitMockCSIDriver(driverOpts)
        config, testCleanup := m.driver.PrepareTest(f)
        m.testCleanups = append(m.testCleanups, testCleanup)
        m.config = config
        m.provisioner = config.GetUniqueDriverName()
        if tp.nodeSelectorKey != "" {
            // Label value is the namespace name, making the label unique per spec.
            framework.AddOrUpdateLabelOnNode(m.cs, m.config.ClientNodeName, tp.nodeSelectorKey, f.Namespace.Name)
            m.nodeLabel = map[string]string{
                tp.nodeSelectorKey: f.Namespace.Name,
            }
        }
        if tp.registerDriver {
            err = waitForCSIDriver(cs, m.config.GetUniqueDriverName())
            framework.ExpectNoError(err, "Failed to get CSIDriver : %v", err)
            m.testCleanups = append(m.testCleanups, func() {
                destroyCSIDriver(cs, m.config.GetUniqueDriverName())
            })
        }
    }
    // createPod provisions a storage class + PVC and starts a pause pod using
    // them; everything created is recorded in `m` for cleanup.
    createPod := func() (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
        ginkgo.By("Creating pod")
        var sc *storagev1.StorageClass
        if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
            sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
        }
        nodeName := m.config.ClientNodeName
        // NOTE(review): sc is nil here if the driver is not a DynamicPVTestDriver;
        // the dereferences below assume the mock driver always is — confirm.
        scTest := testsuites.StorageClassTest{
            Name:         m.driver.GetDriverInfo().Name,
            Provisioner:  sc.Provisioner,
            Parameters:   sc.Parameters,
            ClaimSize:    "1Gi",
            ExpectedSize: "1Gi",
        }
        if m.tp.scName != "" {
            scTest.StorageClassName = m.tp.scName
        }
        if m.tp.enableResizing {
            scTest.AllowVolumeExpansion = true
        }
        nodeSelection := framework.NodeSelection{
            // The mock driver only works when everything runs on a single node.
            Name: nodeName,
        }
        if len(m.nodeLabel) > 0 {
            nodeSelection = framework.NodeSelection{
                Selector: m.nodeLabel,
            }
        }
        class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
        if class != nil {
            m.sc[class.Name] = class
        }
        if claim != nil {
            m.pvcs = append(m.pvcs, claim)
        }
        if pod != nil {
            m.pods = append(m.pods, pod)
        }
        return class, claim, pod
    }
    // createPodWithPVC starts a pause pod using an already-existing claim,
    // recording the pod for cleanup.
    createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
        nodeName := m.config.ClientNodeName
        nodeSelection := framework.NodeSelection{
            Name: nodeName,
        }
        if len(m.nodeLabel) > 0 {
            nodeSelection = framework.NodeSelection{
                Selector: m.nodeLabel,
            }
        }
        pod, err := startPausePodWithClaim(m.cs, pvc, nodeSelection, f.Namespace.Name)
        if pod != nil {
            m.pods = append(m.pods, pod)
        }
        return pod, err
    }
    // cleanup deletes everything recorded in `m` (pods, claims, storage
    // classes), runs registered cleanup callbacks, and removes the node label.
    // Only pod deletion errors are aggregated and reported.
    cleanup := func() {
        cs := f.ClientSet
        var errs []error
        for _, pod := range m.pods {
            ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
            errs = append(errs, framework.DeletePodWithWait(f, cs, pod))
        }
        for _, claim := range m.pvcs {
            ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name))
            claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
            if err == nil {
                // Best-effort: deletion/wait errors are intentionally ignored.
                cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
                framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute)
            }
        }
        for _, sc := range m.sc {
            ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name))
            cs.StorageV1().StorageClasses().Delete(sc.Name, nil)
        }
        ginkgo.By("Cleaning up resources")
        for _, cleanupFunc := range m.testCleanups {
            cleanupFunc()
        }
        if len(m.nodeLabel) > 0 && len(m.tp.nodeSelectorKey) > 0 {
            framework.RemoveLabelOffNode(m.cs, m.config.ClientNodeName, m.tp.nodeSelectorKey)
        }
        err := utilerrors.NewAggregate(errs)
        framework.ExpectNoError(err, "while cleaning up after test")
    }
    // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
    ginkgo.Context("CSI attach test using mock driver", func() {
        tests := []struct {
            name                   string
            disableAttach          bool
            deployClusterRegistrar bool
        }{
            {
                name:                   "should not require VolumeAttach for drivers without attachment",
                disableAttach:          true,
                deployClusterRegistrar: true,
            },
            {
                name:                   "should require VolumeAttach for drivers with attachment",
                deployClusterRegistrar: true,
            },
            {
                name:                   "should preserve attachment policy when no CSIDriver present",
                deployClusterRegistrar: false,
            },
        }
        for _, t := range tests {
            test := t // capture range variable for the closure below
            ginkgo.It(t.name, func() {
                var err error
                init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
                defer cleanup()
                _, claim, pod := createPod()
                if pod == nil {
                    return
                }
                err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
                framework.ExpectNoError(err, "Failed to start pod: %v", err)
                ginkgo.By("Checking if VolumeAttachment was created for the pod")
                // VolumeAttachment name is csi-<sha256(handle+provisioner+node)>,
                // matching the attacher's naming scheme.
                handle := getVolumeHandle(m.cs, claim)
                attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName)))
                attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
                _, err = m.cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
                if err != nil {
                    if errors.IsNotFound(err) {
                        if !test.disableAttach {
                            framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
                        }
                    } else {
                        framework.ExpectNoError(err, "Failed to find VolumeAttachment")
                    }
                }
                if test.disableAttach {
                    framework.ExpectError(err, "Unexpected VolumeAttachment found")
                }
            })
        }
    })
    ginkgo.Context("CSI workload information using mock driver", func() {
        var (
            err          error
            podInfoTrue  = true
            podInfoFalse = false
        )
        tests := []struct {
            name                   string
            podInfoOnMount         *bool
            deployClusterRegistrar bool
            expectPodInfo          bool
        }{
            {
                name:                   "should not be passed when podInfoOnMount=nil",
                podInfoOnMount:         nil,
                deployClusterRegistrar: true,
                expectPodInfo:          false,
            },
            {
                name:                   "should be passed when podInfoOnMount=true",
                podInfoOnMount:         &podInfoTrue,
                deployClusterRegistrar: true,
                expectPodInfo:          true,
            },
            {
                name:                   "should not be passed when podInfoOnMount=false",
                podInfoOnMount:         &podInfoFalse,
                deployClusterRegistrar: true,
                expectPodInfo:          false,
            },
            {
                name:                   "should not be passed when CSIDriver does not exist",
                deployClusterRegistrar: false,
                expectPodInfo:          false,
            },
        }
        for _, t := range tests {
            test := t // capture range variable for the closure below
            ginkgo.It(t.name, func() {
                init(testParameters{
                    registerDriver: test.deployClusterRegistrar,
                    scName:         "csi-mock-sc-" + f.UniqueName,
                    podInfo:        test.podInfoOnMount})
                defer cleanup()
                _, _, pod := createPod()
                if pod == nil {
                    return
                }
                err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
                framework.ExpectNoError(err, "Failed to start pod: %v", err)
                ginkgo.By("Checking CSI driver logs")
                // The driver is deployed as a statefulset with stable pod names
                driverPodName := "csi-mockplugin-0"
                err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo)
                framework.ExpectNoError(err)
            })
        }
    })
    ginkgo.Context("CSI volume limit information using mock driver", func() {
        ginkgo.It("should report attach limit when limit is bigger than 0 [Slow]", func() {
            // define volume limit to be 2 for this test
            var err error
            nodeSelectorKey := fmt.Sprintf("attach-limit-csi-%s", f.Namespace.Name)
            init(testParameters{nodeSelectorKey: nodeSelectorKey, attachLimit: 2})
            defer cleanup()
            nodeName := m.config.ClientNodeName
            attachKey := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(m.provisioner))
            nodeAttachLimit, err := checkNodeForLimits(nodeName, attachKey, m.cs)
            framework.ExpectNoError(err, "while fetching node %v", err)
            gomega.Expect(nodeAttachLimit).To(gomega.Equal(2))
            // First two pods fit within the limit; the third must become
            // unschedulable with a "max volume count" condition.
            _, _, pod1 := createPod()
            gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
            err = framework.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
            framework.ExpectNoError(err, "Failed to start pod1: %v", err)
            _, _, pod2 := createPod()
            gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")
            err = framework.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
            framework.ExpectNoError(err, "Failed to start pod2: %v", err)
            _, _, pod3 := createPod()
            gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
            err = waitForMaxVolumeCondition(pod3, m.cs)
            framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
        })
    })
    ginkgo.Context("CSI Volume expansion [Feature:ExpandCSIVolumes]", func() {
        tests := []struct {
            name                    string
            nodeExpansionRequired   bool
            disableAttach           bool
            disableResizingOnDriver bool
            expectFailure           bool
        }{
            {
                name:                  "should expand volume without restarting pod if nodeExpansion=off",
                nodeExpansionRequired: false,
            },
            {
                name:                  "should expand volume by restarting pod if attach=on, nodeExpansion=on",
                nodeExpansionRequired: true,
            },
            {
                name:                  "should expand volume by restarting pod if attach=off, nodeExpansion=on",
                disableAttach:         true,
                nodeExpansionRequired: true,
            },
            {
                name:                    "should not expand volume if resizingOnDriver=off, resizingOnSC=on",
                disableResizingOnDriver: true,
                expectFailure:           true,
            },
        }
        for _, t := range tests {
            test := t // capture range variable for the closure below
            ginkgo.It(t.name, func() {
                var err error
                tp := testParameters{
                    enableResizing:          true,
                    enableNodeExpansion:     test.nodeExpansionRequired,
                    disableResizingOnDriver: test.disableResizingOnDriver,
                }
                // disabling attach requires drive registration feature
                if test.disableAttach {
                    tp.disableAttach = true
                    tp.registerDriver = true
                }
                init(tp)
                defer cleanup()
                ns := f.Namespace.Name
                sc, pvc, pod := createPod()
                gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
                gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
                err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
                framework.ExpectNoError(err, "Failed to start pod1: %v", err)
                ginkgo.By("Expanding current pvc")
                newSize := resource.MustParse("6Gi")
                pvc, err = expandPVCSize(pvc, newSize, m.cs)
                framework.ExpectNoError(err, "While updating pvc for more size")
                gomega.Expect(pvc).NotTo(gomega.BeNil())
                pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
                if pvcSize.Cmp(newSize) != 0 {
                    framework.Failf("error updating pvc size %q", pvc.Name)
                }
                if test.expectFailure {
                    // Driver-side resizing is off: the PVC must never get a
                    // Resizing condition.
                    err = waitForResizingCondition(pvc, m.cs, csiResizingConditionWait)
                    framework.ExpectError(err, "unexpected resizing condition on PVC")
                    return
                }
                ginkgo.By("Waiting for persistent volume resize to finish")
                err = waitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod)
                framework.ExpectNoError(err, "While waiting for CSI PV resize to finish")
                checkPVCSize := func() {
                    ginkgo.By("Waiting for PVC resize to finish")
                    pvc, err = waitForFSResize(pvc, m.cs)
                    framework.ExpectNoError(err, "while waiting for PVC resize to finish")
                    pvcConditions := pvc.Status.Conditions
                    gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
                }
                // if node expansion is not required PVC should be resized as well
                if !test.nodeExpansionRequired {
                    checkPVCSize()
                } else {
                    // Node expansion happens on (re)publish, so the pod must be
                    // deleted and recreated for the FS resize to complete.
                    ginkgo.By("Checking for conditions on pvc")
                    pvc, err = m.cs.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
                    framework.ExpectNoError(err, "While fetching pvc after controller resize")
                    inProgressConditions := pvc.Status.Conditions
                    if len(inProgressConditions) > 0 {
                        gomega.Expect(inProgressConditions[0].Type).To(gomega.Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition")
                    }
                    ginkgo.By("Deleting the previously created pod")
                    err = framework.DeletePodWithWait(f, m.cs, pod)
                    framework.ExpectNoError(err, "while deleting pod for resizing")
                    ginkgo.By("Creating a new pod with same volume")
                    pod2, err := createPodWithPVC(pvc)
                    gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating pod for csi resizing")
                    framework.ExpectNoError(err, "while recreating pod for resizing")
                    checkPVCSize()
                }
            })
        }
    })
    ginkgo.Context("CSI online volume expansion [Feature:ExpandCSIVolumes][Feature:ExpandInUseVolumes]", func() {
        tests := []struct {
            name          string
            disableAttach bool
        }{
            {
                name: "should expand volume without restarting pod if attach=on, nodeExpansion=on",
            },
            {
                name:          "should expand volume without restarting pod if attach=off, nodeExpansion=on",
                disableAttach: true,
            },
        }
        for _, t := range tests {
            test := t // capture range variable for the closure below
            ginkgo.It(test.name, func() {
                var err error
                params := testParameters{enableResizing: true, enableNodeExpansion: true}
                if test.disableAttach {
                    params.disableAttach = true
                    params.registerDriver = true
                }
                init(params)
                defer cleanup()
                sc, pvc, pod := createPod()
                gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
                gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
                err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
                framework.ExpectNoError(err, "Failed to start pod1: %v", err)
                ginkgo.By("Expanding current pvc")
                newSize := resource.MustParse("6Gi")
                pvc, err = expandPVCSize(pvc, newSize, m.cs)
                framework.ExpectNoError(err, "While updating pvc for more size")
                gomega.Expect(pvc).NotTo(gomega.BeNil())
                pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
                if pvcSize.Cmp(newSize) != 0 {
                    framework.Failf("error updating pvc size %q", pvc.Name)
                }
                ginkgo.By("Waiting for persistent volume resize to finish")
                err = waitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod)
                framework.ExpectNoError(err, "While waiting for PV resize to finish")
                ginkgo.By("Waiting for PVC resize to finish")
                pvc, err = waitForFSResize(pvc, m.cs)
                framework.ExpectNoError(err, "while waiting for PVC to finish")
                pvcConditions := pvc.Status.Conditions
                gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
            })
        }
    })
})
  483. func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
  484. var err error
  485. waitErr := wait.PollImmediate(10*time.Second, csiPodUnschedulableTimeout, func() (bool, error) {
  486. pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
  487. if err != nil {
  488. return false, err
  489. }
  490. conditions := pod.Status.Conditions
  491. for _, condition := range conditions {
  492. matched, _ := regexp.MatchString("max.+volume.+count", condition.Message)
  493. if condition.Reason == v1.PodReasonUnschedulable && matched {
  494. return true, nil
  495. }
  496. }
  497. return false, nil
  498. })
  499. return waitErr
  500. }
  501. func checkNodeForLimits(nodeName string, attachKey v1.ResourceName, cs clientset.Interface) (int, error) {
  502. var attachLimit int64
  503. waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) {
  504. node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
  505. if err != nil {
  506. return false, err
  507. }
  508. limits := getVolumeLimit(node)
  509. var ok bool
  510. if len(limits) > 0 {
  511. attachLimit, ok = limits[attachKey]
  512. if ok {
  513. return true, nil
  514. }
  515. }
  516. return false, nil
  517. })
  518. return int(attachLimit), waitErr
  519. }
  520. func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
  521. class := newStorageClass(t, ns, "")
  522. var err error
  523. _, err = cs.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
  524. if err != nil {
  525. class, err = cs.StorageV1().StorageClasses().Create(class)
  526. framework.ExpectNoError(err, "Failed to create class : %v", err)
  527. }
  528. claim := newClaim(t, ns, "")
  529. claim.Spec.StorageClassName = &class.Name
  530. claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(claim)
  531. framework.ExpectNoError(err, "Failed to create claim: %v", err)
  532. pvcClaims := []*v1.PersistentVolumeClaim{claim}
  533. _, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
  534. framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
  535. pod := &v1.Pod{
  536. ObjectMeta: metav1.ObjectMeta{
  537. GenerateName: "pvc-volume-tester-",
  538. },
  539. Spec: v1.PodSpec{
  540. Containers: []v1.Container{
  541. {
  542. Name: "volume-tester",
  543. Image: imageutils.GetE2EImage(imageutils.Pause),
  544. VolumeMounts: []v1.VolumeMount{
  545. {
  546. Name: "my-volume",
  547. MountPath: "/mnt/test",
  548. },
  549. },
  550. },
  551. },
  552. RestartPolicy: v1.RestartPolicyNever,
  553. Volumes: []v1.Volume{
  554. {
  555. Name: "my-volume",
  556. VolumeSource: v1.VolumeSource{
  557. PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
  558. ClaimName: claim.Name,
  559. ReadOnly: false,
  560. },
  561. },
  562. },
  563. },
  564. },
  565. }
  566. if node.Name != "" {
  567. pod.Spec.NodeName = node.Name
  568. }
  569. if len(node.Selector) != 0 {
  570. pod.Spec.NodeSelector = node.Selector
  571. }
  572. pod, err = cs.CoreV1().Pods(ns).Create(pod)
  573. framework.ExpectNoError(err, "Failed to create pod: %v", err)
  574. return class, claim, pod
  575. }
  576. func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) {
  577. pod := &v1.Pod{
  578. ObjectMeta: metav1.ObjectMeta{
  579. GenerateName: "pvc-volume-tester-",
  580. },
  581. Spec: v1.PodSpec{
  582. Containers: []v1.Container{
  583. {
  584. Name: "volume-tester",
  585. Image: imageutils.GetE2EImage(imageutils.Pause),
  586. VolumeMounts: []v1.VolumeMount{
  587. {
  588. Name: "my-volume",
  589. MountPath: "/mnt/test",
  590. },
  591. },
  592. },
  593. },
  594. RestartPolicy: v1.RestartPolicyNever,
  595. Volumes: []v1.Volume{
  596. {
  597. Name: "my-volume",
  598. VolumeSource: v1.VolumeSource{
  599. PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
  600. ClaimName: pvc.Name,
  601. ReadOnly: false,
  602. },
  603. },
  604. },
  605. },
  606. },
  607. }
  608. if node.Name != "" {
  609. pod.Spec.NodeName = node.Name
  610. }
  611. if len(node.Selector) != 0 {
  612. pod.Spec.NodeSelector = node.Selector
  613. }
  614. return cs.CoreV1().Pods(ns).Create(pod)
  615. }
  616. // checkPodInfo tests that NodePublish was called with expected volume_context
  617. func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo bool) error {
  618. expectedAttributes := map[string]string{
  619. "csi.storage.k8s.io/pod.name": pod.Name,
  620. "csi.storage.k8s.io/pod.namespace": namespace,
  621. "csi.storage.k8s.io/pod.uid": string(pod.UID),
  622. "csi.storage.k8s.io/serviceAccount.name": "default",
  623. }
  624. // Load logs of driver pod
  625. log, err := framework.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
  626. if err != nil {
  627. return fmt.Errorf("could not load CSI driver logs: %s", err)
  628. }
  629. e2elog.Logf("CSI driver logs:\n%s", log)
  630. // Find NodePublish in the logs
  631. foundAttributes := sets.NewString()
  632. logLines := strings.Split(log, "\n")
  633. for _, line := range logLines {
  634. if !strings.HasPrefix(line, "gRPCCall:") {
  635. continue
  636. }
  637. line = strings.TrimPrefix(line, "gRPCCall:")
  638. // Dummy structure that parses just volume_attributes out of logged CSI call
  639. type MockCSICall struct {
  640. Method string
  641. Request struct {
  642. VolumeContext map[string]string `json:"volume_context"`
  643. }
  644. }
  645. var call MockCSICall
  646. err := json.Unmarshal([]byte(line), &call)
  647. if err != nil {
  648. e2elog.Logf("Could not parse CSI driver log line %q: %s", line, err)
  649. continue
  650. }
  651. if call.Method != "/csi.v1.Node/NodePublishVolume" {
  652. continue
  653. }
  654. // Check that NodePublish had expected attributes
  655. for k, v := range expectedAttributes {
  656. vv, found := call.Request.VolumeContext[k]
  657. if found && v == vv {
  658. foundAttributes.Insert(k)
  659. e2elog.Logf("Found volume attribute %s: %s", k, v)
  660. }
  661. }
  662. // Process just the first NodePublish, the rest of the log is useless.
  663. break
  664. }
  665. if expectPodInfo {
  666. if foundAttributes.Len() != len(expectedAttributes) {
  667. return fmt.Errorf("number of found volume attributes does not match, expected %d, got %d", len(expectedAttributes), foundAttributes.Len())
  668. }
  669. return nil
  670. }
  671. if foundAttributes.Len() != 0 {
  672. return fmt.Errorf("some unexpected volume attributes were found: %+v", foundAttributes.List())
  673. }
  674. return nil
  675. }
  676. func waitForCSIDriver(cs clientset.Interface, driverName string) error {
  677. timeout := 4 * time.Minute
  678. e2elog.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
  679. for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
  680. _, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
  681. if !errors.IsNotFound(err) {
  682. return err
  683. }
  684. }
  685. return fmt.Errorf("gave up after waiting %v for CSIDriver %q", timeout, driverName)
  686. }
  687. func destroyCSIDriver(cs clientset.Interface, driverName string) {
  688. driverGet, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
  689. if err == nil {
  690. e2elog.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
  691. // Uncomment the following line to get full dump of CSIDriver object
  692. // e2elog.Logf("%s", framework.PrettyPrint(driverGet))
  693. cs.StorageV1beta1().CSIDrivers().Delete(driverName, nil)
  694. }
  695. }
  696. func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string {
  697. // re-get the claim to the latest state with bound volume
  698. claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
  699. if err != nil {
  700. framework.ExpectNoError(err, "Cannot get PVC")
  701. return ""
  702. }
  703. pvName := claim.Spec.VolumeName
  704. pv, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
  705. if err != nil {
  706. framework.ExpectNoError(err, "Cannot get PV")
  707. return ""
  708. }
  709. if pv.Spec.CSI == nil {
  710. gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil())
  711. return ""
  712. }
  713. return pv.Spec.CSI.VolumeHandle
  714. }