provisioning.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	"k8s.io/kubernetes/test/e2e/framework/volume"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// StorageClassTest represents parameters to be used by provisioning tests.
// Not all parameters are used by all tests.
type StorageClassTest struct {
	Client               clientset.Interface
	Claim                *v1.PersistentVolumeClaim
	Class                *storagev1.StorageClass
	Name                 string
	CloudProviders       []string
	Provisioner          string
	StorageClassName     string
	Parameters           map[string]string
	DelayBinding         bool
	ClaimSize            string
	ExpectedSize         string
	PvCheck              func(claim *v1.PersistentVolumeClaim)
	VolumeMode           v1.PersistentVolumeMode
	AllowVolumeExpansion bool
}
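// A minimal usage sketch (hypothetical values; cs, claim and sc stand in for a
// real client, PVC and StorageClass that a caller would obtain elsewhere):
// populate the required fields, attach an optional PvCheck, then run
// TestDynamicProvisioning:
//
//	t := StorageClassTest{
//		Client:       cs,
//		Claim:        claim,
//		Class:        sc,
//		ClaimSize:    "1Gi",
//		ExpectedSize: "1Gi",
//		PvCheck: func(claim *v1.PersistentVolumeClaim) {
//			PVWriteReadSingleNodeCheck(cs, claim, framework.NodeSelection{})
//		},
//	}
//	t.TestDynamicProvisioning()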
type provisioningTestSuite struct {
	tsInfo TestSuiteInfo
}

var _ TestSuite = &provisioningTestSuite{}

// InitProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
func InitProvisioningTestSuite() TestSuite {
	return &provisioningTestSuite{
		tsInfo: TestSuiteInfo{
			name: "provisioning",
			testPatterns: []testpatterns.TestPattern{
				testpatterns.DefaultFsDynamicPV,
				testpatterns.NtfsDynamicPV,
			},
		},
	}
}
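// Hedged sketch of how a driver's e2e test typically wires this suite in. The
// DefineTestSuite helper and the csiTestSuites slice are assumptions taken
// from the surrounding e2e framework; they are not defined in this file:
//
//	var csiTestSuites = []func() TestSuite{
//		InitProvisioningTestSuite,
//	}
//	// inside a ginkgo.Context registered per driver:
//	DefineTestSuite(driver, csiTestSuites)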
func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
	return p.tsInfo
}

func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
	type local struct {
		config      *PerTestConfig
		testCleanup func()

		testCase *StorageClassTest
		cs       clientset.Interface
		pvc      *v1.PersistentVolumeClaim
		sc       *storagev1.StorageClass

		intreeOps   opCounts
		migratedOps opCounts
	}
	var (
		dInfo   = driver.GetDriverInfo()
		dDriver DynamicPVTestDriver
		l       local
	)

	ginkgo.BeforeEach(func() {
		// Check preconditions.
		if pattern.VolType != testpatterns.DynamicPV {
			framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType)
		}
		ok := false
		dDriver, ok = driver.(DynamicPVTestDriver)
		if !ok {
			framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
		}
	})
	// This intentionally comes after checking the preconditions because it
	// registers its own BeforeEach which creates the namespace. Beware that it
	// also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewDefaultFramework("provisioning")

	init := func() {
		l = local{}

		// Now do the more expensive test initialization.
		l.config, l.testCleanup = driver.PrepareTest(f)
		l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
		l.cs = l.config.Framework.ClientSet
		claimSize := dDriver.GetClaimSize()
		l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType)
		if l.sc == nil {
			framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
		}
		l.pvc = getClaim(claimSize, l.config.Framework.Namespace.Name)
		l.pvc.Spec.StorageClassName = &l.sc.Name
  115. e2elog.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)
  116. l.testCase = &StorageClassTest{
  117. Client: l.config.Framework.ClientSet,
  118. Claim: l.pvc,
  119. Class: l.sc,
  120. ClaimSize: claimSize,
  121. ExpectedSize: claimSize,
  122. }
  123. }
	cleanup := func() {
		if l.testCleanup != nil {
			l.testCleanup()
			l.testCleanup = nil
		}

		validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
	}

	ginkgo.It("should provision storage with defaults", func() {
		init()
		defer cleanup()

		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
			PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
		}
		l.testCase.TestDynamicProvisioning()
	})
  139. ginkgo.It("should provision storage with mount options", func() {
  140. if dInfo.SupportedMountOption == nil {
  141. framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
  142. }
  143. init()
  144. defer cleanup()
  145. l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
  146. l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
  147. PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
  148. }
  149. l.testCase.TestDynamicProvisioning()
  150. })
  151. ginkgo.It("should access volume from different nodes", func() {
  152. init()
  153. defer cleanup()
  154. // The assumption is that if the test hasn't been
  155. // locked onto a single node, then the driver is
  156. // usable on all of them *and* supports accessing a volume
  157. // from any node.
  158. if l.config.ClientNodeName != "" {
  159. framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name)
  160. }
  161. // Ensure that we actually have more than one node.
  162. nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
  163. if len(nodes.Items) <= 1 {
  164. framework.Skipf("need more than one node - skipping")
  165. }
  166. l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
  167. PVMultiNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
  168. }
  169. l.testCase.TestDynamicProvisioning()
  170. })
  171. ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
  172. if !dInfo.Capabilities[CapDataSource] {
  173. framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
  174. }
  175. sDriver, ok := driver.(SnapshottableTestDriver)
  176. if !ok {
  177. framework.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
  178. }
  179. init()
  180. defer cleanup()
  181. dc := l.config.Framework.DynamicClient
  182. vsc := sDriver.GetSnapshotClass(l.config)
  183. dataSource, cleanupFunc := prepareDataSourceForProvisioning(framework.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
  184. defer cleanupFunc()
  185. l.pvc.Spec.DataSource = dataSource
  186. l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
  187. ginkgo.By("checking whether the created volume has the pre-populated data")
  188. command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
  189. RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName})
  190. }
  191. l.testCase.TestDynamicProvisioning()
  192. })
  193. }
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest
func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
	client := t.Client
	gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required")
	claim := t.Claim
	gomega.Expect(claim).NotTo(gomega.BeNil(), "StorageClassTest.Claim is required")
	class := t.Class

	var err error
	if class != nil {
		gomega.Expect(*claim.Spec.StorageClassName).To(gomega.Equal(class.Name))
		ginkgo.By("creating a StorageClass " + class.Name)
		_, err = client.StorageV1().StorageClasses().Create(class)
		// The "should provision storage with snapshot data source" test already has created the class.
		// TODO: make class creation optional and remove the IsAlreadyExists exception
		gomega.Expect(err == nil || apierrs.IsAlreadyExists(err)).To(gomega.Equal(true))
		class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		defer func() {
			e2elog.Logf("deleting storage class %s", class.Name)
			framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
		}()
	}

	ginkgo.By("creating a claim")
	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
	framework.ExpectNoError(err)
	defer func() {
		e2elog.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
		// typically this claim has already been deleted
		err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
		if err != nil && !apierrs.IsNotFound(err) {
			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
		}
	}()

	// Run the checker
	if t.PvCheck != nil {
		t.PvCheck(claim)
	}

	pv := t.checkProvisioning(client, claim, class)

	ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))

	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
	// Retain, there's no use waiting because the PV won't be auto-deleted and
	// it's expected for the caller to do it.) Technically, the first few delete
	// attempts may fail, as the volume is still attached to a node because
	// kubelet is slowly cleaning up the previous pod, however it should succeed
	// in a couple of minutes. Wait 20 minutes to recover from random cloud
	// hiccups.
	if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
		ginkgo.By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
		framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
	}

	return pv
}
// checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	framework.ExpectNoError(err)

	ginkgo.By("checking the claim")
	pv, err := framework.GetBoundPV(client, claim)
	framework.ExpectNoError(err)

	// Check sizes
	expectedCapacity := resource.MustParse(t.ExpectedSize)
	pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
	gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity")

	requestedCapacity := resource.MustParse(t.ClaimSize)
	claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	gomega.Expect(claimCapacity.Value()).To(gomega.Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")
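	// Note that the comparisons above are on canonical byte values: for example,
	// resource.MustParse("1Gi").Value() is 1073741824 (2^30), so the sizes "1Gi"
	// and "1073741824" compare equal here even though the strings differ.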
	// Check PV properties
	ginkgo.By("checking the PV")

	// Every access mode in PV should be in PVC
	gomega.Expect(pv.Spec.AccessModes).NotTo(gomega.BeZero())
	for _, pvMode := range pv.Spec.AccessModes {
		found := false
		for _, pvcMode := range claim.Spec.AccessModes {
			if pvMode == pvcMode {
				found = true
				break
			}
		}
		gomega.Expect(found).To(gomega.BeTrue())
	}

	gomega.Expect(pv.Spec.ClaimRef.Name).To(gomega.Equal(claim.ObjectMeta.Name))
	gomega.Expect(pv.Spec.ClaimRef.Namespace).To(gomega.Equal(claim.ObjectMeta.Namespace))
	if class == nil {
		gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimDelete))
	} else {
		gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*class.ReclaimPolicy))
		gomega.Expect(pv.Spec.MountOptions).To(gomega.Equal(class.MountOptions))
	}
	if claim.Spec.VolumeMode != nil {
		gomega.Expect(pv.Spec.VolumeMode).NotTo(gomega.BeNil())
		gomega.Expect(*pv.Spec.VolumeMode).To(gomega.Equal(*claim.Spec.VolumeMode))
	}
	return pv
}
// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node
// and returns the PV.
//
// It starts two pods:
// - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
// - The second pod runs grep 'hello world' on /mnt/test on the same node.
//
// The node is selected by Kubernetes when scheduling the first
// pod. It's then selected via its name for the second pod.
//
// If both succeed, Kubernetes actually allocated something that is
// persistent across pods.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume {
	ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
	command := "echo 'hello world' > /mnt/test/data"
	pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
	defer func() {
		// pod might be nil now.
		StopPod(client, pod)
	}()
	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	actualNodeName := runningPod.Spec.NodeName
	StopPod(client, pod)
	pod = nil // Don't stop twice.

	// Get a new copy of the PV
	volume, err := framework.GetBoundPV(client, claim)
	framework.ExpectNoError(err)

	ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
	command = "grep 'hello world' /mnt/test/data"

	// We give the second pod the additional responsibility of checking the volume has
	// been mounted with the PV's mount options, if the PV was provisioned with any
	for _, option := range volume.Spec.MountOptions {
		// Get entry, get mount options at 6th word, replace brackets with commas
		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
	}
	command += " || (mount | grep 'on /mnt/test'; false)"
	if framework.NodeOSDistroIs("windows") {
		command = "select-string 'hello world' /mnt/test/data"
	}
	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, framework.NodeSelection{Name: actualNodeName})

	return volume
}
// PVMultiNodeCheck checks that a PV retains data when moved between nodes.
//
// It starts these pods:
// - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
// - The second pod runs grep 'hello world' on /mnt/test on another node.
//
// The first node is selected by Kubernetes when scheduling the first pod. The
// second pod uses the same criteria, except that a special anti-affinity for
// the first node gets added. This test can only pass if the cluster has more
// than one suitable node. The caller has to ensure that.
//
// If all of this succeeds, Kubernetes actually allocated something that is
// persistent across pods and across nodes.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) {
	gomega.Expect(node.Name).To(gomega.Equal(""), "this test only works when not locked onto a single node")

	var pod *v1.Pod
	defer func() {
		// passing pod = nil is okay.
		StopPod(client, pod)
	}()

	ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
	command := "echo 'hello world' > /mnt/test/data"
	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	actualNodeName := runningPod.Spec.NodeName
	StopPod(client, pod)
	pod = nil // Don't stop twice.

	// Add node-anti-affinity.
	secondNode := node
	framework.SetAntiAffinity(&secondNode, actualNodeName)

	ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
	command = "grep 'hello world' /mnt/test/data"
	if framework.NodeOSDistroIs("windows") {
		command = "select-string 'hello world' /mnt/test/data"
	}
	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
	runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName), "second pod should have run on a different node")
	StopPod(client, pod)
	pod = nil
}
func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
	pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC([]*v1.PersistentVolumeClaim{t.Claim}, nodeSelector, expectUnschedulable)
	if pvs == nil {
		return nil, node
	}
	return pvs[0], node
}
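// Hedged usage sketch: the WaitForFirstConsumer binding mode on the class is
// an assumption the caller must set up; it is not done in this file:
//
//	mode := storagev1.VolumeBindingWaitForFirstConsumer
//	t.Class.VolumeBindingMode = &mode
//	// expect scheduling and binding to succeed without a node selector:
//	pv, node := t.TestBindingWaitForFirstConsumer(nil /* nodeSelector */, false /* expectUnschedulable */)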
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
	var err error
	gomega.Expect(len(claims)).ToNot(gomega.Equal(0))
	namespace := claims[0].Namespace

	ginkgo.By("creating a storage class " + t.Class.Name)
	class, err := t.Client.StorageV1().StorageClasses().Create(t.Class)
	framework.ExpectNoError(err)
	defer deleteStorageClass(t.Client, class.Name)

	ginkgo.By("creating claims")
	var claimNames []string
	var createdClaims []*v1.PersistentVolumeClaim
	for _, claim := range claims {
		c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
		claimNames = append(claimNames, c.Name)
		createdClaims = append(createdClaims, c)
		framework.ExpectNoError(err)
	}
	defer func() {
		errors := map[string]error{} // must be initialized; writing to a nil map would panic
		for _, claim := range createdClaims {
			err := framework.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
			if err != nil {
				errors[claim.Name] = err
			}
		}
		if len(errors) > 0 {
			for claimName, err := range errors {
				e2elog.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
			}
		}
	}()
	// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound, i.e. the Wait errors out.
	ginkgo.By("checking the claims are in pending state")
	err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
	framework.ExpectError(err)
	verifyPVCsPending(t.Client, createdClaims)

	ginkgo.By("creating a pod referring to the claims")
	// Create a pod referring to the claim and wait for it to get to running
	var pod *v1.Pod
	if expectUnschedulable {
		pod, err = framework.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
	} else {
		pod, err = framework.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
	}
	framework.ExpectNoError(err)
	defer func() {
		framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
		framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
	}()
	if expectUnschedulable {
		// Verify that no claims are provisioned.
		verifyPVCsPending(t.Client, createdClaims)
		return nil, nil
	}

	// collect node details
	node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
	framework.ExpectNoError(err)
  445. ginkgo.By("re-checking the claims to see they binded")
  446. var pvs []*v1.PersistentVolume
  447. for _, claim := range createdClaims {
  448. // Get new copy of the claim
  449. claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
  450. framework.ExpectNoError(err)
  451. // make sure claim did bind
  452. err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
  453. framework.ExpectNoError(err)
  454. pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
  455. framework.ExpectNoError(err)
  456. pvs = append(pvs, pv)
  457. }
  458. gomega.Expect(len(pvs)).To(gomega.Equal(len(createdClaims)))
  459. return pvs, node
  460. }
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
// It starts, checks, collects output and stops it.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) {
	pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
	defer StopPod(c, pod)
	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory.
// The caller is responsible for checking the pod and deleting it.
func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) *v1.Pod {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName + "-",
			Labels: map[string]string{
				"app": podName,
			},
		},
		Spec: v1.PodSpec{
			NodeName:     node.Name,
			NodeSelector: node.Selector,
			Affinity:     node.Affinity,
			Containers: []v1.Container{
				{
					Name:    "volume-tester",
					Image:   volume.GetTestImage(framework.BusyBoxImage),
					Command: volume.GenerateScriptCmd(command),
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "my-volume",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
							ReadOnly:  false,
						},
					},
				},
			},
		},
	}

	pod, err := c.CoreV1().Pods(ns).Create(pod)
	framework.ExpectNoError(err, "Failed to create pod: %v", err)
	return pod
}
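// Sketch of the expected pairing (hypothetical claim/pod names): callers stop
// the pod themselves, typically with a deferred StopPod right after the start
// succeeds, which is exactly what RunInPodWithVolume above does:
//
//	pod := StartInPodWithVolume(c, ns, "my-claim", "tester", "sleep 3600", framework.NodeSelection{})
//	defer StopPod(c, pod)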
// StopPod first tries to log the output of the pod's container, then deletes the pod.
func StopPod(c clientset.Interface, pod *v1.Pod) {
	if pod == nil {
		return
	}
	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw()
	if err != nil {
		e2elog.Logf("Error getting logs for pod %s: %v", pod.Name, err)
	} else {
		e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body)
	}
	framework.DeletePodOrFail(c, pod.Namespace, pod.Name)
}

func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
	for _, claim := range pvcs {
		// Get new copy of the claim
		claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
	}
}
func prepareDataSourceForProvisioning(
	node framework.NodeSelection,
	client clientset.Interface,
	dynamicClient dynamic.Interface,
	initClaim *v1.PersistentVolumeClaim,
	class *storagev1.StorageClass,
	snapshotClass *unstructured.Unstructured,
) (*v1.TypedLocalObjectReference, func()) {
	var err error
	if class != nil {
		ginkgo.By("[Initialize dataSource]creating a StorageClass " + class.Name)
		_, err = client.StorageV1().StorageClasses().Create(class)
		framework.ExpectNoError(err)
	}

	ginkgo.By("[Initialize dataSource]creating an initClaim")
	updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(initClaim)
	framework.ExpectNoError(err)
	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, updatedClaim.Namespace, updatedClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	framework.ExpectNoError(err)

	ginkgo.By("[Initialize dataSource]checking the initClaim")
	// Get new copy of the initClaim
	_, err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Get(updatedClaim.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)

	// write namespace to the /mnt/test (= the volume).
	ginkgo.By("[Initialize dataSource]write data to volume")
	command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", updatedClaim.GetNamespace())
	RunInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", command, node)

	ginkgo.By("[Initialize dataSource]creating a SnapshotClass")
	snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{})
	// Check the error; the original left it unchecked before reuse below.
	framework.ExpectNoError(err)

	ginkgo.By("[Initialize dataSource]creating a snapshot")
	snapshot := getSnapshot(updatedClaim.Name, updatedClaim.Namespace, snapshotClass.GetName())
	snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Create(snapshot, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	// Capture the result of the wait; otherwise the ExpectNoError below would
	// re-check the stale err from the Create call above.
	err = WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
	framework.ExpectNoError(err)

	ginkgo.By("[Initialize dataSource]checking the snapshot")
	// Get new copy of the snapshot
	snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
	framework.ExpectNoError(err)

	group := "snapshot.storage.k8s.io"
	dataSourceRef := &v1.TypedLocalObjectReference{
		APIGroup: &group,
		Kind:     "VolumeSnapshot",
		Name:     snapshot.GetName(),
	}

	cleanupFunc := func() {
		e2elog.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
		err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Delete(snapshot.GetName(), nil)
		if err != nil && !apierrs.IsNotFound(err) {
			framework.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err)
		}

		e2elog.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name)
		err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(updatedClaim.Name, nil)
		if err != nil && !apierrs.IsNotFound(err) {
			framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
		}

		e2elog.Logf("deleting SnapshotClass %s", snapshotClass.GetName())
		framework.ExpectNoError(dynamicClient.Resource(snapshotClassGVR).Delete(snapshotClass.GetName(), nil))
	}

	return dataSourceRef, cleanupFunc
}