
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

type multiVolumeTestSuite struct {
	tsInfo TestSuiteInfo
}

var _ TestSuite = &multiVolumeTestSuite{}

// InitMultiVolumeTestSuite returns a multiVolumeTestSuite that implements the TestSuite interface.
func InitMultiVolumeTestSuite() TestSuite {
	return &multiVolumeTestSuite{
		tsInfo: TestSuiteInfo{
			name: "multiVolume [Slow]",
			testPatterns: []testpatterns.TestPattern{
				testpatterns.FsVolModePreprovisionedPV,
				testpatterns.FsVolModeDynamicPV,
				testpatterns.BlockVolModePreprovisionedPV,
				testpatterns.BlockVolModeDynamicPV,
			},
		},
	}
}

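// Usage sketch (illustrative, not part of this file): a driver's e2e test file
// typically lists this constructor among the suites it wants to run, e.g.
//
//	var testSuites = []func() testsuites.TestSuite{
//		testsuites.InitMultiVolumeTestSuite,
//		// ... other suites ...
//	}
//
// and passes that slice to the suite registration helper (DefineTestSuite in
// recent releases; the exact helper name may differ between Kubernetes versions).
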
func (t *multiVolumeTestSuite) getTestSuiteInfo() TestSuiteInfo {
	return t.tsInfo
}

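// defineTests registers, for the given driver and test pattern, the multi-volume
// test cases below: multiple volumes used by one pod across pod recreation (same
// and different node, same and mixed volume modes) and one volume shared by
// multiple pods (same and different node).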
func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
	type local struct {
		config      *PerTestConfig
		testCleanup func()

		cs        clientset.Interface
		ns        *v1.Namespace
		driver    TestDriver
		resources []*genericVolumeTestResource

		intreeOps   opCounts
		migratedOps opCounts
	}
	var (
		dInfo = driver.GetDriverInfo()
		l     local
	)

	ginkgo.BeforeEach(func() {
		// Check preconditions.
		if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] {
			framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
		}
	})

	// This intentionally comes after checking the preconditions because it
	// registers its own BeforeEach which creates the namespace. Beware that it
	// also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewDefaultFramework("multivolume")

	init := func() {
		l = local{}
		l.ns = f.Namespace
		l.cs = f.ClientSet
		l.driver = driver

		// Now do the more expensive test initialization.
		l.config, l.testCleanup = driver.PrepareTest(f)
		l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
	}

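	// cleanup deletes the volume resources created by a test case, runs the
	// driver's own cleanup callback, and then compares the in-tree vs. migrated
	// (CSI) volume operation counts recorded since init.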
	cleanup := func() {
		for _, resource := range l.resources {
			resource.cleanupResource()
		}

		if l.testCleanup != nil {
			l.testCleanup()
			l.testCleanup = nil
		}

		validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
	}

	// This tests the following configuration:
	//          [pod1]                            same node       [pod2]
	//      [   node1   ]                           ==>        [   node1   ]
	//          /    \      <- same volume mode                   /    \
	//   [volume1]  [volume2]                              [volume1]  [volume2]
	ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func() {
		// Currently, multiple volumes are not generally available for pre-provisioned volumes,
		// because containerized storage servers, such as iSCSI and rbd, just return
		// a static volume inside the container instead of creating a new volume per request.
		if pattern.VolType == testpatterns.PreprovisionedPV {
			framework.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
		}

		init()
		defer cleanup()

		var pvcs []*v1.PersistentVolumeClaim
		numVols := 2

		for i := 0; i < numVols; i++ {
			resource := createGenericVolumeTestResource(driver, l.config, pattern)
			l.resources = append(l.resources, resource)
			pvcs = append(pvcs, resource.pvc)
		}

		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
			framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
	})

	// This tests the following configuration:
	//          [pod1]                         different node     [pod2]
	//      [   node1   ]                           ==>        [   node2   ]
	//          /    \      <- same volume mode                   /    \
	//   [volume1]  [volume2]                              [volume1]  [volume2]
	ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func() {
		// Currently, multiple volumes are not generally available for pre-provisioned volumes,
		// because containerized storage servers, such as iSCSI and rbd, just return
		// a static volume inside the container instead of creating a new volume per request.
		if pattern.VolType == testpatterns.PreprovisionedPV {
			framework.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
		}

		init()
		defer cleanup()

		// Check the different-node test requirement.
		nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
		if len(nodes.Items) < 2 {
			framework.Skipf("Number of available nodes is less than 2 - skipping")
		}
		if l.config.ClientNodeName != "" {
			framework.Skipf("Driver %q requires deployment on a specific node - skipping", l.driver.GetDriverInfo().Name)
		}

		var pvcs []*v1.PersistentVolumeClaim
		numVols := 2

		for i := 0; i < numVols; i++ {
			resource := createGenericVolumeTestResource(driver, l.config, pattern)
			l.resources = append(l.resources, resource)
			pvcs = append(pvcs, resource.pvc)
		}

		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
			framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */)
	})

	// This tests the following configuration (only the <block, filesystem> pattern is tested):
	//          [pod1]                            same node       [pod2]
	//      [   node1   ]                           ==>        [   node1   ]
	//          /    \      <- different volume mode              /    \
	//   [volume1]  [volume2]                              [volume1]  [volume2]
	ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func() {
		if pattern.VolMode == v1.PersistentVolumeFilesystem {
			framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping")
		}

		// Currently, multiple volumes are not generally available for pre-provisioned volumes,
		// because containerized storage servers, such as iSCSI and rbd, just return
		// a static volume inside the container instead of creating a new volume per request.
		if pattern.VolType == testpatterns.PreprovisionedPV {
			framework.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
		}

		init()
		defer cleanup()

		var pvcs []*v1.PersistentVolumeClaim
		numVols := 2

		for i := 0; i < numVols; i++ {
			curPattern := pattern
			if i != 0 {
				// The 1st volume keeps the block mode from the pattern; use filesystem mode for the 2nd and later volumes.
				curPattern.VolMode = v1.PersistentVolumeFilesystem
			}
			resource := createGenericVolumeTestResource(driver, l.config, curPattern)
			l.resources = append(l.resources, resource)
			pvcs = append(pvcs, resource.pvc)
		}

		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
			framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
	})

	// This tests the following configuration (only the <block, filesystem> pattern is tested):
	//          [pod1]                         different node     [pod2]
	//      [   node1   ]                           ==>        [   node2   ]
	//          /    \      <- different volume mode              /    \
	//   [volume1]  [volume2]                              [volume1]  [volume2]
	ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func() {
		if pattern.VolMode == v1.PersistentVolumeFilesystem {
			framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping")
		}

		// Currently, multiple volumes are not generally available for pre-provisioned volumes,
		// because containerized storage servers, such as iSCSI and rbd, just return
		// a static volume inside the container instead of creating a new volume per request.
		if pattern.VolType == testpatterns.PreprovisionedPV {
			framework.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
		}

		init()
		defer cleanup()

		// Check the different-node test requirement.
		nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
		if len(nodes.Items) < 2 {
			framework.Skipf("Number of available nodes is less than 2 - skipping")
		}
		if l.config.ClientNodeName != "" {
			framework.Skipf("Driver %q requires deployment on a specific node - skipping", l.driver.GetDriverInfo().Name)
		}

		var pvcs []*v1.PersistentVolumeClaim
		numVols := 2

		for i := 0; i < numVols; i++ {
			curPattern := pattern
			if i != 0 {
				// The 1st volume keeps the block mode from the pattern; use filesystem mode for the 2nd and later volumes.
				curPattern.VolMode = v1.PersistentVolumeFilesystem
			}
			resource := createGenericVolumeTestResource(driver, l.config, curPattern)
			l.resources = append(l.resources, resource)
			pvcs = append(pvcs, resource.pvc)
		}

		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
			framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */)
	})

	// This tests the following configuration:
	//        [pod1]  [pod2]
	//      [     node1     ]
	//          \      /     <- same volume mode
	//          [volume1]
	ginkgo.It("should concurrently access the single volume from pods on the same node", func() {
		init()
		defer cleanup()

		numPods := 2

		if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
			framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
		}

		// Create the volume.
		resource := createGenericVolumeTestResource(l.driver, l.config, pattern)
		l.resources = append(l.resources, resource)

		// Test access to the volume from pods on the same node.
		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
			framework.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, true /* sameNode */)
	})

	// This tests the following configuration:
	//          [pod1]           [pod2]
	//      [   node1   ]    [   node2   ]
	//               \          /     <- same volume mode
	//                [volume1]
	ginkgo.It("should concurrently access the single volume from pods on different node", func() {
		init()
		defer cleanup()

		numPods := 2

		if !l.driver.GetDriverInfo().Capabilities[CapRWX] {
			framework.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, CapRWX)
		}

		// Check the different-node test requirement.
		nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
		if len(nodes.Items) < numPods {
			framework.Skipf("Number of available nodes is less than %d - skipping", numPods)
		}
		if l.config.ClientNodeName != "" {
			framework.Skipf("Driver %q requires deployment on a specific node - skipping", l.driver.GetDriverInfo().Name)
		}

		// Create the volume.
		resource := createGenericVolumeTestResource(l.driver, l.config, pattern)
		l.resources = append(l.resources, resource)

		// Test access to the volume from pods on different nodes.
		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
			framework.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, false /* sameNode */)
	})
}

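// The helpers below verify data persistence with seeded pseudo-random data:
// each write uses a fresh seed derived from the current time, the matching read
// uses the same seed, and a later pass reuses the previous write seed as its
// read seed to confirm that data written earlier (by a previous pod, or by the
// pod before it was recreated) is still readable.
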
// testAccessMultipleVolumes tests access to multiple volumes from a single pod on the specified node.
// If readSeedBase > 0, read tests are done before the write/read tests, assuming that data has already been written.
func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string,
	node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
	ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
	pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pvcs,
		false, "", false, false, framework.SELinuxLabel,
		nil, node, framework.PodStartTimeout)
	defer func() {
		framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
	}()
	framework.ExpectNoError(err)

	byteLen := 64
	for i, pvc := range pvcs {
		// CreateSecPodWithNodeSelection makes volumes accessible via /mnt/volume({i} + 1)
		index := i + 1
		path := fmt.Sprintf("/mnt/volume%d", index)
		ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)

		if readSeedBase > 0 {
			ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
			utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
		}

		ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))

		ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
	}

	pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	return pod.Spec.NodeName
}

// TestAccessMultipleVolumesAcrossPodRecreation tests access to multiple volumes from a single pod,
// then recreates the pod on the same or a different node depending on requiresSameNode,
// and rechecks access to the volumes from the recreated pod.
func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs clientset.Interface, ns string,
	node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool) {
	// No data has been written to the volumes yet, so pass a negative readSeedBase.
	readSeedBase := int64(-1)
	writeSeedBase := time.Now().UTC().UnixNano()
	// Test access to multiple volumes on the specified node.
	nodeName := testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)

	// Set affinity depending on requiresSameNode.
	if requiresSameNode {
		framework.SetAffinity(&node, nodeName)
	} else {
		framework.SetAntiAffinity(&node, nodeName)
	}

	// Test access to multiple volumes again on the node updated above.
	// Set the previous writeSeed as the new readSeed to check that previously written data is retained.
	readSeedBase = writeSeedBase
	// Update writeSeed with a new value.
	writeSeedBase = time.Now().UTC().UnixNano()
	_ = testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)
}

// TestConcurrentAccessToSingleVolume tests access to a single volume from multiple pods,
// then deletes the last pod and rechecks access to the volume to verify that deleting one
// pod doesn't affect the others. Pods are deployed on the same node or on different nodes
// depending on requiresSameNode. Read/write checks are done across pods: from pod{n} it is
// checked that both what pod{n-1} and what pod{n} wrote can be read back.
func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
	node framework.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
	var pods []*v1.Pod

	// Create each pod with the pvc.
	for i := 0; i < numPods; i++ {
		index := i + 1
		ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
		pod, err := framework.CreateSecPodWithNodeSelection(cs, ns,
			[]*v1.PersistentVolumeClaim{pvc},
			false, "", false, false, framework.SELinuxLabel,
			nil, node, framework.PodStartTimeout)
		defer func() {
			framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
		}()
		framework.ExpectNoError(err)
		pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
		pods = append(pods, pod)
		framework.ExpectNoError(err, fmt.Sprintf("get pod%d", index))
		actualNodeName := pod.Spec.NodeName

		// Set affinity depending on requiresSameNode.
		if requiresSameNode {
			framework.SetAffinity(&node, actualNodeName)
		} else {
			framework.SetAntiAffinity(&node, actualNodeName)
		}
	}

	var seed int64
	byteLen := 64
	path := "/mnt/volume1"
	// Check if the volume can be accessed from each pod.
	for i, pod := range pods {
		index := i + 1
		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)

		if i != 0 {
			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d wrote works properly", index, index-1))
			// The 1st pod skips this check because no data has been written yet.
			utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
		}

		// Update the seed and check if write/read works properly.
		seed = time.Now().UTC().UnixNano()
		ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
	}

	// Delete the last pod and remove it from the slice of pods.
	if len(pods) < 2 {
		framework.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods))
	}
	lastPod := pods[len(pods)-1]
	framework.ExpectNoError(framework.DeletePodWithWait(f, cs, lastPod))
	pods = pods[:len(pods)-1]

	// Recheck if the volume can be accessed from each remaining pod after the last pod's deletion.
	for i, pod := range pods {
		index := i + 1
		// Pod index and write order match because of the way the pods are created above.
		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)

		if i == 0 {
			// This time there should be data that the last pod wrote, readable from the 1st pod.
			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that the last pod wrote works properly", index))
		} else {
			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d wrote works properly", index, index-1))
		}
		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		// Update the seed and check if write/read works properly.
		seed = time.Now().UTC().UnixNano()
		ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
	}
}