multivolume.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/framework/volume"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)
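
// multiVolumeTestSuite covers access to multiple volumes from a single pod, with and
// without pod recreation, and concurrent access to a single volume from multiple pods,
// as registered in DefineTests below.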
type multiVolumeTestSuite struct {
	tsInfo TestSuiteInfo
}

var _ TestSuite = &multiVolumeTestSuite{}

// InitMultiVolumeTestSuite returns a multiVolumeTestSuite that implements the TestSuite interface
func InitMultiVolumeTestSuite() TestSuite {
	return &multiVolumeTestSuite{
		tsInfo: TestSuiteInfo{
			Name: "multiVolume [Slow]",
			TestPatterns: []testpatterns.TestPattern{
				testpatterns.FsVolModePreprovisionedPV,
				testpatterns.FsVolModeDynamicPV,
				testpatterns.BlockVolModePreprovisionedPV,
				testpatterns.BlockVolModeDynamicPV,
			},
			SupportedSizeRange: volume.SizeRange{
				Min: "1Mi",
			},
		},
	}
}

func (t *multiVolumeTestSuite) GetTestSuiteInfo() TestSuiteInfo {
	return t.tsInfo
}
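
// SkipRedundantSuite delegates to skipVolTypePatterns, treating the pre-provisioned PV
// patterns as the ones that may be redundant for the given driver.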
func (t *multiVolumeTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
	skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.PreprovisionedPV))
}
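
// DefineTests registers the ginkgo tests of this suite for the given driver and test pattern.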
func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
	type local struct {
		config        *PerTestConfig
		driverCleanup func()

		cs clientset.Interface
		ns *v1.Namespace

		driver    TestDriver
		resources []*VolumeResource

		intreeOps   opCounts
		migratedOps opCounts
	}
	var (
		dInfo = driver.GetDriverInfo()
		l     local
	)

	ginkgo.BeforeEach(func() {
		// Check preconditions.
		if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] {
			e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
		}
	})

	// This intentionally comes after checking the preconditions because it
	// registers its own BeforeEach which creates the namespace. Beware that it
	// also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewDefaultFramework("multivolume")

	init := func() {
		l = local{}
		l.ns = f.Namespace
		l.cs = f.ClientSet
		l.driver = driver

		// Now do the more expensive test initialization.
		l.config, l.driverCleanup = driver.PrepareTest(f)
		l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
	}

	cleanup := func() {
		var errs []error
		for _, resource := range l.resources {
			errs = append(errs, resource.CleanupResource())
		}

		errs = append(errs, tryFunc(l.driverCleanup))
		l.driverCleanup = nil
		framework.ExpectNoError(errors.NewAggregate(errs), "while cleanup resource")
		validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
	}
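
	// Each ginkgo.It below calls init() and defers cleanup(), so volume resources,
	// the driver, and the migration op counts are set up and verified per test.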
	// This tests the configuration below:
	//        [pod1]                     same node       [pod2]
	//    [   node1   ]                     ==>      [   node1   ]
	//       /     \     <- same volume mode            /     \
	//  [volume1] [volume2]                       [volume1] [volume2]
	ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func() {
		// Currently, multiple volumes are not generally available for pre-provisioned volumes,
		// because containerized storage servers, such as iSCSI and rbd, just return
		// a static volume from inside the container, rather than creating a new volume per request.
		if pattern.VolType == testpatterns.PreprovisionedPV {
			e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
		}

		init()
		defer cleanup()

		var pvcs []*v1.PersistentVolumeClaim
		numVols := 2

		for i := 0; i < numVols; i++ {
			testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
			resource := CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
			l.resources = append(l.resources, resource)
			pvcs = append(pvcs, resource.Pvc)
		}

		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
			l.config.ClientNodeSelection, pvcs, true /* sameNode */)
	})
	// This tests the configuration below:
	//        [pod1]                  different node       [pod2]
	//    [   node1   ]                     ==>        [   node2   ]
	//       /     \     <- same volume mode              /     \
	//  [volume1] [volume2]                         [volume1] [volume2]
	ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func() {
		// Currently, multiple volumes are not generally available for pre-provisioned volumes,
		// because containerized storage servers, such as iSCSI and rbd, just return
		// a static volume from inside the container, rather than creating a new volume per request.
		if pattern.VolType == testpatterns.PreprovisionedPV {
			e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
		}

		init()
		defer cleanup()

		// Check different-node test requirement
		if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
			e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
		}
		nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
		framework.ExpectNoError(err)
		if len(nodes.Items) < 2 {
			e2eskipper.Skipf("Number of available nodes is less than 2 - skipping")
		}
		if l.config.ClientNodeSelection.Name != "" {
			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
		}
		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
		topologyKeys := dInfo.TopologyKeys
		if len(topologyKeys) != 0 {
			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
				framework.Failf("Error setting topology requirements: %v", err)
			}
		}

		var pvcs []*v1.PersistentVolumeClaim
		numVols := 2

		for i := 0; i < numVols; i++ {
			testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
			resource := CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
			l.resources = append(l.resources, resource)
			pvcs = append(pvcs, resource.Pvc)
		}

		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
			l.config.ClientNodeSelection, pvcs, false /* sameNode */)
	})
	// This tests the configuration below (only the <block, filesystem> pattern is tested):
	//        [pod1]                     same node       [pod2]
	//    [   node1   ]                     ==>      [   node1   ]
	//       /     \     <- different volume mode       /     \
	//  [volume1] [volume2]                       [volume1] [volume2]
	ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func() {
		if pattern.VolMode == v1.PersistentVolumeFilesystem {
			e2eskipper.Skipf("Filesystem volume case should be covered by block volume case -- skipping")
		}

		// Currently, multiple volumes are not generally available for pre-provisioned volumes,
		// because containerized storage servers, such as iSCSI and rbd, just return
		// a static volume from inside the container, rather than creating a new volume per request.
		if pattern.VolType == testpatterns.PreprovisionedPV {
			e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
		}

		init()
		defer cleanup()

		var pvcs []*v1.PersistentVolumeClaim
		numVols := 2

		for i := 0; i < numVols; i++ {
			curPattern := pattern
			if i != 0 {
				// The 1st volume should be block; set filesystem for the 2nd and later volumes
				curPattern.VolMode = v1.PersistentVolumeFilesystem
			}
			testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
			resource := CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
			l.resources = append(l.resources, resource)
			pvcs = append(pvcs, resource.Pvc)
		}

		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
			l.config.ClientNodeSelection, pvcs, true /* sameNode */)
	})
	// This tests the configuration below (only the <block, filesystem> pattern is tested):
	//        [pod1]                  different node       [pod2]
	//    [   node1   ]                     ==>        [   node2   ]
	//       /     \     <- different volume mode         /     \
	//  [volume1] [volume2]                         [volume1] [volume2]
	ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func() {
		if pattern.VolMode == v1.PersistentVolumeFilesystem {
			e2eskipper.Skipf("Filesystem volume case should be covered by block volume case -- skipping")
		}

		// Currently, multiple volumes are not generally available for pre-provisioned volumes,
		// because containerized storage servers, such as iSCSI and rbd, just return
		// a static volume from inside the container, rather than creating a new volume per request.
		if pattern.VolType == testpatterns.PreprovisionedPV {
			e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
		}

		init()
		defer cleanup()

		// Check different-node test requirement
		if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
			e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
		}
		nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
		framework.ExpectNoError(err)
		if len(nodes.Items) < 2 {
			e2eskipper.Skipf("Number of available nodes is less than 2 - skipping")
		}
		if l.config.ClientNodeSelection.Name != "" {
			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
		}
		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
		topologyKeys := dInfo.TopologyKeys
		if len(topologyKeys) != 0 {
			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
				framework.Failf("Error setting topology requirements: %v", err)
			}
		}

		var pvcs []*v1.PersistentVolumeClaim
		numVols := 2

		for i := 0; i < numVols; i++ {
			curPattern := pattern
			if i != 0 {
				// The 1st volume should be block; set filesystem for the 2nd and later volumes
				curPattern.VolMode = v1.PersistentVolumeFilesystem
			}
			testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
			resource := CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
			l.resources = append(l.resources, resource)
			pvcs = append(pvcs, resource.Pvc)
		}

		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
			l.config.ClientNodeSelection, pvcs, false /* sameNode */)
	})
	// This tests the configuration below:
	//        [pod1]  [pod2]
	//       [   node1   ]
	//          \    /     <- same volume mode
	//        [volume1]
	ginkgo.It("should concurrently access the single volume from pods on the same node", func() {
		init()
		defer cleanup()

		numPods := 2

		if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
			e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
		}

		// Create volume
		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
		resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
		l.resources = append(l.resources, resource)

		// Test access to the volume from pods on the same node
		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
	})
	// This tests the configuration below:
	//        [pod1]            [pod2]
	//    [   node1   ]     [   node2   ]
	//           \             /     <- same volume mode
	//            [volume1]
	ginkgo.It("should concurrently access the single volume from pods on different node", func() {
		init()
		defer cleanup()

		numPods := 2

		if !l.driver.GetDriverInfo().Capabilities[CapRWX] {
			e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, CapRWX)
		}

		// Check different-node test requirement
		nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
		framework.ExpectNoError(err)
		if len(nodes.Items) < numPods {
			e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", numPods))
		}
		if l.config.ClientNodeSelection.Name != "" {
			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
		}
		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
		topologyKeys := dInfo.TopologyKeys
		if len(topologyKeys) != 0 {
			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
				framework.Failf("Error setting topology requirements: %v", err)
			}
		}

		// Create volume
		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
		resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
		l.resources = append(l.resources, resource)

		// Test access to the volume from pods on different nodes
		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
	})
}
// testAccessMultipleVolumes tests access to multiple volumes from a single pod on the specified node.
// If readSeedBase > 0, a read test is done before the write/read test, assuming that data has already been written.
func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
	ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
	pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns, pvcs, nil,
		false, "", false, false, e2epv.SELinuxLabel,
		nil, node, framework.PodStartTimeout)
	defer func() {
		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
	}()
	framework.ExpectNoError(err)

	byteLen := 64
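	// Each volume gets its own seed (seed base + volume index), so the data written to
	// volume i can later be verified independently of the other volumes.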
	for i, pvc := range pvcs {
		// CreateSecPodWithNodeSelection makes volumes accessible via /mnt/volume({i} + 1)
		index := i + 1
		path := fmt.Sprintf("/mnt/volume%d", index)
		ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

		if readSeedBase > 0 {
			ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
		}

		ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))

		ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
	}

	pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	return pod.Spec.NodeName
}
// TestAccessMultipleVolumesAcrossPodRecreation tests access to multiple volumes from a single pod,
// then recreates the pod on the same or a different node depending on requiresSameNode,
// and rechecks access to the volumes from the recreated pod.
func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool) {

	// No data has been written to the volumes yet, so pass a negative value to skip the initial read check
	readSeedBase := int64(-1)
	writeSeedBase := time.Now().UTC().UnixNano()
	// Test access to multiple volumes on the specified node
	nodeName := testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)

	// Set affinity depending on requiresSameNode
	if requiresSameNode {
		e2epod.SetAffinity(&node, nodeName)
	} else {
		e2epod.SetAntiAffinity(&node, nodeName)
	}

	// Test access to multiple volumes again on the node updated above.
	// Setting the previous writeSeed as the current readSeed checks that the previously written data is retained.
	readSeedBase = writeSeedBase
	// Update writeSeed with a new value
	writeSeedBase = time.Now().UTC().UnixNano()
	_ = testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)
}
// TestConcurrentAccessToSingleVolume tests access to a single volume from multiple pods,
// then deletes the last pod and rechecks access to the volume to verify that deleting one pod
// doesn't affect the others. Pods are deployed on the same node or different nodes depending on requiresSameNode.
// Read/write checks are done across pods: from pod{n}, the data written by both pod{n-1} and pod{n} is read back.
func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {

	var pods []*v1.Pod

	// Create each pod with the pvc
	for i := 0; i < numPods; i++ {
		index := i + 1
		ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns,
			[]*v1.PersistentVolumeClaim{pvc}, nil,
			false, "", false, false, e2epv.SELinuxLabel,
			nil, node, framework.PodStartTimeout)
		defer func() {
			framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
		}()
		framework.ExpectNoError(err)
		pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		pods = append(pods, pod)
		framework.ExpectNoError(err, fmt.Sprintf("get pod%d", index))
		actualNodeName := pod.Spec.NodeName

		// Set affinity depending on requiresSameNode
		if requiresSameNode {
			e2epod.SetAffinity(&node, actualNodeName)
		} else {
			e2epod.SetAntiAffinity(&node, actualNodeName)
		}
	}
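
	// seed is carried across loop iterations: pod{n} first reads the data written by
	// pod{n-1} with the previous seed, then writes its own data with a fresh seed.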
	var seed int64
	byteLen := 64
	path := "/mnt/volume1"
	// Check if the volume can be accessed from each pod
	for i, pod := range pods {
		index := i + 1
		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

		if i != 0 {
			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
			// The 1st pod skips this read check, because no data has been written yet
			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
		}

		// Update the seed and check if write/read works properly
		seed = time.Now().UTC().UnixNano()

		ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
	}

	// Delete the last pod and remove it from the slice of pods
	if len(pods) < 2 {
		framework.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods))
	}
	lastPod := pods[len(pods)-1]
	framework.ExpectNoError(e2epod.DeletePodWithWait(cs, lastPod))
	pods = pods[:len(pods)-1]

	// Recheck if the pv can be accessed from each pod after the last pod deletion
	for i, pod := range pods {
		index := i + 1
		// index of pod and index of pvc match, because pods are created in the order above
		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")

		if i == 0 {
			// This time the 1st pod should see the data that the last (now deleted) pod wrote
			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index))
		} else {
			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
		}
		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		// Update the seed and check if write/read works properly
		seed = time.Now().UTC().UnixNano()

		ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
	}
}
// getCurrentTopologiesNumber goes through all Nodes and returns the unique driver topologies and the count of Nodes per topology.
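// A topology is a map from topology label key to value; for example, a driver with the
// key "topology.kubernetes.io/zone" on a node labeled with zone "zone-a" would yield the
// topology {"topology.kubernetes.io/zone": "zone-a"} (illustrative key/value).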
func getCurrentTopologiesNumber(cs clientset.Interface, nodes *v1.NodeList, keys []string) ([]topology, []int, error) {
	topos := []topology{}
	topoCount := []int{}

	// TODO: scale?
	for _, n := range nodes.Items {
		topo := map[string]string{}
		for _, k := range keys {
			v, ok := n.Labels[k]
			if ok {
				topo[k] = v
			}
		}

		found := false
		for i, existingTopo := range topos {
			if topologyEqual(existingTopo, topo) {
				found = true
				topoCount[i]++
				break
			}
		}
		if !found {
			framework.Logf("found topology %v", topo)
			topos = append(topos, topo)
			topoCount = append(topoCount, 1)
		}
	}
	return topos, topoCount, nil
}
// ensureTopologyRequirements sets nodeSelection affinity according to the given topology keys for drivers that provide them.
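// With minCount=2, for example, a topology is suitable only if at least two of the given
// ready schedulable nodes share the same values for all topology keys; node affinity is
// then pinned to the first suitable topology found.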
func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.NodeList, cs clientset.Interface, topologyKeys []string, minCount int) error {
	topologyList, topologyCount, err := getCurrentTopologiesNumber(cs, nodes, topologyKeys)
	if err != nil {
		return err
	}
	suitableTopologies := []topology{}
	for i, topo := range topologyList {
		if topologyCount[i] >= minCount {
			suitableTopologies = append(suitableTopologies, topo)
		}
	}
	if len(suitableTopologies) == 0 {
		e2eskipper.Skipf("No topology with at least %d nodes found - skipping", minCount)
	}
	// Take the first suitable topology
	e2epod.SetNodeAffinityTopologyRequirement(nodeSelection, suitableTopologies[0])
	return nil
}