vsphere_volume_placement.go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"fmt"
	"strconv"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)
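
// This suite verifies vSphere volume placement: VMDK-backed volumes are
// created through the vSphere cloud provider, pods are pinned to labeled
// worker nodes via NodeSelector, and attach/detach behavior is checked
// across back-to-back pod create/delete cycles on the same and on
// different nodes.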
var _ = utils.SIGDescribe("Volume Placement", func() {
	f := framework.NewDefaultFramework("volume-placement")
	const (
		NodeLabelKey = "vsphere_e2e_label_volume_placement"
	)
	var (
		c                  clientset.Interface
		ns                 string
		volumePaths        []string
		node1Name          string
		node1KeyValueLabel map[string]string
		node2Name          string
		node2KeyValueLabel map[string]string
		isNodeLabeled      bool
		nodeInfo           *NodeInfo
		vsp                *VSphere
	)
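
	// On the first spec, label two schedulable nodes so pods can be pinned to
	// them. Before every spec, create a fresh VMDK on the default datastore
	// and record its path in volumePaths for cleanup.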
	ginkgo.BeforeEach(func() {
		framework.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		c = f.ClientSet
		ns = f.Namespace.Name
		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
		if !isNodeLabeled {
			node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
			isNodeLabeled = true
			nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
			vsp = nodeInfo.VSphere
		}
		ginkgo.By("creating vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)
	})
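
	// Delete every VMDK recorded during the spec, including volumes the test
	// bodies created themselves.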
	ginkgo.AfterEach(func() {
		for _, volumePath := range volumePaths {
			vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
		}
		volumePaths = nil
	})
	/*
		Steps
		1. Remove labels assigned to node 1 and node 2.
		2. Delete the VMDK volume.
	*/
	framework.AddCleanupAction(func() {
		// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
		if len(ns) > 0 {
			if len(node1KeyValueLabel) > 0 {
				framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
			}
			if len(node2KeyValueLabel) > 0 {
				framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
			}
		}
	})
	/*
		Steps
		1. Create pod spec with the volume path of the vmdk and NodeSelector set to the label assigned to node1.
		2. Create pod and wait for pod to become ready.
		3. Verify volume is attached to node1.
		4. Create empty file on the volume to verify the volume is writable.
		5. Verify newly created file and previously created files exist on the volume.
		6. Delete pod.
		7. Wait for volume to be detached from node1.
		8. Repeat steps 1 to 7 to make sure back-to-back pod creation on the same worker node with the same volume works as expected.
	*/
	ginkgo.It("should create and delete pod with the same volume source on the same worker node", func() {
		var volumeFiles []string
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify the volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify the volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
	})
	/*
		Steps
		1. Create pod spec with the volume path of vmdk1 and NodeSelector set to node1's label.
		2. Create pod and wait for pod to become ready.
		3. Verify volume is attached to node1.
		4. Create empty file on the volume to verify the volume is writable.
		5. Verify newly created file and previously created files exist on the volume.
		6. Delete pod.
		7. Wait for volume to be detached from node1.
		8. Create pod spec with the volume path of vmdk1 and NodeSelector set to node2's label.
		9. Create pod and wait for pod to become ready.
		10. Verify volume is attached to node2.
		11. Create empty file on the volume to verify the volume is writable.
		12. Verify newly created file and previously created files exist on the volume.
		13. Delete pod.
	*/
	ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
		var volumeFiles []string
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify the volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on another node: %v", node2Name))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)
		newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)

		// Create empty files on the mounted volumes on the pod to verify the volume is writable
		// Verify newly and previously created files are present on the volume mounted on the pod
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths)
	})
	/*
		Test multiple volumes from the same datastore within the same pod
		1. Create volume vmdk2.
		2. Create pod spec with the volume paths of vmdk1 (created in test setup) and vmdk2.
		3. Create pod using the spec created in step 2 and wait for pod to become ready.
		4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
		5. Delete pod.
		6. Wait for vmdk1 and vmdk2 to be detached from the node.
		7. Create pod using the spec created in step 2 and wait for pod to become ready.
		8. Verify both volumes are attached to the node on which the pod is created. Verify volume contents match the content written in step 4.
		9. Delete pod.
		10. Wait for vmdk1 and vmdk2 to be detached from the node.
	*/
	ginkgo.It("should create and delete pod with multiple volumes from same datastore", func() {
		ginkgo.By("creating another vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify the volumes are writable
		// Verify newly and previously created files are present on the volumes mounted on the pod
		volumeFiles := []string{
			fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
		}
		createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify the volumes are writable
		// Verify newly and previously created files are present on the volumes mounted on the pod
		newEmptyFileNames := []string{
			fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
		}
		volumeFiles = append(volumeFiles, newEmptyFileNames[0])
		volumeFiles = append(volumeFiles, newEmptyFileNames[1])
		createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
	})
	/*
		Test multiple volumes from different datastores within the same pod
		1. Create volume vmdk2 on a non-default shared datastore.
		2. Create pod spec with the volume paths of vmdk1 (created in test setup on the default datastore) and vmdk2.
		3. Create pod using the spec created in step 2 and wait for pod to become ready.
		4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
		5. Delete pod.
		6. Wait for vmdk1 and vmdk2 to be detached from the node.
		7. Create pod using the spec created in step 2 and wait for pod to become ready.
		8. Verify both volumes are attached to the node on which the pod is created. Verify volume contents match the content written in step 4.
		9. Delete pod.
		10. Wait for vmdk1 and vmdk2 to be detached from the node.
	*/
	ginkgo.It("should create and delete pod with multiple volumes from different datastore", func() {
		ginkgo.By("creating another vmdk on non default shared datastore")
		volumeOptions := new(VolumeOptions)
		volumeOptions.CapacityKB = 2097152
		volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
		volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
		volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify the volumes are writable
		// Verify newly and previously created files are present on the volumes mounted on the pod
		volumeFiles := []string{
			fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
		}
		createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes on the pod to verify the volumes are writable
		// Verify newly and previously created files are present on the volumes mounted on the pod
		newEmptyFileNames := []string{
			fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
		}
		volumeFiles = append(volumeFiles, newEmptyFileNames[0])
		volumeFiles = append(volumeFiles, newEmptyFileNames[1])
		createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
	})
	/*
		Test back-to-back pod creation/deletion with different volume sources on the same worker node
		1. Create volume vmdk2.
		2. Create pod spec pod-SpecA with the volume path of vmdk1 and NodeSelector set to the label assigned to node1.
		3. Create pod spec pod-SpecB with the volume path of vmdk2 and NodeSelector set to the label assigned to node1.
		4. Create pod-A using pod-SpecA and wait for pod to become ready.
		5. Create pod-B using pod-SpecB and wait for pod to become ready.
		6. Verify volumes are attached to the node.
		7. Create empty file on the volume to make sure the volume is accessible. (Perform this step on pod-A and pod-B.)
		8. Verify the file created in step 7 is present on the volume. (Perform this step on pod-A and pod-B.)
		9. Delete pod-A and pod-B.
		10. Repeatedly (5 times) perform steps 4 to 9 and verify the associated volume contents match.
		11. Wait for vmdk1 and vmdk2 to be detached from the node.
	*/
	ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func() {
		var (
			podA                *v1.Pod
			podB                *v1.Pod
			testvolumePathsPodA []string
			testvolumePathsPodB []string
			podAFiles           []string
			podBFiles           []string
		)

		defer func() {
			ginkgo.By("clean up undeleted pods")
			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name)
			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
			ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
			for _, volumePath := range volumePaths {
				framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
			}
		}()

		testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])

		// Create another VMDK volume
		ginkgo.By("creating another vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)
		testvolumePathsPodB = append(testvolumePathsPodB, volumePath)

		for index := 0; index < 5; index++ {
			ginkgo.By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
			podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)

			ginkgo.By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
			podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)

			podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
			podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
			podAFiles = append(podAFiles, podAFileName)
			podBFiles = append(podBFiles, podBFileName)

			// Create empty files on the mounted volumes on the pods to verify the volumes are writable
			ginkgo.By("Creating empty file on volume mounted on pod-A")
			framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
			ginkgo.By("Creating empty file on volume mounted on pod-B")
			framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)

			// Verify newly and previously created files are present on the volumes mounted on the pods
			ginkgo.By("Verify newly created file and previously created files are present on volume mounted on pod-A")
			verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...)
			ginkgo.By("Verify newly created file and previously created files are present on volume mounted on pod-B")
			verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...)

			ginkgo.By("Deleting pod-A")
			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name)
			ginkgo.By("Deleting pod-B")
			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name)
		}
	})
})
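
// testSetupVolumePlacement picks the first two ready schedulable nodes and
// gives each a unique value under NodeLabelKey so tests can target a specific
// node with a NodeSelector. The suite is skipped when fewer than two nodes
// are available.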
func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) {
	nodes := framework.GetReadySchedulableNodesOrDie(client)
	if len(nodes.Items) < 2 {
		framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
	}
	node1Name = nodes.Items[0].Name
	node2Name = nodes.Items[1].Name
	node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
	node1KeyValueLabel = make(map[string]string)
	node1KeyValueLabel[NodeLabelKey] = node1LabelValue
	framework.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)
	node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
	node2KeyValueLabel = make(map[string]string)
	node2KeyValueLabel[NodeLabelKey] = node2LabelValue
	framework.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
	return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
}
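
// createPodWithVolumeAndNodeSelector creates a pod that mounts the given
// vSphere volume paths, pins it to the labeled node, waits for it to reach
// the Running state, and asserts that every disk is attached to that node.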
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
	ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
	podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil)
	pod, err := client.CoreV1().Pods(namespace).Create(podspec)
	framework.ExpectNoError(err)

	ginkgo.By("Waiting for pod to be ready")
	gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())

	ginkgo.By(fmt.Sprintf("Verify volume is attached to the node: %v", nodeName))
	for _, volumePath := range volumePaths {
		isAttached, err := diskIsAttached(volumePath, nodeName)
		framework.ExpectNoError(err)
		gomega.Expect(isAttached).To(gomega.BeTrue(), "disk: "+volumePath+" is not attached to the node")
	}
	return pod
}
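
// createAndVerifyFilesOnVolume writes the given empty files on the volumes
// mounted in the pod to prove they are writable, then checks that all
// expected files (newly and previously created) are present.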
func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyFilesToCreate []string, filesToCheck []string) {
	// Create empty files on the mounted volumes on the pod to verify the volumes are writable
	ginkgo.By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname))
	createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyFilesToCreate)

	// Verify newly and previously created files are present on the volumes mounted on the pod
	ginkgo.By(fmt.Sprintf("Verify newly created file and previously created files are present on volume mounted on: %v", podname))
	verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...)
}
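
// deletePodAndWaitForVolumeToDetach deletes the pod and blocks until each of
// the given volume paths is detached from the named node.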
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
	ginkgo.By("Deleting pod")
	framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)

	ginkgo.By("Waiting for volume to be detached from the node")
	for _, volumePath := range volumePaths {
		framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
	}
}