vsphere_volume_placement.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
	f := framework.NewDefaultFramework("volume-placement")
	const (
		NodeLabelKey = "vsphere_e2e_label_volume_placement"
	)
	var (
		c                  clientset.Interface
		ns                 string
		volumePaths        []string
		node1Name          string
		node1KeyValueLabel map[string]string
		node2Name          string
		node2KeyValueLabel map[string]string
		isNodeLabeled      bool
		nodeInfo           *NodeInfo
		vsp                *VSphere
	)
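	// Per-spec setup: label the two target nodes once per suite run, then
	// create a fresh VMDK on the default datastore for each spec to use.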
	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		c = f.ClientSet
		ns = f.Namespace.Name
		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
		if !isNodeLabeled {
			node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
			isNodeLabeled = true
			nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
			vsp = nodeInfo.VSphere
		}
		ginkgo.By("creating vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)
	})
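	// Per-spec teardown: delete every VMDK the spec created.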
	ginkgo.AfterEach(func() {
		for _, volumePath := range volumePaths {
			vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
		}
		volumePaths = nil
	})
	/*
		Steps
		1. Remove labels assigned to node 1 and node 2
		2. Delete VMDK volume
	*/
	framework.AddCleanupAction(func() {
		// Cleanup actions run even when the tests are skipped, which leaves the namespace unset.
		if len(ns) > 0 {
			if len(node1KeyValueLabel) > 0 {
				framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
			}
			if len(node2KeyValueLabel) > 0 {
				framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
			}
		}
	})
	/*
		Steps
		1. Create pod spec with the volume path of the vmdk and a NodeSelector set to the label assigned to node1.
		2. Create pod and wait for pod to become ready.
		3. Verify volume is attached to node1.
		4. Create empty file on the volume to verify volume is writable.
		5. Verify newly created file and previously created files exist on the volume.
		6. Delete pod.
		7. Wait for volume to be detached from node1.
		8. Repeat steps 1 to 7 to make sure back-to-back pod creation on the same worker node with the same volume works as expected.
	*/
	ginkgo.It("should create and delete pod with the same volume source on the same worker node", func() {
		var volumeFiles []string
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create an empty file on the mounted volume to verify it is writable, then
		// verify that both the new file and previously created files are present.
		newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
	})
	/*
		Steps
		1. Create pod spec with the volume path of vmdk1 and a NodeSelector set to node1's label.
		2. Create pod and wait for pod to become ready.
		3. Verify volume is attached to node1.
		4. Create empty file on the volume to verify volume is writable.
		5. Verify newly created file and previously created files exist on the volume.
		6. Delete pod.
		7. Wait for volume to be detached from node1.
		8. Create pod spec with the volume path of vmdk1 and a NodeSelector set to node2's label.
		9. Create pod and wait for pod to become ready.
		10. Verify volume is attached to node2.
		11. Create empty file on the volume to verify volume is writable.
		12. Verify newly created file and previously created files exist on the volume.
		13. Delete pod.
		14. Wait for volume to be detached from node2.
	*/
	ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
		var volumeFiles []string
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create an empty file on the mounted volume to verify it is writable, then
		// verify that both the new file and previously created files are present.
		newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on another node: %v", node2Name))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)

		newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
		volumeFiles = append(volumeFiles, newEmptyFileName)
		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths)
	})
	/*
		Test multiple volumes from the same datastore within the same pod
		1. Create volume vmdk2.
		2. Create pod spec with the volume paths of vmdk1 (created in test setup) and vmdk2.
		3. Create pod using the spec created in step 2 and wait for pod to become ready.
		4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
		5. Delete pod.
		6. Wait for vmdk1 and vmdk2 to be detached from the node.
		7. Create pod using the spec created in step 2 and wait for pod to become ready.
		8. Verify both volumes are attached to the node on which the pod is created. Verify the volume contents match the content written in step 4.
		9. Delete pod.
		10. Wait for vmdk1 and vmdk2 to be detached from the node.
	*/
	ginkgo.It("should create and delete pod with multiple volumes from same datastore", func() {
		ginkgo.By("creating another vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes to verify they are writable, then
		// verify that both the new files and previously created files are present.
		volumeFiles := []string{
			fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
		}
		createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		newEmptyFileNames := []string{
			fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
		}
		volumeFiles = append(volumeFiles, newEmptyFileNames...)
		createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
		// Delete the pod and wait for detach, matching steps 9 and 10 above.
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
	})
	/*
		Test multiple volumes from different datastores within the same pod
		1. Create volume vmdk2 on a non-default shared datastore.
		2. Create pod spec with the volume paths of vmdk1 (created in test setup on the default datastore) and vmdk2.
		3. Create pod using the spec created in step 2 and wait for pod to become ready.
		4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
		5. Delete pod.
		6. Wait for vmdk1 and vmdk2 to be detached from the node.
		7. Create pod using the spec created in step 2 and wait for pod to become ready.
		8. Verify both volumes are attached to the node on which the pod is created. Verify the volume contents match the content written in step 4.
		9. Delete pod.
		10. Wait for vmdk1 and vmdk2 to be detached from the node.
	*/
	ginkgo.It("should create and delete pod with multiple volumes from different datastore", func() {
		ginkgo.By("creating another vmdk on non default shared datastore")
		volumeOptions := new(VolumeOptions)
		volumeOptions.CapacityKB = 2097152
		volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
		volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
		volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		// Create empty files on the mounted volumes to verify they are writable, then
		// verify that both the new files and previously created files are present.
		volumeFiles := []string{
			fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
		}
		createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)

		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
		pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)

		newEmptyFileNames := []string{
			fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
			fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
		}
		volumeFiles = append(volumeFiles, newEmptyFileNames...)
		createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
		deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
	})
	/*
		Test back-to-back pod creation/deletion with different volume sources on the same worker node
		1. Create volume vmdk2.
		2. Create pod spec pod-SpecA with the volume path of vmdk1 and a NodeSelector set to the label assigned to node1.
		3. Create pod spec pod-SpecB with the volume path of vmdk2 and a NodeSelector set to the label assigned to node1.
		4. Create pod-A using pod-SpecA and wait for pod to become ready.
		5. Create pod-B using pod-SpecB and wait for pod to become ready.
		6. Verify volumes are attached to the node.
		7. Create an empty file on each volume to make sure the volumes are accessible. (Perform this step on pod-A and pod-B.)
		8. Verify the files created in step 7 are present on the volumes. (Perform this step on pod-A and pod-B.)
		9. Delete pod-A and pod-B.
		10. Repeat steps 4 to 9 five times and verify the associated volume contents match.
		11. Wait for vmdk1 and vmdk2 to be detached from the node.
	*/
	ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func() {
		var (
			podA                *v1.Pod
			podB                *v1.Pod
			testvolumePathsPodA []string
			testvolumePathsPodB []string
			podAFiles           []string
			podBFiles           []string
		)
		defer func() {
			ginkgo.By("clean up undeleted pods")
			// Guard against a nil pod in case the spec failed before the pods were created.
			if podA != nil {
				framework.ExpectNoError(e2epod.DeletePodWithWait(c, podA), "defer: Failed to delete pod ", podA.Name)
			}
			if podB != nil {
				framework.ExpectNoError(e2epod.DeletePodWithWait(c, podB), "defer: Failed to delete pod ", podB.Name)
			}
			ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
			for _, volumePath := range volumePaths {
				framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
			}
		}()

		// pod-A uses the vmdk created in test setup; pod-B uses a second, freshly created vmdk.
		testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
		ginkgo.By("creating another vmdk")
		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		volumePaths = append(volumePaths, volumePath)
		testvolumePathsPodB = append(testvolumePathsPodB, volumePath)

		for index := 0; index < 5; index++ {
			ginkgo.By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
			podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)

			ginkgo.By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
			podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)

			podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
			podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
			podAFiles = append(podAFiles, podAFileName)
			podBFiles = append(podBFiles, podBFileName)

			// Create empty files on the mounted volumes to verify they are writable.
			ginkgo.By("Creating empty file on volume mounted on pod-A")
			framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
			ginkgo.By("Creating empty file on volume mounted on pod-B")
			framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)

			// Verify that the newly created file and all previously created files are present.
			ginkgo.By("Verify newly created file and previously created files present on volume mounted on pod-A")
			verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...)
			ginkgo.By("Verify newly created file and previously created files present on volume mounted on pod-B")
			verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...)

			ginkgo.By("Deleting pod-A")
			framework.ExpectNoError(e2epod.DeletePodWithWait(c, podA), "Failed to delete pod ", podA.Name)
			ginkgo.By("Deleting pod-B")
			framework.ExpectNoError(e2epod.DeletePodWithWait(c, podB), "Failed to delete pod ", podB.Name)
		}
	})
})
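// testSetupVolumePlacement picks two ready, schedulable nodes and labels each
// with a unique value under NodeLabelKey so pods can be pinned to a specific node.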
func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) {
	nodes, err := e2enode.GetBoundedReadySchedulableNodes(client, 2)
	framework.ExpectNoError(err)
	if len(nodes.Items) < 2 {
		e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
	}
	node1Name = nodes.Items[0].Name
	node2Name = nodes.Items[1].Name

	node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
	node1KeyValueLabel = map[string]string{NodeLabelKey: node1LabelValue}
	framework.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)

	node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
	node2KeyValueLabel = map[string]string{NodeLabelKey: node2LabelValue}
	framework.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
	return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
}
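// createPodWithVolumeAndNodeSelector creates a pod that mounts the given volume
// paths on the selected node, waits for it to run, and asserts that every
// volume is attached to that node.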
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
	var pod *v1.Pod
	var err error
	ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
	podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil)
	pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Waiting for pod to be ready")
	gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())

	ginkgo.By(fmt.Sprintf("Verify volume is attached to the node: %v", nodeName))
	for _, volumePath := range volumePaths {
		isAttached, err := diskIsAttached(volumePath, nodeName)
		framework.ExpectNoError(err)
		framework.ExpectEqual(isAttached, true, "disk:"+volumePath+" is not attached to the node")
	}
	return pod
}
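// createAndVerifyFilesOnVolume creates the given empty files on the volumes
// mounted in the pod, then checks that every file in filesToCheck exists.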
func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) {
	// Create empty files on the mounted volumes to verify they are writable.
	ginkgo.By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname))
	createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate)
	// Verify that the newly created files and all previously created files are present.
	ginkgo.By(fmt.Sprintf("Verify newly created file and previously created files present on volume mounted on: %v", podname))
	verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...)
}
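// deletePodAndWaitForVolumeToDetach deletes the pod and blocks until every
// given volume is detached from the node.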
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
	ginkgo.By("Deleting pod")
	framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod), "Failed to delete pod ", pod.Name)

	ginkgo.By("Waiting for volume to be detached from the node")
	for _, volumePath := range volumePaths {
		framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
	}
}