vsphere_scale.go 9.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242
  1. /*
  2. Copyright 2017 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package vsphere
  14. import (
  15. "context"
  16. "fmt"
  17. "strconv"
  18. "github.com/onsi/ginkgo"
  19. "github.com/onsi/gomega"
  20. "k8s.io/api/core/v1"
  21. storagev1 "k8s.io/api/storage/v1"
  22. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  23. clientset "k8s.io/client-go/kubernetes"
  24. "k8s.io/kubernetes/test/e2e/framework"
  25. e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  26. e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  27. e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
  28. e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  29. "k8s.io/kubernetes/test/e2e/storage/utils"
  30. )
  31. /*
  32. Perform vsphere volume life cycle management at scale based on user configurable value for number of volumes.
  33. The following actions will be performed as part of this test.
  34. 1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.)
  35. 2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from System Environment.
  36. 3. Launch VCP_SCALE_INSTANCES goroutine for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for create/attach of VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
4. Read VCP_SCALE_VOLUMES_PER_POD from the system environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it.
  38. 5. Once all the go routines are completed, we delete all the pods and volumes.
  39. */
const (
	// NodeLabelKey is the label key applied to every schedulable node so that
	// scale-test pods can be pinned to a specific node via a nodeSelector.
	NodeLabelKey = "vsphere_e2e_label"
)
// NodeSelector holds a node label key/value pair; it is used as a pod
// nodeSelector to schedule test pods onto a particular labeled node.
type NodeSelector struct {
	labelKey   string
	labelValue string
}
// Scale test: provisions volumes across four storage-class flavors, attaches
// them to pods spread over labeled nodes via concurrent goroutines, then
// deletes all pods and claims and waits for the disks to detach.
var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
	f := framework.NewDefaultFramework("vcp-at-scale")
	var (
		client            clientset.Interface
		namespace         string
		nodeSelectorList  []*NodeSelector
		volumeCount       int // total volumes to provision (VCP_SCALE_VOLUME_COUNT)
		numberOfInstances int // concurrent provisioning goroutines (VCP_SCALE_INSTANCES, max 5)
		volumesPerPod     int // PVCs attached to each pod (VCP_SCALE_VOLUMES_PER_POD)
		policyName        string
		datastoreName     string
		nodeVolumeMapChan chan map[string][]string // each goroutine reports its node->volumePaths map here
		nodes             *v1.NodeList
		scNames           = []string{storageclass1, storageclass2, storageclass3, storageclass4}
	)
	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		client = f.ClientSet
		namespace = f.Namespace.Name
		// Unbuffered: the It body must drain exactly numberOfInstances sends.
		nodeVolumeMapChan = make(chan map[string][]string)
		// Read the environment variables
		volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
		volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
		numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
		framework.ExpectNotEqual(numberOfInstances > 5, true, "Maximum allowed instances are 5")
		framework.ExpectNotEqual(numberOfInstances > volumeCount, true, "Number of instances should be less than the total volume count")
		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
		var err error
		nodes, err = e2enode.GetReadySchedulableNodes(client)
		framework.ExpectNoError(err)
		if len(nodes.Items) < 2 {
			e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
		}
		// Verify volume count specified by the user can be satisfied
		if volumeCount > volumesPerNode*len(nodes.Items) {
			e2eskipper.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
		}
		nodeSelectorList = createNodeLabels(client, namespace, nodes)
	})
	/*
		Remove labels from all the nodes
	*/
	framework.AddCleanupAction(func() {
		// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
		if len(namespace) > 0 {
			for _, node := range nodes.Items {
				framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
			}
		}
	})
	ginkgo.It("vsphere scale tests", func() {
		var pvcClaimList []string
		nodeVolumeMap := make(map[string][]string)
		// Volumes will be provisioned with each of the different types of Storage Class
		scArrays := make([]*storagev1.StorageClass, len(scNames))
		for index, scname := range scNames {
			// Create vSphere Storage Class
			ginkgo.By(fmt.Sprintf("Creating Storage Class : %q", scname))
			var sc *storagev1.StorageClass
			scParams := make(map[string]string)
			var err error
			// Each class flavor exercises a different provisioning path:
			// default params, VSAN capability, SPBM policy, explicit datastore.
			switch scname {
			case storageclass1:
				scParams = nil
			case storageclass2:
				scParams[PolicyHostFailuresToTolerate] = "1"
			case storageclass3:
				scParams[SpbmStoragePolicy] = policyName
			case storageclass4:
				scParams[Datastore] = datastoreName
			}
			sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{})
			// NOTE(review): the nil-check on sc runs before the error check; if
			// Create fails, the BeNil assertion fires with a less useful message
			// than ExpectNoError would. Consider checking err first.
			gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty")
			framework.ExpectNoError(err, "Failed to create storage class")
			// Deliberate defer-in-loop: all four storage classes are deleted
			// when this It function returns, after the test body completes.
			defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, nil)
			scArrays[index] = sc
		}
		// Split the total volume count evenly across instances; the last
		// instance picks up the remainder (volumeCount holds what is left).
		volumeCountPerInstance := volumeCount / numberOfInstances
		for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
			if instanceCount == numberOfInstances-1 {
				volumeCountPerInstance = volumeCount
			}
			volumeCount = volumeCount - volumeCountPerInstance
			go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
		}
		// Get the list of all volumes attached to each node from the go routines by reading the data from the channel
		for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
			for node, volumeList := range <-nodeVolumeMapChan {
				nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
			}
		}
		podList, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err, "Failed to list pods")
		for _, pod := range podList.Items {
			pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
			ginkgo.By("Deleting pod")
			err = e2epod.DeletePodWithWait(client, &pod)
			framework.ExpectNoError(err)
		}
		ginkgo.By("Waiting for volumes to be detached from the node")
		err = waitForVSphereDisksToDetach(nodeVolumeMap)
		framework.ExpectNoError(err)
		for _, pvcClaim := range pvcClaimList {
			err = e2epv.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
			framework.ExpectNoError(err)
		}
	})
})
  158. // Get PVC claims for the pod
  159. func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
  160. pvcClaimList := make([]string, volumesPerPod)
  161. for i, volumespec := range pod.Spec.Volumes {
  162. if volumespec.PersistentVolumeClaim != nil {
  163. pvcClaimList[i] = volumespec.PersistentVolumeClaim.ClaimName
  164. }
  165. }
  166. return pvcClaimList
  167. }
  168. // VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale
  169. func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
  170. defer ginkgo.GinkgoRecover()
  171. nodeVolumeMap := make(map[string][]string)
  172. nodeSelectorIndex := 0
  173. for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
  174. if (volumeCountPerInstance - index) < volumesPerPod {
  175. volumesPerPod = volumeCountPerInstance - index
  176. }
  177. pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
  178. for i := 0; i < volumesPerPod; i++ {
  179. ginkgo.By("Creating PVC using the Storage Class")
  180. pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
  181. framework.ExpectNoError(err)
  182. pvclaims[i] = pvclaim
  183. }
  184. ginkgo.By("Waiting for claim to be in bound phase")
  185. persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
  186. framework.ExpectNoError(err)
  187. ginkgo.By("Creating pod to attach PV to the node")
  188. nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
  189. // Create pod to attach Volume to Node
  190. pod, err := e2epod.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
  191. framework.ExpectNoError(err)
  192. for _, pv := range persistentvolumes {
  193. nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
  194. }
  195. ginkgo.By("Verify the volume is accessible and available in the pod")
  196. verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
  197. nodeSelectorIndex++
  198. }
  199. nodeVolumeMapChan <- nodeVolumeMap
  200. close(nodeVolumeMapChan)
  201. }
  202. func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.NodeList) []*NodeSelector {
  203. var nodeSelectorList []*NodeSelector
  204. for i, node := range nodes.Items {
  205. labelVal := "vsphere_e2e_" + strconv.Itoa(i)
  206. nodeSelector := &NodeSelector{
  207. labelKey: NodeLabelKey,
  208. labelValue: labelVal,
  209. }
  210. nodeSelectorList = append(nodeSelectorList, nodeSelector)
  211. framework.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
  212. }
  213. return nodeSelectorList
  214. }