vsphere_stress.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere

import (
	"context"
	"fmt"
	"sync"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Induce stress by creating volumes in parallel, with the number of goroutines
and the iterations per goroutine controlled by user-configurable values.

The test performs the following actions:
1. Create storage classes of 4 categories: default, SC with a non-default
   datastore, SC with an SPBM policy, and SC with VSAN storage capabilities.
2. Read VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME
   and VSPHERE_DATASTORE from the system environment.
3. Launch one goroutine per instance for the volume lifecycle operations.
4. Each goroutine iterates n times, where n is read from the system
   environment variable VCP_STRESS_ITERATIONS.
5. Each iteration creates 1 PVC and 1 pod using the provisioned PV, verifies
   the disk is attached to the node, verifies the pod can access the volume,
   deletes the pod, and finally deletes the PVC.
*/
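//
// For example, a run might be configured like this (the values below are
// purely illustrative, not defaults):
//
//	export VCP_STRESS_INSTANCES=8        # number of parallel goroutines
//	export VCP_STRESS_ITERATIONS=10      # lifecycles per goroutine
//	export VSPHERE_SPBM_POLICY_NAME=gold
//	export VSPHERE_DATASTORE=sharedVmfs-0
//
// With these settings the test would exercise 8*10 = 80 PVC/pod lifecycles,
// spread round-robin across the 4 storage classes.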
var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
	f := framework.NewDefaultFramework("vcp-stress")
	var (
		client        clientset.Interface
		namespace     string
		instances     int
		iterations    int
		policyName    string
		datastoreName string
		scNames       = []string{storageclass1, storageclass2, storageclass3, storageclass4}
	)
	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		client = f.ClientSet
		namespace = f.Namespace.Name

		nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
		framework.ExpectNoError(err)

		// If VCP_STRESS_INSTANCES is 12 and VCP_STRESS_ITERATIONS is 10, 12 goroutines
		// run in parallel for 10 iterations each, resulting in 120 volume and pod creations.
		// Volumes are provisioned across the different types of storage class. Each
		// iteration creates a PVC, verifies the PV is provisioned, creates a pod, verifies
		// the volume is attached to the node, and then deletes the pod and the PVC.
		instances = GetAndExpectIntEnvVar(VCPStressInstances)
		framework.ExpectEqual(instances <= volumesPerNode*len(nodeList.Items), true, fmt.Sprintf("Number of instances should be less than or equal to %v", volumesPerNode*len(nodeList.Items)))
		framework.ExpectEqual(instances > len(scNames), true, "VCP_STRESS_INSTANCES should be greater than 4 to utilize all 4 types of storage classes")

		iterations = GetAndExpectIntEnvVar(VCPStressIterations)
		framework.ExpectEqual(iterations > 0, true, "VCP_STRESS_ITERATIONS should be greater than 0")

		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
	})
	ginkgo.It("vsphere stress tests", func() {
		scArrays := make([]*storagev1.StorageClass, len(scNames))
		for index, scname := range scNames {
			// Create vSphere Storage Class
			ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname))
			var sc *storagev1.StorageClass
			var err error
			switch scname {
			case storageclass1:
				sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{})
			case storageclass2:
				scVSanParameters := make(map[string]string)
				scVSanParameters[PolicyHostFailuresToTolerate] = "1"
				sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{})
			case storageclass3:
				scSPBMPolicyParameters := make(map[string]string)
				scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
				sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{})
			case storageclass4:
				scWithDSParameters := make(map[string]string)
				scWithDSParameters[Datastore] = datastoreName
				scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "")
				sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec, metav1.CreateOptions{})
			}
			gomega.Expect(sc).NotTo(gomega.BeNil())
			framework.ExpectNoError(err)
			// Delete takes metav1.DeleteOptions{} (not nil) with the
			// context-aware client-go API used by Create above.
			defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, metav1.DeleteOptions{})
			scArrays[index] = sc
		}
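
		// Fan out the work: one goroutine per instance, each cycling through the
		// complete volume lifecycle `iterations` times. Storage classes are assigned
		// round-robin (instanceCount % len(scArrays)), and the WaitGroup blocks the
		// test until every goroutine has finished.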
		var wg sync.WaitGroup
		wg.Add(instances)
		for instanceCount := 0; instanceCount < instances; instanceCount++ {
			instanceID := fmt.Sprintf("Thread:%v", instanceCount+1)
			go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceID, scArrays[instanceCount%len(scArrays)], iterations, &wg)
		}
		wg.Wait()
	})
})

// PerformVolumeLifeCycleInParallel performs volume lifecycle operations.
// Called as a goroutine to perform operations in parallel.
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceID string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) {
	defer wg.Done()
	// GinkgoRecover must be deferred at the top of any goroutine that makes
	// Ginkgo/Gomega assertions, so that a failed assertion is reported to the
	// test framework instead of crashing the whole process.
	defer ginkgo.GinkgoRecover()
	for iterationCount := 0; iterationCount < iterations; iterationCount++ {
		logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceID, iterationCount+1)
		ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
		pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
		framework.ExpectNoError(err)
		defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
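		// Note: because defer runs when the function returns (not per loop
		// iteration), these PVC-deletion defers accumulate across iterations.
		// They act as a cleanup safety net if an assertion aborts an iteration
		// before the explicit DeletePersistentVolumeClaim call below.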
		var pvclaims []*v1.PersistentVolumeClaim
		pvclaims = append(pvclaims, pvclaim)
		ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
		// Create pod to attach Volume to Node
		pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "")
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
		err = f.WaitForPodRunningSlow(pod.Name)
		framework.ExpectNoError(err)

		// Get the copy of the Pod to know the assigned node name.
		pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
		isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
		framework.ExpectEqual(isVolumeAttached, true)
		framework.ExpectNoError(verifyDiskAttachedError)

		ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
		verifyVSphereVolumesAccessible(client, pod, persistentvolumes)

		ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
		err = e2epod.DeletePodWithWait(client, pod)
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
		err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
		err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
		framework.ExpectNoError(err)
	}
}