ubernetes_lite_volumes.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling

import (
	"fmt"
	"strconv"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	compute "google.golang.org/api/compute/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)
var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
	f := framework.NewDefaultFramework("multi-az")
	var zoneCount int
	var err error
	image := framework.ServeHostnameImage

	ginkgo.BeforeEach(func() {
		framework.SkipUnlessProviderIs("gce", "gke")
		if zoneCount <= 0 {
			zoneCount, err = getZoneCount(f.ClientSet)
			framework.ExpectNoError(err)
		}
		ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
		msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
		framework.SkipUnlessAtLeast(zoneCount, 2, msg)
		// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
	})

	ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func() {
		PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image)
	})

	ginkgo.It("should only be allowed to provision PDs in zones where nodes exist", func() {
		OnlyAllowNodeZones(f, zoneCount, image)
	})
})
// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
	gceCloud, err := gce.GetGCECloud()
	framework.ExpectNoError(err)

	// Get all the zones that the nodes are in
	expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
	framework.ExpectNoError(err)
	e2elog.Logf("Expected zones: %v", expectedZones)

	// Get all the zones in this current region
	region := gceCloud.Region()
	allZonesInRegion, err := gceCloud.ListZonesInRegion(region)
	framework.ExpectNoError(err)

	var extraZone string
	for _, zone := range allZonesInRegion {
		if !expectedZones.Has(zone.Name) {
			extraZone = zone.Name
			break
		}
	}
	gomega.Expect(extraZone).NotTo(gomega.Equal(""), fmt.Sprintf("No extra zones available in region %s", region))

	ginkgo.By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone))
	project := framework.TestContext.CloudConfig.ProjectID
	zone := extraZone
	myuuid := string(uuid.NewUUID())
	name := "compute-" + myuuid
	imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606"
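
	// Build a minimal f1-micro instance spec in the unused zone: a boot persistent
	// disk plus a default-network interface with an ephemeral external IP.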
	rb := &compute.Instance{
		MachineType: "zones/" + zone + "/machineTypes/f1-micro",
		Disks: []*compute.AttachedDisk{
			{
				AutoDelete: true,
				Boot:       true,
				Type:       "PERSISTENT",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					DiskName:    "my-root-pd-" + myuuid,
					SourceImage: imageURL,
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				AccessConfigs: []*compute.AccessConfig{
					{
						Type: "ONE_TO_ONE_NAT",
						Name: "External NAT",
					},
				},
				Network: "/global/networks/default",
			},
		},
		Name: name,
	}

	err = gceCloud.InsertInstance(project, zone, rb)
	framework.ExpectNoError(err)
	defer func() {
		// Teardown of the compute instance
		e2elog.Logf("Deleting compute resource: %v", name)
		err := gceCloud.DeleteInstance(project, zone, name)
		framework.ExpectNoError(err)
	}()
	ginkgo.By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
	// Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1
	// This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees"
	var pvcList []*v1.PersistentVolumeClaim
	c := f.ClientSet
	ns := f.Namespace.Name

	for index := 1; index <= zoneCount+1; index++ {
		pvc := newNamedDefaultClaim(ns, index)
		pvc, err = framework.CreatePVC(c, ns, pvc)
		framework.ExpectNoError(err)
		pvcList = append(pvcList, pvc)

		// Defer the cleanup
		defer func() {
			e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
			err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
			if err != nil {
				framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
			}
		}()
	}

	// Wait for all claims bound
	for _, claim := range pvcList {
		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
		framework.ExpectNoError(err)
	}
	pvZones := sets.NewString()
	ginkgo.By("Checking that PDs have been provisioned in only the expected zones")
	for _, claim := range pvcList {
		// Get a new copy of the claim to have all fields populated
		claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		// Get the related PV
		pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
		framework.ExpectNoError(err)

		pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
		gomega.Expect(ok).To(gomega.BeTrue(), "PV is missing the zone failure-domain label")
		pvZones.Insert(pvZone)
	}
	gomega.Expect(pvZones.Equal(expectedZones)).To(gomega.BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
}
type staticPVTestConfig struct {
	pvSource *v1.PersistentVolumeSource
	pv       *v1.PersistentVolume
	pvc      *v1.PersistentVolumeClaim
	pod      *v1.Pod
}
// PodsUseStaticPVsOrFail checks that pods using statically
// created PVs get scheduled to the same zone that the PV is in.
func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) {
	var err error
	c := f.ClientSet
	ns := f.Namespace.Name

	zones, err := framework.GetClusterZones(c)
	framework.ExpectNoError(err)
	zonelist := zones.List()

	ginkgo.By("Creating static PVs across zones")
	configs := make([]*staticPVTestConfig, podCount)
	for i := range configs {
		configs[i] = &staticPVTestConfig{}
	}
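
	// Deferred cleanup: delete the pods first, wait for them to stop running,
	// then remove the PVs, PVCs, and their backing volume sources.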
	defer func() {
		ginkgo.By("Cleaning up pods and PVs")
		for _, config := range configs {
			framework.DeletePodOrFail(c, ns, config.pod.Name)
		}
		for _, config := range configs {
			framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
			framework.PVPVCCleanup(c, ns, config.pv, config.pvc)
			err = framework.DeletePVSource(config.pvSource)
			framework.ExpectNoError(err)
		}
	}()
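
	// Create one static PV per config, spread round-robin across the cluster's
	// zones, each paired with a PVC that sets an empty StorageClass so the claim
	// binds to the static PV instead of triggering dynamic provisioning.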
	for i, config := range configs {
		zone := zonelist[i%len(zones)]
		config.pvSource, err = framework.CreatePVSource(zone)
		framework.ExpectNoError(err)

		pvConfig := framework.PersistentVolumeConfig{
			NamePrefix: "multizone-pv",
			PVSource:   *config.pvSource,
			Prebind:    nil,
		}
		className := ""
		pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className}

		config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
		framework.ExpectNoError(err)
	}

	ginkgo.By("Waiting for all PVCs to be bound")
	for _, config := range configs {
		framework.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
	}

	ginkgo.By("Creating pods for each static PV")
	for _, config := range configs {
		podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
		config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
		framework.ExpectNoError(err)
	}

	ginkgo.By("Waiting for all pods to be running")
	for _, config := range configs {
		err = framework.WaitForPodRunningInNamespace(c, config.pod)
		framework.ExpectNoError(err)
	}
}
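
// newNamedDefaultClaim returns a 1Gi ReadWriteOnce PersistentVolumeClaim named
// "pvc-<index>" in the given namespace. No StorageClass is set, so dynamic
// provisioning falls back to the cluster's default StorageClass.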
func newNamedDefaultClaim(ns string, index int) *v1.PersistentVolumeClaim {
	claim := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pvc-" + strconv.Itoa(index),
			Namespace: ns,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
				},
			},
		},
	}

	return &claim
}