limit_range.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduling

import (
	"fmt"
	"reflect"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

const (
	podName = "pfpod"
)

var _ = SIGDescribe("LimitRange", func() {
	f := framework.NewDefaultFramework("limitrange")

	ginkgo.It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
		ginkgo.By("Creating a LimitRange")
		min := getResourceList("50m", "100Mi", "100Gi")
		max := getResourceList("500m", "500Mi", "500Gi")
		defaultLimit := getResourceList("500m", "500Mi", "500Gi")
		defaultRequest := getResourceList("100m", "200Mi", "200Gi")
		maxLimitRequestRatio := v1.ResourceList{}
		limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
			min, max,
			defaultLimit, defaultRequest,
			maxLimitRequestRatio)

		ginkgo.By("Setting up watch")
		selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
		options := metav1.ListOptions{LabelSelector: selector.String()}
		limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
		framework.ExpectNoError(err, "failed to query for limitRanges")
		gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0))
		options = metav1.ListOptions{
			LabelSelector:   selector.String(),
			ResourceVersion: limitRanges.ListMeta.ResourceVersion,
		}
		w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(options)
		framework.ExpectNoError(err, "failed to set up watch")

		ginkgo.By("Submitting a LimitRange")
		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
		framework.ExpectNoError(err)

		ginkgo.By("Verifying LimitRange creation was observed")
		select {
		case event, _ := <-w.ResultChan():
			if event.Type != watch.Added {
				framework.Failf("Failed to observe LimitRange creation: %v", event)
			}
		case <-time.After(framework.ServiceRespondingTimeout):
			framework.Failf("Timeout while waiting for LimitRange creation")
		}

		ginkgo.By("Fetching the LimitRange to ensure it has proper values")
		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
		actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
		err = equalResourceRequirement(expected, actual)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a Pod with no resource requirements")
		pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		for i := range pod.Spec.Containers {
			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
			if err != nil {
				// Print the pod to help in debugging.
				e2elog.Logf("Pod %+v does not have the expected requirements", pod)
				framework.ExpectNoError(err)
			}
		}

		ginkgo.By("Creating a Pod with partial resource requirements")
		pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		// This is an interesting case, so it's worth a comment.
		// If you specify a Limit and no Request, the Request defaults to the Limit.
		// This means that LimitRange.DefaultRequest only takes effect when a container's resources.limits is not supplied.
		expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi", "150Gi"), Limits: getResourceList("300m", "500Mi", "500Gi")}
		for i := range pod.Spec.Containers {
			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
			if err != nil {
				// Print the pod to help in debugging.
				e2elog.Logf("Pod %+v does not have the expected requirements", pod)
				framework.ExpectNoError(err)
			}
		}

		ginkgo.By("Failing to create a Pod with less than min resources")
		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
		framework.ExpectError(err)

		ginkgo.By("Failing to create a Pod with more than max resources")
		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
		framework.ExpectError(err)

		ginkgo.By("Updating a LimitRange")
		newMin := getResourceList("9m", "49Mi", "49Gi")
		limitRange.Spec.Limits[0].Min = newMin
		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
		framework.ExpectNoError(err)

		ginkgo.By("Verifying LimitRange updating is effective")
		err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
			limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
		})
		framework.ExpectNoError(err)

		ginkgo.By("Creating a Pod with less than former min resources")
		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
		framework.ExpectNoError(err)

		ginkgo.By("Failing to create a Pod with more than max resources")
		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
		framework.ExpectError(err)

		ginkgo.By("Deleting a LimitRange")
		err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30))
		framework.ExpectNoError(err)

		ginkgo.By("Verifying the LimitRange was deleted")
		err = wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
			selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
			options := metav1.ListOptions{LabelSelector: selector.String()}
			limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
			if err != nil {
				e2elog.Logf("Unable to retrieve LimitRanges: %v", err)
				return false, nil
			}
			if len(limitRanges.Items) == 0 {
				e2elog.Logf("limitRange is already deleted")
				return true, nil
			}
			if len(limitRanges.Items) > 0 {
				if limitRanges.Items[0].ObjectMeta.DeletionTimestamp == nil {
					e2elog.Logf("deletion has not yet been observed")
					return false, nil
				}
				return true, nil
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to observe LimitRange deletion")

		ginkgo.By("Creating a Pod with more than former max resources")
		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
		framework.ExpectNoError(err)
	})
})
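
// equalResourceRequirement checks that the actual requests and limits match the expected ones.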
func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
	e2elog.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
	err := equalResourceList(expected.Requests, actual.Requests)
	if err != nil {
		return err
	}
	e2elog.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
	err = equalResourceList(expected.Limits, actual.Limits)
	return err
}
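
// equalResourceList verifies that expected and actual contain the same resource names with equal quantities.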
func equalResourceList(expected v1.ResourceList, actual v1.ResourceList) error {
	for k, v := range expected {
		if actualValue, found := actual[k]; !found || (v.Cmp(actualValue) != 0) {
			return fmt.Errorf("resource %v expected %v actual %v", k, v.String(), actualValue.String())
		}
	}
	for k, v := range actual {
		if expectedValue, found := expected[k]; !found || (v.Cmp(expectedValue) != 0) {
			return fmt.Errorf("resource %v expected %v actual %v", k, expectedValue.String(), v.String())
		}
	}
	return nil
}
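
// getResourceList builds a ResourceList from the given cpu, memory, and ephemeral-storage quantities, skipping empty values.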
func getResourceList(cpu, memory string, ephemeralStorage string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	if ephemeralStorage != "" {
		res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
	}
	return res
}

// newLimitRange returns a limit range with specified data
func newLimitRange(name string, limitType v1.LimitType,
	min, max,
	defaultLimit, defaultRequest,
	maxLimitRequestRatio v1.ResourceList) *v1.LimitRange {
	return &v1.LimitRange{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.LimitRangeSpec{
			Limits: []v1.LimitRangeItem{
				{
					Type:                 limitType,
					Min:                  min,
					Max:                  max,
					Default:              defaultLimit,
					DefaultRequest:       defaultRequest,
					MaxLimitRequestRatio: maxLimitRequestRatio,
				},
			},
		},
	}
}