memory_limits.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package windows

import (
	"context"
	"fmt"
	"strconv"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = SIGDescribe("[Feature:Windows] Memory Limits [Serial] [Slow]", func() {

	f := framework.NewDefaultFramework("memory-limit-test-windows")

	ginkgo.BeforeEach(func() {
		// NOTE(vyta): these tests are Windows specific
		e2eskipper.SkipUnlessNodeOSDistroIs("windows")
	})

	ginkgo.Context("Allocatable node memory", func() {
		ginkgo.It("should be equal to a calculated allocatable memory value", func() {
			checkNodeAllocatableTest(f)
		})
	})

	ginkgo.Context("attempt to deploy past allocatable memory limits", func() {
		ginkgo.It("should fail deployments of pods once there isn't enough memory", func() {
			overrideAllocatableMemoryTest(f, 4)
		})
	})
})
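
// nodeMemory collects the memory figures these tests work with: the capacity and
// allocatable values reported in the node status, plus the reservations and
// eviction thresholds read from the kubelet config.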
type nodeMemory struct {
	// capacity
	capacity resource.Quantity
	// allocatable memory
	allocatable resource.Quantity
	// memory reserved for OS level processes
	systemReserve resource.Quantity
	// memory reserved for kubelet (not implemented)
	kubeReserve resource.Quantity
	// grace period memory limit (not implemented)
	softEviction resource.Quantity
	// no grace period memory limit
	hardEviction resource.Quantity
}

// checkNodeAllocatableTest checks that a calculated value for NodeAllocatable
// is equal to the value reported in the node status.
func checkNodeAllocatableTest(f *framework.Framework) {

	nodeMem := getNodeMemory(f)
	framework.Logf("nodeMem says: %+v", nodeMem)

	// calculate the allocatable mem based on capacity - reserved amounts
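	// For example (illustrative numbers only): with capacity=16Gi, systemReserve=2Gi,
	// kubeReserve=0, softEviction=0 and hardEviction=500Mi, the calculated allocatable
	// comes out to 16384Mi - 2048Mi - 500Mi = 13836Mi; the assertion below requires
	// this to equal the node's reported allocatable exactly.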
	calculatedNodeAlloc := nodeMem.capacity.DeepCopy()
	calculatedNodeAlloc.Sub(nodeMem.systemReserve)
	calculatedNodeAlloc.Sub(nodeMem.kubeReserve)
	calculatedNodeAlloc.Sub(nodeMem.softEviction)
	calculatedNodeAlloc.Sub(nodeMem.hardEviction)

	ginkgo.By(fmt.Sprintf("Checking stated allocatable memory %v against calculated allocatable memory %v", &nodeMem.allocatable, calculatedNodeAlloc))

	// sanity check against stated allocatable
	framework.ExpectEqual(calculatedNodeAlloc.Cmp(nodeMem.allocatable), 0)
}

// overrideAllocatableMemoryTest deploys `allocatablePods + 1` pods, each with a memory
// limit of `1/allocatablePods` of the total allocatable memory, then confirms that the
// last pod fails to schedule with a FailedScheduling event.
func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int) {
	const (
		podType = "memory_limit_test_pod"
	)

	totalAllocatable := getTotalAllocatableMemory(f)

	memValue := totalAllocatable.Value()
	memPerPod := memValue / int64(allocatablePods)
	ginkgo.By(fmt.Sprintf("Deploying %d pods with mem limit %v, then one additional pod", allocatablePods, memPerPod))
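
	// Note: the pods below only set a memory limit; Kubernetes defaults the memory
	// request to the limit when no request is given, so the scheduler counts each pod
	// as requesting memPerPod. Once the first batch is running, no node should have
	// memPerPod of allocatable memory left and the extra pod stays Pending.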

	// these should all work
	pods := newMemLimitTestPods(allocatablePods, imageutils.GetPauseImageName(), podType, strconv.FormatInt(memPerPod, 10))
	f.PodClient().CreateBatch(pods)

	failurePods := newMemLimitTestPods(1, imageutils.GetPauseImageName(), podType, strconv.FormatInt(memPerPod, 10))
	f.PodClient().Create(failurePods[0])

	gomega.Eventually(func() bool {
		eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, e := range eventList.Items {
			// Look for an event that shows FailedScheduling
			if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePods[0].ObjectMeta.Name {
				framework.Logf("Found %+v event with message %+v", e.Reason, e.Message)
				return true
			}
		}
		return false
	}, 3*time.Minute, 10*time.Second).Should(gomega.Equal(true))
}

// newMemLimitTestPods creates a list of pod specifications for the test, each with
// the given memory limit.
func newMemLimitTestPods(numPods int, imageName, podType string, memoryLimit string) []*v1.Pod {
	var pods []*v1.Pod

	memLimitQuantity, err := resource.ParseQuantity(memoryLimit)
	framework.ExpectNoError(err)

	for i := 0; i < numPods; i++ {
		podName := "test-" + string(uuid.NewUUID())
		pod := v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: podName,
				Labels: map[string]string{
					"type": podType,
					"name": podName,
				},
			},
			Spec: v1.PodSpec{
				// Restart policy is always (default).
				Containers: []v1.Container{
					{
						Image: imageName,
						Name:  podName,
						Resources: v1.ResourceRequirements{
							Limits: v1.ResourceList{
								v1.ResourceMemory: memLimitQuantity,
							},
						},
					},
				},
				NodeSelector: map[string]string{
					"kubernetes.io/os": "windows",
				},
			},
		}

		pods = append(pods, &pod)
	}

	return pods
}

// getNodeMemory populates a nodeMemory struct with information from the first
// Windows node that is found (the agent nodes are assumed to share the same config).
func getNodeMemory(f *framework.Framework) nodeMemory {
	selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector()
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	framework.ExpectNoError(err)

	// Assuming that agent nodes have the same config
	// Make sure there is at least one agent node, then use the first one for info
	framework.ExpectNotEqual(len(nodeList.Items), 0)

	ginkgo.By("Getting memory details from node status and kubelet config")

	status := nodeList.Items[0].Status
	nodeName := nodeList.Items[0].ObjectMeta.Name

	kubeletConfig, err := e2ekubelet.GetCurrentKubeletConfig(nodeName, f.Namespace.Name, true)
	framework.ExpectNoError(err)
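
	// Reservations and eviction thresholds that are not set in the kubelet config come
	// back as empty strings; ParseQuantity fails on those, so each value falls back to
	// a zero quantity rather than aborting the test.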
	systemReserve, err := resource.ParseQuantity(kubeletConfig.SystemReserved["memory"])
	if err != nil {
		systemReserve = *resource.NewQuantity(0, resource.BinarySI)
	}
	kubeReserve, err := resource.ParseQuantity(kubeletConfig.KubeReserved["memory"])
	if err != nil {
		kubeReserve = *resource.NewQuantity(0, resource.BinarySI)
	}
	hardEviction, err := resource.ParseQuantity(kubeletConfig.EvictionHard["memory.available"])
	if err != nil {
		hardEviction = *resource.NewQuantity(0, resource.BinarySI)
	}
	softEviction, err := resource.ParseQuantity(kubeletConfig.EvictionSoft["memory.available"])
	if err != nil {
		softEviction = *resource.NewQuantity(0, resource.BinarySI)
	}

	nodeMem := nodeMemory{
		capacity:      status.Capacity[v1.ResourceMemory],
		allocatable:   status.Allocatable[v1.ResourceMemory],
		systemReserve: systemReserve,
		hardEviction:  hardEviction,
		// these are not implemented and are here for future use - will always be 0 at the moment
		kubeReserve:  kubeReserve,
		softEviction: softEviction,
	}

	return nodeMem
}

// getTotalAllocatableMemory returns the sum of the allocatable memory of all agent nodes.
func getTotalAllocatableMemory(f *framework.Framework) *resource.Quantity {
	selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector()
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	framework.ExpectNoError(err)

	ginkgo.By("Summing allocatable memory across all agent nodes")

	totalAllocatable := resource.NewQuantity(0, resource.BinarySI)

	for _, node := range nodeList.Items {
		status := node.Status
		totalAllocatable.Add(status.Allocatable[v1.ResourceMemory])
	}

	return totalAllocatable
}