memory_limits.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package windows

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"regexp"
	"strconv"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/client-go/kubernetes/scheme"
	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = SIGDescribe("[Feature:Windows] Memory Limits [Serial] [Slow]", func() {
	f := framework.NewDefaultFramework("memory-limit-test-windows")

	ginkgo.BeforeEach(func() {
		// NOTE(vyta): these tests are Windows specific
		framework.SkipUnlessNodeOSDistroIs("windows")
	})

	ginkgo.Context("Allocatable node memory", func() {
		ginkgo.It("should be equal to a calculated allocatable memory value", func() {
			checkNodeAllocatableTest(f)
		})
	})

	ginkgo.Context("attempt to deploy past allocatable memory limits", func() {
		ginkgo.It("should fail deployments of pods once there isn't enough memory", func() {
			overrideAllocatableMemoryTest(f, 4)
		})
	})
})

type nodeMemory struct {
	// capacity
	capacity resource.Quantity
	// allocatable memory
	allocatable resource.Quantity
	// memory reserved for OS level processes
	systemReserve resource.Quantity
	// memory reserved for kubelet (not implemented)
	kubeReserve resource.Quantity
	// grace period memory limit (not implemented)
	softEviction resource.Quantity
	// no grace period memory limit
	hardEviction resource.Quantity
}
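
// Relation verified by checkNodeAllocatableTest below (all values are byte quantities):
//
//	allocatable = capacity - systemReserve - kubeReserve - softEviction - hardEviction
//
// kubeReserve and softEviction are not implemented on these clusters yet, so they are
// always zero at the moment.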

// checkNodeAllocatableTest checks that a calculated value for NodeAllocatable
// is equal to the value reported in the node status.
func checkNodeAllocatableTest(f *framework.Framework) {

	nodeMem := getNodeMemory(f)
	e2elog.Logf("nodeMem says: %+v", nodeMem)

	// calculate the allocatable mem based on capacity - reserved amounts
	calculatedNodeAlloc := nodeMem.capacity.Copy()
	calculatedNodeAlloc.Sub(nodeMem.systemReserve)
	calculatedNodeAlloc.Sub(nodeMem.kubeReserve)
	calculatedNodeAlloc.Sub(nodeMem.softEviction)
	calculatedNodeAlloc.Sub(nodeMem.hardEviction)

	ginkgo.By(fmt.Sprintf("Checking stated allocatable memory %v against calculated allocatable memory %v", &nodeMem.allocatable, calculatedNodeAlloc))

	// sanity check against stated allocatable
	gomega.Expect(calculatedNodeAlloc.Cmp(nodeMem.allocatable)).To(gomega.Equal(0))
}

// overrideAllocatableMemoryTest deploys `allocatablePods` pods, each with a memory limit of
// `1/allocatablePods` of the total allocatable memory, then deploys one additional pod and
// confirms that it fails to schedule (a FailedScheduling event is emitted).
func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int) {
	const (
		podType = "memory_limit_test_pod"
	)

	totalAllocatable := getTotalAllocatableMemory(f)

	memValue := totalAllocatable.Value()
	memPerPod := memValue / int64(allocatablePods)
	ginkgo.By(fmt.Sprintf("Deploying %d pods with mem limit %v, then one additional pod", allocatablePods, memPerPod))

	// these should all work
	pods := newMemLimitTestPods(allocatablePods, imageutils.GetPauseImageName(), podType, strconv.FormatInt(memPerPod, 10))
	f.PodClient().CreateBatch(pods)

	// this additional pod should not fit and should stay Pending
	failurePods := newMemLimitTestPods(1, imageutils.GetPauseImageName(), podType, strconv.FormatInt(memPerPod, 10))
	f.PodClient().Create(failurePods[0])

	gomega.Eventually(func() bool {
		eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, e := range eventList.Items {
			// Look for an event that shows FailedScheduling
			if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePods[0].ObjectMeta.Name {
				e2elog.Logf("Found %+v event with message %+v", e.Reason, e.Message)
				return true
			}
		}
		return false
	}, 3*time.Minute, 10*time.Second).Should(gomega.Equal(true))
}
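
// Note: overrideAllocatableMemoryTest above passes the per-pod memory limit as a plain
// decimal byte count (e.g. "2147483648"); resource.ParseQuantity accepts that form and
// interprets it as a quantity in bytes.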

// newMemLimitTestPods creates a list of pod specifications for the test, each
// with the given memory limit.
func newMemLimitTestPods(numPods int, imageName, podType string, memoryLimit string) []*v1.Pod {
	var pods []*v1.Pod

	memLimitQuantity, err := resource.ParseQuantity(memoryLimit)
	framework.ExpectNoError(err)

	for i := 0; i < numPods; i++ {
		podName := "test-" + string(uuid.NewUUID())
		pod := v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: podName,
				Labels: map[string]string{
					"type": podType,
					"name": podName,
				},
			},
			Spec: v1.PodSpec{
				// Restart policy is always (default).
				Containers: []v1.Container{
					{
						Image: imageName,
						Name:  podName,
						Resources: v1.ResourceRequirements{
							Limits: v1.ResourceList{
								v1.ResourceMemory: memLimitQuantity,
							},
						},
					},
				},
				NodeSelector: map[string]string{
					"beta.kubernetes.io/os": "windows",
				},
			},
		}

		pods = append(pods, &pod)
	}

	return pods
}
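
// The "beta.kubernetes.io/os" selector used above is the legacy form of the OS node label;
// current clusters also expose the equivalent GA label "kubernetes.io/os".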

// getNodeMemory populates a nodeMemory struct with information from the first
// Windows agent node found in the cluster.
func getNodeMemory(f *framework.Framework) nodeMemory {
	selector := labels.Set{"beta.kubernetes.io/os": "windows"}.AsSelector()
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	framework.ExpectNoError(err)

	// Assuming that agent nodes have the same config
	// Make sure there is >0 agent nodes, then use the first one for info
	gomega.Expect(len(nodeList.Items)).NotTo(gomega.Equal(0))

	ginkgo.By("Getting memory details from node status and kubelet config")

	status := nodeList.Items[0].Status
	nodeName := nodeList.Items[0].ObjectMeta.Name

	kubeletConfig, err := getCurrentKubeletConfig(nodeName)
	framework.ExpectNoError(err)

	// Reservations and eviction thresholds that are not set in the kubelet config
	// fail to parse; treat them as zero.
	systemReserve, err := resource.ParseQuantity(kubeletConfig.SystemReserved["memory"])
	if err != nil {
		systemReserve = *resource.NewQuantity(0, resource.BinarySI)
	}
	kubeReserve, err := resource.ParseQuantity(kubeletConfig.KubeReserved["memory"])
	if err != nil {
		kubeReserve = *resource.NewQuantity(0, resource.BinarySI)
	}
	hardEviction, err := resource.ParseQuantity(kubeletConfig.EvictionHard["memory.available"])
	if err != nil {
		hardEviction = *resource.NewQuantity(0, resource.BinarySI)
	}
	softEviction, err := resource.ParseQuantity(kubeletConfig.EvictionSoft["memory.available"])
	if err != nil {
		softEviction = *resource.NewQuantity(0, resource.BinarySI)
	}

	nodeMem := nodeMemory{
		capacity:      status.Capacity[v1.ResourceMemory],
		allocatable:   status.Allocatable[v1.ResourceMemory],
		systemReserve: systemReserve,
		hardEviction:  hardEviction,
		// these are not implemented and are here for future use - will always be 0 at the moment
		kubeReserve:  kubeReserve,
		softEviction: softEviction,
	}

	return nodeMem
}

// getTotalAllocatableMemory returns the sum of all agent nodes' allocatable memory.
func getTotalAllocatableMemory(f *framework.Framework) *resource.Quantity {
	selector := labels.Set{"beta.kubernetes.io/os": "windows"}.AsSelector()
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	framework.ExpectNoError(err)

	ginkgo.By("Summing allocatable memory across all agent nodes")

	totalAllocatable := resource.NewQuantity(0, resource.BinarySI)

	for _, node := range nodeList.Items {
		status := node.Status
		totalAllocatable.Add(status.Allocatable[v1.ResourceMemory])
	}

	return totalAllocatable
}

// getCurrentKubeletConfig returns the named node's kubelet configuration;
// modified from test/e2e_node/util.go.
func getCurrentKubeletConfig(nodeName string) (*kubeletconfig.KubeletConfiguration, error) {
	resp := pollConfigz(5*time.Minute, 5*time.Second, nodeName)
	kubeCfg, err := decodeConfigz(resp)
	if err != nil {
		return nil, err
	}
	return kubeCfg, nil
}
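
// The two helpers below fetch the live kubelet configuration: pollConfigz starts a local
// kubectl proxy and polls the node's /configz endpoint until it returns a 200 response, and
// decodeConfigz unwraps that response into the internal KubeletConfiguration type.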

// pollConfigz returns a status 200 response from the node's /configz endpoint, or
// causes the test to fail if one cannot be obtained within the timeout.
func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName string) *http.Response {
	// start a local kubectl proxy so the kubelet's /configz endpoint can be reached
	// through the API server's node proxy subresource
	ginkgo.By("Opening proxy to cluster")
	cmd := framework.KubectlCmd("proxy", "-p", "0")
	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
	framework.ExpectNoError(err)
	defer stdout.Close()
	defer stderr.Close()
	defer framework.TryKill(cmd)

	// parse the port that `kubectl proxy -p 0` picked from its startup message
	buf := make([]byte, 128)
	var n int
	n, err = stdout.Read(buf)
	framework.ExpectNoError(err)
	output := string(buf[:n])
	proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
	match := proxyRegexp.FindStringSubmatch(output)
	gomega.Expect(len(match)).To(gomega.Equal(2))
	port, err := strconv.Atoi(match[1])
	framework.ExpectNoError(err)

	ginkgo.By("http requesting node kubelet /configz")
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/api/v1/nodes/%s/proxy/configz", port, nodeName)
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr}
	req, err := http.NewRequest("GET", endpoint, nil)
	framework.ExpectNoError(err)
	req.Header.Add("Accept", "application/json")

	var resp *http.Response
	gomega.Eventually(func() bool {
		resp, err = client.Do(req)
		if err != nil {
			e2elog.Logf("Failed to get /configz, retrying. Error: %v", err)
			return false
		}
		if resp.StatusCode != 200 {
			e2elog.Logf("/configz response status not 200, retrying. Response was: %+v", resp)
			return false
		}
		return true
	}, timeout, pollInterval).Should(gomega.Equal(true))

	return resp
}

// decodeConfigz decodes the http response from /configz and returns a
// kubeletconfig.KubeletConfiguration (internal type).
func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, error) {
	// This hack because /configz reports the following structure:
	// {"kubeletconfig": {the JSON representation of kubeletconfigv1beta1.KubeletConfiguration}}
	type configzWrapper struct {
		ComponentConfig kubeletconfigv1beta1.KubeletConfiguration `json:"kubeletconfig"`
	}

	configz := configzWrapper{}
	kubeCfg := kubeletconfig.KubeletConfiguration{}

	contentsBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	err = json.Unmarshal(contentsBytes, &configz)
	if err != nil {
		return nil, err
	}

	err = scheme.Scheme.Convert(&configz.ComponentConfig, &kubeCfg, nil)
	if err != nil {
		return nil, err
	}

	return &kubeCfg, nil
}