// node_container_manager_test.go
  1. // +build linux
  2. /*
  3. Copyright 2017 The Kubernetes Authors.
  4. Licensed under the Apache License, Version 2.0 (the "License");
  5. you may not use this file except in compliance with the License.
  6. You may obtain a copy of the License at
  7. http://www.apache.org/licenses/LICENSE-2.0
  8. Unless required by applicable law or agreed to in writing, software
  9. distributed under the License is distributed on an "AS IS" BASIS,
  10. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. See the License for the specific language governing permissions and
  12. limitations under the License.
  13. */
  14. package e2e_node
  15. import (
  16. "fmt"
  17. "io/ioutil"
  18. "path/filepath"
  19. "strconv"
  20. "strings"
  21. "time"
  22. "k8s.io/api/core/v1"
  23. "k8s.io/apimachinery/pkg/api/resource"
  24. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  25. kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
  26. "k8s.io/kubernetes/pkg/kubelet/cm"
  27. "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
  28. "k8s.io/kubernetes/test/e2e/framework"
  29. . "github.com/onsi/ginkgo"
  30. . "github.com/onsi/gomega"
  31. )
  32. func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) {
  33. initialConfig.EnforceNodeAllocatable = []string{"pods", kubeReservedCgroup, systemReservedCgroup}
  34. initialConfig.SystemReserved = map[string]string{
  35. string(v1.ResourceCPU): "100m",
  36. string(v1.ResourceMemory): "100Mi",
  37. string(pidlimit.PIDs): "1000",
  38. }
  39. initialConfig.KubeReserved = map[string]string{
  40. string(v1.ResourceCPU): "100m",
  41. string(v1.ResourceMemory): "100Mi",
  42. string(pidlimit.PIDs): "738",
  43. }
  44. initialConfig.EvictionHard = map[string]string{"memory.available": "100Mi"}
  45. // Necessary for allocatable cgroup creation.
  46. initialConfig.CgroupsPerQOS = true
  47. initialConfig.KubeReservedCgroup = kubeReservedCgroup
  48. initialConfig.SystemReservedCgroup = systemReservedCgroup
  49. }
// Registers the node container manager suite with the e2e framework. The
// suite is [Serial] because runTest reconfigures the kubelet and manipulates
// node-level cgroups, which cannot run concurrently with other tests.
var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
	f := framework.NewDefaultFramework("node-container-manager")
	Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
		It("sets up the node and runs the test", func() {
			framework.ExpectNoError(runTest(f))
		})
	})
})
  58. func expectFileValToEqual(filePath string, expectedValue, delta int64) error {
  59. out, err := ioutil.ReadFile(filePath)
  60. if err != nil {
  61. return fmt.Errorf("failed to read file %q", filePath)
  62. }
  63. actual, err := strconv.ParseInt(strings.TrimSpace(string(out)), 10, 64)
  64. if err != nil {
  65. return fmt.Errorf("failed to parse output %v", err)
  66. }
  67. // Ensure that values are within a delta range to work around rounding errors.
  68. if (actual < (expectedValue - delta)) || (actual > (expectedValue + delta)) {
  69. return fmt.Errorf("Expected value at %q to be between %d and %d. Got %d", filePath, (expectedValue - delta), (expectedValue + delta), actual)
  70. }
  71. return nil
  72. }
  73. func getAllocatableLimits(cpu, memory, pids string, capacity v1.ResourceList) (*resource.Quantity, *resource.Quantity, *resource.Quantity) {
  74. var allocatableCPU, allocatableMemory, allocatablePIDs *resource.Quantity
  75. // Total cpu reservation is 200m.
  76. for k, v := range capacity {
  77. if k == v1.ResourceCPU {
  78. allocatableCPU = v.Copy()
  79. allocatableCPU.Sub(resource.MustParse(cpu))
  80. }
  81. if k == v1.ResourceMemory {
  82. allocatableMemory = v.Copy()
  83. allocatableMemory.Sub(resource.MustParse(memory))
  84. }
  85. }
  86. // Process IDs are not a node allocatable, so we have to do this ad hoc
  87. pidlimits, err := pidlimit.Stats()
  88. if err == nil && pidlimits != nil && pidlimits.MaxPID != nil {
  89. allocatablePIDs = resource.NewQuantity(int64(*pidlimits.MaxPID), resource.DecimalSI)
  90. allocatablePIDs.Sub(resource.MustParse(pids))
  91. }
  92. return allocatableCPU, allocatableMemory, allocatablePIDs
  93. }
// Names (relative to the cgroup root) of the temporary cgroups this test
// creates to hold the kube-reserved and system-reserved reservations.
const (
	kubeReservedCgroup = "kube-reserved"
	systemReservedCgroup = "system-reserved"
)
  98. func createIfNotExists(cm cm.CgroupManager, cgroupConfig *cm.CgroupConfig) error {
  99. if !cm.Exists(cgroupConfig.Name) {
  100. if err := cm.Create(cgroupConfig); err != nil {
  101. return err
  102. }
  103. }
  104. return nil
  105. }
  106. func createTemporaryCgroupsForReservation(cgroupManager cm.CgroupManager) error {
  107. // Create kube reserved cgroup
  108. cgroupConfig := &cm.CgroupConfig{
  109. Name: cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup),
  110. }
  111. if err := createIfNotExists(cgroupManager, cgroupConfig); err != nil {
  112. return err
  113. }
  114. // Create system reserved cgroup
  115. cgroupConfig.Name = cm.NewCgroupName(cm.RootCgroupName, systemReservedCgroup)
  116. return createIfNotExists(cgroupManager, cgroupConfig)
  117. }
  118. func destroyTemporaryCgroupsForReservation(cgroupManager cm.CgroupManager) error {
  119. // Create kube reserved cgroup
  120. cgroupConfig := &cm.CgroupConfig{
  121. Name: cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup),
  122. }
  123. if err := cgroupManager.Destroy(cgroupConfig); err != nil {
  124. return err
  125. }
  126. cgroupConfig.Name = cm.NewCgroupName(cm.RootCgroupName, systemReservedCgroup)
  127. return cgroupManager.Destroy(cgroupConfig)
  128. }
// runTest reconfigures the kubelet with the reservations from
// setDesiredConfiguration, then verifies that:
//   - the node allocatable ("kubepods") cgroup exists and its cpu.shares,
//     memory.limit_in_bytes, and pids.max match capacity minus reservations;
//   - the scheduler-visible node allocatable accounts for eviction thresholds;
//   - the kube-reserved and system-reserved cgroups carry their configured
//     cpu/memory/pid limits.
// The original kubelet configuration and the temporary cgroups are restored
// via defers before returning.
func runTest(f *framework.Framework) error {
	var oldCfg *kubeletconfig.KubeletConfiguration
	subsystems, err := cm.GetCgroupSubsystems()
	if err != nil {
		return err
	}
	// Get current kubelet configuration so it can be restored on exit.
	oldCfg, err = getCurrentKubeletConfig()
	if err != nil {
		return err
	}
	// Create a cgroup manager object for manipulating cgroups.
	cgroupManager := cm.NewCgroupManager(subsystems, oldCfg.CgroupDriver)
	// Defers run LIFO: the kubelet config is restored first, then the
	// temporary cgroups are destroyed. Registered before creation so cleanup
	// still runs if setup below partially succeeds.
	defer destroyTemporaryCgroupsForReservation(cgroupManager)
	defer func() {
		if oldCfg != nil {
			framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
		}
	}()
	if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil {
		return err
	}
	newCfg := oldCfg.DeepCopy()
	// Change existing kubelet configuration
	setDesiredConfiguration(newCfg)
	// Set the new kubelet configuration.
	err = setKubeletConfiguration(f, newCfg)
	if err != nil {
		return err
	}
	// Set new config and current config.
	currentConfig := newCfg
	expectedNAPodCgroup := cm.ParseCgroupfsToCgroupName(currentConfig.CgroupRoot)
	expectedNAPodCgroup = cm.NewCgroupName(expectedNAPodCgroup, "kubepods")
	if !cgroupManager.Exists(expectedNAPodCgroup) {
		return fmt.Errorf("Expected Node Allocatable Cgroup Does not exist")
	}
	// TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings.
	// The node may not have updated capacity and allocatable yet, so check that it happens eventually.
	Eventually(func() error {
		nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
		if err != nil {
			return err
		}
		// Node e2e runs against a single node; anything else is unexpected.
		if len(nodeList.Items) != 1 {
			return fmt.Errorf("Unexpected number of node objects for node e2e. Expects only one node: %+v", nodeList)
		}
		node := nodeList.Items[0]
		capacity := node.Status.Capacity
		// Reservations are kube-reserved + system-reserved:
		// 100m+100m cpu, 100Mi+100Mi memory, 738+1000 pids.
		allocatableCPU, allocatableMemory, allocatablePIDs := getAllocatableLimits("200m", "200Mi", "1738", capacity)
		// Total Memory reservation is 200Mi excluding eviction thresholds.
		// Expect CPU shares on node allocatable cgroup to equal allocatable.
		if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], "kubepods", "cpu.shares"), int64(cm.MilliCPUToShares(allocatableCPU.MilliValue())), 10); err != nil {
			return err
		}
		// Expect Memory limit on node allocatable cgroup to equal allocatable.
		if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], "kubepods", "memory.limit_in_bytes"), allocatableMemory.Value(), 0); err != nil {
			return err
		}
		// Expect PID limit on node allocatable cgroup to equal allocatable.
		if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], "kubepods", "pids.max"), allocatablePIDs.Value(), 0); err != nil {
			return err
		}
		// Check that Allocatable reported to scheduler includes eviction thresholds.
		schedulerAllocatable := node.Status.Allocatable
		// Memory allocatable should take into account eviction thresholds:
		// 200Mi reserved + 100Mi hard eviction = 300Mi.
		// Process IDs are not a scheduler resource and as such cannot be tested here.
		allocatableCPU, allocatableMemory, _ = getAllocatableLimits("200m", "300Mi", "1738", capacity)
		// Expect allocatable to include all resources in capacity.
		if len(schedulerAllocatable) != len(capacity) {
			return fmt.Errorf("Expected all resources in capacity to be found in allocatable")
		}
		// CPU based evictions are not supported.
		if allocatableCPU.Cmp(schedulerAllocatable[v1.ResourceCPU]) != 0 {
			return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable[v1.ResourceCPU], capacity[v1.ResourceCPU])
		}
		if allocatableMemory.Cmp(schedulerAllocatable[v1.ResourceMemory]) != 0 {
			return fmt.Errorf("Unexpected memory allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableMemory, schedulerAllocatable[v1.ResourceMemory], capacity[v1.ResourceMemory])
		}
		return nil
	}, time.Minute, 5*time.Second).Should(BeNil())
	kubeReservedCgroupName := cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup)
	if !cgroupManager.Exists(kubeReservedCgroupName) {
		return fmt.Errorf("Expected kube reserved cgroup Does not exist")
	}
	// Expect CPU shares on kube reserved cgroup to equal it's reservation which is `100m`.
	kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceCPU)])
	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupManager.Name(kubeReservedCgroupName), "cpu.shares"), int64(cm.MilliCPUToShares(kubeReservedCPU.MilliValue())), 10); err != nil {
		return err
	}
	// Expect Memory limit kube reserved cgroup to equal configured value `100Mi`.
	kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceMemory)])
	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupManager.Name(kubeReservedCgroupName), "memory.limit_in_bytes"), kubeReservedMemory.Value(), 0); err != nil {
		return err
	}
	// Expect process ID limit kube reserved cgroup to equal configured value `738`.
	kubeReservedPIDs := resource.MustParse(currentConfig.KubeReserved[string(pidlimit.PIDs)])
	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], cgroupManager.Name(kubeReservedCgroupName), "pids.max"), kubeReservedPIDs.Value(), 0); err != nil {
		return err
	}
	systemReservedCgroupName := cm.NewCgroupName(cm.RootCgroupName, systemReservedCgroup)
	if !cgroupManager.Exists(systemReservedCgroupName) {
		return fmt.Errorf("Expected system reserved cgroup Does not exist")
	}
	// Expect CPU shares on system reserved cgroup to equal it's reservation which is `100m`.
	systemReservedCPU := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceCPU)])
	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupManager.Name(systemReservedCgroupName), "cpu.shares"), int64(cm.MilliCPUToShares(systemReservedCPU.MilliValue())), 10); err != nil {
		return err
	}
	// Expect Memory limit on system reserved cgroup to equal configured value `100Mi`.
	systemReservedMemory := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceMemory)])
	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupManager.Name(systemReservedCgroupName), "memory.limit_in_bytes"), systemReservedMemory.Value(), 0); err != nil {
		return err
	}
	// Expect process ID limit system reserved cgroup to equal configured value `1000`.
	systemReservedPIDs := resource.MustParse(currentConfig.SystemReserved[string(pidlimit.PIDs)])
	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], cgroupManager.Name(systemReservedCgroupName), "pids.max"), systemReservedPIDs.Value(), 0); err != nil {
		return err
	}
	return nil
}