predicate_test.go 8.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261
  1. /*
  2. Copyright 2018 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package lifecycle
  14. import (
  15. "reflect"
  16. "testing"
  17. "k8s.io/api/core/v1"
  18. "k8s.io/apimachinery/pkg/api/resource"
  19. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  20. v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
  21. "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
  22. "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
  23. schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
  24. )
var (
	// quantity is a reusable 1-unit (decimal) amount used as the value for
	// every extended-resource entry in the makeTestPod/makeTestNode fixtures.
	quantity = *resource.NewQuantity(1, resource.DecimalSI)
)
  28. func TestRemoveMissingExtendedResources(t *testing.T) {
  29. for _, test := range []struct {
  30. desc string
  31. pod *v1.Pod
  32. node *v1.Node
  33. expectedPod *v1.Pod
  34. }{
  35. {
  36. desc: "requests in Limits should be ignored",
  37. pod: makeTestPod(
  38. v1.ResourceList{}, // Requests
  39. v1.ResourceList{"foo.com/bar": quantity}, // Limits
  40. ),
  41. node: makeTestNode(
  42. v1.ResourceList{"foo.com/baz": quantity}, // Allocatable
  43. ),
  44. expectedPod: makeTestPod(
  45. v1.ResourceList{}, // Requests
  46. v1.ResourceList{"foo.com/bar": quantity}, // Limits
  47. ),
  48. },
  49. {
  50. desc: "requests for resources available in node should not be removed",
  51. pod: makeTestPod(
  52. v1.ResourceList{"foo.com/bar": quantity}, // Requests
  53. v1.ResourceList{}, // Limits
  54. ),
  55. node: makeTestNode(
  56. v1.ResourceList{"foo.com/bar": quantity}, // Allocatable
  57. ),
  58. expectedPod: makeTestPod(
  59. v1.ResourceList{"foo.com/bar": quantity}, // Requests
  60. v1.ResourceList{}), // Limits
  61. },
  62. {
  63. desc: "requests for resources unavailable in node should be removed",
  64. pod: makeTestPod(
  65. v1.ResourceList{"foo.com/bar": quantity}, // Requests
  66. v1.ResourceList{}, // Limits
  67. ),
  68. node: makeTestNode(
  69. v1.ResourceList{"foo.com/baz": quantity}, // Allocatable
  70. ),
  71. expectedPod: makeTestPod(
  72. v1.ResourceList{}, // Requests
  73. v1.ResourceList{}, // Limits
  74. ),
  75. },
  76. } {
  77. nodeInfo := schedulernodeinfo.NewNodeInfo()
  78. nodeInfo.SetNode(test.node)
  79. pod := removeMissingExtendedResources(test.pod, nodeInfo)
  80. if !reflect.DeepEqual(pod, test.expectedPod) {
  81. t.Errorf("%s: Expected pod\n%v\ngot\n%v\n", test.desc, test.expectedPod, pod)
  82. }
  83. }
  84. }
  85. func makeTestPod(requests, limits v1.ResourceList) *v1.Pod {
  86. return &v1.Pod{
  87. Spec: v1.PodSpec{
  88. Containers: []v1.Container{
  89. {
  90. Resources: v1.ResourceRequirements{
  91. Requests: requests,
  92. Limits: limits,
  93. },
  94. },
  95. },
  96. },
  97. }
  98. }
  99. func makeTestNode(allocatable v1.ResourceList) *v1.Node {
  100. return &v1.Node{
  101. Status: v1.NodeStatus{
  102. Allocatable: allocatable,
  103. },
  104. }
  105. }
var (
	// extendedResourceA is an arbitrary extended-resource name used by the
	// capacity/allocatable fixtures below.
	extendedResourceA = v1.ResourceName("example.com/aaa")
	// hugePageResourceA is the canonical resource name for 2Mi huge pages.
	hugePageResourceA = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
)
  110. func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
  111. return v1.NodeResources{
  112. Capacity: v1.ResourceList{
  113. v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
  114. v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
  115. v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
  116. extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
  117. v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
  118. hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
  119. },
  120. }
  121. }
  122. func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
  123. return v1.ResourceList{
  124. v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
  125. v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
  126. v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
  127. extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
  128. v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
  129. hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
  130. }
  131. }
  132. func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
  133. containers := []v1.Container{}
  134. for _, req := range usage {
  135. containers = append(containers, v1.Container{
  136. Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
  137. })
  138. }
  139. return &v1.Pod{
  140. Spec: v1.PodSpec{
  141. Containers: containers,
  142. },
  143. }
  144. }
  145. func newPodWithPort(hostPorts ...int) *v1.Pod {
  146. networkPorts := []v1.ContainerPort{}
  147. for _, port := range hostPorts {
  148. networkPorts = append(networkPorts, v1.ContainerPort{HostPort: int32(port)})
  149. }
  150. return &v1.Pod{
  151. Spec: v1.PodSpec{
  152. Containers: []v1.Container{
  153. {
  154. Ports: networkPorts,
  155. },
  156. },
  157. },
  158. }
  159. }
// TestGeneralPredicates exercises GeneralPredicates across the admission
// checks it aggregates: resource fit, node-name match, and host-port
// conflict. Each case supplies the candidate pod, a nodeInfo seeded with any
// already-running pods, the node itself, and the expected fit verdict plus
// failure reasons. Reason slices are compared with reflect.DeepEqual, so
// their order matters.
func TestGeneralPredicates(t *testing.T) {
	resourceTests := []struct {
		pod      *v1.Pod
		nodeInfo *schedulernodeinfo.NodeInfo
		node     *v1.Node
		fits     bool
		name     string
		wErr     error
		reasons  []PredicateFailureReason
	}{
		{
			// Empty pod: requests nothing, so it always fits.
			pod: &v1.Pod{},
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			fits: true,
			wErr: nil,
			name: "no resources/port/host requested always fits",
		},
		{
			// Existing pod uses 5 CPU / 19 memory; adding 8/10 would exceed
			// the 10/20 allocatable, so both resources are insufficient.
			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 10}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			fits: false,
			wErr: nil,
			reasons: []PredicateFailureReason{
				&InsufficientResourceError{ResourceName: v1.ResourceCPU, Requested: 8, Used: 5, Capacity: 10},
				&InsufficientResourceError{ResourceName: v1.ResourceMemory, Requested: 10, Used: 19, Capacity: 20},
			},
			name: "not enough cpu and memory resource",
		},
		{
			// Pod is pinned to machine2 but the node is machine1, so the
			// node-name predicate rejects it.
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					NodeName: "machine2",
				},
			},
			nodeInfo: schedulernodeinfo.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			fits:    false,
			wErr:    nil,
			reasons: []PredicateFailureReason{&PredicateFailureError{nodename.Name, nodename.ErrReason}},
			name:    "host not match",
		},
		{
			// Host port 123 is already taken by a running pod, so the
			// node-ports predicate rejects the new pod.
			pod:      newPodWithPort(123),
			nodeInfo: schedulernodeinfo.NewNodeInfo(newPodWithPort(123)),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			fits:    false,
			wErr:    nil,
			reasons: []PredicateFailureReason{&PredicateFailureError{nodeports.Name, nodeports.ErrReason}},
			name:    "hostport conflict",
		},
	}
	for _, test := range resourceTests {
		t.Run(test.name, func(t *testing.T) {
			test.nodeInfo.SetNode(test.node)
			reasons, err := GeneralPredicates(test.pod, test.nodeInfo)
			// The pod fits only when no predicate reported a reason and no
			// error occurred.
			fits := len(reasons) == 0 && err == nil
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if !fits && !reflect.DeepEqual(reasons, test.reasons) {
				t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.reasons)
			}
			if fits != test.fits {
				t.Errorf("expected: %v got %v", test.fits, fits)
			}
		})
	}
}