fit_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package noderesources

import (
    "context"
    "fmt"
    "reflect"
    "testing"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/apimachinery/pkg/runtime"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/features"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

var (
    extendedResourceA     = v1.ResourceName("example.com/aaa")
    extendedResourceB     = v1.ResourceName("example.com/bbb")
    kubernetesIOResourceA = v1.ResourceName("kubernetes.io/something")
    kubernetesIOResourceB = v1.ResourceName("subdomain.kubernetes.io/something")
    hugePageResourceA     = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
)

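// makeResources builds the node Capacity used by the tests below, and
// makeAllocatableResources builds the matching Allocatable list. Values are
// plain test units, e.g. makeResources(10, 20, 32, 5, 20, 5) means 10
// milli-CPU, 20 memory, 32 pods, 5 extendedResourceA, 20 ephemeral storage
// and 5 hugePageResourceA.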
func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
    return v1.NodeResources{
        Capacity: v1.ResourceList{
            v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
            v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
            v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
            extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
            v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
            hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
        },
    }
}

func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
    return v1.ResourceList{
        v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
        v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
        v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
        extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
        v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
        hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
    }
}

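// newResourcePod returns a pod with one container per Resource argument, each
// container requesting exactly that Resource. newResourceInitPod and
// newResourceOverheadPod below decorate such a pod with init containers and a
// pod-level Overhead, respectively.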
func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
    containers := []v1.Container{}
    for _, req := range usage {
        containers = append(containers, v1.Container{
            Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
        })
    }
    return &v1.Pod{
        Spec: v1.PodSpec{
            Containers: containers,
        },
    }
}

func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Pod {
    pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
    return pod
}

func newResourceOverheadPod(pod *v1.Pod, overhead v1.ResourceList) *v1.Pod {
    pod.Spec.Overhead = overhead
    return pod
}

func getErrReason(rn v1.ResourceName) string {
    return fmt.Sprintf("Insufficient %v", rn)
}

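// TestEnoughRequests runs the plugin's PreFilter and Filter against a node whose
// capacity and allocatable are makeResources(10, 20, 32, 5, 20, 5). Reading the
// second case as an example: the incoming pod asks for 1 CPU / 1 memory, the
// node's existing pod already uses 10 CPU / 20 memory, so Filter reports
// Insufficient cpu and Insufficient memory, and Fits returns the matching
// InsufficientResource entries (requested, used, capacity).
//
// The "skip checking ignored extended resource" case passes raw JSON to the
// plugin; a minimal sketch of that wiring, mirroring the loop below (names are
// taken from this file, the sketch itself is illustrative only):
//
//	args := &runtime.Unknown{Raw: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`)}
//	p, _ := NewFit(args, nil) // requests for example.com/bbb are then skipped by Filter and Fits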
func TestEnoughRequests(t *testing.T) {
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
    enoughPodsTests := []struct {
        pod                       *v1.Pod
        nodeInfo                  *schedulernodeinfo.NodeInfo
        name                      string
        ignoredResources          []byte
        wantInsufficientResources []InsufficientResource
        wantStatus                *framework.Status
    }{
        {
            pod: &v1.Pod{},
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
            name: "no resources requested always fits",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
            name: "too many resources fails",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
            wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
            name: "too many resources fails due to init container cpu",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
            wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
            name: "too many resources fails due to highest init container cpu",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
            wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
            name: "too many resources fails due to init container memory",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
            wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
            name: "too many resources fails due to highest init container memory",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
            wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
            name: "init container fits because it's the max, not sum, of containers and init containers",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
            name: "multiple init containers fit because it's the max, not sum, of containers and init containers",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
            name: "both resources fit",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
            name: "one resource memory fits",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
            wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}},
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
            name: "one resource cpu fits",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
            wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}},
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
            name: "equal edge case",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 4, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
            name: "equal edge case for init container",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})),
            name: "extended resource fits",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})),
            name: "extended resource fits for init container",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourcePod(
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
            name: "extended resource capacity enforced",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
            wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
            name: "extended resource capacity enforced for init container",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
            wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
        },
        {
            pod: newResourcePod(
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
            name: "extended resource allocatable enforced",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
            wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
            name: "extended resource allocatable enforced for init container",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
            wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
        },
        {
            pod: newResourcePod(
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
            name: "extended resource allocatable enforced for multiple containers",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
            wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
            name: "extended resource allocatable admits multiple init containers",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
            name: "extended resource allocatable enforced for multiple init containers",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
            wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
        },
        {
            pod: newResourcePod(
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
            name: "extended resource allocatable enforced for unknown resource",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
            wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
            name: "extended resource allocatable enforced for unknown resource for init container",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
            wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
        },
        {
            pod: newResourcePod(
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
            name: "kubernetes.io resource capacity enforced",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
            wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
            name: "kubernetes.io resource capacity enforced for init container",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
            wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}},
        },
        {
            pod: newResourcePod(
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
            name: "hugepages resource capacity enforced",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
            wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
            name: "hugepages resource capacity enforced for init container",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
            wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
        },
        {
            pod: newResourcePod(
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
            name: "hugepages resource allocatable enforced for multiple containers",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
            wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}},
        },
        {
            pod: newResourcePod(
                schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
            ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`),
            name: "skip checking ignored extended resource",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourceOverheadPod(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
                v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
            ),
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
            name: "resources + pod overhead fits",
            wantInsufficientResources: []InsufficientResource{},
        },
        {
            pod: newResourceOverheadPod(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
                v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
            ),
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
            name: "requests + overhead does not fit for memory",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
            wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}},
        },
    }
    for _, test := range enoughPodsTests {
        t.Run(test.name, func(t *testing.T) {
            node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
            test.nodeInfo.SetNode(&node)
            args := &runtime.Unknown{Raw: test.ignoredResources}
            p, _ := NewFit(args, nil)
            cycleState := framework.NewCycleState()
            preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
            if !preFilterStatus.IsSuccess() {
                t.Errorf("prefilter failed with status: %v", preFilterStatus)
            }
            gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
            if !reflect.DeepEqual(gotStatus, test.wantStatus) {
                t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
            }
            gotInsufficientResources := Fits(test.pod, test.nodeInfo, p.(*Fit).ignoredResources)
            if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
                t.Errorf("insufficient resources do not match: %v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
            }
        })
    }
}

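// TestPreFilterDisabled checks the failure mode when Filter runs without
// PreFilter having populated the cycle state: the plugin is expected to return
// an Error status rather than an Unschedulable one.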
func TestPreFilterDisabled(t *testing.T) {
    pod := &v1.Pod{}
    nodeInfo := schedulernodeinfo.NewNodeInfo()
    node := v1.Node{}
    nodeInfo.SetNode(&node)
    p, _ := NewFit(nil, nil)
    cycleState := framework.NewCycleState()
    gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, pod, nodeInfo)
    wantStatus := framework.NewStatus(framework.Error, `error reading "PreFilterNodeResourcesFit" from cycleState: not found`)
    if !reflect.DeepEqual(gotStatus, wantStatus) {
        t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus)
    }
}

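// TestNotEnoughRequests caps allocatable pods at 1 via
// makeAllocatableResources(10, 20, 1, 0, 0, 0); since every nodeInfo below
// already holds one pod, each case fails with "Too many pods" regardless of
// how well CPU and memory would fit.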
func TestNotEnoughRequests(t *testing.T) {
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
    notEnoughPodsTests := []struct {
        pod        *v1.Pod
        nodeInfo   *schedulernodeinfo.NodeInfo
        fits       bool
        name       string
        wantStatus *framework.Status
    }{
        {
            pod: &v1.Pod{},
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
            name: "even without specified resources predicate fails when there's no space for additional pod",
            wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
            name: "even if both resources fit predicate fails when there's no space for additional pod",
            wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
            name: "even for equal edge case predicate fails when there's no space for additional pod",
            wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
        },
        {
            pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
            name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
            wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
        },
    }
    for _, test := range notEnoughPodsTests {
        t.Run(test.name, func(t *testing.T) {
            node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
            test.nodeInfo.SetNode(&node)
            p, _ := NewFit(nil, nil)
            cycleState := framework.NewCycleState()
            preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
            if !preFilterStatus.IsSuccess() {
                t.Errorf("prefilter failed with status: %v", preFilterStatus)
            }
            gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
            if !reflect.DeepEqual(gotStatus, test.wantStatus) {
                t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
            }
        })
    }
}

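// TestStorageRequests exercises ephemeral-storage requests against the 20
// units of allocatable ephemeral storage configured below, alongside a couple
// of CPU/memory sanity cases.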
func TestStorageRequests(t *testing.T) {
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
    storagePodsTests := []struct {
        pod        *v1.Pod
        nodeInfo   *schedulernodeinfo.NodeInfo
        name       string
        wantStatus *framework.Status
    }{
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
            name: "due to container scratch disk",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 10})),
            name: "pod fit",
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 25}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
            name: "storage ephemeral local storage request exceeds allocatable",
            wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)),
        },
        {
            pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 10}),
            nodeInfo: schedulernodeinfo.NewNodeInfo(
                newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
            name: "pod fits",
        },
    }
    for _, test := range storagePodsTests {
        t.Run(test.name, func(t *testing.T) {
            node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
            test.nodeInfo.SetNode(&node)
            p, _ := NewFit(nil, nil)
            cycleState := framework.NewCycleState()
            preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
            if !preFilterStatus.IsSuccess() {
                t.Errorf("prefilter failed with status: %v", preFilterStatus)
            }
            gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
            if !reflect.DeepEqual(gotStatus, test.wantStatus) {
                t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
            }
        })
    }
}